==> open-build-service-2.9.4/.github/ISSUE_TEMPLATE.md <==

Please only create issues/feature requests for the Open Build Service server here. GitHub issues are not meant to be used for support requests!

For support, contact our OBS community; they meet online in a chat/mailing list. There you can ask questions if you have trouble understanding something, seek advice and mingle with other OBS community members. See http://openbuildservice.org/support/ for further information.

Having problems with the OBS command line interface osc? It has its own issue tracker: https://github.com/openSUSE/osc

Having problems with the obs-build tool that controls the build process? It has its own issue tracker: https://github.com/openSUSE/obs-build

Good to go? Then please remove these lines above, including this one, and help us understand your issue by answering the following:

Issue/Feature description
=========================

Expected result
===============

How to Reproduce
================

1. When I want to do X
2. and Y is given
3. Z is happening

Further information
===================

* A link to an OBS instance showing the issue
* Exact server version (in case a private instance is used)

==> open-build-service-2.9.4/.gitignore <==

# Project specific ignores IN ALPHABETIC ORDER
/docs/api/html
/docs/api/restility/doc
/docs/api/restility/log
/docs/api/restility/tmp
/docs/api/*.test.config
/docs/api/books/en/addons/.validate
/docs/api/books/en/addons/kiwi.xml
/docs/api/books/en/addons/kiwi.html
/docs/api/books/en/autobuild
/docs/api/books/en/html
/docs/api/books/en/HTML.manifest
/docs/api/books/en/images/print
/docs/api/books/en/images/online
/docs/api/books/en/profiled
/docs/api/books/en/tmp
/docs/api/books/en/wiki
/docs/api/books/en/*.diff
/docs/api/books/en/*.fo
/docs/api/books/en/*.pdf
/docs/api/books/en/*.ps
/docs/api/books/en/*.tar.bz2
/docs/api/books/en/*.tar.gz
/docs/api/books/en/*.zip
/src/api/config/database.yml
/src/api/config/environments/development.*.rb
/src/api/config/options.yml
/src/api/config/secret.key
/src/api/config/thinking_sphinx.yml
/src/api/db/sphinx
/src/api/config/*.sphinx.conf
/src/api/coverage
/src/api/coverage.data
/src/api/db/schema.rb
/src/api/files/distributions.xml
/src/api/log
/src/api/public/assets
/src/api/mkmf.log
/src/api/tmp
/src/api/spec/examples.txt
/src/backend/blib
/src/backend/BSConfig.pm
/src/backend/BSSolv.bs
/src/backend/BSSolv.c
/src/backend/BSSolv.o
/src/backend/BSSolv.so
/src/backend/Makefile
/src/backend/pm_to_blib
/src/backend/sat-solver
/src/backend/t/tmp
docker-compose.override.yml

# All the other crap that fits nowhere specifically
*~
*.bak
*.tmp
*.vagrant
.*.sw*
.directory
.o
.project
.so
nbproject
*.vim
*.pid
*.iml
*.ipr
*.iws
.idea/
.kanku/

==> open-build-service-2.9.4/.gitmodules <==

[submodule "src/backend/build"]
	path = src/backend/build
	url = git://github.com/openSUSE/obs-build.git
==> open-build-service-2.9.4/.jshintignore <==

src/api/app/assets/javascripts/webui/application/cm2/
src/api/vendor/assets/javascripts/*.min.js
src/api/vendor/bundle/
src/api/lib/backend/doc

==> open-build-service-2.9.4/.rubocop.yml <==

require: rubocop-rspec

inherit_from: .rubocop_todo.yml

AllCops:
  UseCache: true
  CacheRootDirectory: src/api/tmp/rubocop_cache
  MaxFilesInCache: 2000
  Exclude:
    - 'src/api/tmp/**/*'
    - 'src/api/lib/templates/**/*'
    - 'src/api/vendor/bundle/**/*'
    - 'docs/api/restility/**/*'
    - 'src/api/test/fixtures/backend/**/*'
    - 'src/api/files/*'
    - 'dist/**/*'
    - 'src/backend/**/*'

#################### Layout ###########################

# Align the elements of a hash literal if they span more than one line.
Layout/AlignHash:
  # Alignment of entries using hash rocket as separator.
  EnforcedHashRocketStyle: table
  # Alignment of entries using colon as separator.
  EnforcedColonStyle: table
  # Select whether hashes that are the last argument in a method call should be inspected.
  EnforcedLastArgumentHashStyle: ignore_implicit

# We decided to disable this cop because we couldn't reach an agreement.
Layout/DotPosition:
  Enabled: false

#################### Style ###########################

# Finds uses of alias where alias_method would be more appropriate (or is simply preferred due to configuration), and vice versa.
# It also finds uses of alias :symbol rather than alias bareword.
Style/Alias:
  EnforcedStyle: 'prefer_alias_method'

# Use ` or %x around command literals.
Style/CommandLiteral:
  EnforcedStyle: percent_x

Style/Documentation:
  Enabled: false

# We need to allow some global variables related to RabbitMQ.
Style/GlobalVars:
  AllowedVariables: ['$rabbitmq_conn', '$rabbitmq_exchange', '$rabbitmq_channel']

# Checks for chaining of a block after another block that spans multiple lines.
# We disabled this cop because of Rantly.
Style/MultilineBlockChain:
  Exclude:
    - 'src/api/spec/**/*'

# Checks for redundant `return` expressions.
Style/RedundantReturn:
  Enabled: false

Style/SymbolArray:
  EnforcedStyle: brackets

Style/WordArray:
  EnforcedStyle: brackets

##################### Metrics ##################################

# Checks if the length of a class exceeds some maximum value.
Metrics/ClassLength:
  Severity: refactor

# Checks the length of lines in the source code.
Metrics/LineLength:
  Max: 150
  IgnoredPatterns: ['\A#']

# Checks if the length of a module exceeds some maximum value.
Metrics/ModuleLength:
  Severity: refactor

##################### Lint ##################################

# Align ends correctly.
Lint/EndAlignment:
  EnforcedStyleAlignWith: variable

##################### Rails ##################################

Rails:
  Enabled: true

# It is currently not possible to enable this cop because we have several overwritten methods.
Rails/DynamicFindBy:
  Enabled: false

# Enforces that 'exit' calls are not used.
Rails/Exit:
  Exclude:
    - 'src/api/lib/memory_dumper.rb'

Rails/HasAndBelongsToMany:
  Enabled: false

# Checks for the use of output calls like puts and print.
Rails/Output:
  Exclude:
    - 'src/api/app/jobs/*'
    - 'src/api/config/*'
    - 'src/api/db/**/*'
    - 'src/api/lib/**/*'

==> open-build-service-2.9.4/.rubocop_todo.yml <==

# This configuration was generated by
# `rubocop --auto-gen-config`
# on 2018-01-23 15:08:32 +0000 using RuboCop version 0.51.0.
# The point is for the user to remove these configuration records
# one by one as the offenses are removed from the code base.
# Note that changes in the inspected code, or installation of new
# versions of RuboCop, may require this file to be generated again.

# Offense count: 19
Capybara/CurrentPathExpectation:
  Exclude:
    - 'src/api/spec/features/webui/groups_spec.rb'
    - 'src/api/spec/features/webui/packages_spec.rb'
    - 'src/api/spec/features/webui/projects_spec.rb'

# Offense count: 132
Capybara/FeatureMethods:
  Enabled: false

# Offense count: 6
FactoryBot/DynamicAttributeDefinedStatically:
  Exclude:
    - 'src/api/spec/factories/attrib_default_values.rb'
    - 'src/api/spec/factories/issue.rb'
    - 'src/api/spec/factories/issue_tracker.rb'

# Offense count: 3
Lint/DuplicateMethods:
  Exclude:
    - 'src/api/app/models/history_element.rb'
    - 'src/api/lib/activexml/node.rb'

# Offense count: 22
Lint/HandleExceptions:
  Enabled: false

# Offense count: 7
Lint/NonLocalExitFromIterator:
  Exclude:
    - 'src/api/app/controllers/build_controller.rb'
    - 'src/api/app/controllers/configurations_controller.rb'
    - 'src/api/app/controllers/webui/patchinfo_controller.rb'
    - 'src/api/app/models/bs_request.rb'
    - 'src/api/app/models/patchinfo.rb'
    - 'src/api/app/models/relationship.rb'

# Offense count: 5
Lint/RescueException:
  Exclude:
    - 'src/api/app/controllers/person_controller.rb'
    - 'src/api/app/controllers/public_controller.rb'
    - 'src/api/app/models/user_ldap_strategy.rb'
    - 'src/api/config/environment.rb'
    - 'src/api/lib/tasks/extract.rake'

# Offense count: 25
Lint/RescueWithoutErrorClass:
  Enabled: false

# Offense count: 1
Lint/ShadowedException:
  Exclude:
    - 'src/api/lib/activexml/transport.rb'

# Offense count: 40
Lint/UriEscapeUnescape:
  Enabled: false

# Offense count: 893
Metrics/AbcSize:
  Max: 239

# Offense count: 416
# Configuration parameters: CountComments, ExcludedMethods.
Metrics/BlockLength:
  Max: 697

# Offense count: 25
# Configuration parameters: CountBlocks.
Metrics/BlockNesting:
  Max: 6

# Offense count: 90
# Configuration parameters: CountComments.
Metrics/ClassLength:
  Max: 1146

# Offense count: 243
Metrics/CyclomaticComplexity:
  Max: 55

# Offense count: 948
# Configuration parameters: CountComments.
Metrics/MethodLength:
  Max: 264

# Offense count: 7
# Configuration parameters: CountComments.
Metrics/ModuleLength:
  Max: 341

# Offense count: 20
# Configuration parameters: CountKeywordArgs.
Metrics/ParameterLists:
  Max: 9

# Offense count: 207
Metrics/PerceivedComplexity:
  Max: 55

# Offense count: 14
Naming/AccessorMethodName:
  Exclude:
    - 'src/api/app/controllers/application_controller.rb'
    - 'src/api/app/controllers/person_controller.rb'
    - 'src/api/app/controllers/source_controller.rb'
    - 'src/api/app/controllers/statistics_controller.rb'
    - 'src/api/app/controllers/webui/patchinfo_controller.rb'
    - 'src/api/app/models/bs_request.rb'
    - 'src/api/app/models/bs_request_action.rb'
    - 'src/api/app/models/bs_request_action_maintenance_release.rb'
    - 'src/api/app/models/group.rb'
    - 'src/api/app/models/issue_tracker.rb'
    - 'src/api/app/models/package.rb'
    - 'src/api/app/models/user.rb'
    - 'src/api/test/functional/search_controller_test.rb'

# Offense count: 1
Naming/ConstantName:
  Exclude:
    - 'src/api/app/models/event/request.rb'

# Offense count: 28
# Configuration parameters: Blacklist.
# Blacklist: END, (?-mix:EO[A-Z]{1})
Naming/HeredocDelimiterNaming:
  Exclude:
    - 'src/api/app/models/package.rb'
    - 'src/api/db/seeds.rb'
    - 'src/api/spec/controllers/webui/package_controller_spec.rb'
    - 'src/api/spec/helpers/webui/flash_helper_spec.rb'
    - 'src/api/spec/models/project/update_from_xml_command_spec.rb'
    - 'src/api/test/functional/issue_trackers_controller_test.rb'
    - 'src/api/test/functional/source_controller_test.rb'
    - 'src/api/test/unit/project_test.rb'
    - 'src/api/test/unit/validator_test.rb'

# Offense count: 12
# Configuration parameters: EnforcedStyle, SupportedStyles.
# SupportedStyles: snake_case, camelCase
Naming/MethodName:
  Exclude:
    - 'src/api/app/controllers/source_controller.rb'
    - 'src/api/app/models/maintenance_incident.rb'
    - 'src/api/app/models/product.rb'
    - 'src/api/app/models/service.rb'
    - 'src/api/test/functional/maintenance_test.rb'
    - 'src/api/test/functional/search_controller_test.rb'
    - 'src/api/test/test_helper.rb'

# Offense count: 55
# Configuration parameters: NamePrefix, NamePrefixBlacklist, NameWhitelist, MethodDefinitionMacros.
# NamePrefix: is_, has_, have_
# NamePrefixBlacklist: is_, has_, have_
# NameWhitelist: is_a?
# MethodDefinitionMacros: define_method, define_singleton_method
Naming/PredicateName:
  Exclude:
    - 'spec/**/*'
    - 'src/api/app/helpers/webui/webui_helper.rb'
    - 'src/api/app/models/bs_request.rb'
    - 'src/api/app/models/bs_request_action.rb'
    - 'src/api/app/models/bs_request_action_maintenance_incident.rb'
    - 'src/api/app/models/bs_request_action_maintenance_release.rb'
    - 'src/api/app/models/bs_request_action_submit.rb'
    - 'src/api/app/models/channel.rb'
    - 'src/api/app/models/flag.rb'
    - 'src/api/app/models/package.rb'
    - 'src/api/app/models/patchinfo.rb'
    - 'src/api/app/models/project.rb'
    - 'src/api/app/models/repository.rb'
    - 'src/api/app/models/user.rb'
    - 'src/api/app/models/user_ldap_strategy.rb'
    - 'src/api/lib/activexml/node.rb'

# Offense count: 3
# Configuration parameters: EnforcedStyle, SupportedStyles.
# SupportedStyles: snake_case, normalcase, non_integer
Naming/VariableNumber:
  Exclude:
    - 'src/api/spec/features/webui/projects_spec.rb'
    - 'src/api/test/unit/project_test.rb'

# Offense count: 24
# Cop supports --auto-correct.
# Configuration parameters: AutoCorrect.
Performance/HashEachMethods:
  Enabled: false

# Offense count: 45
RSpec/AnyInstance:
  Enabled: false

# Offense count: 2
RSpec/BeforeAfterAll:
  Exclude:
    - 'spec/spec_helper.rb'
    - 'spec/rails_helper.rb'
    - 'spec/support/**/*.rb'
    - 'src/api/spec/models/relationship_spec.rb'

# Offense count: 417
# Configuration parameters: Prefixes.
# Prefixes: when, with, without
RSpec/ContextWording:
  Enabled: false

# Offense count: 7
RSpec/DescribeClass:
  Exclude:
    - 'src/api/spec/routing/api_matcher_spec.rb'
    - 'src/api/spec/routing/source/key_info_spec.rb'
    - 'src/api/spec/routing/webui/projects/image_templates_spec.rb'
    - 'src/api/spec/routing/webui/projects/public_key_spec.rb'
    - 'src/api/spec/routing/webui/projects/ssl_certificate_spec.rb'
    - 'src/api/spec/routing/webui/users/requests_spec.rb'
    - 'src/api/spec/routing/webui_matcher_spec.rb'

# Offense count: 158
# Configuration parameters: SkipBlocks, EnforcedStyle, SupportedStyles.
# SupportedStyles: described_class, explicit
RSpec/DescribedClass:
  Enabled: false

# Offense count: 35
RSpec/EmptyLineAfterFinalLet:
  Enabled: false

# Offense count: 20
RSpec/EmptyLineAfterSubject:
  Exclude:
    - 'src/api/spec/controllers/webui/cloud/upload_jobs_controller_spec.rb'
    - 'src/api/spec/controllers/webui/user_controller_spec.rb'
    - 'src/api/spec/models/backend_info_spec.rb'
    - 'src/api/spec/models/bs_request/find_for/user_spec.rb'
    - 'src/api/spec/models/bs_request_spec.rb'
    - 'src/api/spec/models/download_repository_spec.rb'
    - 'src/api/spec/models/review_spec.rb'
    - 'src/api/spec/support/shared_examples/features/flags_tables.rb'

# Offense count: 147
# Configuration parameters: Max.
RSpec/ExampleLength:
  Enabled: false

# Offense count: 18
# Configuration parameters: CustomTransform, IgnoredWords.
RSpec/ExampleWording:
  Exclude:
    - 'src/api/spec/controllers/webui/comments_controller_spec.rb'
    - 'src/api/spec/controllers/webui/packages/build_reason_controller_spec.rb'
    - 'src/api/spec/controllers/webui/packages/job_history_controller_spec.rb'
    - 'src/api/spec/controllers/webui/repositories_controller_spec.rb'
    - 'src/api/spec/jobs/update_package_meta_job_spec.rb'
    - 'src/api/spec/models/backend/file_spec.rb'
    - 'src/api/spec/models/branch_package_spec.rb'
    - 'src/api/spec/models/comment_spec.rb'
    - 'src/api/spec/models/project/update_from_xml_command_spec.rb'

# Offense count: 8
RSpec/ExpectActual:
  Exclude:
    - 'spec/routing/**/*'
    - 'src/api/spec/controllers/webui/webui_controller_spec.rb'
    - 'src/api/spec/helpers/webui/webui_helper_spec.rb'
    - 'src/api/spec/script/db_checker.rb'

# Offense count: 3
RSpec/ExpectInHook:
  Exclude:
    - 'src/api/spec/controllers/webui/project_controller_spec.rb'
    - 'src/api/spec/features/webui/projects_spec.rb'

# Offense count: 11
# Configuration parameters: CustomTransform, IgnoreMethods.
RSpec/FilePath:
  Exclude:
    - 'src/api/spec/controllers/webui/attributes_controller_spec.rb'
    - 'src/api/spec/controllers/webui/cloud/ec2/upload_job/logs_controller_spec.rb'
    - 'src/api/spec/controllers/webui/users/rss_token_controller_spec.rb'
    - 'src/api/spec/decorators/statistics/maintenance_statistic_decorator.rb'
    - 'src/api/spec/helpers/webui/build_result_helper.rb'
    - 'src/api/spec/jobs/bs_request_action_webui_infos_job.rb'
    - 'src/api/spec/jobs/project_log_rotate_job_spec.rb'
    - 'src/api/spec/models/cloud/params/ec2.rb'
    - 'src/api/spec/models/event/comment_spec.rb'
    - 'src/api/spec/models/image_template_spec.rb'
    - 'src/api/spec/script/db_checker.rb'

# Offense count: 7
# Configuration parameters: EnforcedStyle, SupportedStyles.
# SupportedStyles: implicit, each, example
RSpec/HookArgument:
  Exclude:
    - 'src/api/spec/models/branch_package_spec.rb'
    - 'src/api/spec/models/group_spec.rb'
    - 'src/api/spec/rails_helper.rb'
    - 'src/api/spec/support/database_cleaner.rb'

# Offense count: 13
# Configuration parameters: EnforcedStyle, SupportedStyles.
# SupportedStyles: is_expected, should
RSpec/ImplicitExpect:
  Exclude:
    - 'src/api/spec/controllers/webui/download_on_demand_controller_spec.rb'
    - 'src/api/spec/models/bs_request_action_spec.rb'
    - 'src/api/spec/models/project_spec.rb'
    - 'src/api/spec/models/repository_spec.rb'
    - 'src/api/spec/models/review_spec.rb'

# Offense count: 94
# Configuration parameters: AssignmentOnly.
RSpec/InstanceVariable:
  Enabled: false

# Offense count: 63
# Configuration parameters: EnforcedStyle, SupportedStyles.
# SupportedStyles: it_behaves_like, it_should_behave_like
RSpec/ItBehavesLike:
  Exclude:
    - 'src/api/spec/controllers/person_controller_spec.rb'
    - 'src/api/spec/controllers/webui/package_controller_spec.rb'
    - 'src/api/spec/controllers/webui/project_controller_spec.rb'
    - 'src/api/spec/controllers/webui/user_controller_spec.rb'
    - 'src/api/spec/helpers/webui/package_helper_spec.rb'
    - 'src/api/spec/models/bs_request_action_spec.rb'
    - 'src/api/spec/models/bs_request_spec.rb'
    - 'src/api/spec/models/project_status/pack_info_spec.rb'
    - 'src/api/spec/models/review_spec.rb'
    - 'src/api/spec/models/user_ldap_strategy_spec.rb'

# Offense count: 2
RSpec/IteratedExpectation:
  Exclude:
    - 'src/api/spec/models/user_spec.rb'
    - 'src/api/spec/script/db_checker.rb'

# Offense count: 73
RSpec/LeadingSubject:
  Enabled: false

# Offense count: 5
RSpec/LetBeforeExamples:
  Exclude:
    - 'src/api/spec/controllers/webui/request_controller_spec.rb'

# Offense count: 194
RSpec/LetSetup:
  Enabled: false

# Offense count: 5
# Configuration parameters: EnforcedStyle, SupportedStyles.
# SupportedStyles: have_received, receive
RSpec/MessageSpies:
  Exclude:
    - 'src/api/spec/controllers/webui/apidocs_controller_spec.rb'
    - 'src/api/spec/controllers/webui/project_controller_spec.rb'
    - 'src/api/spec/helpers/validation_helper_spec.rb'
    - 'src/api/spec/models/kiwi/image_spec.rb'

# Offense count: 270
RSpec/MultipleExpectations:
  Max: 15

# Offense count: 454
RSpec/NamedSubject:
  Enabled: false

# Offense count: 232
# Configuration parameters: Max.
RSpec/NestedGroups:
  Enabled: false

# Offense count: 18
# Configuration parameters: EnforcedStyle, SupportedStyles.
# SupportedStyles: not_to, to_not
RSpec/NotToNot:
  Exclude:
    - 'src/api/spec/controllers/person_controller_spec.rb'
    - 'src/api/spec/controllers/public_controller_spec.rb'
    - 'src/api/spec/controllers/webui/main_controller_spec.rb'
    - 'src/api/spec/controllers/webui/repositories_controller_spec.rb'
    - 'src/api/spec/controllers/webui/user_controller_spec.rb'
    - 'src/api/spec/helpers/validation_helper_spec.rb'
    - 'src/api/spec/helpers/webui/webui_helper_spec.rb'
    - 'src/api/spec/models/comment_spec.rb'
    - 'src/api/spec/models/package_spec.rb'
    - 'src/api/spec/models/user_spec.rb'
    - 'src/api/spec/routing/api_matcher_spec.rb'

# Offense count: 44
# Configuration parameters: Strict, EnforcedStyle, SupportedStyles.
# SupportedStyles: inflected, explicit
RSpec/PredicateMatcher:
  Exclude:
    - 'src/api/spec/controllers/webui/request_controller_spec.rb'
    - 'src/api/spec/models/attrib_value_spec.rb'
    - 'src/api/spec/models/backend/file_spec.rb'
    - 'src/api/spec/models/cloud/backend/upload_job_spec.rb'
    - 'src/api/spec/models/cloud/upload_job_spec.rb'
    - 'src/api/spec/models/event_subscription/generate_hash_for_subscriber_spec.rb'
    - 'src/api/spec/models/kiwi/image_spec.rb'
    - 'src/api/spec/models/project_spec.rb'
    - 'src/api/spec/models/review_spec.rb'
    - 'src/api/spec/models/user_spec.rb'

# Offense count: 4
RSpec/RepeatedDescription:
  Exclude:
    - 'src/api/spec/features/webui/projects_spec.rb'
    - 'src/api/spec/models/kiwi/repository_spec.rb'

# Offense count: 9
RSpec/RepeatedExample:
  Exclude:
    - 'src/api/spec/lib/backend/connection_helper_spec.rb'
    - 'src/api/spec/models/bs_request/find_for/user_spec.rb'
    - 'src/api/spec/models/kiwi/image_spec.rb'
    - 'src/api/spec/models/relationship_spec.rb'

# Offense count: 2
# Configuration parameters: EnforcedStyle, SupportedStyles.
# SupportedStyles: and_return, block
RSpec/ReturnFromStub:
  Exclude:
    - 'src/api/spec/controllers/webui/project_controller_spec.rb'

# Offense count: 9
RSpec/ScatteredLet:
  Exclude:
    - 'src/api/spec/controllers/webui/patchinfo_controller_spec.rb'
    - 'src/api/spec/controllers/webui/subscriptions_controller_spec.rb'
    - 'src/api/spec/models/bs_request_spec.rb'
    - 'src/api/spec/models/package_spec.rb'
    - 'src/api/spec/models/project_spec.rb'

# Offense count: 4
RSpec/ScatteredSetup:
  Exclude:
    - 'src/api/spec/controllers/webui/cloud/upload_jobs_controller_spec.rb'
    - 'src/api/spec/jobs/update_released_binaries_job_spec.rb'

# Offense count: 1
RSpec/SubjectStub:
  Exclude:
    - 'src/api/spec/models/kiwi/image_spec.rb'

# Offense count: 32
# Configuration parameters: IgnoreSymbolicNames.
RSpec/VerifiedDoubles:
  Exclude:
    - 'src/api/spec/decorators/statistics/maintenance_statistic_decorator.rb'
    - 'src/api/spec/features/webui/login_spec.rb'
    - 'src/api/spec/jobs/issue_tracker_fetch_issues_job_spec.rb'
    - 'src/api/spec/jobs/issue_tracker_update_issues_job_spec.rb'
    - 'src/api/spec/lib/authenticator_spec.rb'
    - 'src/api/spec/lib/backend/connection_helper_spec.rb'
    - 'src/api/spec/mixins/build_log_support_spec.rb'
    - 'src/api/spec/mixins/parse_package_diff_spec.rb'
    - 'src/api/spec/models/issue_tracker_spec.rb'
    - 'src/api/spec/models/user_ldap_strategy_spec.rb'
    - 'src/api/spec/models/user_spec.rb'
    - 'src/api/spec/support/shared_contexts/a_kerberos_mock.rb'
    - 'src/api/spec/support/shared_contexts/setup_ldap_mock.rb'
    - 'src/api/spec/support/shared_examples/a_ldap_connection.rb'

# Offense count: 83
Rails/FilePath:
  Enabled: false

# Offense count: 22
# Configuration parameters: Include.
# Include: app/models/**/*.rb
Rails/HasManyOrHasOneDependent:
  Exclude:
    - 'src/api/app/models/architecture.rb'
    - 'src/api/app/models/attrib.rb'
    - 'src/api/app/models/package.rb'
    - 'src/api/app/models/project.rb'
    - 'src/api/app/models/repository.rb'
    - 'src/api/app/models/review.rb'
    - 'src/api/app/models/static_permission.rb'
    - 'src/api/app/models/user.rb'

# Offense count: 30
Rails/OutputSafety:
  Exclude:
    - 'src/api/app/helpers/comment_helper.rb'
    - 'src/api/app/helpers/webui/package_helper.rb'
    - 'src/api/app/helpers/webui/project_helper.rb'
    - 'src/api/app/helpers/webui/request_helper.rb'
    - 'src/api/app/helpers/webui/webui_helper.rb'

# Offense count: 8
# Configuration parameters: Include.
# Include: db/migrate/*.rb
Rails/ReversibleMigration:
  Exclude:
    - 'src/api/db/migrate/20140210114542_remove_project_and_package_from_subscription.rb'
    - 'src/api/db/migrate/20140516182719_fix_configuration_register_enum.rb'
    - 'src/api/db/migrate/20160824132643_fix_bs_request_counter.rb'
    - 'src/api/db/migrate/20170103132257_change_project_package_name_to_string.rb'

# Offense count: 6
# Configuration parameters: Blacklist.
# Blacklist: decrement!, decrement_counter, increment!, increment_counter, toggle!, touch, update_all, update_attribute, update_column, update_columns, update_counters
Rails/SkipsModelValidations:
  Exclude:
    - 'src/api/db/migrate/20151030130011_mark_events.rb'
    - 'src/api/db/migrate/20161128115942_add_when_attribute_to_bs_request.rb'
    - 'src/api/db/migrate/20170621100321_add_channel_to_event_subscriptions.rb'
    - 'src/api/db/migrate/20170630144825_convert_tokens_to_service_tokens.rb'
    - 'src/api/spec/models/bs_request_action_spec.rb'
    - 'src/api/spec/models/review_spec.rb'

# Offense count: 101
# Configuration parameters: EnforcedStyle, SupportedStyles.
# SupportedStyles: strict, flexible
Rails/TimeZone:
  Enabled: false

# Offense count: 68
# Configuration parameters: EnforcedStyle, SupportedStyles.
# SupportedStyles: nested, compact
Style/ClassAndModuleChildren:
  Enabled: false

# Offense count: 35
Style/ClassVars:
  Exclude:
    - 'src/api/app/controllers/test_controller.rb'
    - 'src/api/app/models/issue_tracker.rb'
    - 'src/api/app/models/user.rb'
    - 'src/api/app/models/user_ldap_strategy.rb'
    - 'src/api/lib/activexml/node.rb'
    - 'src/api/lib/activexml/transport.rb'
    - 'src/api/test/functional/branch_publish_flag_test.rb'
    - 'src/api/test/test_helper.rb'

# Offense count: 8
Style/CommentedKeyword:
  Exclude:
    - 'src/api/app/helpers/webui/webui_helper.rb'
    - 'src/api/app/models/bs_request_action.rb'
    - 'src/api/test/functional/source_controller_test.rb'
    - 'src/api/test/unit/project_remove_test.rb'
    - 'src/api/test/unit/user_ldap_strategy_test.rb'
    - 'src/api/test/unit/user_test.rb'

# Offense count: 94
# Cop supports --auto-correct.
# Configuration parameters: EnforcedStyle, SupportedStyles, SingleLineConditionsOnly, IncludeTernaryExpressions.
# SupportedStyles: assign_to_condition, assign_inside_condition
Style/ConditionalAssignment:
  Enabled: false

# Offense count: 17
Style/DateTime:
  Exclude:
    - 'src/api/app/controllers/request_controller.rb'
    - 'src/api/app/jobs/project_create_auto_cleanup_requests.rb'
    - 'src/api/app/models/branch_package.rb'
    - 'src/api/app/models/bs_request.rb'
    - 'src/api/spec/features/webui/requests_spec.rb'
    - 'src/api/test/functional/maintenance_test.rb'

# Offense count: 11
# Configuration parameters: EnforcedStyle, SupportedStyles.
# SupportedStyles: for, each
Style/For:
  Exclude:
    - 'src/api/app/models/bs_request_action_maintenance_release.rb'
    - 'src/api/app/models/project.rb'
    - 'src/api/app/views/webui/feeds/news.rss.builder'
    - 'src/api/db/attribute_descriptions.rb'
    - 'src/api/test/functional/attributes_test.rb'
    - 'src/api/test/unit/build_flag_test.rb'
    - 'src/api/test/unit/debug_flag_test.rb'
    - 'src/api/test/unit/publish_flag_test.rb'

# Offense count: 12
# Configuration parameters: SupportedStyles.
# SupportedStyles: annotated, template
Style/FormatStringToken:
  EnforcedStyle: template

# Offense count: 8
Style/IdenticalConditionalBranches:
  Exclude:
    - 'src/api/app/controllers/build_controller.rb'
    - 'src/api/app/controllers/webui/package_controller.rb'
    - 'src/api/app/controllers/webui/search_controller.rb'

# Offense count: 11
# Cop supports --auto-correct.
# Configuration parameters: InverseMethods, InverseBlocks.
Style/InverseMethods:
  Exclude:
    - 'src/api/app/controllers/webui/project_controller.rb'
    - 'src/api/app/mixins/has_attributes.rb'
    - 'src/api/app/models/bs_request.rb'
    - 'src/api/app/models/bs_request_action_submit.rb'
    - 'src/api/app/models/bs_request_permission_check.rb'
    - 'src/api/app/models/package.rb'
    - 'src/api/app/models/project.rb'
    - 'src/api/app/views/source/_common_issues.xml.builder'
    - 'src/api/test/functional/request_events_test.rb'

# Offense count: 2
Style/MethodMissing:
  Exclude:
    - 'src/api/app/models/configuration.rb'
    - 'src/api/lib/opensuse/permission.rb'

# Offense count: 3
Style/MixinUsage:
  Exclude:
    - 'src/api/app/controllers/request_controller.rb'
    - 'src/api/app/controllers/source_controller.rb'
    - 'src/api/app/helpers/maintenance_helper.rb'

# Offense count: 5
Style/MultipleComparison:
  Exclude:
    - 'src/api/app/helpers/flag_helper.rb'
    - 'src/api/app/models/bs_request.rb'
    - 'src/api/lib/memory_debugger.rb'
    - 'src/api/lib/tasks/extract.rake'
    - 'src/api/script/reformat_memprof'

# Offense count: 88
# Cop supports --auto-correct.
# Configuration parameters: EnforcedStyle, SupportedStyles, AllowInnerSlashes.
# SupportedStyles: slashes, percent_r, mixed
Style/RegexpLiteral:
  Enabled: false

# Offense count: 3
# Cop supports --auto-correct.
# Configuration parameters: ExactNameMatch, AllowPredicates, AllowDSLWriters, IgnoreClassMethods, Whitelist.
# Whitelist: to_ary, to_a, to_c, to_enum, to_h, to_hash, to_i, to_int, to_io, to_open, to_path, to_proc, to_r, to_regexp, to_str, to_s, to_sym
Style/TrivialAccessors:
  Exclude:
    - 'src/api/lib/activexml/node.rb'
    - 'src/api/test/test_helper.rb'

==> open-build-service-2.9.4/.travis.yml <==

# Machine config
dist: trusty
sudo: required
services:
  - memcached

# Language and app config
language: ruby
cache: bundler
gemfile: src/api/Gemfile
rvm: 2.5.0

# Github config
branches:
  except:
    # starting with depfu/
    - /^depfu\/.*/

# Scripts
install:
  - bundle install --jobs=3 --retry=3 --deployment --path=${BUNDLE_PATH:-vendor/bundle} --without=development
before_install: dist/ci/travis_before_install.sh
before_script: dist/ci/travis_before_script.sh
after_failure: dist/ci/travis_after_failure.sh
script: "dist/ci/travis_script.sh $TEST_SUITE"

# Notifications
notifications:
  irc:
    channels:
      - "chat.freenode.net#opensuse-buildservice"
    on_success: change
    on_failure: change

# Jobs matrix and stages
jobs:
  include:
    - stage: Linters
      env: TEST_SUITE=linter
      cache:
        bundler: true
        directories:
          - tmp/rubocop_cache
      before_install:
        - npm install -g jshint
        - gem install rubocop -v 0.51.0
        - gem install rubocop-rspec -v 1.20.1
    - stage: test
      env: TEST_SUITE=rspec
    - env: TEST_SUITE=api
    - env: TEST_SUITE=spider
    - env: TEST_SUITE=backend
  fast_finish: true

==> open-build-service-2.9.4/AUTHORS <==

Adrian Schröter
Alexandr D. Kanevskiy
Anas Nashif
Ancor Gonzalez Sosa
Andre Duffeck
Andreas Bauer
Andreas Jaeger
Benjamin Brunner
Berthold Gunreben
Björn Geuken
Christian Bruckmayer
Christoph Thiel
Christopher Hofmann
Cornelius Schumacher
Daniel Gollub
David Greaves
David Mayr
Dirk Mueller
Dirk Stoecker
Dominik Heidler
Dr. Peter Poeml
Esa Kulmala
Frank Schreiner
Frank Sundermeyer
Hemmo Nieminen
Henne Vogelsang
Iain Arnell
Jan Engelhardt
Jan Loeser
Jan Matejek
Jan-Christoph Bornschlegel
Jan-Simon Möller
Juha Kallioinen
Klaas Freitag
Lars Vogdt
Ludwig Nussel
Luke Imhoff
Marcus Hüwe
Marcus Rueckert
Marcus Schaeffer
Martin Kudlvasr
Martin Mohring
Matias Hilden
Michael Schröder
Michal Marek
Michal Seben
Michal Čihař
Moisés Déniz Alemán
Pavol Rusnak
Robert Lihm
Ruediger Oertel
Sascha Peilicke
Shyukri Shyukriev
Srinidhi B
Stephan Binner
Stephan Kleine
Stephan Kulow
Susanne Oberhauser
Thomas Schmidt
Thomas Scholz
Thomas Schraitle
Tom Patzig
Tuomo Tanskanen
Vincent Untz
Vivian Zhang
tux

==> open-build-service-2.9.4/CONTRIBUTING.md <==

# Table of Contents

1. [Request for contributions](#request-for-contributions)
2. [How to contribute code](#how-to-contribute-code)
3. [How to contribute issues](#how-to-contribute-issues)
4. [How to contribute documentation](#how-to-contribute-documentation)
5. [How to conduct yourself when contributing](#how-to-conduct-yourself-when-contributing)
6. [How to setup an OBS development environment](#how-to-setup-an-obs-development-environment)

# Request for contributions

We are always looking for contributions to the Open Build Service. Read this guide on how to do that.
In particular, this community seeks the following types of contributions:

* code: contribute your expertise in an area by helping us expand the Open Build Service
* ideas: participate in an issue thread or start your own to have your voice heard
* copy editing: fix typos, clarify language, and generally improve the quality of the content of the Open Build Service

# How to contribute code

* Prerequisites: familiarity with [GitHub Pull Requests](https://help.github.com/articles/using-pull-requests/)
* Fork the repository and make a pull request with your changes
* Please make sure to mind what our test suite in [travis](https://travis-ci.org/openSUSE/open-build-service) tells you
* Please always increase our [code coverage](https://codeclimate.com/github/openSUSE/open-build-service) with your pull request
* A developer of the [open-build-service team](https://github.com/orgs/openSUSE/teams/open-build-service) will review your pull request
* If the pull request gets a positive review, the reviewer will merge it

## How to write proper commit messages

- **Tag your commits**

  We tag our commits depending on the area that is affected by the change. All commits should start with at least one tag from:

  * [api] - Changes in api related parts of app/model/ and lib/ as well as app/controllers/\*.rb and its views
  * [backend] - Changes in the perl-written backend of OBS
  * [ci] - Changes that affect our test suite
  * [dist] - Modifies something inside /dist directory
  * [doc] - Any documentation related changes
  * [webui] - Changes in webui related parts of app/model/ and lib/ as well as app/controllers/webui/ and its views

  In case of having more than one tag, they should be alphabetically ordered.

- **Leave a blank line between the commit subject and body**

  Tools like rebase cannot work properly otherwise.

- **Preferably include a commit description**

  There is always some useful information to add to your commit. If you don't include a commit description, most likely you are missing something.

- **Try to keep the commit subject no longer than 50 characters**

- **Try to keep each line of the commit body no longer than 72 characters**

- **Try to avoid meaningless words/phrases**

  When possible, avoid using words/phrases such as _obviously_, _basically_, _simply_, _of course_, _everyone knows_ and _easy_.

- **Preferably use `-` for lists**

  Do not use `*` as it is also used for _emphasis_. (See the example after this list.)
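To make these rules concrete, here is a hypothetical commit message that follows all of them; the tag, subject and details are invented for illustration and do not refer to a real change:

```
[webui] Fix pagination on the package list

The package list rendered all results on one page, which timed out
for projects with many packages.

- Paginate the list with 50 entries per page
- Add a controller spec covering the new behavior
```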
## How to review code submissions

We make use of GitHub [pull request reviews](https://help.github.com/articles/about-pull-request-reviews/) and we...

- ...mark nitpicks inside the comment somehow (with the 💭 emoji or *nitpick*: blah blah)
- ...approve the pull request if our review only contains nitpicks
- ...request changes on the pull request if our review contains at least one non-nitpick
- ...just submit the review as a comment if we cannot review all of the code and just want to leave a comment

Nitpicks are things you as a reviewer don't care about if they end up in the code base. Things like:

- Style changes we have not agreed on in rubocop rules yet
- Bigger refactorings that are out of scope for the pull request
- Things new to you that you don't understand and would like to have an explanation for

# How to contribute issues

* Prerequisites: familiarity with [GitHub Issues](https://guides.github.com/features/issues/).
* Enter your issue and a member of the [open-build-service team](https://github.com/orgs/openSUSE/teams/open-build-service) will label and prioritize it for you.

We are using priority labels from **P1** to **P4** for our issues. So, as a member of the [open-build-service team](https://github.com/orgs/openSUSE/teams/open-build-service), you are supposed to handle them like this:

* P1: Urgent - Fix this next, even if you still have other issues assigned to you.
* P2: High - Fix this after you have fixed all your other issues.
* P3: Medium - Fix this when you have time.
* P4: Low - Fix this when you don't see any issues with the other priorities.

# How to contribute documentation

The Open Build Service documentation is hosted in a separate repository called [obs-docu](https://github.com/openSUSE/obs-docu). Please send pull requests against this repository.

# How to conduct yourself when contributing

The Open Build Service is part of the openSUSE project. We follow all the [openSUSE Guiding Principles!](http://en.opensuse.org/openSUSE:Guiding_principles) If you think someone doesn't do that, please let any of the [openSUSE owners](https://github.com/orgs/openSUSE/teams/owners) know!

# How to setup an OBS development environment

We are using [docker](https://www.docker.com/) to create our development environment. All the tools needed for this are available for Linux, MacOS and Windows.

**Please note** that the OBS backend uses advanced filesystem features that require a case-sensitive filesystem (default in Linux, configurable in MacOS/Windows), so make sure you run all this from a filesystem that supports this.

1. Install [docker](https://www.docker.com) and [docker-compose](https://docs.docker.com/compose/). There is documentation about this for [openSUSE](https://en.opensuse.org/SDB:Docker) and various [other operating systems](https://docs.docker.com/engine/installation/).

2. Install [rake](https://github.com/ruby/rake).

3. Clone this code repository:

   ```
   git clone --depth 1 git@github.com:openSUSE/open-build-service.git
   ```

4. Inside your clone, update the backend submodule:

   ```
   git submodule init
   git submodule update
   ```

5. Build your development environment with:

   ```
   rake docker:build
   ```

6. Start your development environment with:

   ```
   docker-compose up
   ```

7. Check out your OBS frontend: You can access the frontend at [localhost:3000](http://localhost:3000). Whatever you change in your cloned repository will take effect in the development environment.

   **Note**: The development environment is configured with a default user 'Admin' and password 'opensuse'.

8. Building packages: The easiest way to start building is to create an interconnect to our reference server. All resources from the openSUSE instance, including the base distributions, can be used that way. To set this up, follow these steps:

   * Login as Admin and go to the 'Configuration' page.
   * Go to the 'Interconnect' tab and press 'Save changes'. That creates an interconnect to build.opensuse.org.
   * Now, in any other project, you can choose from a wide range of distributions to build your packages on the 'Repositories' tab.

9. Changed something in the frontend? Test your changes!

   ```
   rake docker:test:frontend
   rake docker:test:lint
   ```

10. Changed something in the backend? Test your changes!

    ```
    rake docker:test:backend
    ```

11. You can find more details about the development environment [in our wiki](https://github.com/openSUSE/open-build-service/wiki/Development-Environment).
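One common way to customize this setup locally is a `docker-compose.override.yml`, which docker-compose merges over `docker-compose.yml` automatically and which the `.gitignore` of this repository already excludes. The snippet below is only a sketch under assumptions: the `frontend` service name, the compose file version and the variable are illustrative and not taken from this repository, so adapt them to the services defined in your checkout:

```
# docker-compose.override.yml - local-only tweaks, never committed to git.
# NOTE: 'frontend' is an assumed service name, and the version key must
# match the one declared in the docker-compose.yml of your checkout.
version: "2.1"
services:
  frontend:
    # Example: expose the Rails server on an extra host port.
    ports:
      - "3001:3000"
    # Example: pass an extra (hypothetical) environment variable in.
    environment:
      - MY_DEBUG_FLAG=1
```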
Happy Hacking!

- :heart: Your Open Build Service Team

==> open-build-service-2.9.4/COPYING <==

                    GNU GENERAL PUBLIC LICENSE
                       Version 2, June 1991

Copyright (C) 1989, 1991 Free Software Foundation, Inc.
51 Franklin Street, Fifth Floor, Boston, MA 02111-1307 USA
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.

                            Preamble

The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too.

When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things.

To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it.

For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.

We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software.

Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations.

Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all.

The precise terms and conditions for copying, distribution and modification follow.

                    GNU GENERAL PUBLIC LICENSE
   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you".

Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does.

1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program.

You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee.

2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions:

    a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change.

    b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License.

    c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.)

These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it.

Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program.

In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License.
3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following:

    a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,

    b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,

    c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.)

The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable.

If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code.

4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance.

5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it.

6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License.
7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program.

If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances.

It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice.

This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License.

8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License.

9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.

Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation.

10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally.

                            NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

                     END OF TERMS AND CONDITIONS

            How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.

To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year> <name of author>

    This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.

    This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1307 USA

Also add information on how to contact you by electronic and paper mail.

If the program is interactive, make it output a short notice like this when it starts in an interactive mode:

    Gnomovision version 69, Copyright (C) year name of author
    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it
    under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program.

You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names:

    Yoyodyne, Inc., hereby disclaims all copyright interest in the program
    `Gnomovision' (which makes passes at compilers) written by James Hacker.

    <signature of Ty Coon>, 1 April 1989
    Ty Coon, President of Vice
This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License.

==> open-build-service-2.9.4/Dockerfile <==

# This is just a thin layer on top of the frontend container
# that makes sure different users can run it without the
# contained rails app generating files in the git checkout with
# some strange user...

FROM openbuildservice/frontend-base

ARG CONTAINER_USERID

# Configure our user
RUN usermod -u $CONTAINER_USERID frontend

ADD src/api/Gemfile /obs/src/api/Gemfile
ADD src/api/Gemfile.lock /obs/src/api/Gemfile.lock
RUN chown -R frontend /obs/src/api

# Now do the rest as the user with the same ID as the user who
# builds this container
USER frontend
WORKDIR /obs/src/api

# foreman, which we only run in docker, needs a different thor version than OBS.
# Installing the gem directly spares us from having to rpm package two different thor versions.
RUN sudo gem.ruby2.5 install thor:0.19 foreman

# Ensure there is a foreman command without ruby suffix
RUN sudo ln -s /usr/bin/foreman.ruby2.5 /usr/bin/foreman

# FIXME: Retrying bundler if it fails is a workaround for https://github.com/moby/moby/issues/783
# which seems to happen on openSUSE (< Tumbleweed 20171001)...
RUN export NOKOGIRI_USE_SYSTEM_LIBRARIES=1; bundle install --jobs=3 --retry=3 || bundle install --jobs=3 --retry=3

# Run our command
CMD ["foreman", "start", "-f", "Procfile"]

==> open-build-service-2.9.4/Dockerfile.42.3 <==

FROM opensuse:42.3

# FIXME: Temporarily fix pam
# See https://github.com/openSUSE/docker-containers/issues/82
RUN sed -i 's/.*root.*-.*nproc.*unlimited.*$//g' /etc/security/limits.conf

# Add our repo
RUN echo 'solver.allowVendorChange = true' >> /etc/zypp/zypp.conf; \
    zypper ar -f http://download.opensuse.org/repositories/OBS:/Server:/Unstable/openSUSE_42.3/OBS:Server:Unstable.repo; \
    zypper ar -f http://download.opensuse.org/repositories/openSUSE:/Tools/openSUSE_42.3/openSUSE:Tools.repo; \
    zypper --gpg-auto-import-keys refresh

# Install requirements for all our containers
RUN zypper -n install --no-recommends --replacefiles \
    make gcc gcc-c++ patch curl vim vim-data psmisc \
    timezone ack glibc-locale sudo aaa_base

# Add our bootstrap script
ADD contrib/docker-bootstrap.sh /root/bin/docker-bootstrap.sh

# Add our user
RUN useradd -m frontend

# Setup sudo
RUN echo 'frontend ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers

# Run our command
CMD ["bash", "-l"]

==> open-build-service-2.9.4/Dockerfile.backend <==

FROM openbuildservice/base

# FIXME: https://bugzilla.opensuse.org/show_bug.cgi?id=957818
RUN rm -rf /var/cache/zypp/*

RUN /root/bin/docker-bootstrap.sh backend

# Add our sign configuration
ADD dist/obs-signd-conf.tar.bz2 sign
RUN rm -rf /root/.gnupg; \
    mv -v /sign/root/.gnupg /root/.gnupg; \
    mv -v /sign/root/.phrases /root/.phrases; \
    mv -v /sign/etc/ourkeyfile.asc /sign/etc/sign.conf /etc/; \
    mv -v /sign/etc/sysconfig/signd /etc/sysconfig/signd

# Run our command
WORKDIR /obs
CMD ["contrib/start_development_backend", "-d", "/obs"]

==> open-build-service-2.9.4/Dockerfile.frontend-base <==

FROM openbuildservice/base
https://bugzilla.opensuse.org/show_bug.cgi?id=957818 RUN rm -rf /var/cache/zypp/* RUN /root/bin/docker-bootstrap.sh frontend # Install other requirements RUN npm install -g jshint ADD src/api/Gemfile /obs/src/api/Gemfile ADD src/api/Gemfile.lock /obs/src/api/Gemfile.lock RUN chown -R frontend /obs/src/api # Now do the rest as the user with the same ID as the user who # builds this container USER frontend WORKDIR /obs/src/api # Ensure there are ruby, gem and irb commands without ruby suffix RUN for i in ruby gem irb; do ln -s /usr/bin/$i.ruby2.5 ~/bin/$i; done # Install our bundle # FIXME: Retrying bundler if it fails is a workaround for https://github.com/moby/moby/issues/783 # which seems to happen on openSUSE (< Tumbleweed 20171001)... RUN export NOKOGIRI_USE_SYSTEM_LIBRARIES=1; bundle install --jobs=3 --retry=3 || bundle install --jobs=3 --retry=3 # Switch to root again so we don't block changing our frontend user id... USER root # Run our command CMD ["/bin/bash", "-l"] open-build-service-2.9.4/Dockerfile.mariadb000066400000000000000000000012111332555733200206150ustar00rootroot00000000000000FROM openbuildservice/base # FIXME: https://bugzilla.opensuse.org/show_bug.cgi?id=957818 RUN rm -rf /var/cache/zypp/* # Install mariadb and dependencies RUN zypper -n install --no-recommends --replacefiles mariadb hostname # Setup mariadb RUN /usr/lib/mysql/mysql-systemd-helper install; \ /usr/lib/mysql/mysql-systemd-helper start & \ /usr/lib/mysql/mysql-systemd-helper wait; \ /usr/bin/mysql -u root -e "SELECT @@version; CREATE USER 'root'@'%' IDENTIFIED BY 'opensuse'; GRANT ALL ON *.* TO 'root'@'%' WITH GRANT OPTION;"; \ kill `cat /var/lib/mysql/*.pid`; \ sleep 10 CMD ["/usr/lib/mysql/mysql-systemd-helper", "start"] open-build-service-2.9.4/Dockerfile.memcached000066400000000000000000000003501332555733200211270ustar00rootroot00000000000000FROM openbuildservice/base # FIXME: https://bugzilla.opensuse.org/show_bug.cgi?id=957818 RUN rm -rf /var/cache/zypp/* # Install memcached RUN /root/bin/docker-bootstrap.sh memcached CMD ["/usr/sbin/memcached", "-u", "memcached"] open-build-service-2.9.4/Dockerfile.old-test-suite000066400000000000000000000016441332555733200221120ustar00rootroot00000000000000FROM openbuildservice/base # FIXME: https://bugzilla.opensuse.org/show_bug.cgi?id=957818 RUN rm -rf /var/cache/zypp/* RUN /root/bin/docker-bootstrap.sh memcached RUN /root/bin/docker-bootstrap.sh backend RUN /root/bin/docker-bootstrap.sh frontend ADD src/api/Gemfile /obs/src/api/Gemfile ADD src/api/Gemfile.lock /obs/src/api/Gemfile.lock ADD contrib/start_old_tests /obs/contrib/start_old_tests RUN chown -R frontend:users /obs/ USER frontend WORKDIR /obs/src/api # Set up some convenience ruby binaries... RUN for i in ruby gem irb; do ln -s /usr/bin/$i.ruby2.5 ~/bin/$i; done # Install our bundle # FIXME: Retrying bundler if it fails is a workaround for https://github.com/moby/moby/issues/783 # which seems to happen on openSUSE (< Tumbleweed 20171001)... 
RUN export NOKOGIRI_USE_SYSTEM_LIBRARIES=1; bundle install --jobs=3 --retry=3 || bundle install --jobs=3 --retry=3 # Run our command CMD ["/bin/bash", "-l"] open-build-service-2.9.4/Makefile000066400000000000000000000004231332555733200166710ustar00rootroot00000000000000all: make -C docs/api/api apidocs install: make -C dist install make -C src/api install make -C src/backend install test: make -C src/api test make -C dist test make -C src/backend test clean: make -C src/api clean rubocop: rubocop -D -F --fail-level convention open-build-service-2.9.4/Makefile.include000066400000000000000000000011271332555733200203150ustar00rootroot00000000000000INSTALL=/usr/bin/install OBS_BACKEND_PREFIX=/usr/lib/obs/server OBS_DOCUMENT_ROOT=/srv/www/obs OBS_API_PREFIX=$(OBS_DOCUMENT_ROOT)/api OBS_APIDOCS_PREFIX=$(OBS_DOCUMENT_ROOT)/docs # TODO: find fix for RH in spec/Makefile # This here is preparation for multi distro support APACHE_USER=wwwrun APACHE_GROUP=www APACHE_CONIFGDIR=/etc/apache2 APACHE_CONIFGDIR_VHOST=$(APACHE_CONIFGDIR)/vhosts.d APACHE_VHOST_CONF=obs-apache24.conf APACHE_VHOST_CONTAINER_REGISTRY_CONF=obs-container-registry.conf # some shortcuts test: test_unit test_all: test_unit test_system t: test ta: test_all ts: test_system open-build-service-2.9.4/README.md000066400000000000000000000070331332555733200165140ustar00rootroot00000000000000[![Build Status](https://secure.travis-ci.org/openSUSE/open-build-service.svg?branch=master)](https://travis-ci.org/openSUSE/open-build-service) [![Code Coverage](https://codecov.io/gh/openSUSE/open-build-service/branch/master/graph/badge.svg)](https://codecov.io/gh/openSUSE/open-build-service) [![Code Climate](https://codeclimate.com/github/openSUSE/open-build-service.png)](https://codeclimate.com/github/openSUSE/open-build-service) # Open Build Service The [Open Build Service (OBS)](http://www.open-build-service.org) is a generic system to build and distribute binary packages from sources in an automatic, consistent and reproducible way. You can release packages as well as updates, add-ons, appliances and entire distributions for a wide range of operating systems and hardware architectures. More information can be found on [openbuildservice.org](http://www.openbuildservice.org). The OBS consists of a backend and a frontend. The backend implements all the core functionality (i.e. building packages). The frontend provides a web application and XML API for interacting with the backend. Additionally there is a command line client (osc) for the API which is developed in a [separate repository](https://github.com/openSUSE/osc). ## Licensing The Open Build Service is Free Software and is released under the terms of the GPL, except where noted. Additionally, 3rd-party content (like, but not exclusively, the webui icon theme) may be released under a different license. Please check the respective files for details. ## Community You can discuss with the OBS Team via IRC on the channel [#opensuse-buildservice](irc://freenode.net/opensuse-buildservice). Or you can use our mailing list [opensuse-buildservice@opensuse.org](mailto:opensuse-buildservice+subscribe@opensuse.org).
### Development / Contribution If you want to contribute to the OBS please check out our [contribution readme](CONTRIBUTING.md) :-) ## Source Code Repository Layout The OBS source code repository is hosted on [GitHub](http://github.com/opensuse/open-build-service) and organized like this: dist Files relevant for our distribution packages docs Documentation, examples and schema files src/api Rails app (Ruby on Rails) src/backend Backend code (Perl) ## Installation To run the OBS in production we recommend using our [appliance](http://openbuildservice.org/download/) which is the whole package: a recent and stable Linux operating system ([openSUSE](http://www.opensuse.org)) bundled and pre-configured with all the server and OBS components you need to get going. If that is not for you because you have special needs for your setup (e.g. a different partition schema, SLES as the base system, etc.) you can also install our packages and run a setup wizard. After finishing the installation of your base system, follow these steps: 1. Add the OBS software repository with zypper. Please be aware that the needed URL differs depending on your base operating system. We use openSUSE Leap 42.3 in this example. ```shell zypper ar -f http://download.opensuse.org/repositories/OBS:/Server:/2.9/openSUSE_42.3/OBS:Server:2.9.repo ``` 2. Install the package ```shell zypper in -t pattern OBS_Server ``` 3. Run our setup wizard ```shell /usr/lib/obs/server/setup-appliance.sh ``` ## Advanced Setup If you have a more complex setup (e.g. a distributed backend) we recommend reading the Administration chapter in our [reference manual](http://openbuildservice.org/help/manuals/obs-reference-guide/cha.obs.admin.html). open-build-service-2.9.4/Rakefile000066400000000000000000000107501332555733200167020ustar00rootroot00000000000000CONTAINER_USERID = %x(id -u).freeze VERSION = '42.3'.freeze namespace :docker do desc 'Build our development environment' task :build do begin sh 'echo "# This file is generated by our Rakefile. Do not change it!"
> docker-compose.override.yml' # rubocop:disable Metrics/LineLength sh "echo \"version: \'2\'\nservices:\n frontend:\n build:\n args:\n CONTAINER_USERID: #{CONTAINER_USERID}\" >> docker-compose.override.yml" # rubocop:enable Metrics/LineLength # Build the frontend container sh 'docker-compose build frontend' # Bootstrap the app sh 'docker-compose up -d db' sh 'docker-compose run --no-deps --rm frontend bundle exec rake dev:bootstrap RAILS_ENV=development' ensure sh 'docker-compose stop' end end namespace :test do desc 'Run our frontend tests in the docker container' task :frontend do begin sh 'docker-compose -f docker-compose.ci.yml up --abort-on-container-exit' ensure sh 'docker-compose -f docker-compose.ci.yml stop' end end desc 'Run our backend tests in the docker container' task :backend do begin sh 'docker-compose run --rm -w /obs backend make -C src/backend test' ensure sh 'docker-compose stop' end end desc 'Scan the code base for syntax/code problems' task :lint do begin sh 'docker-compose -f docker-compose.ci.yml run --rm rspec bundle exec rake dev:bootstrap dev:lint' ensure sh 'docker-compose -f docker-compose.ci.yml stop' end end namespace :old do desc 'Run our frontend api old test suite in the docker container' task :api, :test do |_t, args| begin if args[:test] sh "docker-compose -f docker-compose.ci_old.yml run --rm --entrypoint '/obs/contrib/start_old_tests #{args[:test]}' old-test-suite" else sh 'docker-compose -f docker-compose.ci_old.yml up --no-recreate --abort-on-container-exit' end ensure sh 'docker-compose -f docker-compose.ci_old.yml stop' end end end end namespace :maintainer do desc 'Rebuild all our static containers' task rebuild: ['rebuild:base', 'rebuild:backend', 'rebuild:frontend-base', 'rebuild:mariadb', 'rebuild:memcached', 'rebuild:old-test-suite'] do end namespace :rebuild do task :base do sh "docker build . -t openbuildservice/base:#{VERSION} -t openbuildservice/base -f Dockerfile.#{VERSION}" end task :mariadb do sh "docker build . -t openbuildservice/mariadb:#{VERSION} -t openbuildservice/mariadb -f Dockerfile.mariadb" end task :memcached do sh "docker build . -t openbuildservice/memcached:#{VERSION} -t openbuildservice/memcached -f Dockerfile.memcached" end task 'frontend-base' do sh "docker build . -t openbuildservice/frontend-base:#{VERSION} -t openbuildservice/frontend-base -f Dockerfile.frontend-base" end task :backend do sh "docker build . -t openbuildservice/backend:#{VERSION} -t openbuildservice/backend -f Dockerfile.backend" end task 'old-test-suite' do sh "docker build . 
-t openbuildservice/old-test-suite:#{VERSION} -t openbuildservice/old-test-suite -f Dockerfile.old-test-suite" end end desc 'Rebuild and publish all our static containers' task publish: [:rebuild, 'publish:base', 'publish:mariadb', 'publish:memcached', 'publish:backend', 'publish:frontend-base', \ 'publish:old-test-suite'] do end namespace :publish do task :base do sh "docker push openbuildservice/base:#{VERSION}" sh 'docker push openbuildservice/base' end task :mariadb do sh "docker push openbuildservice/mariadb:#{VERSION}" sh 'docker push openbuildservice/mariadb' end task :memcached do sh "docker push openbuildservice/memcached:#{VERSION}" sh 'docker push openbuildservice/memcached' end task :backend do sh "docker push openbuildservice/backend:#{VERSION}" sh 'docker push openbuildservice/backend' end task 'frontend-base' do sh "docker push openbuildservice/frontend-base:#{VERSION}" sh 'docker push openbuildservice/frontend-base' end task 'old-test-suite' do sh "docker push openbuildservice/old-test-suite:#{VERSION}" sh 'docker push openbuildservice/old-test-suite' end end end open-build-service-2.9.4/ReleaseNotes-2.9000066400000000000000000000124161332555733200200600ustar00rootroot00000000000000 # # Open Build Service 2.9 # Please read the README.SETUP file for initial installation instructions or use the OBS Appliance from http://openbuildservice.org/download/ There is also an install medium which installs OBS on hard disk now. The dist/README.UPDATERS file contains information for updaters. OBS Appliance users who have set up their LVM can just replace their appliance image without data loss. The migration will happen automatically. Features ======== Generic: * image and container maintenance support, including binary tracking * riscv64 hardware architecture support Frontend: * New Kerberos authentication mode. Read how to set up Kerberos in the OBS Admin Guide: http://openbuildservice.org/help/manuals/obs-admin-guide/ * New job history page to see why a package was built. * New GPG key details dialog. * RSS feeds for user notifications are now available. * New Studio Express features: * New central page to branch image templates from. * Add and edit repository and package lists in kiwi files. * Edit kiwi image details: name, author, contact, specification. * RabbitMQ support. OBS admins can configure their instance to send messages to a RabbitMQ server. Read more in the OBS Admin Guide. * Receive email notifications for projects that are in your watchlist. Configure at /user/notifications. * Improved UI/UX for the notification configuration page. It now shows a better layout and explanations to make this complex page easier to understand. * Allow users to view the full diff of large changes. * Remove the unused api_relative_url_root option from the options.yml file.
* release mechanism improvements: - manual maintenance release support (avoiding requests) - operations happen atomically for the entire project now - support release of single multibuild container * EC2 cloud upload support for EC2 images (currently only available for OBS installations based on openSUSE 42.3) Backend: * New build formats: - native container build based on Dockerfile (besides the existing kiwi support) - FISSILE build format - AppImage build format * freezelink command to freeze current sources accessed via project link * support showing source files in blame view (works also via links) * support project copy with makeoriginolder option * support automatic vrev extending via project links * Improved container support: - support build of layered containers by reusing existing containers - support publishing to a docker registry server - support container signing via a notary server * cloud upload server supporting Amazon EC2 and Microsoft Azure * improved bootstrap cycle handling * additional SHA256 checksum in source commit handling for security * projects can be temporarily suspended to avoid scheduling between multiple changes * support AirBrake for reporting problems * support for the new Debian repository format * support for building in an OpenStack cloud * Many smaller improvements in DownloadOnDemand and multibuild handling Shipment: * To make use of the EC2 cloud upload feature you need to: - Install the obs-cloud-uploader package. Major bugfixes: * Fix deletion of groups with users. * Fix notification generation with very big payloads. * Create history element on priority raise of a request. * Fix huge bottleneck in notification emails. * Fix setting of new attributes to a project or package. Wanted changes: =============== * creation of repositories on branching has changed if repositories of the source refer to each other. These get recreated in the new project. * project copy does not add the user anymore * service dispatcher is used by default now * The editing of a user's realname, email address or password is no longer possible if LDAP mode is activated * Unused ldap options in options.yml were dropped: - ldap_update_support - ldap_object_class - ldap_entry_base - ldap_sn_attr_required * the project/package tag functionality/api was dropped * password hashing algorithm was changed to bcrypt (blowfish) * The backend notification plugin system is not used anymore. The RabbitMQ plugin is replaced with a RabbitMQ message bus implementation in the frontend; you can find details about this in the admin manual. The Hermes plugin is dropped without replacement as it was only used for notifications which the OBS has been doing on its own for quite some time. * publish hook failures are handled as fatal failures now. => the publisher will retry publishing Other changes ============= * Notes for systems using systemd: ================================ OBS is using init scripts, not systemd style .service files. This will stay until we switch from SLES 11 to SLES 12 as the default production platform. openSUSE installations may use systemd (instead of sysvinit) and it should work in general. Usages like the following do not work: # rcobssrcserver status You will only get the systemd status here. Also, stopping services may not kill all processes, which leads to obscure errors when trying to restart them. We have also heard about trashed MySQL databases when using systemd.
To avoid these problems you need to switch into the directory to bypass the systemd mapper: # cd /etc/init.d # ./obssrcserver status|stop|start open-build-service-2.9.4/ReleaseNotes-2.9.1000077700000000000000000000000001332555733200230302ReleaseNotes-2.9ustar00rootroot00000000000000open-build-service-2.9.4/ReleaseNotes-2.9.2000066400000000000000000000020151332555733200202120ustar00rootroot00000000000000# # Open Build Service 2.9.2 # Please read the README.SETUP file for initial installation instructions or use the OBS Appliance from http://openbuildservice.org/download There is also an install medium which installs OBS on hard disk now. The dist/README.UPDATERS file contains information for updaters. OBS Appliance users who have set up their LVM can just replace their appliance image without data loss. The migration will happen automatically. Features ======== Frontend: * Admins can now mark users to be managed locally instead of via LDAP * Cloud uploads can be managed (started, aborted and listed) via the API Bugfixes ======== Frontend: * Fixed an issue in the live build log that caused parts of the log to be duplicated * Upgrading from 2.8 to 2.9 caused remote repositories with the same name to get deleted - If the instance was already upgraded and an interconnect is configured, it might be necessary to restore the database with data from the backend - This can be done with 'rake.ruby2.5 fix_project <project>' open-build-service-2.9.4/ReleaseNotes-2.9.3000066400000000000000000000032621332555733200202200ustar00rootroot00000000000000# # Open Build Service 2.9.3 # Please read the README.SETUP file for initial installation instructions or use the OBS Appliance from http://openbuildservice.org/download There is also an install medium which installs OBS on hard disk now. The dist/README.UPDATERS file contains information for updaters. OBS Appliance users who have set up their LVM can just replace their appliance image without data loss. The migration will happen automatically. Features ======== Backend: * Allow using a different scheduling strategy which handles large build dependency cycles better. Enable it via the project config: BuildFlags: genmetaalgo:1 Bugfixes ======== Frontend: * Fixes a permission issue that allowed unpermitted users to trigger services via the webui. * Restricts setting of the initial bs request state. This prevents setting the initial state to something other than 'new' (CVE-2018-7689). * Fixes the permission check for projects with the 'InitializeDevelPackage' attribute (CVE-2018-7688). * Fixes rendering of requests with multiple submit requests. Previously switching tabs would not trigger a reload of the request content for the selected request. Backend: * Debian fixes to 2.9 - publish ONIE binary and hashsum, enable Secure Boot EFI signing for Debian packages. * New regex needssslcertforbuild for Debian builds * Support publishing via rsync syntax (allows specifying port numbers) * Make project config parser errors always visible * Fix corner case on wiping binaries * Improved .changes merge handling * Don't publish unneeded files of appdata in meta data * Fix lost events when restarting schedulers * Make errors caused by unreachable remote instances more visible. open-build-service-2.9.4/ReleaseNotes-2.9.4000066400000000000000000000013741332555733200202230ustar00rootroot00000000000000# # Open Build Service 2.9.4 # Please read the README.SETUP file for initial installation instructions or use the OBS Appliance from http://openbuildservice.org/download There is also an install medium which installs OBS on hard disk now.
The dist/README.UPDATERS file contains information for updaters. OBS Appliance users who have set up their LVM can just replace their appliance image without data loss. The migration will happen automatically. Bugfixes ======== Frontend: * Fixes permission check for bs requests with source projects that link to another project (bsc#1098934) * Fixes permission check in the InitializeDevelPackage attribute codepath (bsc#1100217) * Fix permission check of linked projects in BsRequestAction.check_action_permission open-build-service-2.9.4/codecov.yml000066400000000000000000000003051332555733200173750ustar00rootroot00000000000000fixes: - "::src/api/" coverage: status: project: default: target: auto base: auto threshold: 1 comment: layout: "reach, diff, flags" require_changes: true open-build-service-2.9.4/contrib/000077500000000000000000000000001332555733200166725ustar00rootroot00000000000000open-build-service-2.9.4/contrib/docker-bootstrap.sh000077500000000000000000000017311332555733200225150ustar00rootroot00000000000000#!/bin/bash for option in "$@" do case "${option}" in frontend) zypper -n install --no-recommends --replacefiles \ sphinx \ phantomjs \ nodejs6 npm6 \ mariadb-client \ git-core \ ruby2.5-devel cyrus-sasl-devel openldap2-devel libxml2-devel zlib-devel libxslt-devel \ perl-XML-Parser \ ruby2.5-rubygem-mysql2 \ ruby2.5-rubygem-bundler ruby2.5-rubygem-thor-0_19 ruby2.5-rubygem-foreman ;; backend) zypper -n install --no-recommends --replacefiles \ inst-source-utils \ obs-server obs-signd \ obs-service-download_src_package obs-service-download_files \ obs-service-download_url \ obs-service-format_spec_file obs-service-kiwi_import \ perl-Devel-Cover perl-Diff-LibXDiff \ osc ;; memcached) zypper -n install --no-recommends --replacefiles memcached ;; *) echo "Error: possible options are: frontend|backend|memcached" exit ;; esac done open-build-service-2.9.4/contrib/git-diff-to-deploy000077500000000000000000000004561332555733200222300ustar00rootroot00000000000000#!/bin/bash if [ -z $1 ]; then export api_url="https://api.opensuse.org" else export api_url=$1 fi DEPLOYED_REVISION=`osc -A $api_url api /about|awk -F'[<|>]' '/revision/{printf("%s\n",$3)}'|rev|cut -d . -f 1|rev` git log -p --reverse --no-merges $DEPLOYED_REVISION..master -- . ':!src/api/spec' open-build-service-2.9.4/contrib/start_development_backend000077500000000000000000000122561332555733200240320ustar00rootroot00000000000000#!/bin/bash trap clean_up SIGHUP SIGINT SIGTERM SIGKILL #function to help with the usage function _print_syntax() { me=`basename "$0"` echo "Usage: $me [-d <dir> -l <logdir> -h <hostname>]" echo -e "\t-d \tDirectory of source code. If not provided /obs is used" echo -e "\t-l \tDirectory where the logfiles should be put. If not set STDERR and STDOUT will be printed to console" echo -e "\t-h \tSet hostname of server. (default: backend)" } #reset OPTIND OPTIND=1 #preset GIT_HOME GIT_HOME="/obs" #preset HOST export DEVHOST="backend" #get options and check if there is a space in the dir arguments while getopts "l:d:h:?" opt; do case "$opt" in l) REDIR_LOG=$OPTARG if [[ "$REDIR_LOG" =~ [[:space:]] ]]; then echo "Directory may not contain whitespaces" exit fi ;; d) GIT_HOME=$OPTARG if [[ "$GIT_HOME" =~ [[:space:]] ]]; then echo "Directory may not contain whitespaces" exit fi ;; h) export DEVHOST=$OPTARG if [[ "$DEVHOST" =~ [[:space:]] ]]; then echo "Hostname may not contain whitespaces" exit fi ;; \?) _print_syntax exit 0 ;; esac done #REDIR_LOG points to the directory where the logfiles should be created.
If the directory does not #exist it will be created. APPEND_ARR[*] is filled with the output redirection for each service. if [ -n "$REDIR_LOG" ]; then if [ ! -d "$REDIR_LOG" ]; then echo "$REDIR_LOG does not exist. Will try to create it" mkdir -p "$REDIR_LOG" || { echo "Failure in creating directory:"; print_error; exit; } fi APPEND_ARR[0]=">$REDIR_LOG/bs_srcserver.log 2>&1" APPEND_ARR[1]=">$REDIR_LOG/bs_repserver.log 2>&1" APPEND_ARR[2]=">$REDIR_LOG/bs_sched_i586.log 2>&1" APPEND_ARR[3]=">$REDIR_LOG/bs_sched_x86_64.log 2>&1" APPEND_ARR[4]=">$REDIR_LOG/bs_dispatch.log 2>&1" APPEND_ARR[5]=">$REDIR_LOG/bs_publish.log 2>&1" APPEND_ARR[6]=">$REDIR_LOG/bs_service.log 2>&1" APPEND_ARR[7]=">$REDIR_LOG/bs_signer.log 2>&1" APPEND_ARR[8]=">$REDIR_LOG/signd.log 2>&1" APPEND_ARR[9]=">$REDIR_LOG/bs_servicedispatch.log 2>&1" fi #check if GIT_HOME exists. If not it does not make any sense to continue. if [ ! -d "$GIT_HOME" ]; then echo "There seems to be something wrong. Directory $GIT_HOME not found." echo "Please check if you are pointing to the right directory." exit 1 fi #create BSConfig.pm and change hostname to localhost if [ ! -f $GIT_HOME/src/backend/BSConfig.pm ]; then cp $GIT_HOME/src/backend/BSConfig.pm.template $GIT_HOME/src/backend/BSConfig.pm fi perl -pi -e 's/our \$bsserviceuser.*/our \$bsserviceuser="obsrun";/' $GIT_HOME/src/backend/BSConfig.pm perl -pi -e 's/my \$hostname.*/my \$hostname=\"$ENV{'DEVHOST'}\";/' $GIT_HOME/src/backend/BSConfig.pm perl -pi -e 's/\$ipaccess/\$removed_by_start_development_backend /' $GIT_HOME/src/backend/BSConfig.pm perl -pi -e 's/.*our \$gpg_standard_key.*/our \$gpg_standard_key="\/etc\/ourkeyfile.asc";/' $GIT_HOME/src/backend/BSConfig.pm perl -pi -e 's/.*our \$sign .*/our \$sign="\/usr\/bin\/sign";/' $GIT_HOME/src/backend/BSConfig.pm #start backend services (the minimum needed) with two arch(i586/x86_64) schedulers and one worker echo "Starting bs_srcserver" COMMAND_STRING="sudo $GIT_HOME/src/backend/bs_srcserver ${APPEND_ARR[0]} &" eval $COMMAND_STRING sleep 4 echo "Starting bs_repserver" COMMAND_STRING="sudo $GIT_HOME/src/backend/bs_repserver ${APPEND_ARR[1]} &" eval $COMMAND_STRING sleep 2 echo "Starting bs_sched i586" COMMAND_STRING="sudo $GIT_HOME/src/backend/bs_sched i586 ${APPEND_ARR[2]} &" eval $COMMAND_STRING echo "Starting bs_sched x86_64" COMMAND_STRING="sudo $GIT_HOME/src/backend/bs_sched x86_64 ${APPEND_ARR[3]} &" eval $COMMAND_STRING echo "Starting bs_dispatch" COMMAND_STRING="sudo $GIT_HOME/src/backend/bs_dispatch ${APPEND_ARR[4]} &" eval $COMMAND_STRING echo "Starting bs_publish" COMMAND_STRING="sudo $GIT_HOME/src/backend/bs_publish ${APPEND_ARR[5]} &" eval $COMMAND_STRING echo "Starting bs_service" COMMAND_STRING="sudo $GIT_HOME/src/backend/bs_service ${APPEND_ARR[6]} &" eval $COMMAND_STRING echo "Starting bs_signer" COMMAND_STRING="sudo $GIT_HOME/src/backend/bs_signer ${APPEND_ARR[7]} &" eval $COMMAND_STRING echo "Starting signd" COMMAND_STRING="sudo /usr/sbin/signd ${APPEND_ARR[8]} &" eval $COMMAND_STRING echo "Starting bs_servicedispatch" COMMAND_STRING="sudo $GIT_HOME/src/backend/bs_servicedispatch ${APPEND_ARR[9]} &" eval $COMMAND_STRING #Cleanup function to terminate all backend services function clean_up { echo -e "\ncleaning up and exit" echo -e "Terminating Services" sudo killall bs_srcserver echo -e "Terminated SRC Server" sudo killall bs_repserver echo -e "Terminated REP Server" sudo killall bs_sched echo -e "Terminated Scheduler" sudo killall bs_dispatch echo -e "Terminated Dispatcher" sudo killall bs_publish echo -e "Terminated 
Publisher" sudo killall bs_service echo -e "Terminated Publisher" sudo killall bs_signer echo -e "Terminated Signer" sudo killall bs_servicedispatch echo -e "Terminated service dispatcher" exit; } if [ -n "$REDIR_LOG" ]; then echo "Logfiles will be written to $REDIR_LOG" echo "Each service has it's own logfile" fi echo "If you want to terminate the backend, just hit Ctrl-C" wait open-build-service-2.9.4/contrib/start_development_worker000077500000000000000000000072161332555733200237560ustar00rootroot00000000000000#!/bin/bash trap clean_up SIGHUP SIGINT SIGTERM SIGKILL #function to help with the usage function _print_syntax() { me=`basename "$0"` echo "Usage: $me [-d -w ]" echo -e "\t-d \tDirectory of source code. (default: /obs)" echo -e "\t-l \tDirectory where the logfiles should be put. If not set STDERR and STDOUT will be printed to console" echo -e "\t-w \tNumber of workers that should be spawned. (default: 2)" echo -e "\t-h \tSet hostname of server. (default: backend)" echo -e "\t-r \tRegister to this reposerver. Can be used multiple times." echo -e "\t-i \tExecute immediately. Don't wait 60 seconds for the backend to become available." } #reset OPTIND OPTIND=1 #preset GIT_HOME GIT_HOME="/obs" #preset WORKER_COUNT WORKER_N=2 #preset HOST HOST="backend" #preset reg_server haverep=0 #preset NOSLEEP NOSLEEP=0 #get options and check if there is a space in the dir arguments while getopts "d:w:h:l:r:i?" opt; do case "$opt" in l) REDIR_LOG=$OPTARG if [[ "REDIR_LOG" =~ [[:space:]] ]]; then echo "Directory may not contain whitespaces" exit fi ;; d) GIT_HOME=$OPTARG if [[ "$GIT_HOME" =~ [[:space:]] ]]; then echo "Directory may not contain whitespaces" exit fi ;; w) WORKER_N=$OPTARG ;; r) rep_server+=("$OPTARG") haverep=1 ;; i) NOSLEEP=1 ;; h) HOST=$OPTARG if [[ "$HOST" =~ [[:space:]] ]]; then echo "Hostname may not contain whitespaces" exit fi ;; \?) _print_syntax exit 0 ;; esac done REP_SERVER_STRING="--reposerver http://$HOST:5252" if [ -n "$REDIR_LOG" ]; then if [ ! -d "$REDIR_LOG" ]; then echo "$REDIR_LOG does not exist. Will try to create it" mkdir -p "$REDIR_LOG" || { echo "Failure in creating directory:"; print_error; exit; } fi w_count=1 while [ $w_count -le $WORKER_N ] do APPEND_ARR[$w_count]=">$REDIR_LOG/bs_worker_$w_count.log 2>&1" let w_count=$w_count+1 done fi if [[ "$haverep" -eq "1" ]]; then REP_SERVER_STRING="" REP_PREFIX="--reposerver http://" REP_POSTFIX=":5252 " for server in "${rep_server[@]}" do REP_SERVER_STRING=$REP_SERVER_STRING$REP_PREFIX$server$REP_POSTFIX done fi #check if GIT_HOME exists. If not it does not make any sense to continue. if [ ! -d "$GIT_HOME" ]; then echo "There seems to be something wrong. Directory $GIT_HOME not found." echo "Please check if you are pointing to the right directory." exit 1 fi if [[ $NOSLEEP -eq "0" ]]; then # We need to wait for the backend to be available... sleep 60 fi w_count=1 while [ $w_count -le $WORKER_N ] do if [ ! -d /srv/obs/run/worker/$w_count ]; then sudo mkdir -p /srv/obs/run/worker/$w_count fi if [ ! 
-d /var/cache/obs/worker/root_$w_count ]; then sudo mkdir -p /var/cache/obs/worker/root_$w_count fi sudo chown -R obsrun:obsrun /srv/obs/run/ echo "Starting bs_worker number $w_count" COMMAND_STRING="sudo $GIT_HOME/src/backend/bs_worker --hardstatus --root /var/cache/obs/worker/root_$w_count --statedir /srv/obs/run/worker/$w_count --id $HOSTNAME:$w_count $REP_SERVER_STRING --hostlabel OBS_WORKER_SECURITY_LEVEL_ --jobs 1 --cachedir /var/cache/obs/worker/cache --cachesize 3967 ${APPEND_ARR[$w_count]} &" eval $COMMAND_STRING let w_count=$w_count+1 done # Cleanup function to terminate all backend services function clean_up { echo -e "\ncleaning up and exit" sudo killall bs_worker echo -e "Terminated Worker" exit; } if [ -n "$REDIR_LOG" ]; then echo "Logfiles will be written to $REDIR_LOG" echo "Each worker has its own logfile" fi echo "If you want to terminate the workers, just hit Ctrl-C" wait open-build-service-2.9.4/contrib/start_old_tests000077500000000000000000000020031332555733200220300ustar00rootroot00000000000000#!/bin/bash # Only sync what is needed to run the tests echo "Syncing files..." sudo rsync -aq --ignore-missing-args --include-from=- /obs_readonly/ /obs/ < /etc/my.cnf.d/obs.cnf /usr/lib/mysql/mysql-systemd-helper start open-build-service-2.9.4/contrib/test-airbrake000077500000000000000000000012231332555733200213530ustar00rootroot00000000000000#!/usr/bin/env perl use strict; use warnings; use FindBin; use UUID qw/uuid/; use Data::Dumper; BEGIN { push @INC, "$FindBin::Bin/../src/backend"; }; use BSAirBrake; BSUtil::setdebuglevel(7); raise(); exit 0; sub raise { my $ab = BSAirBrake->new( api_key => $::ENV{ERRBIT_API_KEY}, base_url => $::ENV{ERRBIT_BASE_URL}, project_id => 1 ); my $error = "Hello World from BSAirBrake"; my $options = { environment => { SCRIPT => $0 }, session => { "session-id" => uuid() }, params => { 'param1' => 'value1' }, }; my $backtrace = 1; print Dumper($ab->notify($error, $options, $backtrace)); } open-build-service-2.9.4/dist/000077500000000000000000000000001332555733200161755ustar00rootroot00000000000000open-build-service-2.9.4/dist/.distrc000066400000000000000000000022501332555733200174650ustar00rootroot00000000000000#!/bin/bash # options to all osc commands : ${OSCOPTS:="-A https://api.opensuse.org"} # the obs project in question : ${PROJECT:=openSUSE:Tools:Devel} : ${PACKAGE:=obs-all-svn} : ${TARNAME:=obs-all} : ${SPECFILE:=obs-all-svn.spec} : ${TARNAME:=obs-server} # the specfile to test-build locally (default: all spec files in $PWD) #: ${SPECFILE:=} : ${TARGET:=openSUSE_11.1} : ${ARCH:=i586} # Files from $PWD that are not part of the distribution, one on a line # There is a second list, $CANONICAL_EXCLUDES, of files which usually # never are part of the package (.svn, the .distrc, ..), see the # 'distribute' script for details. # # These are taken from the tar ball or irrelevant during the build: : ${EXCLUDES:=README.SETUP README.UPDATERS TODO .dist* obs.conf.template sysconfig.obs-worker.template obs-all-svn.spec.template obs-server*.*} # this package is not in maintenance mode here # that would mean the tar ball isn't updated any longer, only patches # would be added.
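# Note: the ': ${VAR:=default}' lines in this file are bash parameter expansion: ':' is a no-op command and '${VAR:=default}' assigns the default only when VAR is unset or empty. Every setting here, including MAINTENANCE below, can therefore be overridden from the caller's environment, e.g. (a hypothetical invocation of the 'distribute' script mentioned above): # MAINTENANCE=yes VERSION=1.6.1 ./distribute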
: ${MAINTENANCE:=} # The version and # The tar ball is updated from here : ${SVNDIR:=../../buildservice} # The svnversion of SVNDIR plus this VERSION make up the rpm package # 'Version:' : ${VERSION:=1.6.0} open-build-service-2.9.4/dist/.distrc.template000066400000000000000000000022141332555733200212770ustar00rootroot00000000000000#!/bin/bash # options to all osc commands : ${OSCOPTS:="-A https://api.opensuse.org"} # the obs project in question : ${PROJECT:=openSUSE:Tools:Devel} : ${PACKAGE:=obs-server-svn} : ${TARNAME:=obs-server} : ${SPECFILE:=*.spec} # the specfile to test-build locally (default: all spec files in $PWD) #: ${SPECFILE:=} : ${TARGET:=openSUSE_11.1} : ${ARCH:=i586} # Files from $PWD that are not part of the distribution, one on a line # There is a second list, $CANONICAL_EXCLUDES, of files which usually # never are part of the package (.svn, the .distrc, ..), see the # 'distribute' script for details. # # These are taken from the tar ball or irrelevant during the build: : ${EXCLUDES:=README.SETUP README.UPDATERS TODO .dist* obs.conf.template sysconfig.obs-worker.template obs-all-svn.spec.template obs-server*.*} # this package is not in maintenance mode here # that would mean the tar ball isn't updated any longer, only patches # would be added. : ${MAINTENANCE:=} # The version and # The tar ball is updated from here : ${SVNDIR:=../../buildservice} # The svnversion of SVNDIR plus this VERSION make up the rpm package # 'Version:' : ${VERSION:=1.5.b1} open-build-service-2.9.4/dist/Makefile000066400000000000000000000075471332555733200176500ustar00rootroot00000000000000include ../Makefile.include INIT_SCRIPTS := obssrcserver obsrepserver obsscheduler obsworker obspublisher obsdispatcher obssigner obswarden obsapidelayed obsapisetup obsstoragesetup obsservice obsdodup obsservicedispatch obsclouduploadserver obsclouduploadworker LOGROTATE_CONFIGS := obs-api obs-server obs-source_service OBS_BIN_SCRIPTS := obs_productconvert OBS_SBIN_SCRIPTS := obs_admin obs_serverstatus SYSTEMD_SERVICE_FILES := obsdeltastore UNITDIR=/usr/lib/systemd/system/ install: install_obsapisetup install_apache install_initscripts install_project_update install_logrotate install_fillups install_slp install_obs_bin install_devel_docs install_overview install_tests_appliance install_crontabs install_systemd_services install_registry_dirs #install_overview install_obsapisetup: system_dirs $(INSTALL) -m 755 obsapisetup $(DESTDIR)/etc/init.d/obsapisetup $(INSTALL) -m 755 setup-appliance.sh $(DESTDIR)$(OBS_BACKEND_PREFIX)/setup-appliance.sh install_apache: $(INSTALL) -d -m 755 $(DESTDIR)$(APACHE_CONIFGDIR_VHOST) $(INSTALL) -m 644 $(APACHE_VHOST_CONF) $(DESTDIR)$(APACHE_CONIFGDIR_VHOST)/obs.conf $(INSTALL) -m 644 $(APACHE_VHOST_CONTAINER_REGISTRY_CONF) $(DESTDIR)$(APACHE_CONIFGDIR_VHOST)/$(APACHE_VHOST_CONTAINER_REGISTRY_CONF) install_initscripts: system_dirs $(foreach script,$(INIT_SCRIPTS),$(shell $(INSTALL) -m 755 $(script) $(DESTDIR)/etc/init.d/$(script)) ) $(foreach script,$(INIT_SCRIPTS),$(shell ln -sf /etc/init.d/$(script) $(DESTDIR)/usr/sbin/rc$(script)) ) install_systemd_services: system_dirs $(foreach service,$(SYSTEMD_SERVICE_FILES),$(shell $(INSTALL) -m 644 ./systemd/$(service).service $(DESTDIR)$(UNITDIR)$(service).service) ) $(foreach service,$(SYSTEMD_SERVICE_FILES),$(shell ln -sf /usr/sbin/service $(DESTDIR)/usr/sbin/rc$(service) ) ) install_project_update: system_dirs $(INSTALL) -m 0755 obs_project_update $(DESTDIR)/usr/sbin/obs_project_update install_logrotate: system_dirs $(foreach
config,$(LOGROTATE_CONFIGS),$(shell $(INSTALL) -m 644 $(config).logrotate $(DESTDIR)/etc/logrotate.d/$(config)) ) install_fillups: system_dirs $(INSTALL) -m 0644 sysconfig.obs-server $(DESTDIR)/var/adm/fillup-templates/ install_slp: system_dirs $(INSTALL) -m 644 obs.source_server.reg $(DESTDIR)/etc/slp.reg.d/ $(INSTALL) -m 644 obs.repo_server.reg $(DESTDIR)/etc/slp.reg.d/ install_obs_bin: system_dirs $(foreach script,$(OBS_BIN_SCRIPTS),$(shell $(INSTALL) -m 755 $(script) $(DESTDIR)/usr/bin/$(script)) ) $(foreach script,$(OBS_SBIN_SCRIPTS),$(shell $(INSTALL) -m 755 $(script) $(DESTDIR)/usr/sbin/$(script)) ) install_crontabs: $(INSTALL) -m 644 cleanup_scm_cache.cron $(DESTDIR)/etc/cron.d/cleanup_scm_cache $(INSTALL) -m 644 obs_api_delayed_jobs_monitor.cron $(DESTDIR)/etc/cron.d/obs_api_delayed_jobs_monitor system_dirs: $(INSTALL) -d -m 755 $(DESTDIR)$(OBS_BACKEND_PREFIX) $(INSTALL) -d -m 755 $(DESTDIR)/etc/init.d/ $(INSTALL) -d -m 755 $(DESTDIR)/etc/logrotate.d/ $(INSTALL) -d -m 755 $(DESTDIR)/etc/slp.reg.d/ $(INSTALL) -d -m 755 $(DESTDIR)/etc/cron.d/ $(INSTALL) -d -m 755 $(DESTDIR)/usr/bin/ $(INSTALL) -d -m 755 $(DESTDIR)/usr/sbin/ $(INSTALL) -d -m 755 $(DESTDIR)/var/adm/fillup-templates $(INSTALL) -d -m 755 $(DESTDIR)/usr/share/doc/packages/obs-devel $(INSTALL) -d -m 755 $(DESTDIR)/usr/lib/obs/tests/appliance $(INSTALL) -d -m 755 $(DESTDIR)$(UNITDIR) install_devel_docs: $(INSTALL) -m 644 README.devel $(DESTDIR)/usr/share/doc/packages/obs-devel/README.devel install_overview: $(INSTALL) -d -m 755 $(DESTDIR)$(OBS_DOCUMENT_ROOT)/overview $(INSTALL) -m 644 overview.html.TEMPLATE $(DESTDIR)$(OBS_DOCUMENT_ROOT)/overview install_registry_dirs: mkdir -p $(DESTDIR)/srv/www/obs/container-registry{,/log,/htdocs} install_tests_appliance: cp -r ./t/* $(DESTDIR)/usr/lib/obs/tests/appliance/ test_unit: prove -v t/*.t test_system: prove -v t/*.ts test_appliance: prove -v t/*.ta .PHONY: test_unit test_system open-build-service-2.9.4/dist/OBS-statistics.R000066400000000000000000000234751332555733200211460ustar00rootroot00000000000000# We use ggplot2, a library that makes painting graphics much easier. 
# You can check the types of graphs and example code here: https://plot.ly/ggplot2 # REQUIRE NEEDED LIBRARIES, in case that you don't have them installed, use: install.packages("ggplot2") library(ggplot2) library(reshape2) library(plotly) # require scales library to show only the years in the dates axis, # in case that you don't have them installed, use: install.packages("scales") library(scales) # FORMAT GRAPHS, for exporting in 2000x1200; not valid for the pie charts theme_set(theme_gray(base_size = 56)) # NUMBER OF USERS GRAPH # Read users data and save it in a dataframe variable dataUsers <- read.csv('number_users.txt') # In this case, we are going to use a simple geom_line without points users_data <- data.frame( users = dataUsers$users, date = as.Date(dataUsers$date, format = "%Y-%m-%d") # convert String to Date ) ggplot(data=users_data, aes(x=date, y=users)) + geom_line(size=3) # Users increase over time, related to Diffusion of innovations theory users_increase_data <- data.frame( users = dataUsers$users_increase, date = as.Date(dataUsers$date, format = "%Y-%m-%d") # convert String to Date ) ggplot(data=users_increase_data, aes(x=date, y=users)) + geom_point() + stat_smooth(size=3) ############################################################################################### # NUMBER OF BS REQUESTS GRAPH # Read bs_requests data and save it in a dataframe variable dataBsRequests <- read.csv('number_bs_requests.txt') bs_requests_data <- data.frame( bs_requests = dataBsRequests$bs_requests, date = as.Date(dataBsRequests$date, format = "%Y-%m-%d") # convert String to Date ) ggplot(data=bs_requests_data, aes(x=date, y=bs_requests)) + geom_line(size=1, colour="red") # NUMBER OF BS REQUESTS FROM 2015 # Take subset with the data from 2015 bs_requests_data_from_2015 <- subset(bs_requests_data, date >= as.Date("2015-01-01") ) # We scale to show only the year. # Otherwise, as this period of time is shorter it will show 2015-01, 2015-07, 2016-01,...
on the date axis ggplot(data=bs_requests_data_from_2015, aes(x=date, y=bs_requests)) + geom_line() + scale_x_date(breaks = date_breaks("1 years"), labels = date_format("%Y")) # SEVERAL BS REQUESTS # for these graphs the font has to be decreased and EXPORT IT in 2000x1000 theme_set(theme_gray(base_size = 36)) several_bs_requests_data <- data.frame( all = dataBsRequests$bs_requests, openSUSE_Factory = read.csv('number_bs_requests_for_openSUSE:Factory.txt')$bs_requests, openSUSE_Maintenance = read.csv('number_bs_requests_for_openSUSE:Maintenance.txt')$bs_requests, openSUSE_Leap_42.1 = read.csv('number_bs_requests_for_openSUSE:Leap:42.1.txt')$bs_requests, openSUSE_Leap_42.2 = read.csv('number_bs_requests_for_openSUSE:Leap:42.2.txt')$bs_requests, openSUSE_Leap_42.3 = read.csv('number_bs_requests_for_openSUSE:Leap:42.3.txt')$bs_requests, GNOME_Factory = read.csv('number_bs_requests_for_GNOME:Factory.txt')$bs_requests, devel_languages = read.csv('number_bs_requests_for_devel:languages.txt')$bs_requests, date = as.Date(dataBsRequests$date, format = "%Y-%m-%d") ) test_data_long <- melt(several_bs_requests_data, id="date") # convert to long format ggplot(data=test_data_long, aes(x=date, y=value, colour=variable)) + geom_line(size=1) + theme(legend.text=element_text(size=26),legend.key.height=unit(2,"line")) # SEVERAL BS REQUESTS FROM 2015 # Take subset with the data from 2015 several_bs_requests_data_from_2015 <- subset(several_bs_requests_data, date >= as.Date("2015-01-01") ) test_data_long <- melt(several_bs_requests_data_from_2015, id="date") # convert to long format ggplot(data=test_data_long, aes(x=date, y=value, colour=variable)) + geom_line() # NUMBER OF ACTIVE PROJECTS # reset font and EXPORT IT in 2000x1200 theme_set(theme_gray(base_size = 56)) dataProjects <- read.csv('number_projects_bs_requests.txt') projects_data <- data.frame( number_projects = dataProjects$projects_bs_requests, date = as.Date(dataProjects$date, format = "%Y-%m-%d") ) ggplot(data=projects_data, aes(x=date, y=number_projects)) + geom_line(size=2, color="#0ECBD9") # NUMBER OF ACTIVE PROJECTS VS ACTIVITY # for these graphs the font has to be decreased and EXPORT IT in 2000x1000 theme_set(theme_gray(base_size = 46)) dataBsRequests <- read.csv('number_bs_requests.txt') bs_requests_data <- data.frame( bs_requests = dataBsRequests$bs_requests, date = as.Date(dataBsRequests$date, format = "%Y-%m-%d") # convert String to Date ) dataBsRequestsFrom2013 <- subset(bs_requests_data, date >= as.Date("2013-01-01") ) projects_bs_requests_data <- data.frame( all = dataBsRequestsFrom2013$bs_requests, number_projects = read.csv('number_projects_bs_requests.txt')$projects_bs_requests, date = as.Date(dataBsRequestsFrom2013$date, format = "%Y-%m-%d") ) test_data_long <- melt(projects_bs_requests_data, id="date") # convert to long format ggplot(data=test_data_long, aes(x=date, y=value, colour=variable)) + geom_line(size=1) + theme(legend.text=element_text(size=28),legend.key.height=unit(2,"line")) # NUMBER OF ACTIVE PROJECTS VS DIFF ACTIVITY projects_diff_bs_requests_data <- data.frame( diff_all = c(0,diff(dataBsRequestsFrom2013$bs_requests)), number_projects = read.csv('number_projects_bs_requests.txt')$projects_bs_requests, date = as.Date(dataBsRequestsFrom2013$date, format = "%Y-%m-%d") ) test_data_long <- melt(projects_diff_bs_requests_data, id="date") # convert to long format ggplot(data=test_data_long, aes(x=date, y=value, colour=variable)) + geom_line(size=2) + theme(legend.text=element_text(size=28),legend.key.height=unit(2,"line")) #
BS REQUEST CORRELATION diff_all <- projects_diff_bs_requests_data$diff_all number_projects <- projects_diff_bs_requests_data$number_projects # get correlation value cor(diff_all, number_projects) # paint the correlation graph ccf(diff_all, number_projects) ############################################################################################### # GITHUB # PRs # reset font and EXPORT IT in 2000x1200 theme_set(theme_gray(base_size = 56)) dataPrs <- read.csv('pull_requests_merged.csv') prs_data <- data.frame( PRs = dataPrs$number_pull_requests_merged, date = as.Date(dataPrs$week, format = "%Y-%m-%d") # convert String to Date ) ggplot(data=prs_data, aes(x=date, y=PRs)) + geom_line(size=2, color="#5F04B4") # CODE FREQUENCY # for these graphs the font has to be decreased and EXPORT IT in 2000x1000 theme_set(theme_gray(base_size = 46)) code_frequency <- read.csv('code_frequency.csv') code_frequency_data <- data.frame( additions = code_frequency$additions, deletions = code_frequency$deletions, date = as.Date(code_frequency$date, format = "%Y-%m-%d") ) test_data_long <- melt(code_frequency_data, id="date") # convert to long format ggplot(data=test_data_long, aes(x=date, y=value, colour=variable)) + geom_line(size=1) + theme(legend.text=element_text(size=28),legend.key.height=unit(2,"line")) # COMMIT ACTIVITY # reset font and EXPORT IT in 2000x1200 theme_set(theme_gray(base_size = 56)) dataCommits <- read.csv('commit_activity.csv') commits_data <- data.frame( commits = dataCommits$commits, date = as.Date(dataCommits$date, format = "%Y-%m-%d") # convert String to Date ) ggplot(data=commits_data, aes(x=date, y=commits)) + geom_line(size=2, color="#04B404") ############################################################################################### # BAR GRAPH FOR NUMBER OF BUILDS # for these graphs the font has to be decreased and EXPORT IT in 2000x1000 theme_set(theme_gray(base_size = 38)) build <- data.frame( status = factor(c("unchanged", "unchanged", "unchanged", "unchanged", "failed", "failed", "failed", "failed", "succeeded", "succeeded", "succeeded", "succeeded")), hosts = factor(c("distributions", "home", "staging", "rest", "distributions", "home", "staging", "rest", "distributions", "home", "staging", "rest"), levels=c("distributions", "home", "staging", "rest")), builds = c(4000, 15300, 49000, 50000, 100, 5000, 800, 8800, 6200, 15000, 20000, 34000) ) ggplot(data=build, aes(x=hosts, y=builds, fill=status)) + geom_bar(colour="black", stat="identity", position=position_dodge(), size=.5) + # Thinner lines scale_fill_manual(values=c("#FF0000", "#04B404", "#0080FF")) + theme(legend.text=element_text(size=30),legend.key.height=unit(2,"line")) ############################################################################################### # PIE CHART OF REQUESTS STATES slices <- c(10763, 1684, 369727, 97, 44014, 5352, 46547) lbls <- c("declined", "review", "accepted", "deleted", "revoked", "new", "superseded") pct <- round(slices/sum(slices)*100, 2) # add percents and % lbls <- paste(paste(lbls, pct),"%",sep="") # print the pie chart with a beautiful rainbow color # cex is meant for exporting the image in 2000x1200 pie(slices,labels = lbls, col=rainbow(length(lbls)), radius = 1, cex = 3) ############################################################################################### # PIE CHART OF CODE LINES slices <- c(217801, 30622, 33769) lbls <- c("Code Lines", "Comment Lines", "Blank Lines") pct <- round(slices/sum(slices)*100) # add percents and % lbls <- paste(paste(lbls, pct),"%",sep="") #
cex is meant for exporting the image in 2000x1200 (to show it small) pie(slices,labels = lbls, cex = 3) # PIE CHART OF CODE LINES BY LANGUAGE slices <- c(92288, 81912, 54648, 14854, 14913, 12827, 4470, 6280) lbls <- c("Ruby", "Perl", "Python", "XML", "JavaScript", "shell script", "CSS", "Others") pct <- round(slices/sum(slices)*100, 1) # add percents and % lbls <- paste(paste(lbls, pct),"%",sep="") # print the pie chart with a beautiful rainbow color # cex is meant for exporting the image in 2000x1200 pie(slices,labels = lbls, col=rainbow(length(lbls)), radius = 1, cex = 3) open-build-service-2.9.4/dist/README.SETUP000077700000000000000000000000001332555733200214412../README.mdustar00rootroot00000000000000open-build-service-2.9.4/dist/README.UPDATERS000066400000000000000000000356371332555733200203170ustar00rootroot00000000000000For Updaters to OBS 2.9 from OBS 2.8 ==================================== Note: Update from OBS 2.5 should also work, but is untested. A direct update from OBS 2.4 or older will not work. 1) Remove the OBS 2.8 Repository zypper rs OBS:Server:2.8 2) Add the OBS 2.9 Repository and update the repository cache zypper ar http://download.opensuse.org/repositories/OBS:/Server:/2.9/$YOUR_DISTRIBUTION/OBS:Server:2.9.repo zypper ref 3) Update packages zypper dup --from OBS_Server_2.9 You will be asked to deinstall ruby2.4-rubygem-passenger, since OBS 2.9 is using ruby 2.5 instead of ruby 2.4. Agree to it. 4) Change to ruby2.5 edit /etc/apache2/conf.d/mod_passenger.conf: PassengerRuby "/usr/bin/ruby.ruby2.5" 5) Migrate database cd /srv/www/obs/api/ RAILS_ENV="production" rails.ruby2.5 db:migrate:with_data 6) Make sure that log and tmp are owned by wwwrun chown -R wwwrun.www /srv/www/obs/api/log chown -R wwwrun.www /srv/www/obs/api/tmp 7) Restart the following services in this order systemctl restart apache2 systemctl restart obsapidelayed systemctl restart memcached 8) Enable and start the new services systemctl enable obsservicedispatch systemctl start obsservicedispatch obsservicedispatch must run on the host where the src server is running. For Updaters to OBS 2.8 from OBS 2.7 ==================================== Note: Update from OBS 2.5 should also work, but is untested. A direct update from OBS 2.4 or older will not work. 1) Remove the OBS 2.7 Repository zypper rs OBS:Server:2.7 2) Add the OBS 2.8 Repository and update the repository cache zypper ar http://download.opensuse.org/repositories/OBS:/Server:/2.8/$YOUR_DISTRIBUTION/OBS:Server:2.8.repo zypper ref 3) Update packages zypper dup --from OBS_Server_2.8 4) Migrate database cd /srv/www/obs/api/ RAILS_ENV="production" rails.ruby2.4 db:migrate 5) Change to ruby2.4 edit /etc/apache2/conf.d/mod_passenger.conf: PassengerRuby "/usr/bin/ruby.ruby2.4" 6) Make sure that log and tmp are owned by wwwrun chown -R wwwrun.www /srv/www/obs/api/log chown -R wwwrun.www /srv/www/obs/api/tmp 7) Restart the following services in this order systemctl restart apache2 systemctl restart obsapidelayed systemctl restart memcached 8) Optional: you may enable the service dispatcher in BSConfig.pm and enable the service systemctl enable obsservicedispatch systemctl start obsservicedispatch obsservicedispatch must run on the host where the src server is running. For Updaters to OBS 2.7 from OBS 2.6 ==================================== Note: Update from OBS 2.5 should also work, but is untested. A direct update from OBS 2.4 or older will not work.
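Tip: before and after any of the updates below you can verify which code revision the server is actually running by querying the api, in the same way the contrib/git-diff-to-deploy script in this repository does (the hostname here is just a placeholder for your own instance): # osc -A https://api.example.com api /about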
1) Remove the OBS 2.6 Repository zypper rs OBS:Server:2.6 2) Add the OBS 2.7 Repository and update the repository cache zypper ar http://download.opensuse.org/repositories/OBS:/Server:/2.7/$YOUR_DISTRIBUTION/OBS:Server:2.7.repo zypper ref -s 3) Remove packages which might cause conflicts zypper -n remove ruby2.1-rubygem-passenger ruby2.1-rubygem-activerecord-4_1 ruby2.1-rubygem-rails-4_1 4) Update packages zypper dup --no-recommends --from OBS_Server_2.7 5) Migrate database cd /srv/www/obs/api/ RAILS_ENV="production" rails.ruby2.3 db:migrate 6) Make sure that log and tmp are owned by wwwrun chown -R wwwrun.www /srv/www/obs/api/log chown -R wwwrun.www /srv/www/obs/api/tmp 7) Restart the following services in this order systemctl restart apache2 systemctl restart obsapidelayed systemctl restart memcached 8) Enable and start the new services systemctl enable obsdodup systemctl enable obsdeltastore systemctl start obsdodup systemctl start obsdeltastore obsdodup must run on each backend server which also runs a repository server. obsdeltastore must run where the src server is running. For Updaters to OBS 2.6 from OBS 2.5 ==================================== Appliance users can just do a package update and reboot or use the new appliance image to update their configurations. In case the installation is not done via packages all rubygems according to Gemfile need to get installed. 1) Database migration # cd /srv/www/obs/api/ # RAILS_ENV="production" rails db:migrate # chown -R wwwrun.www log tmp 2) Restart the following services in this order * apache2 * obsapidelayed * memcached The backend daemons are restarting automatically on update. For Updaters to OBS 2.5 from OBS 2.4 ==================================== Appliance users can just do a package update and reboot or use the new appliance image to update their configurations. In case the installation is not done via packages all rubygems according to Gemfile need to get installed. The biggest setup-wise change is the unification of the API and WEBUI instance. This means only one rails stack is running, which can be used both for browsing via a web browser and as an api to be used with osc. In case you do not use the obs apache configuration which comes with the package, you need to disable your webui configuration. Users who used port 444 as the api have to reconfigure their clients to use port 443 now. 1) Database migration # cd /srv/www/obs/api/ # RAILS_ENV="production" rails db:migrate # chown -R wwwrun.www log tmp 2) Restart memcached after database migration to wipe possibly broken cache data. 3) configuration settings moved from various places to the unique /configuration api route as much as possible to allow a unique configuration through all OBS frontend and backend parts.
The following settings have been migrated: from BSConfig.pm: obsname => name enforce_project_keys => enforce_project_keys download_on_demand => download_on_demand http_proxy => http_proxy no_proxy => no_proxy from api options.yml: new_user_registration => registration allow_anonymous => anonymous default_access_disabled => default_access_disabled allow_user_to_create_home_project => allow_user_to_create_home_project disallow_group_creation_with_api => disallow_group_creation change_passwd => change_password download_url => download_url ymp_url => ymp_url http_proxy => http_proxy from webui options.yml: hide_private_options => hide_private_options use_gravatar => gravatar http_proxy => http_proxy download_url => download_url bugzilla_host => bugzilla_url theme => theme 4) the api interface configuration must be configured in /srv/www/obs/api/config/options.yml now. Ensure that at least the following parameters are configured there: frontend_host: frontend_port: frontend_protocol: 5) Non-Appliance users who do not have the obsapisetup init script running also need to call the following once to have a working search: # cd /srv/www/obs/api/ # RAILS_ENV="production" rails ts:index For Updaters to OBS 2.4 from OBS 2.3 ==================================== Appliance users can just do a package update and reboot or use the new appliance image to update their configurations. Other installations need to manually merge configurations as described below (This is required due to the Rails 2.x to Rails 3.x switch). In case the installation is not done via packages all rubygems according to Gemfile need to get installed. Please check the following files: 1) the database driver needs to be changed to "mysql2" in /srv/www/obs/api/config/database.yml This is usually done automatically by the package. 2) all config options with capital letters have been moved from /srv/www/obs/.../config/environments/production.rb to small letter variables in /srv/www/obs/.../config/options.yml The old production.rb files will not work anymore, please transfer your configuration into the new file (available as production.rb.rpmnew) and the options.yml. Do not forget to rename production.rb.rpmnew to production.rb. 3) Database migration # cd /srv/www/obs/api/ # RAILS_ENV="production" rails db:migrate # chown -R wwwrun.www log tmp # cd /srv/www/obs/webui/ # RAILS_ENV="production" rails db:migrate # chown -R wwwrun.www log tmp 4) Restart memcached after database migration to wipe possibly broken cache data. For Updaters to OBS 2.3 from OBS 2.1 ==================================== 1) Database migration # cd /srv/www/obs/api/ # RAILS_ENV="production" rails db:migrate # chown -R wwwrun.www log tmp # cd /srv/www/obs/webui/ # RAILS_ENV="production" rails db:migrate # chown -R wwwrun.www log tmp 2) Switch from lighttpd to apache2 The default httpd is apache2 since OBS 2.3. lighttpd should still work, but we recommend switching to apache to get a maintained base and load optimizations. Please read section 3 of the README.SETUP file to learn how to configure apache. For Updaters to OBS 2.1 from OBS 2.0 ==================================== 1) Database migration # cd /srv/www/obs/api/ # RAILS_ENV="production" rails db:migrate # chown -R lighttpd.lighttpd log # cd /srv/www/obs/webui/ # RAILS_ENV="production" rails db:migrate # chown -R lighttpd.lighttpd log 2) Build the backend source database On the system where bs_srcserver is running, you need to build the source database to get an index of existing linked packages.
 # su -c "bs_admin --update-source-db"

3) OPTIONAL step

It is recommended to use a MySQL database for the OBS webui from now on.
The default has changed away from sqlite, but sqlite does still work,
except for occasional connect errors between lighttpd/ruby and sqlite.

To set up the MySQL webui database instead of a sqlite one, do the
following steps:

* Create the database:

 # mysql -u root -p
 mysql> create database webui_production;
 mysql> GRANT all privileges ON webui_production.* TO 'obs'@'%', 'obs'@'localhost';
 mysql> FLUSH PRIVILEGES;
 mysql> quit

* Configure your MySQL user and password in the "production:" section of
  the webui config: /srv/www/obs/webui/config/database.yml
  A template for this file can be found in the same directory as
  "database.yml.example".

* Populate the database

 # cd /srv/www/obs/webui/
 # sudo RAILS_ENV="production" rails db:setup
 # sudo chown lighttpd.lighttpd log/*

For Updaters to OBS 2.0 from OBS 1.7
====================================

NOTE: Do not update from the special MeeGo 1.8 release yet. You would miss
features and run into database migration errors. You need to wait for
OBS 2.1.

After running the package update to the obs-* 2.0 packages, you need to do
the following steps manually:

1) Database migration

 # cd /srv/www/obs/api/
 # RAILS_ENV="production" rails db:migrate
 # chown -R lighttpd.lighttpd log
 # cd /srv/www/obs/webui/
 # RAILS_ENV="production" rails db:migrate
 # chown -R lighttpd.lighttpd log

2) Default distribution configuration

The default targets are now defined in one place on the api. Create the
default config via

 # cd /srv/www/obs/api/files/
 # cp distributions.xml.template distributions.xml

3) Restart all services. The easiest way may be to restart your entire
system.

For Updaters from OBS 1.6
=========================

1) Rename of frontend and webclient

- The former "frontend" has been renamed to "api"
- The former "webclient" has been renamed to "webui"

Following this change quite a number of places have been adapted, for
example the directory names below /srv/www/obs. The packages should handle
this rename cleanly. Your previously used database configuration, most
likely the "frontend_production" database, is not touched by this; the
server will still use it.

2) Required rails migrate update

When the obs-api package has been updated, it is required to upgrade the
database. Therefore, you should do

 # cd /srv/www/obs/api/
 # RAILS_ENV="production" rails db:migrate

and

 # cd /srv/www/obs/webui/
 # RAILS_ENV="production" rails db:migrate

3) Enable the new services

In case you use package signing, you need to run the new bs_signer
process. This has been split out of bs_repserver for better scalability.

 # insserv obssigner
 # rcobs_signer start

You may also want to run the new bs_warden daemon; it monitors the build
hosts and restarts jobs in case a build host is not responding correctly
anymore:

 # insserv obswarden
 # rcobswarden start

The experimental new obsservice daemon can optionally be started and be
used for automatic file downloads (or own source services):

 # insserv obsservice
 # rcobsservice start

To update api and webui information, you need to start the following
processes:

 # insserv obsapidelayed
 # rcobsapidelayed start

By default, you should *NOT* start the following services. These are
intended to autoconfigure services on the OBS Appliance image based on
existing LVM volumes and your local network setup. They will replace local
configuration files on each boot:

 # obsstoragesetup
 # obsapisetup
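If one of these got enabled by accident on a regular installation, it can
be taken out of the runlevels again; a sketch using the sysvinit tooling
of that generation:

 # insserv -r obsstoragesetup
 # insserv -r obsapisetup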
For Updaters from OBS 0.9.x releases
====================================

1) Define download URL in rails config.

The newly introduced "DOWNLOAD_URL" needs to be defined in your settings
below the /srv/www/obs/webui/config/environments/ directory. If you do not
have a download server, simply define

   DOWNLOAD_URL = nil

in your configuration.

2) Recommended rails migrate update

A rails migrate is recommended, but not necessary. It would reimport
buginfo flags from the backend to fix previously broken usage in the api.
When the obs-api package has been updated, it might be required to upgrade
the database. Therefore, you should do

 # cd /srv/www/obs/api/
 # RAILS_ENV="production" rails db:migrate

after updating obs-api.

Updater from before 0.9 release
===============================

Updaters of build server versions before 0.9 need to rename their
databases from "development" to "production". This can be performed by the
following steps:

 # mysql -u root -p
 mysql> create database api_production;
 mysql> create database webui_production;
 mysql> quit
 # mysqldump -p api_development > DUMP
 # mysql -u root -p api_production < DUMP
 # mysqldump -p webui_development > DUMP
 # mysql -u root -p webui_production < DUMP

All updaters need to update their database schema in any case by running
the following commands:

 # cd /srv/www/obs/api/
 # RAILS_ENV="production" rails db:migrate
open-build-service-2.9.4/dist/README.devel000066400000000000000000000002341332555733200201520ustar00rootroot00000000000000This package does not contain any development files. But it helps you
start with git development - look at
http://github.com/opensuse/open-build-service
open-build-service-2.9.4/dist/ReleaseNotes-2.8000066400000000000000000000066651332555733200210300ustar00rootroot00000000000000
#
# Open Build Service 2.8
#

WARNING:
WARNING: This is a development release, not for production usage!
WARNING:

Please read the README.SETUP file for initial installation instructions,
or use the OBS Appliance from http://openbuildservice.org/download/
There is also an install medium which installs OBS on your hard disk now.

The dist/README.UPDATERS file has information for updaters.

OBS Appliance users who have set up their LVM can just replace their
appliance image without data loss. The migration will happen
automatically.

Features
========

UI:
* Allow triggering services from the UI.
* Show a hint to project maintainers when they are not a package
  maintainer of the target package of a request
* The main projects list is now filtered based on a regular expression
  that is configurable by the admin
* Users can download the public key and SSL certificate for a project via
  the project home page
* Import of kiwi build descriptions is supported (obs-service-kiwi_import)

API:
* Allow admins to lock or delete users and their home projects via a new
  command
* Users can be declared as sub accounts of other users. Useful for
  automated scripts.
* New API route to get public key and SSL certificate:
  GET /source/:project_name/_keyinfo
* New feature toggle config file. Use config/feature.yml to enable/disable
  features in the OBS.
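For example, the new keyinfo route can be queried with osc; a sketch, with
openSUSE:Factory standing in for a real project name:

   osc api /source/openSUSE:Factory/_keyinfo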
Backend:
* multibuild: allows building multiple jobs from one source package
  without the need to create local links
* experimental support of the snap package format
* workers are now also tracked when they go away (new states "down",
  "away" and "dead")
* worker capabilities can be requested
* usable workers can be requested with uncommitted constraints
* functionality to remove published packages (osc unpublish)
* New obsservicedispatch service to handle source service runs in a queue
  and asynchronously.
* preinstall images can be used for local building
* improved speed of diffing sources
* Support caching of pulled git sources

Shipment:
* an optional docker container to run source services is provided

Wanted changes:
===============
* kiwi builds: build configuration changes from the project where the kiwi
  file is stored always have an effect now.
* maintenance_release requests are locking only the source packages on
  creation now. They don't lock the patchinfos. The project gets locked on
  release now.
* the service wrapper script for LXC got replaced by a docker alternative

Other changes
=============
* Server-side pagination on the user show page to improve performance.
* The way to identify spiders got changed. A separate configuration via
  apache is no longer required. See the Administration Guide.
* The frontend stack is using ruby 2.4 and rails 5.0.1 now

Notes for systems using systemd:
================================

OBS is using init scripts, not systemd style .service files. This will
stay until we switch from SLES 11 to SLES 12 as the default production
platform.

openSUSE installations may use systemd (instead of sysvinit) and it should
work in general. What does not work are invocations like

 # rcobssrcserver status

as you will only get the systemd status here. Stopping services may also
not kill all processes, which leads to obscure errors when trying to
restart them. We have also heard about trashed MySQL databases when using
systemd.
To avoid these problems you need switch directory to avoid the systemd mapper: # cd /etc/init.d # ./obssrcserver status|stop|start open-build-service-2.9.4/dist/aws_credentials.example000066400000000000000000000001261332555733200227200ustar00rootroot00000000000000[default] aws_access_key_id = ACCESS_KEY_ID aws_secret_access_key = SECRET_ACCESS_KEY open-build-service-2.9.4/dist/bugz-login000077500000000000000000000054531332555733200202070ustar00rootroot00000000000000#!/usr/bin/python # log into bugzilla and write bugz cookie file import sys, os def update_bugz_cookie(cookie): """overwrites ~/.bugz_cookie file with bugzilla cookie in LWP format""" jar = os.path.expanduser('~/.bugz_cookie') jarfd = open(jar, 'w') os.chmod(jar, 0600) jarfd.write("""#LWP-Cookies-1.0 Set-Cookie3: IPCZQX018ef15359=%s; path="/"; domain=.novell.com; expires="2010-10-10 12:34:56" """ % cookie.split()[2]) jarfd.close() def get_cookie(): import httplib import getpass from urllib import urlencode from urlparse import urlparse, urljoin import os usernamefile = os.path.expanduser('~/.bzuser') if os.path.isfile(usernamefile): username = open(usernamefile).readlines()[0] else: username = getpass.getuser() print >>sys.stderr, 'Password: ', password = getpass.getpass(prompt='') url_base, url_rel = 'https://bugzilla.novell.com', '/ICSLogin/' target_url = 'https://bugzilla.novell.com/ichainlogin.cgi?target=index.cgi' params = {'url': target_url, 'context': 'default', 'message': 'Please log In', 'proxypath': 'reverse', 'username': username, 'password': password, } data = urlencode(params) host = urlparse(url_base)[1] #host = 'aust.suse.de' h = httplib.HTTPS(host) #h.set_debuglevel(1) h.putrequest('POST', url_rel) h.putheader('User-agent', 'python-httplib 1.0') h.putheader('Host', host) h.putheader('Content-Length', str(len(data))) h.putheader('Content-Type', 'application/x-www-form-urlencoded') h.endheaders() h.send(data) errcode, errmsg, headers = h.getreply() #print >>sys.stderr, 'errcode: ', errcode #print >>sys.stderr, 'headers: ', headers #h.close() if errcode == 302: if not headers.has_key('set-cookie'): print >>sys.stderr, 'no cookie received...' return None else: c = headers['set-cookie'] c = c.split('; ') cookie = '\t'.join([urljoin(url_base, url_rel), c[0].split('=')[0], c[0].split('=')[1], '1877472000', c[2].split('=')[1], c[1].split('=')[1], '9', '0', ]) return cookie else: print >>sys.stderr, 'could not log in' print >>sys.stderr, errcode print >>sys.stderr, errmsg print >>sys.stderr, headers if headers: print >>sys.stderr, headers.has_key('set-cookie') return None if __name__ == '__main__': cookie = get_cookie() if not cookie: sys.exit(1) else: update_bugz_cookie(cookie) sys.exit(0) open-build-service-2.9.4/dist/ci/000077500000000000000000000000001332555733200165705ustar00rootroot00000000000000open-build-service-2.9.4/dist/ci/travis_after_failure.sh000077500000000000000000000020071332555733200233260ustar00rootroot00000000000000#!/bin/bash if [ -z "$ARTIFACTS_KEY" ] || [ -z "$ARTIFACTS_SECRET" ]; then echo 'No AWS secrets set...' 
exit 0 fi pushd src/api function upload_to_s3 { dateValue=`date -R` stringToSign="PUT\n\ntext/plain\n${dateValue}\n/obs-travis-articafts/$TRAVIS_BUILD_NUMBER/$TRAVIS_JOB_NUMBER/$1" signature=`echo -en ${stringToSign} | openssl sha1 -hmac ${ARTIFACTS_SECRET} -binary | base64` curl -X PUT -T $1 \ -H "Host: obs-travis-articafts.s3.amazonaws.com" \ -H "Date: ${dateValue}" \ -H "Content-Type: text/plain" \ -H "Authorization: AWS ${ARTIFACTS_KEY}:${signature}" \ https://obs-travis-articafts.s3.amazonaws.com/$TRAVIS_BUILD_NUMBER/$TRAVIS_JOB_NUMBER/$1 } if [ -f log/test.log ]; then upload_to_s3 log/test.log echo "Posted: https://obs-travis-articafts.s3.amazonaws.com/$TRAVIS_BUILD_NUMBER/$TRAVIS_JOB_NUMBER/log/test.log" fi for file in tmp/capybara/*; do upload_to_s3 $file echo "Posted: https://obs-travis-articafts.s3.amazonaws.com/$TRAVIS_BUILD_NUMBER/$TRAVIS_JOB_NUMBER/tmp/capybara/$file" done open-build-service-2.9.4/dist/ci/travis_before_install.sh000077500000000000000000000020171332555733200235070ustar00rootroot00000000000000#!/bin/bash # This script installs dependencies for the CI build # Be verbose and fail script on the first error set -xe sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys C5C219E7 # Install updates from our own repository sudo chmod a+w /etc/apt/sources.list.d echo 'deb http://download.opensuse.org/repositories/OBS:/Server:/Unstable/xUbuntu_14.04 /' > /etc/apt/sources.list.d/opensuse.list # We could use this to only update the package list from the OBS, # but apprently this is not possible anymore. So we update all package lists. # sudo apt-get update -o APT::Get::List-Cleanup "false" -o Dir::Etc::sourcelist "/etc/apt/sources.list.d/opensuse.list" -o Dir::Etc::sourceparts ""; sudo apt-get update # Install the dependencies of the backend sudo apt-get install --force-yes travis-deps libxml-parser-perl libfile-sync-perl python-rpm python-urlgrabber python-sqlitecachec python-libxml2 createrepo=0.9.9 libbssolv-perl sphinxsearch libjson-xs-perl libxml-simple-perl libgd-gd2-perl libdevel-cover-perl libxml2-utils open-build-service-2.9.4/dist/ci/travis_before_script.sh000077500000000000000000000023671332555733200233550ustar00rootroot00000000000000#!/bin/bash # This script prepares the CI build for running echo "Configuring backend" sed -i -e "s|my \$hostname = .*$|my \$hostname = 'localhost';|" \ -e "s|our \$bsuser = 'obsrun';|our \$bsuser = 'jenkins';|" \ -e "s|our \$bsgroup = 'obsrun';|our \$bsgroup = 'jenkins';|" src/backend/BSConfig.pm.template cp src/backend/BSConfig.pm.template src/backend/BSConfig.pm chmod a+x src/api/script/start_test_backend echo "Configuring git-cop" mkdir -p ~/.config/git-cop cp dist/git-cop_configuration.yml ~/.config/git-cop/configuration.yml pushd src/api echo "Creating database" mysql -e 'create database ci_api_test;' echo "Configuring database" cp config/database.yml.example config/database.yml sed -e 's,password:.*,password:,' -i config/database.yml sed -i "s|database: api|database: ci_api|" config/database.yml echo "Configuring frontend" cp config/options.yml.example config/options.yml cp config/thinking_sphinx.yml.example config/thinking_sphinx.yml echo "Initialize database" bundle exec rails db:drop db:create db:setup --trace # Stuff # Clear temp data rm -rf log/* tmp/cache tmp/sessions tmp/sockets popd # travis rvm can not deal with our extended executable names sed -i 1,1s,\.ruby2\.5,, src/api/{script,bin}/* docs/api/restility/bin/* 
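# e.g. this rewrites a shebang like "#!/usr/bin/ruby.ruby2.5" to "#!/usr/bin/ruby"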
open-build-service-2.9.4/dist/ci/travis_script.sh000077500000000000000000000022441332555733200220250ustar00rootroot00000000000000#!/bin/bash # This script runs the test suites for the CI build # Be verbose and fail script on the first error set -xe # Everything happens here pushd src/api if test -z "$SUBTEST"; then export DO_COVERAGE=1 export TESTOPTS="-v" case $TEST_SUITE in api) bundle exec rails assets:precompile &> /dev/null perl -pi -e 's/source_port: 5352/source_port: 3200/' config/options.yml bundle exec rails test:api ;; spider) unset DO_COVERAGE bundle exec rails assets:precompile &> /dev/null perl -pi -e 's/source_port: 5352/source_port: 3200/' config/options.yml bundle exec rails test:spider ;; linter) bundle exec rake db:structure:verify bundle exec rake db:structure:verify_no_bigint make -C ../../ rubocop bundle exec rake haml_lint jshint . ;; rspec) perl -pi -e 's/source_host: localhost/source_host: backend/' config/options.yml bundle exec rails assets:precompile &> /dev/null bundle exec rspec ;; backend) pushd ../backend make test_unit ;; *) echo "ERROR: test suite is not matching" exit 1 ;; esac fi open-build-service-2.9.4/dist/cleanup_scm_cache.cron000066400000000000000000000002111332555733200224660ustar00rootroot00000000000000# /etc/cron.d/cleanup_scm_cache 33 * * * * root /usr/lib/obs/server/cleanup_scm_cache --log=/srv/obs/service/log/cleanup_scm_cache.log open-build-service-2.9.4/dist/cleanurl-v5.lua000066400000000000000000000017621332555733200210430ustar00rootroot00000000000000-- little helper function function file_exists(path) local attr = lighty.stat(path) if (attr and attr["is_file"]) then return true else return false end end -- the magic ;) if (not file_exists(lighty.env["physical.path"])) then -- file does not exist. check if we have a cached version lighty.env["physical.path"] = lighty.env["physical.path"] .. ".html" if (not file_exists(lighty.env["physical.path"])) then -- file still missing. pass it to the fastcgi backend lighty.env["uri.path"] = "/dispatch.fcgi" lighty.env["physical.rel-path"] = lighty.env["uri.path"] lighty.env["request.orig-uri"] = lighty.env["request.uri"] lighty.env["physical.path"] = lighty.env["physical.doc-root"] .. lighty.env["physical.rel-path"] end end -- fallthrough will put it back into the lighty request loop -- that means we get the 304 handling for free. ;) -- debugging code -- print ("final file is " .. lighty.env["physical.path"]) open-build-service-2.9.4/dist/clouduploader.rb000066400000000000000000000052441332555733200213710ustar00rootroot00000000000000#!/usr/bin/ruby require 'fileutils' require 'json' require 'ostruct' require 'open3' start = Time.now THIRTY_MINUTES = 1800 HOME = '/etc/obs/cloudupload'.freeze ENV['HOME'] = "/etc/obs/cloudupload" ENV['PYTHONUNBUFFERED'] = '1' STDOUT.sync = true if ARGV.length != 6 raise 'Wrong number of arguments, please provide: user platform upload_file targetdata filename result_path' end platform = ARGV[1] image_path = ARGV[2] data_path = ARGV[3] filename = ARGV[4] result_path = ARGV[5] data = JSON.parse(File.read(data_path)) # link file into working directory FileUtils.ln_s(image_path, File.join(Dir.pwd, filename)) def get_ec2_credentials(data) command = [ 'aws', 'sts', 'assume-role', "--role-arn=#{data['arn']}", "--external-id=#{data['external_id']}", '--role-session-name=obs', "--duration-seconds=#{THIRTY_MINUTES}" ] # Credentials are stored in ~/.aws/credentials out, err, status = Open3.capture3(*command) if status.success? 
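    # assume-role succeeded; the temporary credentials arrive as JSON on stdout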
STDOUT.write("Successfully authenticated.\n") json = JSON.parse(out) OpenStruct.new( access_key_id: json['Credentials']['AccessKeyId'], secret_access_key: json['Credentials']['SecretAccessKey'], session_token: json['Credentials']['SessionToken'] ) else abort(err) end end def upload_image_to_ec2(image, data, result_path) STDOUT.write("Start uploading image #{image}.\n") credentials = get_ec2_credentials(data) command = [ 'ec2uploadimg', "--description='obs uploader'", '--machine=x86_64', "--name=#{data['ami_name']}", "--region=#{data['region']}", "--secret-key=#{credentials.secret_access_key}", "--access-id=#{credentials.access_key_id}", "--session-token=#{credentials.session_token}", '--verbose' ] if data['vpc_subnet_id'] command << "--vpc-subnet-id=#{data['vpc_subnet_id']}" end command << image Open3.popen2e(*command) do |_stdin, stdout_stderr, wait_thr| Signal.trap("TERM") { # We just omit the SIGTERM because otherwise we would not get logs from ec2uploadimg STDOUT.write("Received abort signal, waiting for ec2uploadimg to properly clean up.\n") } while line = stdout_stderr.gets STDOUT.write(line) write_result($1, result_path) if line =~ /^Created\simage:\s+(ami-[\w]+)$/ end status = wait_thr.value abort unless status.success? end end def write_result(result, result_path) File.open(result_path, "w+") { |file| file.write(result) } end if platform == 'ec2' upload_image_to_ec2(filename, data, result_path) else abort('No valid platform. Valid platforms is ec2.') end diff = Time.now - start STDOUT.write("Upload took: #{Time.at(diff).utc.strftime("%H:%M:%S")}") open-build-service-2.9.4/dist/distribute000077500000000000000000000226031332555733200203040ustar00rootroot00000000000000#!/bin/bash # copyright (C) 2007, Novell Inc. # License: GPL v2 or later # Authors: Susanne Oberhauser froh@novell.com # Author: Martin Mohring martin.mohring@opensuse.org usage="${0} [--help] Prepare and update an open build server package to the current repository code level. \$PWD is a directory with all files that need to go to the package. The distribution and build server specifics are read from ./.distrc LASTSTEP= one of checkout create a local osc working directory like this osc \${OSCOPTS} co \${PROJECT} \${PACKAGE} tar update the tar ball _unless_ MAINTENANCE is set currently works only with svn. creates a tar ball like this: \${PACKAGE}-\$(svnversion \${SVNDIR//:/_}).tar.bz2 reference that from the spec file like this: %define svnversion updated_by_script Source: %{name}-%{svnversion}.tar.bz2 diff compare \$PWD with what is in obs update update the obs working directory with what is in \$PWD localbuild build on the local machine, like this: 'osc \${OSCOPTS} build \\ \${BUILDOPTS} \${PROJECT} \${PACKAGE} \${ARCH}' upload 'osc \${OSCOPTS} ci' to the build server --help display this message" ################################################################### # be scrupulous set -o nounset set -o errexit set -o pipefail function verbose { ${VERBOSE:+$*} } function debug { ${DEBUG:+$*} } debug verbose set -x ################################################################### source ./.distrc # check the environment : ${PROJECT:?PROJECT unset. set in .distrc or environment.} : ${PACKAGE:?PACKAGE unset. set in .distrc or environment.} : ${TARGET:?TARGET unset. set in .distrc or environment.} : ${ARCH:=i586} : ${SPECFILE:=*.spec} : ${TARNAME:=$PACKAGE} : ${SVNDIR:?SVNDIR unset. 
set in .distrc or environment.} : ${BUILDOPTS:=} : ${OSCOPTS:=} : ${MAINTENANCE:=} : ${EXCLUDES:=} # be smart: the following files are hardly ever part of a package. # after checkpout, there will also be a directory with the project name in $PWD : ${CANONICAL_EXCLUDES:=svn-commit*.tmp \#*\# .\#* distribute .distrc *~ .*~ .svn .osc RCS .cvs . .. $PROJECT} : ${LASTSTEP=localbuild} ################################################################### # command line # At this time, there is no command line arguments, if there is one, # give help on usage, then exit. if [ $# != 0 ] then echo "$usage Environment: " for var in \ PROJECT PACKAGE TARGET ARCH SPECFILE \ EXCLUDES CANONICAL_EXCLUDES \ LASTSTEP OSCOPTS BUILDOPTS \ MAINTENANCE \ DEBUG VERBOSE do echo "$var=${!var:-null or unset}" done exit 1 fi ################################################################### # check what needs to be done # defaults, something means yes, null means no : ${DO_CHECKOUT:=no} : ${DO_TAR:=yes} : ${DO_DIFF:=yes} : ${DO_ASK=yes} : ${DO_UPDATE:=no} : ${DO_BUILD:=no} : ${DO_CHECKIN:=no} case $LASTSTEP in checkout) DO_CHECKOUT=y unset DO_DIFF DO_TAR DO_ASK DO_UPDATE DO_BUILD DO_CHECKIN ;; tar) # self detect wether checkout is needed DO_TAR=y unset DO_UPDATE DO_ASK DO_BUILD DO_CHECKIN ;; diff) # self detect wether checkout is needed DO_TAR=y DO_DIFF=y unset DO_UPDATE DO_ASK DO_BUILD DO_CHECKIN ;; update) # self detect wether checkout is needed DO_TAR=y DO_DIFF=y DO_ASK=y #default is ask DO_UPDATE=y # leave DO_ASK allone here, needs to be unset explicitely unset DO_BUILD DO_CHECKIN ;; localbuild) # self detect wether checkout is needed DO_TAR=y DO_DIFF=y DO_ASK=y DO_UPDATE=y DO_BUILD=y unset DO_CHECKIN ;; upload) # self detect wether checkout is needed DO_TAR=y DO_DIFF=y DO_ASK=y DO_UPDATE=y DO_BUILD=y DO_CHECKIN=y ;; *) echo "unknown LASTSTEP" exit 1 esac # make sure no means no: set 'no' to null for var in DO_CHECKOUT DO_DIFF DO_ASK DO_UPDATE DO_BUILD DO_CHECKIN do case X${!var:-} in Xy|XY|Xyes|XYes|XYES) eval $var=yes ;; Xn|XN|Xno|XNo|XNO|X) eval $var= ;; *) echo "please set $var to (y|Y|yes|Yes|YES) or (n|N|no|No|NO|). Current value: <${!var}>" exit 1 ;; esac done export DO_CHECKOUT DO_DIFF DO_ASK DO_UPDATE DO_BUILD DO_CHECKIN ################################################################### # do what needs to be done ################################################################### # checkout if the package is not there if test ! -d "${PROJECT}/${PACKAGE}/.osc"; then DO_CHECKOUT=true fi if [ ${DO_CHECKOUT} ]; then osc ${OSCOPTS} co "${PROJECT}" "${PACKAGE}" fi ################################################################### # update the tar ball # TODO: check oscupstream for this if [ ${MAINTENANCE} ]; then echo "MAINTENANCE mode, skipping tar ball update." fi if [ ${DO_TAR} -a ! ${MAINTENANCE} ]; then # Actually the method to create the tar ball and figure the # version information depends on the SCM and the working style of # the project. # TODO: I think factoring this into a helper function that creates # the tar ball and echoes the revision, and then have just one # variable in .distrc that controls the export method will be a # smart thing to do. # Could also do a svn export and parse the revsion from the # output. # For the time being: create the tar ball from a checked out # working copy. SVNVERSION=$( svnversion ${SVNDIR} ) # Get rid of the colons in favour of underbars: Svnversion can # give a REV1:REV2 label. tar then interprets REV1 as hostname to # connect to. 
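# Replacing the colon avoids that; e.g. a mixed revision label "1234:1236M"
# becomes "1234_1236M".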
SVNVERSION=${SVNVERSION//:/_} FULL_VERSION=${VERSION:+$VERSION.}${SVNVERSION} : ${TARFILE:=${TARNAME}-${VERSION}.${SVNVERSION}} if [ ! -f ${TARFILE}.tar.bz2 ] ; then # add the tar file to the file system, so svn status knows about it. touch ${TARFILE}.tar.bz2 tar cvjf ${TARFILE}.tar.bz2 \ --exclude-from <( svn status --no-ignore ${SVNDIR} | grep -E "^(\?|I)" | cut -c8-) \ --exclude=".svn" \ ${SVNDIR} fi # remove old tar balls if they still are there. comm -23 \ <( ls -1 ${TARNAME}-*.tar.bz2 | sed -e"s,${TARNAME}-\(.*\)\.tar\.bz2,\1," | sort -u ) \ <( echo $FULL_VERSION ) | while read ver; do # Don't ask. It will annoy soon. rm -v ${TARNAME}-${ver}.tar.bz2 done # The version information in the specfile is only updated in the # copy in the osc build directory. This allows the tar ball to # contain a locally unmodified file fi ################################################################### # check the package file list EXCLUDES=$( echo ${EXCLUDES} ${CANONICAL_EXCLUDES} | tr --squeeze "[:blank:]" "\n" | sort -u ) debug echo EXCLUDES="${EXCLUDES}" PACKAGE_FILES=$( comm -13 <(echo "${EXCLUDES}") <(ls -1d * .*) ) debug echo PACKAGE_FILES="$PACKAGE_FILES" DEPRECATED_FILES=$( debug set -x; comm -13 \ <( cat <( echo "$PACKAGE_FILES" ) <( echo "${CANONICAL_EXCLUDES}") | tr --squeeze "[:blank:]" "\n" | sort -u ) \ <( cat <( find "${PROJECT}/${PACKAGE}" -maxdepth 1 -type f -printf "%f\n" ) <( echo "$EXCLUDES" ) | tr --squeeze "[:blank:]" "\n" | sort -u ) ) debug echo DEPRECATED_FILES="$DEPRECATED_FILES" ################################################################### # compare what's in svn to what is in the osc working copy if [ ${DO_DIFF} ]; then echo "You are about to update the following files in the osc working copy:" for f in $PACKAGE_FILES; do # ignore errors from diff. diff -Nu "${PROJECT}/${PACKAGE}/$f" $f || true done; echo "You are about to remove the following files from the osc working copy:" for f in $DEPRECATED_FILES; do test ! -f "${PROJECT}/${PACKAGE}/$f" || diff -Nu "${PROJECT}/${PACKAGE}/$f" $f || true done fi if [ ${DO_ASK} ]; then echo "Update the local working copy? Interrupt (^c) to stop here. Hit RETURN to update the local working copy." read fi ################################################################### # update the osc working copy if [ ${DO_UPDATE} ]; then for f in $PACKAGE_FILES; do if \ test -f $f && ( ! test -f ${PROJECT}/${PACKAGE}/$f || ! cmp --quiet "$f" "${PROJECT}/${PACKAGE}/$f" ) then cp -vdpP "$f" "${PROJECT}/${PACKAGE}/$f" fi done pushd ${PROJECT}/${PACKAGE} if [ "${DEPRECATED_FILES}" ] ; then rm -vf $DEPRECATED_FILES ; fi # update version information in the specfile unless in maintenance # mode for spf in ${SPECFILE} do if [ ! ${MAINTENANCE} ] then sed --in-place -e "s/^%define svnversion updated_by_script.*/%define svnversion ${FULL_VERSION}/" ${spf} fi done popd fi ################################################################### # build the package and check in pushd "${PROJECT}/${PACKAGE}" if [ ${DO_BUILD} ]; then for spec in ${SPECFILE} do osc ${OSCOPTS} build ${BUILDOPTS} "${TARGET}" "${ARCH}" "${spec}" # TODO: somehow smartly deal with the results of different # builds in case of several spec files. Currently the last # one survives, the other ones are lost. Only relevant in # case of several spec files in one package done fi if [ ${DO_CHECKIN} ]; then if [ ${DO_ASK} ]; then echo "Check in to the build server? Interrupt (^c) to stop here. Hit RETURN to check in to the build server." 
read fi osc ${OSCOPTS} ci fi popd # EOF open-build-service-2.9.4/dist/ec2utils.conf.example000066400000000000000000000112111332555733200222240ustar00rootroot00000000000000# One can configure as many accounts as one chooses. Configured accounts # start with the "account-" prefix. Account information is retrieved by any # of the ec2utils tools by combining the prefix "account-" with the name # specified with the --account command line option # # For example the information for the "account-servers" configuration is # used by specifying --account servers on the command line of any of the # tools [account-servers] access_key_id = secret_access_key = ssh_key_name = ssh_private_key = subnet_id_us-east-1 = subnet-123456 # The aki IDs are published by Amazon on the following page: # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html#AmazonKernelImageIDs # The ami, instance_type, and user are needed for those utilities that require # a running instance in Amazon EC2 to perform their operations. [region-ap-northeast-1] ami = ami-383c1956 instance_type = t2.micro aki_i386 = aki-136bf512 aki_x86_64 = aki-176bf516 g2_aki_x86_64 = aki-f1ad9bf0 user = ec2-user # Allow a region to overwrite the account key # ssk_key_name = # ssh_private_key = [region-ap-northeast-2] ami = ami-249b554a instance_type = t2.micro aki_i386 = aki-01a66b6f aki_x86_64 = aki-01a66b6f g2_aki_x86_64 = aki-0ea66b60 user = ec2-user # Allow a region to overwrite the account key # ssk_key_name = # ssh_private_key = [region-ap-southeast-1] ami = ami-c9b572aa instance_type = t2.micro aki_i386 = aki-ae3973fc aki_x86_64 = aki-503e7402 g2_aki_x86_64 = aki-ca755498 user = ec2-user # Allow a region to overwrite the account key # ssk_key_name = # ssh_private_key = [region-ap-southeast-2] ami = ami-48d38c2b instance_type = t2.micro aki_i386 = aki-cd62fff7 aki_x86_64 = aki-c362fff9 g2_aki_x86_64 = aki-8faec3b5 user = ec2-user # Allow a region to overwrite the account key # ssk_key_name = # ssh_private_key = [region-cn-north-1] ami = ami-bcc45885 instance_type = t2.micro aki_i386 = aki-908f1da9 aki_x86_64 = aki-9e8f1da7 g2_aki_x86_64 = aki-9c4ad8a5 user = ec2-user # Allow a region to overwrite the account key # ssk_key_name = # ssh_private_key = [region-eu-central-1] ami = ami-bc5b48d0 instance_type = t2.micro aki_i386 = aki-3e4c7a23 aki_x86_64 = aki-184c7a05 g2_aki_x86_64 = aki-e23f09ff user = ec2-user # Allow a region to overwrite the account key # ssk_key_name = # ssh_private_key = [region-eu-west-1] ami = ami-bff32ccc instance_type = t2.micro aki_i386 = aki-68a3451f aki_x86_64 = aki-52a34525 g2_aki_x86_64 = aki-fce8478b user = ec2-user # Allow a region to overwrite the account key # ssk_key_name = # ssh_private_key = [region-sa-east-1] ami = ami-6817af04 instance_type = t2.micro aki_i386 = aki-5b53f446 aki_x86_64 = aki-5553f448 g2_aki_x86_64 = aki-b99024a4 user = ec2-user # Allow a region to overwrite the account key # ssk_key_name = # ssh_private_key = [region-us-east-1] #Amazon Linux #ami = ami-4b814f22 # SLES 12 ami = ami-60b6c60a instance_type = t2.micro aki_i386 = aki-8f9dcae6 aki_x86_64 = aki-919dcaf8 g2_aki_x86_64 = aki-f4bc469c user = ec2-user backing-store = mag,ssd # Allow a region to overwrite the account key # ssk_key_name = # ssh_private_key = [region-us-east-2] ami = ami-71ca9114 instance_type = t2.micro aki_i386 = aki-da055ebf aki_x86_64 = aki-d83a61bd user = ec2-user backing-store = ssd # Allow a region to overwrite the account key # ssk_key_name = # ssh_private_key = [region-us-gov-west-1] ami = 
ami-c2b5d7e1 instance_type = t2.micro aki_i386 = aki-1fe98d3c aki_x86_64 = aki-1de98d3e g2_aki_x86_64 = aki-07026424 user = ec2-user # Allow a region to overwrite the account key # ssk_key_name = # ssh_private_key = [region-us-west-1] ami = ami-d5ea86b5 instance_type = t2.micro aki_i386 = aki-8e0531cb aki_x86_64 = aki-880531cd g2_aki_x86_64 = aki-f9786dbc user = ec2-user # Allow a region to overwrite the account key # ssk_key_name = # ssh_private_key = [region-us-west-2] ami = ami-f0091d91 instance_type = t2.micro aki_i386 = aki-f08f11c0 aki_x86_64 = aki-fc8f11cc g2_aki_x86_64 = aki-5f125d6f user = ec2-user # Allow a region to overwrite the account key # ssk_key_name = # ssh_private_key = [region-ca-central-1] ami = ami-a954d1cd instance_type = t2.micro user = ec2-user # Allow a region to overwrite the account key # ssk_key_name = # ssh_private_key = [region-eu-west-2] ami = ami-403e2524 instance_type = t2.micro user = ec2-user # Allow a region to overwrite the account key # ssk_key_name = # ssh_private_key = [region-eu-west-3] ami = ami-8ee056f3 instance_type = t2.micro user = ec2-user # Allow a region to overwrite the account key # ssk_key_name = # ssh_private_key = [region-ap-south-1] ami = ami-531a4c3c instance_type = t2.micro user = ec2-user # Allow a region to overwrite the account key # ssk_key_name = # ssh_private_key = open-build-service-2.9.4/dist/elastic_build_service_manager.py000066400000000000000000000552151332555733200245740ustar00rootroot00000000000000#!/usr/bin/python # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # # Copyright (C) 2011 Tieto Corporation # Contact information: Ville Seppanen, ville.seppanen@tieto.com # Updated: 2011-06-20 # Tested with: OBS 2.1.6, Boto 2.0b4, Python 2.6.5, openSUSE 11.3 # # # Elastic Build Service Manager # # This script is used to control the cloudbursting of an Elastic Build Service # that is based on openSUSE Build Service (OBS). The idea is to use virtual # machines from IaaS providers as build hosts. This script will fetch metrics # of virtual machines from Amazon Web Services (AWS) as well as build job # metrics from local OBS server. Based on these metrics this script will # decide whether to create or destroy virtual machines from AWS EC2. Running # this script may cost money. This is designed to run as a cron job every # minute or two. 
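# For example, a matching cron entry could look like this (a sketch; the
# install path and the /etc/cron.d file name are placeholders):
#
#   */2 * * * * root /usr/local/bin/elastic_build_service_manager.py --cron-mode
#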
# # # Prerequisites for successfully running this script: # - AWS account # - access to an Amazon Machine Image (AMI) that will on startup: # -- create a VPN tunnel to your OBS server network # -- start OBS worker(s) # - private installation of OBS # - Boto installed http://boto.cloudhackers.com # # Example .boto config: # [Credentials] # aws_access_key_id = AVTIOJERVAIOEJTIAOTVJ # aws_secret_access_key = a369840983a6n03a986bah34098g # [Boto] # debug = 1 # num_retries = 3 # https_validate_certificates = True # # Example .oscrc config: # [general] # # URL to access API server # apiurl = http://localhost:81 # # run osc to get these auto-generated for you # [http://localhost:81] # user=exampleuser # passx=yzerMZYRNrzyNYZRNYRnxdryXNDRYDXNRY # # Refer to OBS and boto documentation for more help. # # TO-DO LIST: # - fail nicely if AWS or OBS are unreachable or reject requests # - allow more customization info to be passed through (e.g. IP addresses) # - use libcloud to avoid AWS lock-in, use separate monitoring software # - lock the script to prevent multiple running at the same time import osc.conf # for OBS usage import osc.core # for OBS usage from xml.dom import minidom # for XML parsing from OBS API import boto.ec2 # Python library for EC2 access import boto.ec2.cloudwatch # for EC2 monitoring info import boto.utils from boto.exception import BotoServerError import time import datetime # for timestamps from pprint import pprint # for debug prints import sys # for arguments ### Configuration variables ################################################### # Path to log file (if you run this script manually, make sure you have rights # to write to the log as well) log_path = "/home/user/build_service_manager.log" # ID of the AWS Machine Image (AMI) that this script will boot. This image # should contain obs-worker that autoconnects to your OBS server on boot. aws_ami_image = "ami-47cefa33" # AWS keypair to use with the machines. This is needed for SSH connections to # the workers (which are needed for e.g. debugging). aws_keypair_name = "MyKeyPair" # Name of the AWS security group to use. No open ports is ok as your worker # should create a VPN outwards. aws_security_group = "MySecurityGroup" # Type of AWS instance to use. For the first year, t1.micro is free and good # for testing, but with only 600MB of RAM it fails bigger builds. # Good choices: t1.micro (for testing), m1.small, c1.medium aws_instance_type = "m1.small" # AWS region to use and CloudWatch (AWS monitoring) address to use aws_cloudwatch_url = "monitoring.eu-west-1.amazonaws.com" aws_region = "eu-west-1" # If you want more debugging info than what you get by adding debug=2 to the # .boto config file, uncomment the following line. # boto.set_stream_logger('debug') ### Function declarations ##################################################### # Starts a new VM instance in EC2. Parameter obs_worker_id is set as the # hostname and as EC2 instance metatag "Name" to identify build hosts. def run_instance(obs_worker_id): global print_only if not print_only: # WARNING: Do NOT start more than 1 instance at a time here! # Multilaunching is managed at higher level to create unique tags etc. 
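        # hence exactly one instance per call, so each build host gets its
        # own unique Name tag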
instances_min = 1 instances_max = 1 worker_customization = "#!/bin/sh\nhostname " + obs_worker_id +\ ";echo " + obs_worker_id + " > /etc/HOSTNAME;" global aws_ami_image global aws_keypair_name global aws_security_group global aws_instance_type reservation = ec2.run_instances( image_id = aws_ami_image, min_count = instances_min, max_count = instances_max, key_name = aws_keypair_name, security_groups = [aws_security_group], user_data = worker_customization, instance_type = aws_instance_type) #placement='eu-west-1b') instance = reservation.instances[0] ec2.create_tags([instance.id],{'Name':obs_worker_id}) global elastic_build_hosts elastic_build_hosts.append({'instance_name':obs_worker_id, 'instance_id':instance.id, 'instance_type':instance.instance_type, 'cpu':"-", 'disk':"-", 'instance_state':"SPAWNING", 'launch_time':0, 'time_left':60, 'ip':"-", 'workers':[]}) return # Fetches metrics from OBS and EC2 and saves the data to parameters # elastic_build_hosts and job_status. def fetch_metrics(elastic_build_hosts, job_status): global cloud_buildhost_prefix global local_buildhost_prefix global aws_cloudwatch_url global debug_status_string cw = boto.connect_cloudwatch(host=aws_cloudwatch_url,port=443) # Get EC2 metrics # returns a list of boto.ec2.instance.Reservation for machine in ec2.get_all_instances(): instance = machine.instances[0] # tags are not required and thus may not exist try: name = instance.tags['Name'] except Exception: name = "Unnamed" date_now = datetime.datetime.now() launch_time = datetime.datetime.strptime( instance.launch_time, "%Y-%m-%dT%H:%M:%S.000Z") time_left = (3600-(((date_now - launch_time).seconds)%3600))/60 cpu=-1 ip_address = "-" if instance.ip_address != None: ip_address = instance.ip_address # Fetch instance CPU utilization metrics from CloudWatch try: end_time = date_now start_time = end_time - datetime.timedelta(minutes=15) stats = cw.get_metric_statistics( 300, start_time, end_time, 'CPUUtilization', 'AWS/EC2', 'Average', {"InstanceId":instance.id}) # Find latest value from the history list latest_value = -1 latest_time = start_time for value in stats: if value['Timestamp'] > latest_time: latest_time = value['Timestamp'] latest_value = value['Average'] # Let's make sure only absolute zero is shown as zero if latest_value > 0 and latest_value < 1: cpu = 1 else: cpu = int(latest_value) except BotoServerError as serverErr: dbg(json.dumps({ "error": "Error retrieving CloudWatch metrics." 
})) if name.startswith(cloud_buildhost_prefix): elastic_build_hosts.append({'instance_name':name, 'instance_id':instance.id, 'instance_type':instance.instance_type, 'cpu':cpu, 'disk':-1, 'instance_state':instance.state, 'launch_time':launch_time, 'time_left':time_left, 'ip':ip_address, 'workers':[]}) # Get OBS metrics # Initialize osc configuration, API URL and credentials osc.conf.get_config() api = osc.conf.config['apiurl'] apitest = osc.core.http_GET(api + '/build/_workerstatus') dom = minidom.parseString(apitest.read()) # store all idle workers for node in dom.getElementsByTagName('idle'): build_host_ok = False # parse build host id from the worker id parsed_hostname = node.getAttribute('workerid')\ [0:node.getAttribute('workerid').find("/")] if parsed_hostname.startswith(cloud_buildhost_prefix): # try to find it from the list for build_host in elastic_build_hosts: if build_host['instance_name'] == parsed_hostname: build_host['workers'].append('IDLE') build_host_ok = True break elif parsed_hostname.startswith(local_buildhost_prefix): build_host_ok = True # ignore local build hosts debug_status_string += "." if not build_host_ok: dbg("WARN - Strange host " + parsed_hostname) # store all busy workers for node in dom.getElementsByTagName('building'): build_host_ok = False # parse build host id from the worker id parsed_hostname = node.getAttribute('workerid')\ [0:node.getAttribute('workerid').find("/")] if parsed_hostname.startswith(cloud_buildhost_prefix): for build_host in elastic_build_hosts: if build_host['instance_name'] == parsed_hostname: build_host['workers'].append(node.getAttribute('package')) build_host_ok = True break elif parsed_hostname.startswith(local_buildhost_prefix): build_host_ok = True # ignore local build hosts debug_status_string += "o" if not build_host_ok: dbg("WARN - Strange host " + parsed_hostname + " or not building") # count the total amount of waiting jobs for node in dom.getElementsByTagName('waiting'): jobs_status['jobs_waiting_sum'] += int(node.getAttribute('jobs')) # count the total amount of blocked jobs for node in dom.getElementsByTagName('blocked'): jobs_status['jobs_blocked_sum'] += int(node.getAttribute('jobs')) return # Creates the initial connection to AWS EC2 to the region specified in # parameter region. Region names can be obtained from AWS. def connect_to_ec2(region): connection = boto.ec2.connect_to_region(region) dbg("INFO - Connected to: " + connection.host + ":" + str(connection.port)) dbg("INFO - Secure connection: " + str(connection.is_secure)) return connection # Terminates the VM instance in EC2 specified by the instance_id parameter. def terminate_instance(instance_id): global print_only if not print_only: ec2.terminate_instances([instance_id]) return # Prints a table of cloud workers and their status for debugging purposes. 
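# It is only called for interactive runs; --cron-mode skips this report.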
def print_metrics(elastic_build_hosts, job_status): print "LISTING ALL BUILD HOSTS\n",'EC2 ID'.ljust(10), 'HOSTNAME'.ljust(14),\ 'TYPE'.ljust(10),'CPU'.ljust(3), 'STATE'.ljust(13), 'TTL'.ljust(3),\ 'IP'.ljust(15), 'CURRENT JOB' print "__________________________________________________" +\ "____________________________________________________" for host in elastic_build_hosts: workers = "Not connected to OBS" if host['instance_state'] == 'stopped' or \ host['instance_state'] == 'terminated': workers = "-" if len(host['workers']) > 0: workers = "" for job in host['workers']: workers = workers + job + " " # Show time to next bill only if the machine is actually running time_left = "-" if host['instance_state'] == 'running': time_left = str(host['time_left']) cpu = "-" if host['cpu'] >= 0: cpu = str(host['cpu']) print host['instance_id'].ljust(10), host['instance_name'].ljust(14),\ host['instance_type'].ljust(10), cpu.ljust(3), \ host['instance_state'].ljust(13), \ time_left.ljust(3), host['ip'].ljust(15), workers return # Prints a debug message if the script is ran in verbose mode and not ran # with cron-mode. def dbg(message): global cron_mode global verbose if verbose and not cron_mode: print datetime.datetime.now(), message return # Writes a message to a log file, summarizing current situation and showing # changes made this run. Writes a single line per script run. Appends "MAN" # to the end of the line if the script was ran manually and not with cron. def log_write(message): global cron_mode manual_mode_notice = " MAN" if cron_mode: manual_mode_notice = "" # Log only real actual events and not simulated runs global print_only global log_path if not print_only: f = open(log_path, 'a') f.write(str(datetime.datetime.now()) + " " + message + manual_mode_notice + "\n") f.close() return # Starts a new build host and figures out a unique name for it. def start_new(): global cloud_buildhost_prefix time.sleep(1) # ugly hack to make sure everyone gets a unique name obs_worker_id = cloud_buildhost_prefix + str(int(time.time()))[2:] run_instance(obs_worker_id) return # This function will decide to either create or terminate instances or to do # nothing. It will also try to detect various problematic situations. def analyze_current_situation(elastic_build_hosts, jobs): global debug_status_string global cloud_buildhost_prefix global cron_mode # These variables are to balance between efficiency and minimal flapping # Time in minutes how close idle workers should live until new fee ttl_threshold = 4 # Max time a host can take to connect to server, must be less than 60 - # ttl_threshold. 5 mins should be enough for a normal limit (small booted # in 2,5mins). This should also include the time to start a job from the # server. 3-5 should be good. booting_time = 5 max_kills_per_run = 5 # Max amount of hosts to terminate on one run max_hosts = 15 # Max number of _running_ instances at any time max_instances_starting_up = 5 # Max amount of hosts starting at same time cpu_too_high = 20 # CPU percentage that is considered too high for idle cpu_too_low = 5 # CPU percentage that is considered too low for building instances_terminated_this_cycle = 0 instances_started_this_cycle = 0 instances_alive = 0 # running, pending, i.e. 
not shutdown or terminated instances_starting_up = 0 #### KILL BAD AND UNNECESSARY BUILD HOSTS # Check what every build host is doing currently and whether it is ok for host in elastic_build_hosts: # This is implemented in a reverse fashion so that every machine # will be terminated unless there is a specific reason to let it live should_be_terminated = True name = host['instance_name'] # Check all not terminated workers if ((host['instance_state'] == 'running') or \ (host['instance_state'] == 'pending')) and \ host['instance_name'].startswith(cloud_buildhost_prefix): instances_alive += 1 workers_total = len(host['workers']) # Check connection to OBS server if workers_total <= 0: dbg("WARN - " + name + " is not connected") debug_status_string += "C" # not connected else: # Count idle workers workers_idle = 0 for worker in host['workers']: if worker == 'IDLE': workers_idle += 1 # All workers are idle if workers_total == workers_idle: if host['cpu'] > cpu_too_high: dbg("WARN - " + name + " has high cpu (" +\ str(host['cpu']) + ") for idle. Crashed?") if jobs['jobs_waiting_sum'] > 0: dbg("ERR - "+name+" has all idle but there's work") debug_status_string += "L" # lazy worker else: if host['time_left'] < ttl_threshold: dbg("OK - " + name + " idle and time to die.") debug_status_string += "u" # unemployed else: dbg("OK - " + name + " is idle, wait " +\ str(host['time_left']-ttl_threshold) +\ " more mins") debug_status_string += "i" # idle should_be_terminated = False # All workers are working elif workers_idle == 0: if host['cpu'] < cpu_too_low: dbg("WARN - " + name + " has quite low cpu (" +\ str(host['cpu'])+ ") for building.") debug_status_string += "W" # working + warning should_be_terminated = False else: dbg("OK - " + name + " has all workers busy.") debug_status_string += "w" # working should_be_terminated = False # Some are working and some idling else: if jobs['jobs_waiting_sum'] > 0: dbg("WARN - " + name +\ " some workers idle but there's work") debug_status_string += "M" # working + warning should_be_terminated = False else: dbg("OK - " + name + " some workers idle, no jobs") debug_status_string += "m" # working should_be_terminated = False # Terminate extra worker age = ((datetime.datetime.now() - host['launch_time']).seconds)/60 if should_be_terminated: if age < booting_time: instances_starting_up += 1 dbg("OK - " + name + " is " + str(age) +\ " mins old and may still be starting up. Give it "\ + str(booting_time-(60 - host['time_left'])) +\ " more minutes to boot.") elif instances_terminated_this_cycle >= max_kills_per_run: dbg("WARN - max amount of kills reached.") else: instances_terminated_this_cycle += 1 instances_alive -= 1 dbg("TERM - " + host['instance_id']) terminate_instance(host['instance_id']) host['instance_state'] = "TERMINATING" debug_status_string += "-" # terminating instance dbg("INFO - alive:"+str(instances_alive) + ", terminated_this_cycle:" +\ str(instances_terminated_this_cycle) +", jobs:" +\ str(jobs['jobs_waiting_sum']) + ", starting:" +\ str(instances_starting_up)) #### START NEW BUILD HOSTS IF NEEDED if jobs['jobs_waiting_sum'] > 0: dbg("OK - " + str(jobs['jobs_waiting_sum']) + " jobs ready, spawn!") # Start more if limits have not been met # TODO: This expects that 1 build host == 1 worker == 1 core! 
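        # Spawn until the waiting jobs are covered by hosts that are already
        # starting up, or until one of the safety limits above is reached.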
while (jobs['jobs_waiting_sum']-instances_starting_up) > 0 and \ (instances_alive <= max_hosts) and \ (max_instances_starting_up > instances_starting_up): start_new() instances_alive += 1 instances_starting_up += 1 instances_started_this_cycle += 1 debug_status_string += "+" # starting instance dbg("INFO - alive:"+str(instances_alive) + ", started_this_cycle:" +\ str(instances_started_this_cycle) +", jobs:" +\ str(jobs['jobs_waiting_sum']) + ", starting:" +\ str(instances_starting_up)) #### WRITE TO LOG if instances_started_this_cycle > 0 or \ instances_terminated_this_cycle > 0 or not cron_mode: log_write("srvman: job_queue=" + str(jobs['jobs_waiting_sum']) +"+"+\ str(jobs['jobs_blocked_sum']) + " [" +\ debug_status_string + "]") return ### Main script ############################################################### # Argument handling print_only = False cron_mode = False verbose = False for arg in sys.argv: if arg == "--print-only": print_only = True break elif arg == "--cron-mode": cron_mode = True break elif arg == "--verbose": verbose = True elif arg == "--help": print "Elastic Build Service Manager\nUsage: esm [OPTIONS]\n",\ " --print-only: no changes are made to the cluster\n",\ " --cron-mode: no printing, logs cluster changes",\ " --verbose: print more debug messages in non-cron-mode" exit() # These are used in hostnames to identify different types of build hosts cloud_buildhost_prefix = "cbh-" local_buildhost_prefix = "lbh-" # create datastorages for all metrics elastic_build_hosts = [] jobs_status = {'jobs_waiting_sum':0, 'jobs_blocked_sum':0} debug_status_string = "" if cron_mode: # OBS updates its metrics with cron, let's not fetch metrics from OBS # at the same time. Seemed to work ok without this sleep though. time.sleep(20) # connect to a specific EC2 region ec2 = connect_to_ec2(aws_region) # fetch all metrics from OBS and EC2 fetch_metrics(elastic_build_hosts, jobs_status) # figure out current status and what should be done analyze_current_situation(elastic_build_hosts, jobs_status) # print metrics for debug usage if not cron_mode: print_metrics(elastic_build_hosts, jobs_status) open-build-service-2.9.4/dist/find-requires.sh000066400000000000000000000011171332555733200213060ustar00rootroot00000000000000#!/bin/bash sourcearchive=$1 shift prefix=$1 shift limit=$1 shift tdir=`mktemp -d` # extract files tar xJf $sourcearchive -C $tdir >&/dev/null pushd $tdir/open-build-service*/src/api >& /dev/null ruby.ruby2.5 -rbundler -e 'exit' || echo "___ERROR_BUNDLER_NOT_INSTALLED___" mode="resolve" if [ "$limit" == "production" ]; then mode="specs_for([:default, :production, :assets])" fi ruby.ruby2.5 -rbundler -e 'Bundler.definition.'"$mode"'.any? 
{ |s| puts "rubygem('$prefix':#{s.name}) = #{s.version}" }' | while read i; do echo -n $i", "; done popd >& /dev/null #cleanup rm -rf $tdir open-build-service-2.9.4/dist/git-cop_configuration.yml000066400000000000000000000027741332555733200232230ustar00rootroot00000000000000:commit_author_email: :enabled: true :severity: :error :commit_author_name_capitalization: :enabled: false :commit_author_name_parts: :enabled: false :commit_body_bullet: :enabled: true :severity: :warn :blacklist: - "\\*" - "•" :commit_body_bullet_capitalization: :enabled: false :commit_body_issue_tracker_link: :enabled: false :commit_body_leading_line: :enabled: true :severity: :error :commit_body_leading_space: :enabled: false :commit_body_line_length: :enabled: true :severity: :warn :length: 72 :commit_body_paragraph_capitalization: :enabled: false :commit_body_phrase: :enabled: true :severity: :warn :blacklist: - obviously - basically - simply - of course - everyone knows - easy :commit_body_presence: :enabled: true :severity: :warn :minimum: 1 :commit_body_single_bullet: :enabled: false :commit_subject_length: :enabled: true :severity: :warn :length: 60 :commit_subject_prefix: :enabled: true :severity: :error :whitelist: # Commit header is allowed to start with either # * api, backend, ci, dist, doc, frontend and webui tags in edgy brackets in any order # or # * 'Update' or 'Upgrade' (needed by depfu gem) # * Revert (for revert commits) # In both cases they should be followed by the actual commit message subject - \A(?!\s)(?:Merge|Revert|Update|Upgrade|\[api\]|\[backend\]|\[ci\]|\[contrib\]|\[dist\]|\[doc\]|\[frontend\]|\[webui\])+ \S+ :commit_subject_suffix: :enabled: false open-build-service-2.9.4/dist/internal_redirect.include000066400000000000000000000007221332555733200232400ustar00rootroot00000000000000location ~* ^/internal_redirect/(.*?)/(.*?)/(.*) { # Do not allow people to mess with this location directly # Only internal redirects are allowed internal; resolver 8.8.8.8; # Extract download url from the request set $download_uri $3; set $download_host $2; set $download_proto $1; # Compose download url set $download_url $download_proto://$download_host/$download_uri?$args; # Download the file and send it to client proxy_pass $download_url; } open-build-service-2.9.4/dist/munin/000077500000000000000000000000001332555733200173235ustar00rootroot00000000000000open-build-service-2.9.4/dist/munin/obs_dispatcher000077500000000000000000000014001332555733200222350ustar00rootroot00000000000000#!/bin/sh case $1 in config) cat <<'EOM' graph_title OBS Dispatcher States graph_vlabel Number of dispatched repositories graph_category obs graph_order dispatching dispatchprios dispatching.label obs_dispatching_repos dispatching.info The number of repositories dispatching to obs workers dispatching.type GAUGE dispatching.draw STACK dispatchprios.label obs_dispatchprios_projects dispatchprios.info The number of dispatched projects with discriminative priority. 
dispatchprios.type GAUGE dispatchprios.draw STACK EOM exit 0;; esac num_dispatchprios=`grep --text -c project /srv/obs/jobs/dispatchprios` num_dispatching=`ls /srv/obs/run/dispatch.data | wc -l` echo "dispatching.value $num_dispatching" echo "dispatchprios.value $num_dispatchprios" open-build-service-2.9.4/dist/munin/obs_scheduler_buildavg000077500000000000000000000007721332555733200237550ustar00rootroot00000000000000#!/bin/sh sched=`grep arch /srv/obs/configuration.xml |sed -e 's/<[^>]*>//g'` case $1 in config) cat <<'EOM' graph_title OBS Scheduler: Average Build Time graph_vlabel Build time (seconds) graph_category obs EOM for i in $sched; do echo "sched_"$i"_buildavg.label" "sched_"$i"_buildavg" echo "sched_"$i"_buildavg.type GAUGE" done exit 0;; esac for i in $sched; do val=`grep 'buildavg' /srv/obs/info/schedulerinfo.$i | sed 's/[^0-9.]*//g'` echo "sched_"$i"_buildavg.value $val" done open-build-service-2.9.4/dist/munin/obs_scheduler_projects000077500000000000000000000011001332555733200237730ustar00rootroot00000000000000#!/bin/sh # https://github.com/openSUSE/open-build-service/blob/master/src/backend/bs_sched sched=`grep arch /srv/obs/configuration.xml |sed -e 's/<[^>]*>//g'` case $1 in config) cat <<'EOM' graph_title OBS Scheduler: Projects graph_vlabel Number of projects graph_category obs EOM for i in $sched; do echo "sched_"$i"_projects.label" "sched_"$i"_projects" echo "sched_"$i"_projects.type GAUGE" done exit 0;; esac for i in $sched; do val=`grep 'projects' /srv/obs/info/schedulerinfo.$i | sed 's/[^0-9]*//g'` echo "sched_"$i"_projects.value $val" done open-build-service-2.9.4/dist/munin/obs_scheduler_queue000077500000000000000000000040421332555733200232760ustar00rootroot00000000000000#!/bin/sh sched=`grep arch /srv/obs/configuration.xml |sed -e 's/<[^>]*>//g'` case $1 in config) cat <<'EOM' graph_title OBS Scheduler: Scheduling Queue (high, medium, low, and next) graph_vlabel Number of queues graph_category obs graph_info The load average of the scheduler describes how many packages are in the ready-queue (scheduled to run "immediately"). EOM for i in $sched; do echo "sched_"$i"_queue_high.label" "sched_"$i"_queue_high" echo "sched_"$i"_queue_high.info" "User interaction, do those ASAP." echo "sched_"$i"_queue_high.type GAUGE" echo "sched_"$i"_queue_high.draw AREA" echo "sched_"$i"_queue_medium.label" "sched_"$i"_queue_medium" echo "sched_"$i"_queue_medium.info" "Builds are finished here." echo "sched_"$i"_queue_medium.type GAUGE" echo "sched_"$i"_queue_medium.draw AREA" echo "sched_"$i"_queue_low.label" "sched_"$i"_queue_low" echo "sched_"$i"_queue_low.info" "Not so important." echo "sched_"$i"_queue_low.type GAUGE" echo "sched_"$i"_queue_low.draw AREA" echo "sched_"$i"_queue_next.label" "sched_"$i"_queue_next" echo "sched_"$i"_queue_next.info" "Not so important, next series." 
echo "sched_"$i"_queue_next.type GAUGE" echo "sched_"$i"_queue_next.draw AREA" done exit 0;; esac for i in $sched; do # high val=` grep 'queue' /srv/obs/info/schedulerinfo.$i | sed 's/med="[^"]"*//g' | sed 's/low="[^"]"*//g' | sed 's/next="[^"]"*//g' | sed 's/[^0-9]*//g'` echo "sched_"$i"_queue_high.value $val" # medium val=` grep 'queue' /srv/obs/info/schedulerinfo.$i | sed 's/high="[^"]"*//g' | sed 's/low="[^"]"*//g' | sed 's/next="[^"]"*//g' | sed 's/[^0-9]*//g'` echo "sched_"$i"_queue_medium.value $val" # low val=` grep 'queue' /srv/obs/info/schedulerinfo.$i | sed 's/high="[^"]"*//g' | sed 's/med="[^"]"*//g' | sed 's/next="[^"]"*//g' | sed 's/[^0-9]*//g'` echo "sched_"$i"_queue_low.value $val" # next val=` grep 'queue' /srv/obs/info/schedulerinfo.$i | sed 's/high="[^"]"*//g' | sed 's/med="[^"]"*//g' | sed 's/low="[^"]"*//g' | sed 's/[^0-9]*//g'` echo "sched_"$i"_queue_next.value $val" done open-build-service-2.9.4/dist/munin/obs_scheduler_repositories000077500000000000000000000011351332555733200247010ustar00rootroot00000000000000#!/bin/sh # https://github.com/openSUSE/open-build-service/blob/master/src/backend/bs_sched sched=`grep arch /srv/obs/configuration.xml |sed -e 's/<[^>]*>//g'` case $1 in config) cat <<'EOM' graph_title OBS Scheduler: Repositories graph_vlabel Number of repositories graph_category obs EOM for i in $sched; do echo "sched_"$i"_repositories.label" "sched_"$i"_repositories" echo "sched_"$i"_repositories.type GAUGE" done exit 0;; esac for i in $sched; do val=`grep 'repositories' /srv/obs/info/schedulerinfo.$i | sed 's/[^0-9]*//g'` echo "sched_"$i"_repositories.value $val" done open-build-service-2.9.4/dist/munin/obs_scheduler_worker_state000077500000000000000000000010431332555733200246610ustar00rootroot00000000000000#!/bin/sh case $1 in config) cat <<'EOM' graph_title OBS Scheduler: obs worker (building, idle) graph_vlabel Number of workers graph_category obs graph_order building idle building.label obs_building_workers building.type GAUGE building.draw AREA building.colour 0022ff idle.label obs_idle_workers idle.type GAUGE idle.draw STACK idle.colour c0c0c0 EOM exit 0;; esac num_idle=`ls /srv/obs/workers/idle/ | wc -l` num_building=`ls /srv/obs/workers/building/ | wc -l` echo "building.value $num_building" echo "idle.value $num_idle" open-build-service-2.9.4/dist/munin/obs_srcserver_filesizes000077500000000000000000000011651332555733200242120ustar00rootroot00000000000000#!/bin/sh case $1 in config) cat <<'EOM' graph_title OBS Source Server: Source File Sizes graph_vlabel All file sizes (bytes) graph_category obs graph_order srcsizes srcsizes.label obs_src_server_all_file_sizes srcsizes.info Total of file sizes of all packages srcsizes.type GAUGE EOM exit 0;; esac # Please, Use mount /srv/obs/sources/ folder with a disk partition (e.g., /dev/sda3) # to avoid very much IO. 
For example, # vi /etc/fstab # /dev/sda3 /srv/obs/sources ext4 defaults 0 0 disk_partition="/dev/sda3" num_srcsizes=`df | grep "$disk_partition" | awk '{print $3}'` echo "srcsizes.value " $num_srcsizes open-build-service-2.9.4/dist/obs_srcserver_packages000077500000000000000000000006641332555733200237740ustar00rootroot00000000000000#!/bin/sh case $1 in config) cat <<'EOM' graph_title OBS Source Server: The number of packages graph_vlabel Source packages graph_category obs graph_order numpackages numpackages.label obs_src_server_number_of_packages numpackages.info The number of source packages in obs source server numpackages.type GAUGE EOM exit 0;; esac num_numpackages=`ls /srv/obs/sources/ | wc -l` echo "numpackages.value " $num_numpackages open-build-service-2.9.4/dist/obs_worker_cpu_mem000077500000000000000000000202701332555733200231330ustar00rootroot00000000000000#!/bin/sh : <<=cut =head1 NAME cpu - Plugin to monitor CPU usage. =head1 APPLICABLE SYSTEMS All Linux systems =head1 CONFIGURATION The following is the default configuration [cpu] env.HZ 100 See "BUGS" for an explanation of this setting. =head2 EXAMPLE WARNING AND CRITICAL SETTINGS You can also set warning and critical levels for each of the data series the plugin reports. The following environment variables are used as default for all fields: env.warning env.critical But each field can be controlled separately: env.system_warning env.system_critical env.user_warning env.user_critical env.nice_warning env.nice_critical env.idle_warning env.idle_critical For some kernels there are also the following settings: env.iowait_warning env.iowait_critical env.irq_warning env.irq_critical env.softirq_warning env.softirq_critical env.steal_warning env.steal_critical env.guest_warning env.guest_critical =head1 INTERPRETATION The plugin shows CPU usage in percent. In case of more than one core it displays 100% for each core. If a core is 100% busy there will be no "iowait" showing; iowait only shows if the CPU has nothing else to do while it waits on IO. Therefore a 100% busy core can hide a lot of iowait. Please refer to the IO latency and other disk related graphs for further information about IO performance. =head1 MAGIC MARKERS #%# family=auto #%# capabilities=autoconf =head1 VERSION $Id$ =head1 BUGS Some combinations of hardware and Linux (probably only 2.4 kernels) use 1000 units/second in /proc/stat corresponding to the system's HZ. (see /usr/src/linux/include/asm/param.h). But almost all systems use 100 units/second and this is our default. Even if Documentation/proc.txt in the kernel source says otherwise. - Finding and fix by dz@426.ch Otherwise none known =head1 AUTHOR Unknown =head1 LICENSE GPLv2 =cut .
$MUNIN_LIBDIR/plugins/plugin.sh # Define colours BUILDING='22ff22' # Green IDLE='0022ff' # Blue MEMORY='990000' # Darkest Red SWAP='ff1493' # Deep pink if [ "$1" = "autoconf" ]; then if [ -r /proc/stat ]; then echo yes exit 0 else echo no exit 0 fi fi HZ=${HZ:-100} extinfo="" if egrep -q '^cpu +[0-9]+ +[0-9]+ +[0-9]+ +[0-9]+ +[0-9]+ +[0-9]+ +[0-9]+' /proc/stat; then extinfo="iowait irq softirq" if egrep -q '^cpu +[0-9]+ +[0-9]+ +[0-9]+ +[0-9]+ +[0-9]+ +[0-9]+ +[0-9]+ +[0-9]+' /proc/stat; then extextinfo="steal" fi if egrep -q '^cpu +[0-9]+ +[0-9]+ +[0-9]+ +[0-9]+ +[0-9]+ +[0-9]+ +[0-9]+ +[0-9]+ +[0-9]+' /proc/stat; then extextextinfo="guest" fi fi if [ "$1" = "config" ]; then NCPU=$(egrep '^cpu[0-9]+ ' /proc/stat | wc -l) if [ "$scaleto100" = "yes" ]; then graphlimit=100 else graphlimit=$(($NCPU * 100)) fi echo 'graph_title OBS Scheduler: obs worker & system resources (CPU, Memory)' echo "graph_order system user nice idle iowait irq softirq steal guest obs_worker_building obs_worker_idle memory swap" echo "graph_args --base 1000 -r --lower-limit 0 --upper-limit $graphlimit" echo 'graph_vlabel %' echo 'graph_scale no' echo 'graph_info This graph shows how CPU time and memory usage is spent and the number of building workers.' echo 'graph_category obs' echo 'graph_period second' echo 'system.label system' echo 'system.draw AREA' echo 'system.min 0' echo 'system.type DERIVE' echo "system.info CPU time spent by the kernel in system activities" echo 'user.label user' echo 'user.draw STACK' echo 'user.min 0' echo 'user.type DERIVE' echo 'user.info CPU time spent by normal programs and daemons' echo 'nice.label nice' echo 'nice.draw STACK' echo 'nice.min 0' echo 'nice.type DERIVE' echo 'nice.info CPU time spent by nice(1)d programs' echo 'idle.label idle' echo 'idle.draw STACK' echo 'idle.min 0' echo 'idle.type DERIVE' echo 'idle.info Idle CPU time' for field in system user nice idle; do print_adjusted_thresholds "$field" "$graphlimit" done if [ "$scaleto100" = "yes" ]; then echo "system.cdef system,$NCPU,/" echo "user.cdef user,$NCPU,/" echo "nice.cdef nice,$NCPU,/" echo "idle.cdef idle,$NCPU,/" fi if [ ! -z "$extinfo" ] then echo 'iowait.label iowait' echo 'iowait.draw STACK' echo 'iowait.min 0' echo 'iowait.type DERIVE' echo 'iowait.info CPU time spent waiting for I/O operations to finish when there is nothing else to do.' echo 'irq.label irq' echo 'irq.draw STACK' echo 'irq.min 0' echo 'irq.type DERIVE' echo 'irq.info CPU time spent handling interrupts' echo 'softirq.label softirq' echo 'softirq.draw STACK' echo 'softirq.min 0' echo 'softirq.type DERIVE' echo 'softirq.info CPU time spent handling "batched" interrupts' if [ "$scaleto100" = "yes" ]; then echo "iowait.cdef iowait,$NCPU,/" echo "irq.cdef irq,$NCPU,/" echo "softirq.cdef softirq,$NCPU,/" fi for field in iowait irq softirq; do print_adjusted_thresholds "$field" "$graphlimit" done fi if [ ! -z "$extextinfo" ] then echo 'steal.label steal' echo 'steal.draw STACK' echo 'steal.min 0' echo 'steal.type DERIVE' echo 'steal.info The time that a virtual CPU had runnable tasks, but the virtual CPU itself was not running' if [ "$scaleto100" = "yes" ]; then echo "steal.cdef steal,$NCPU,/" fi for field in steal; do print_adjusted_thresholds "$field" "$graphlimit" done fi if [ ! -z "$extextextinfo" ] then echo 'guest.label guest' echo 'guest.draw STACK' echo 'guest.min 0' echo 'guest.type DERIVE' echo 'guest.info The time spent running a virtual CPU for guest operating systems under the control of the Linux kernel.' 
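# Clarifying note (added, not in the upstream plugin): NCPU above is the count
# of cpuN lines in /proc/stat. When scaleto100=yes is set (presumably via the
# plugin's munin env configuration), the cdef lines emitted below divide each
# field by NCPU, so the stacked graph is normalized to 100% overall instead of
# 100% per core. For example, on a 4-core box one fully busy core then graphs
# as 25% rather than 100%.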
if [ "$scaleto100" = "yes" ]; then echo "guest.cdef guest,$NCPU,/" fi for field in guest; do print_adjusted_thresholds "$field" "$graphlimit" done fi # obs worker (building and idle workers) echo "obs_worker_building.label obs_building_workers (#)" echo "obs_worker_building.type GAUGE" echo "obs_worker_building.draw LINE2" echo "obs_worker_building.colour $BUILDING" echo "obs_worker_building.info The number of building workers" echo "obs_worker_idle.label obs_idle_workers (#)" echo "obs_worker_idle.type GAUGE" echo "obs_worker_idle.draw LINE2" echo "obs_worker_idle.colour $IDLE" echo "obs_worker_idle.info The number of idle workers" echo "memory.label memory (Gbytes)" echo "memory.type GAUGE" echo "memory.draw LINE2" echo "memory.colour $MEMORY" echo "memory.info The number of used memory (including buffers/cache)" echo "swap.label swap (Gbytes)" echo "swap.type GAUGE" echo "swap.draw LINE2" echo "swap.colour $SWAP" echo "swap.info The number of used swap spaces" exit 0 fi # Note: Counters/derive need to report integer values. Also we need # to avoid 10e+09 and the like %.0f should do this. # cpu info if [ ! -z "$extextextinfo" ]; then awk -v hz=$HZ '/^cpu / { printf "user.value %.0f\nnice.value %.0f\nsystem.value %.0f\nidle.value %.0f\niowait.value %.0f\nirq.value %.0f\nsoftirq.value %.0f\nsteal.value %.0f\nguest.value %.0f\n", $2*100/hz, $3*100/hz, $4*100/hz, $5*100/hz, $6*100/hz, $7*100/hz, $8*100/hz, $9*100/hz, $10*100/hz }' < /proc/stat elif [ ! -z "$extextinfo" ]; then awk -v hz=$HZ '/^cpu / { printf "user.value %.0f\nnice.value %.0f\nsystem.value %.0f\nidle.value %.0f\niowait.value %.0f\nirq.value %.0f\nsoftirq.value %.0f\nsteal.value %.0f\n", $2*100/hz, $3*100/hz, $4*100/hz, $5*100/hz, $6*100/hz, $7*100/hz, $8*100/hz, $9*100/hz }' < /proc/stat elif [ ! -z "$extinfo" ]; then awk -v hz=$HZ '/^cpu / { printf "user.value %.0f\nnice.value %.0f\nsystem.value %.0f\nidle.value %.0f\niowait.value %.0f\nirq.value %.0f\nsoftirq.value %.0f\n", $2*100/hz, $3*100/hz, $4*100/hz, $5*100/hz, $6*100/hz, $7*100/hz, $8*100/hz }' < /proc/stat else awk -v hz=$HZ '/^cpu / { printf "user.value %.0f\nnice.value %.0f\nsystem.value %.0f\nidle.value %.0f\n", $2*100/hz, $3*100/hz, $4*100/hz, $5*100/hz }' < /proc/stat fi # obs info obs_worker_num_idle=`ls /srv/obs/workers/idle/ | wc -l` obs_worker_num_building=`ls /srv/obs/workers/building/ | wc -l` echo "obs_worker_building.value $obs_worker_num_building" echo "obs_worker_idle.value $obs_worker_num_idle" # mem/swap info mem_used=` free | grep Mem | awk '{print $3/1024/1024}'` echo "memory.value $mem_used" swap_used=` free | grep Swap | awk '{print $3/1024/1024}'` echo "swap.value $swap_used" open-build-service-2.9.4/dist/nginx-obs-api.conf000066400000000000000000000006531332555733200215230ustar00rootroot00000000000000server { listen 443; server_name api; root /srv/www/obs/api/public; access_log /srv/www/obs/api/log/obs-api-access.log; error_log /srv/www/obs/api/log/obs-api-error.log; passenger_enabled on; passenger_spawn_method smart; passenger_min_instances 2; passenger_user wwwrun; passenger_group www; rails_env production; include vhosts.d/internal_redirect.include; } passenger_pre_start http://localhost:443/; open-build-service-2.9.4/dist/obs-apache2.conf000066400000000000000000000070231332555733200211320ustar00rootroot00000000000000 Listen 82 # May needed on old distributions or after an update from them. 
#Listen 443 # Passenger defaults PassengerSpawnMethod "smart" PassengerMaxPoolSize 20 #RailsEnv "development" # allow long request urls and being part of headers LimitRequestLine 20000 LimitRequestFieldsize 20000 # Just the overview page # just give an overview about this OBS instance via static web page DocumentRoot "/srv/www/obs/overview" Options Indexes Allow from all # Build Results # The resulting repositories DocumentRoot "/srv/obs/repos" Options Indexes FollowSymLinks Allow from all # OBS WEBUI & API ServerName api # General setup for the virtual host DocumentRoot "/srv/www/obs/api/public" ErrorLog /srv/www/obs/api/log/apache_error.log TransferLog /srv/www/obs/api/log/apache_access.log # Enable maintenance mode. All requests will be redirected # to the maintenance page and return 503 as http status. # Start your apache with -D MAINTENANCE to enable this. # On (open)SUSE you can do this by setting # APACHE_SERVER_FLAGS="MAINTENANCE" in /etc/sysconfig/apache ErrorDocument 503 /503.html RewriteEngine on RewriteCond %{REQUEST_URI} !=/503.html RewriteRule ^ - [R=503,L] PassengerMinInstances 2 PassengerPreStart https://api SSLEngine on # SSL protocols # Supporting TLS only is adequate nowadays SSLProtocol all -SSLv2 -SSLv3 # SSL Cipher Suite: # List the ciphers that the client is permitted to negotiate. # We disable weak ciphers by default. # See the mod_ssl documentation or "openssl ciphers -v" for a # complete list. SSLCipherSuite ALL:!aNULL:!eNULL:!SSLv2:!LOW:!EXP:!MD5:@STRENGTH SSLCertificateFile /srv/obs/certs/server.crt SSLCertificateKeyFile /srv/obs/certs/server.key AllowOverride all Options -MultiViews # This requires mod_xforward loaded in apache # Enable the usage via options.yml # This will decrease the load due to long running requests a lot (unloading from rails stack) XForward on Allow from all # Uncomment this if you're on Apache >= 2.4: #Require all granted SetEnvIf User-Agent ".*MSIE [1-5].*" \ nokeepalive ssl-unclean-shutdown \ downgrade-1.0 force-response-1.0 CustomLog /var/log/apache2/ssl_request_log ssl_combined # from http://guides.rubyonrails.org/asset_pipeline.html Header unset ETag FileETag None # RFC says only cache for 1 year ExpiresActive On ExpiresDefault "access plus 1 year" SetEnvIf User-Agent ".*MSIE [1-5].*" \ nokeepalive ssl-unclean-shutdown \ downgrade-1.0 force-response-1.0 ## Older firefox versions needs this, otherwise it wont cache anything over SSL. Header append Cache-Control "public" open-build-service-2.9.4/dist/obs-apache24.conf000066400000000000000000000063541332555733200212240ustar00rootroot00000000000000 Listen 82 # May needed on old distributions or after an update from them. #Listen 443 # Passenger defaults PassengerSpawnMethod "smart" PassengerMaxPoolSize 20 #RailsEnv "development" # allow long request urls and being part of headers LimitRequestLine 20000 LimitRequestFieldsize 20000 # Just the overview page # just give an overview about this OBS instance via static web page DocumentRoot "/srv/www/obs/overview" Options Indexes Require all granted # Build Results # The resulting repositories DocumentRoot "/srv/obs/repos" Options Indexes FollowSymLinks Require all granted # OBS WEBUI & API ServerName api # General setup for the virtual host DocumentRoot "/srv/www/obs/api/public" ErrorLog /srv/www/obs/api/log/apache_error.log TransferLog /srv/www/obs/api/log/apache_access.log # Enable maintenance mode. All requests will be redirected # to the maintenance page and return 503 as http status. 
# Start your apache with -D MAINTENANCE to enable this. # On (open)SUSE you can do this by setting # APACHE_SERVER_FLAGS="MAINTENANCE" in /etc/sysconfig/apache ErrorDocument 503 /503.html RewriteEngine on RewriteCond %{REQUEST_URI} !=/503.html RewriteRule ^ - [R=503,L] PassengerMinInstances 2 PassengerPreStart https://api SSLEngine on # SSL protocols # Supporting TLS only is adequate nowadays SSLProtocol all -SSLv2 -SSLv3 # SSL Cipher Suite: # List the ciphers that the client is permitted to negotiate. # We disable weak ciphers by default. # See the mod_ssl documentation or "openssl ciphers -v" for a # complete list. SSLCipherSuite ALL:!aNULL:!eNULL:!SSLv2:!LOW:!EXP:!MD5:@STRENGTH SSLCertificateFile /srv/obs/certs/server.crt SSLCertificateKeyFile /srv/obs/certs/server.key AllowOverride all Options -MultiViews # This requires mod_xforward loaded in apache # Enable the usage via options.yml # This will decrease the load due to long running requests a lot (unloading from rails stack) XForward on Require all granted SetEnvIf User-Agent ".*MSIE [1-5].*" \ nokeepalive ssl-unclean-shutdown \ downgrade-1.0 force-response-1.0 CustomLog /var/log/apache2/ssl_request_log ssl_combined # from http://guides.rubyonrails.org/asset_pipeline.html Header unset ETag FileETag None # RFC says only cache for 1 year ExpiresActive On ExpiresDefault "access plus 1 year" SetEnvIf User-Agent ".*MSIE [1-5].*" \ nokeepalive ssl-unclean-shutdown \ downgrade-1.0 force-response-1.0 ## Older firefox versions needs this, otherwise it wont cache anything over SSL. Header append Cache-Control "public" open-build-service-2.9.4/dist/obs-api-deps.spec000066400000000000000000000052021332555733200213330ustar00rootroot00000000000000# # spec file for package obs-api-deps # # Copyright (c) 2014 SUSE LINUX Products GmbH, Nuernberg, Germany. # # All modifications and additions to the file contributed by third parties # remain the property of their copyright owners, unless otherwise agreed # upon. The license for this file, and modifications and additions to the # file, is the same license as for the pristine package itself (unless the # license for the pristine package is not an Open Source License, in which # case the license is the MIT License). An "Open Source License" is a # license that conforms to the Open Source Definition (Version 1.9) # published by the Open Source Initiative. 
# Please submit bugfixes or comments via http://bugs.opensuse.org/ # Name: obs-api-deps Summary: The Open Build Service -- Gem dependencies License: MIT Group: Productivity/Networking/Web/Utilities Version: 2.7.5020140303 Release: 0 Url: http://en.opensuse.org/Build_Service BuildRoot: %{_tmppath}/%{name}-%{version}-build Source0: open-build-service-%version.tar.xz Source1: find-requires.sh BuildRequires: ruby2.5 BuildRequires: ruby2.5-rubygem-bundler %if 0%{?suse_version} < 1210 BuildRequires: xz %endif %description This package serves one purpose only: to list the dependencies in Gemfile.lock %package -n obs-api-testsuite-deps Summary: The Open Build Service -- The Testsuite dependencies Group: Productivity/Networking/Web/Utilities # dependencies not needed in production Requires: %(echo `bash %{S:1} %{S:0} "ruby:2.5.0"`) Requires: perl-BSSolv >= 0.18.0 # Required by source server Requires: createrepo Requires: diffutils Requires: git-core Requires: patch # needed for api test suite %if 0%{suse_version} > 1210 Requires: libxml2-tools %else Requires: libxml2 %endif Recommends: yum yum-metadata-parser repoview dpkg Recommends: deb >= 1.5 Recommends: lvm2 Recommends: openslp-server Recommends: obs-signd Recommends: inst-source-utils Requires: perl-Compress-Zlib Requires: perl-File-Sync >= 0.10 Requires: perl-JSON-XS Requires: perl-Net-SSLeay Requires: perl-Socket-MsgHdr Requires: perl-XML-Parser Requires: perl-XML-Simple Requires: perl(GD) Requires: sphinx >= 2.1.8 %description -n obs-api-testsuite-deps This is the API server instance, and the web client for the OBS. %prep echo > README < ServerName registry # General setup for the virtual host DocumentRoot "/srv/www/obs/container-registry/htdocs" ErrorLog /srv/www/obs/container-registry/log/apache_error.log TransferLog /srv/www/obs/container-registry/log/apache_access.log # Enable maintenance mode. All requests will be redirected # to the maintenance page and return 503 as http status. # Start your apache with -D MAINTENANCE to enable this. # On (open)SUSE you can do this by setting # APACHE_SERVER_FLAGS="MAINTENANCE" in /etc/sysconfig/apache ErrorDocument 503 /503.html RewriteEngine on RewriteCond %{REQUEST_URI} !=/503.html RewriteRule ^ - [R=503,L] SSLEngine on # SSL protocols # Supporting TLS only is adequate nowadays SSLProtocol all -SSLv2 -SSLv3 # SSL Cipher Suite: # List the ciphers that the client is permitted to negotiate. # We disable weak ciphers by default. # See the mod_ssl documentation or "openssl ciphers -v" for a # complete list. 
SSLCipherSuite ALL:!aNULL:!eNULL:!SSLv2:!LOW:!EXP:!MD5:@STRENGTH SSLCertificateFile /srv/obs/certs/server.crt SSLCertificateKeyFile /srv/obs/certs/server.key AllowOverride all Options -MultiViews # This requires mod_xforward loaded in apache # Enable the usage via options.yml # This will decrease the load due to long running requests a lot (unloading from rails stack) XForward on Require all granted SetEnvIf User-Agent ".*MSIE [1-5].*" \ nokeepalive ssl-unclean-shutdown \ downgrade-1.0 force-response-1.0 CustomLog /var/log/apache2/ssl_request_log ssl_combined RequestHeader set X-Forwarded-Proto "https" ProxyPass "/" "http://localhost:5000/" ProxyPassReverse "/" "http://localhost:5000/" Require host localhost open-build-service-2.9.4/dist/obs-lighttpd.conf000066400000000000000000000012261332555733200214450ustar00rootroot00000000000000# api/webui $SERVER["socket"] == ":443" { rails_app = "api" rails_root = "/srv/www/obs/api" rails_procs = 5 # production/development are typical values here rails_mode = "production" log_root = "/srv/www/obs/api/log" ssl.engine = "enable" ssl.pemfile = "/etc/ssl/private/server.pem" ssl.ca-file = "/etc/ssl/certs/server.crt" include "vhosts.d/rails.inc" } # redirect web browsers to https $SERVER["socket"] == ":80" { $HTTP["host"] =~ "(.*)" { url.redirect = ( "^/(.*)" => "https://%1/$1" ) } } # ftp download tree $SERVER["socket"] == ":82" { server.document-root = "/srv/obs/repos/" dir-listing.activate = "enable" } open-build-service-2.9.4/dist/obs-server.logrotate000066400000000000000000000001461332555733200222070ustar00rootroot00000000000000/srv/obs/log/*.log { compress dateext rotate 2 daily missingok copytruncate } open-build-service-2.9.4/dist/obs-server.spec000066400000000000000000000555631332555733200211560ustar00rootroot00000000000000# # spec file for package obs-server # # Copyright (c) 2014 SUSE LINUX Products GmbH, Nuernberg, Germany. # # All modifications and additions to the file contributed by third parties # remain the property of their copyright owners, unless otherwise agreed # upon. The license for this file, and modifications and additions to the # file, is the same license as for the pristine package itself (unless the # license for the pristine package is not an Open Source License, in which # case the license is the MIT License). An "Open Source License" is a # license that conforms to the Open Source Definition (Version 1.9) # published by the Open Source Initiative. # Please submit bugfixes or comments via http://bugs.opensuse.org/ # %if 0%{?fedora} %global sbin /usr/sbin %else %global sbin /sbin %endif %if 0%{?fedora} || 0%{?rhel} %global apache_user apache %global apache_group apache %else %global apache_user wwwrun %global apache_group www %endif %define secret_key_file /srv/www/obs/api/config/secret.key %if 0%{?suse_version} >= 1315 %define reload_on_update() %{?nil: test -n "$FIRST_ARG" || FIRST_ARG=$1 if test "$FIRST_ARG" -ge 1 ; then test -f /etc/sysconfig/services && . 
/etc/sysconfig/services if test "$YAST_IS_RUNNING" != "instsys" -a "$DISABLE_RESTART_ON_UPDATE" != yes ; then test -x /bin/systemctl && /bin/systemctl daemon-reload >/dev/null 2>&1 || : for service in %{?*} ; do test -x /bin/systemctl && /bin/systemctl reload $service >/dev/null 2>&1 || : done fi fi %nil } %endif Name: obs-server Summary: The Open Build Service -- Server Component License: GPL-2.0 and GPL-3.0 %if 0%{?suse_version} < 1210 && 0%{?suse_version:1} Group: Productivity/Networking/Web/Utilities %endif Version: 2.7.50_113_g117c617 Release: 0 Url: http://www.openbuildservice.org BuildRoot: %{_tmppath}/%{name}-%{version}-build Source0: open-build-service-%version.tar.xz Source1: find-requires.sh BuildRequires: python-devel # make sure this is in sync with the RAILS_GEM_VERSION specified in the # config/environment.rb of the various applications. # atm the obs rails version patch above unifies that setting among the applications # also see requires in the obs-server-api sub package BuildRequires: build >= 20170315 BuildRequires: inst-source-utils BuildRequires: perl-BSSolv >= 0.28 BuildRequires: perl-Compress-Zlib BuildRequires: perl-Diff-LibXDiff BuildRequires: perl-File-Sync >= 0.10 BuildRequires: perl-JSON-XS BuildRequires: perl-Net-SSLeay BuildRequires: perl-Socket-MsgHdr BuildRequires: perl-TimeDate BuildRequires: perl-XML-Parser BuildRequires: perl-XML-Simple BuildRequires: perl(Devel::Cover) BuildRequires: perl(Test::Simple) > 1 BuildRequires: procps # Required by the test suite (contains /usr/bin/Xvfb) BuildRequires: xorg-x11-server PreReq: /usr/sbin/useradd /usr/sbin/groupadd BuildArch: noarch Requires(pre): obs-common Requires: build >= 20170315 Requires: perl-BSSolv >= 0.28 Requires: perl(Date::Parse) # Required by source server Requires: diffutils PreReq: git-core Requires: patch # require the createrepo and python-yum version which got validated during testsuite run Requires: %(/bin/bash -c 'rpm -q --qf "%%{name} = %%{version}-%%{release}" createrepo') Requires: %(/bin/bash -c 'rpm -q --qf "%%{name} = %%{version}-%%{release}" python-yum') BuildRequires: xz %if 0%{?suse_version:1} BuildRequires: fdupes PreReq: %insserv_prereq permissions pwdutils %endif %if 0%{?suse_version:1} Recommends: yum yum-metadata-parser repoview dpkg Recommends: deb >= 1.5 Recommends: lvm2 Recommends: openslp-server Recommends: obs-signd Recommends: inst-source-utils Recommends: perl-Diff-LibXDiff %else Requires: dpkg Requires: yum Requires: yum-metadata-parser %endif Requires: perl-Compress-Zlib Requires: perl-File-Sync >= 0.10 Requires: perl-JSON-XS Requires: perl-Net-SSLeay Requires: perl-Socket-MsgHdr Requires: perl-XML-Parser Requires: perl-XML-Simple Obsoletes: obs-source_service < 2.9 Obsoletes: obs-productconverter < 2.9 Provides: obs-source_service = %version Provides: obs-productconverter = %version Recommends: obs-service-download_url Recommends: obs-service-verify_file %if 0%{?suse_version} >= 1210 BuildRequires: systemd-rpm-macros %endif %{?systemd_requires} %description The Open Build Service (OBS) backend is used to store all sources and binaries. It also calculates the need for new build jobs and distributes it. 
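(Aside, not part of the spec file: a minimal sketch of driving the backend daemons this package installs; the rc wrapper names are taken from the %files list further down, and the 'status' action is an assumption about the init scripts.)
rcobssrcserver status   # source server: stores all package sources
rcobsrepserver status   # repository server: serves built results
rcobsscheduler status   # scheduler: calculates which packages need (re)building
rcobsdispatcher status  # dispatcher: assigns ready build jobs to idle workers
rcobspublisher status   # publisher: pushes finished repositories to /srv/obs/repos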
%package -n obs-worker Requires(pre): obs-common Requires: cpio Requires: curl Requires: perl-Compress-Zlib Requires: perl-TimeDate Requires: perl-XML-Parser Requires: screen # for build script Requires: psmisc # For runlevel script: Requires: curl Recommends: openslp lvm2 Requires: bash Requires: binutils Requires: bsdtar Summary: The Open Build Service -- Build Host Component %if 0%{?suse_version} && 0%{?suse_version} < 1210 Group: Productivity/Networking/Web/Utilities %endif %if 0%{?suse_version} PreReq: %insserv_prereq %endif %if 0%{?suse_version} <= 1030 Requires: lzma %endif Requires: util-linux >= 2.16 # the following may not even exist depending on the architecture Recommends: powerpc32 %description -n obs-worker This is the obs build host, to be installed on each machine building packages in this obs installation. Install it alongside obs-server to run a local playground test installation. %package -n obs-common Summary: The Open Build Service -- base configuration files Requires(pre): shadow %if 0%{?suse_version} && 0%{?suse_version} < 1210 Group: Productivity/Networking/Web/Utilities %endif %if 0%{?suse_version} PreReq: %fillup_prereq %endif %description -n obs-common This is a package providing basic configuration files. %package -n obs-api Summary: The Open Build Service -- The API and WEBUI %if 0%{?suse_version} && 0%{?suse_version} < 1210 Group: Productivity/Networking/Web/Utilities %endif %if 0%{?suse_version} PreReq: %insserv_prereq Requires(pre): obs-common %endif %if 0%{?suse_version} >= 1330 Requires(pre): group(www) %endif #For apache Requires: apache2 apache2-mod_xforward rubygem-passenger-apache2 ruby2.5-rubygem-passenger # memcache is required for session data Requires: memcached Conflicts: memcached < 1.4 Requires: mysql Requires: ruby(abi) >= 2.0 # needed for fulltext searching Requires: sphinx >= 2.1.8 BuildRequires: obs-api-testsuite-deps BuildRequires: rubygem(ruby-ldap) # For doc generation BuildRequires: rubygem(i18n) # for test suite: BuildRequires: createrepo BuildRequires: curl BuildRequires: memcached >= 1.4 BuildRequires: mysql BuildRequires: netcfg BuildRequires: xorg-x11-Xvnc BuildRequires: xorg-x11-server BuildRequires: xorg-x11-server-extra # write down dependencies for production BuildRequires: rubygem(bundler) Requires: %(echo `bash %{S:1} %{S:0} "ruby:2.5.0" "production"`) # for rebuild_time Requires: perl(GD) Requires: ghostscript-fonts-std %description -n obs-api This is the API server instance, and the web client for the OBS. 
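(Aside, not part of the spec file: a minimal sketch of probing a freshly installed API instance, using the same 'osc api' calls that the obs_mirror_project script at the end of this archive relies on; the API URL is a placeholder.)
osc -A https://api.example.org api -m GET /about                          # liveness check of the API
osc -A https://api.example.org api -m GET /build/openSUSE:13.1/standard   # browse build results of a project/repository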
%package -n obs-devel Summary: The Open Build Service -- The API and WEBUI Testsuite %if 0%{?suse_version} < 1210 && 0%{?suse_version:1} Group: Productivity/Networking/Web/Utilities %endif Obsoletes: obs-webui-testsuite Requires: obs-api = %{version}-%{release} %requires_eq obs-api-testsuite-deps %description -n obs-devel Install to track dependencies for git %package -n obs-utils Summary: The Open Build Service -- utilities %if 0%{?suse_version} < 1210 && 0%{?suse_version:1} Group: Productivity/Networking/Web/Utilities %endif Requires: build Requires: osc %description -n obs-utils obs_project_update is a tool to copy a packages of a project from one obs to another %package -n obs-tests-appliance Summary: The Open Build Service -- Test cases for installed appliances Requires: obs-server = %{version} Requires: obs-api = %{version} %if 0%{?suse_version} < 1210 && 0%{?suse_version:1} Group: Productivity/Networking/Web/Utilities %endif %description -n obs-tests-appliance This package contains test cases for testing a installed appliances. Test cases can be for example: * checks for setup-appliance.sh * checks if database setup worked correctly * checks if required service came up properly %package -n obs-cloud-uploader Summary: The Open Build Service -- Image Cloud Uploader Requires: obs-server Requires: aws-cli %if 0%{?suse_version} > 1315 Requires: python3-ec2uploadimg %else Requires: python-ec2uploadimg %endif Group: Productivity/Networking/Web/Utilities %description -n obs-cloud-uploader This package contains all the necessary tools for upload images to the cloud. #-------------------------------------------------------------------------------- %prep %setup -q -n open-build-service-%version # drop build script, we require the installed one from own package rm -rf src/backend/build find . -name .git\* -o -name Capfile -o -name deploy.rb | xargs rm -rf %build export DESTDIR=$RPM_BUILD_ROOT # we need it for the test suite or it may silently succeed test -x /usr/bin/Xvfb # # generate apidocs # make %install export DESTDIR=$RPM_BUILD_ROOT %if 0%{?suse_version} < 1300 perl -p -i -e 's/^APACHE_VHOST_CONF=.*/APACHE_VHOST_CONF=obs-apache2.conf/' Makefile.include %endif %if 0%{?fedora} || 0%{?rhel} # Fedora use different user:group for apache perl -p -i -e 's/^APACHE_USER=.*/APACHE_USER=apache/' Makefile.include perl -p -i -e 's/^APACHE_GROUP=.*/APACHE_GROUP=apache/' Makefile.include %endif export OBS_VERSION="%{version}" DESTDIR=%{buildroot} make install # # turn duplicates into hard links # # There's dupes between webui and api: %if 0%{?suse_version} >= 1030 %fdupes $RPM_BUILD_ROOT/srv/www/obs %endif # fix build for SLE 11 %if 0%{?suse_version} < 1315 touch %{buildroot}/%{secret_key_file} chmod 0640 %{buildroot}/%{secret_key_file} %endif # drop testcases for now rm -rf %{buildroot}/srv/www/obs/api/spec # fail when Makefiles created a directory if ! test -L %{buildroot}/usr/lib/obs/server/build; then echo "/usr/lib/obs/server/build is not a symlink!" 
exit 1 fi install -m 755 $RPM_BUILD_DIR/open-build-service-%version/dist/clouduploader.rb $RPM_BUILD_ROOT/%{_bindir}/clouduploader mkdir -p $RPM_BUILD_ROOT/etc/obs/cloudupload install -m 644 $RPM_BUILD_DIR/open-build-service-%version/dist/ec2utils.conf.example $RPM_BUILD_ROOT/etc/obs/cloudupload/.ec2utils.conf mkdir -p $RPM_BUILD_ROOT/etc/obs/cloudupload/.aws install -m 644 $RPM_BUILD_DIR/open-build-service-%version/dist/aws_credentials.example $RPM_BUILD_ROOT/etc/obs/cloudupload/.aws/credentials %check %if 0%{?disable_obs_test_suite} echo "WARNING:" echo "WARNING: OBS test suite got skipped!" echo "WARNING:" exit 0 %endif export DESTDIR=$RPM_BUILD_ROOT # check installed backend pushd $RPM_BUILD_ROOT/usr/lib/obs/server/ rm -rf build ln -sf /usr/lib/build build # just for %%check, it is a %%ghost popd # run in build environment pushd src/backend/ rm -rf build ln -sf /usr/lib/build build popd #### # start backend testing pushd $RPM_BUILD_ROOT/usr/lib/obs/server/ %if 0%{?disable_obs_backend_test_suite:1} < 1 # TODO: move syntax check to backend test suite for i in bs_*; do perl -wc "$i" done bash $RPM_BUILD_DIR/open-build-service-%version/src/backend/testdata/test_dispatcher || exit 1 popd make -C src/backend test %endif #### # start api testing # %if 0%{?disable_obs_frontend_test_suite:1} < 1 make -C src/api test %endif #### # distribution tests %if 0%{?disable_obs_dist_test_suite:1} < 1 make -C dist test %endif %pre getent passwd obsservicerun >/dev/null || \ /usr/sbin/useradd -r -g obsrun -d /usr/lib/obs -s %{sbin}/nologin \ -c "User for the build service source service" obsservicerun %service_add_pre obsdeltastore exit 0 # create user and group in advance of obs-server %pre -n obs-common getent group obsrun >/dev/null || /usr/sbin/groupadd -r obsrun getent passwd obsrun >/dev/null || \ /usr/sbin/useradd -r -g obsrun -d /usr/lib/obs -s %{sbin}/nologin \ -c "User for build service backend" obsrun exit 0 %preun %stop_on_removal obssrcserver obsrepserver obsdispatcher obsscheduler obspublisher obswarden obssigner obsdodup obsservicedispatch obsservice %service_del_preun obsdeltastore %preun -n obs-worker %stop_on_removal obsworker %preun -n obs-cloud-uploader %stop_on_removal obsclouduploadworker obsclouduploadserver %post %if 0%{?suse_version} >= 1315 %reload_on_update obssrcserver obsrepserver obsdispatcher obspublisher obswarden obssigner obsdodup obsservicedispatch obsservice %else %restart_on_update obssrcserver obsrepserver obsdispatcher obspublisher obswarden obssigner obsdodup obsservicedispatch obsservice %endif # systemd kills the init script executing the reload first on reload.... %restart_on_update obsscheduler %service_add_post obsdeltastore %post -n obs-cloud-uploader %if 0%{?suse_version} >= 1315 %reload_on_update obsservice obsclouduploadworker obsclouduploadserver %else %restart_on_update obsservice obsclouduploadworker obsclouduploadserver %endif %posttrans [ -d /srv/obs ] || install -d -o obsrun -g obsrun /srv/obs # this changes from directory to symlink. rpm can not handle this itself. if [ -e /usr/lib/obs/server/build -a ! -L /usr/lib/obs/server/build ]; then rm -rf /usr/lib/obs/server/build fi if [ ! 
-e /usr/lib/obs/server/build ]; then ln -sf ../../build /usr/lib/obs/server/build fi %postun %insserv_cleanup %service_del_postun obsdeltastore # cleanup empty directory just in case rmdir /srv/obs 2> /dev/null || : %verifyscript -n obs-server %verify_permissions %post -n obs-worker # NOT used on purpose: restart_on_update obsworker # This can cause problems when building chroot # and bs_worker is anyway updating itself at runtime based on server code %pre -n obs-api getent passwd obsapidelayed >/dev/null || \ /usr/sbin/useradd -r -s /bin/bash -c "User for build service api delayed jobs" -d /srv/www/obs/api -g www obsapidelayed %post -n obs-common %{fillup_and_insserv -n obs-server} %post -n obs-api if [ -e /srv/www/obs/frontend/config/database.yml ] && [ ! -e /srv/www/obs/api/config/database.yml ]; then cp /srv/www/obs/frontend/config/database.yml /srv/www/obs/api/config/database.yml fi for i in production.rb ; do if [ -e /srv/www/obs/frontend/config/environments/$i ] && [ ! -e /srv/www/obs/api/config/environments/$i ]; then cp /srv/www/obs/frontend/config/environments/$i /srv/www/obs/api/config/environments/$i fi done if [ ! -e %{secret_key_file} ]; then pushd . cd /srv/www/obs/api/config ( umask 0077; RAILS_ENV=production bundle.ruby2.5 exec rails.ruby2.5 secret > %{secret_key_file} ) || exit 1 popd fi chmod 0640 %{secret_key_file} chown root.www %{secret_key_file} # update config sed -i -e 's,[ ]*adapter: mysql$, adapter: mysql2,' /srv/www/obs/api/config/database.yml touch /srv/www/obs/api/log/production.log chown %{apache_user}:%{apache_group} /srv/www/obs/api/log/production.log %restart_on_update memcached %postun -n obs-api %insserv_cleanup %restart_on_update obsapisetup %restart_on_update apache2 %restart_on_update obsapidelayed %files %defattr(-,root,root) %doc dist/{README.UPDATERS,README.SETUP} docs/openSUSE.org.xml ReleaseNotes-* README.md COPYING AUTHORS %dir /etc/slp.reg.d %dir /usr/lib/obs %dir /usr/lib/obs/server %config(noreplace) /etc/logrotate.d/obs-server /etc/init.d/obsdispatcher /etc/init.d/obspublisher /etc/init.d/obsrepserver /etc/init.d/obsscheduler /etc/init.d/obssrcserver /etc/init.d/obswarden /etc/init.d/obsdodup %{_unitdir}/obsdeltastore.service /etc/init.d/obsservicedispatch /etc/init.d/obssigner /usr/sbin/obs_admin /usr/sbin/obs_serverstatus /usr/sbin/rcobsdispatcher /usr/sbin/rcobspublisher /usr/sbin/rcobsrepserver /usr/sbin/rcobsscheduler /usr/sbin/rcobssrcserver /usr/sbin/rcobswarden /usr/sbin/rcobsdodup /usr/sbin/rcobsdeltastore /usr/sbin/rcobsservicedispatch /usr/sbin/rcobssigner /usr/lib/obs/server/plugins /usr/lib/obs/server/BSDispatcher /usr/lib/obs/server/BSRepServer /usr/lib/obs/server/BSSched /usr/lib/obs/server/BSSrcServer /usr/lib/obs/server/XML /usr/lib/obs/server/*.pm /usr/lib/obs/server/BSConfig.pm.template /usr/lib/obs/server/DESIGN /usr/lib/obs/server/License /usr/lib/obs/server/README /usr/lib/obs/server/bs_admin /usr/lib/obs/server/bs_cleanup /usr/lib/obs/server/bs_archivereq /usr/lib/obs/server/bs_check_consistency /usr/lib/obs/server/bs_deltastore /usr/lib/obs/server/bs_servicedispatch /usr/lib/obs/server/bs_dodup /usr/lib/obs/server/bs_getbinariesproxy /usr/lib/obs/server/bs_mergechanges /usr/lib/obs/server/bs_mkarchrepo /usr/lib/obs/server/bs_notar /usr/lib/obs/server/bs_dispatch /usr/lib/obs/server/bs_publish /usr/lib/obs/server/bs_repserver /usr/lib/obs/server/bs_sched /usr/lib/obs/server/bs_serverstatus /usr/lib/obs/server/bs_srcserver /usr/lib/obs/server/bs_worker /usr/lib/obs/server/bs_signer /usr/lib/obs/server/bs_warden 
/usr/lib/obs/server/worker /usr/lib/obs/server/worker-deltagen.spec %config(noreplace) /usr/lib/obs/server/BSConfig.pm %config(noreplace) /etc/slp.reg.d/* # created via %%post, since rpm fails otherwise while switching from # directory to symlink %ghost /usr/lib/obs/server/build # formerly obs-source_service /etc/init.d/obsservice %config(noreplace) /etc/logrotate.d/obs-source_service /etc/cron.d/cleanup_scm_cache /usr/sbin/rcobsservice /usr/lib/obs/server/bs_service /usr/lib/obs/server/call-service-in-docker.sh /usr/lib/obs/server/cleanup_scm_cache # formerly obs-productconverter /usr/bin/obs_productconvert /usr/lib/obs/server/bs_productconvert # add obsservicerun user into docker group if docker # gets installed %triggerin -n obs-server -- docker usermod -a -G docker obsservicerun %files -n obs-worker %defattr(-,root,root) /etc/init.d/obsworker /usr/sbin/rcobsworker %files -n obs-api %defattr(-,root,root) %doc dist/{README.UPDATERS,README.SETUP} docs/openSUSE.org.xml ReleaseNotes-* README.md COPYING AUTHORS /srv/www/obs/overview /srv/www/obs/api/config/thinking_sphinx.yml.example /etc/cron.d/obs_api_delayed_jobs_monitor %config(noreplace) /srv/www/obs/api/config/thinking_sphinx.yml %attr(-,%{apache_user},%{apache_group}) %config(noreplace) /srv/www/obs/api/config/production.sphinx.conf %dir /srv/www/obs %dir /srv/www/obs/api %dir /srv/www/obs/api/config %config(noreplace) /srv/www/obs/api/config/cable.yml %config(noreplace) /srv/www/obs/api/config/feature.yml %config(noreplace) /srv/www/obs/api/config/puma.rb %config(noreplace) /srv/www/obs/api/config/secrets.yml %config(noreplace) /srv/www/obs/api/config/spring.rb %config(noreplace) /srv/www/obs/api/config/crawler-user-agents.json /srv/www/obs/api/config/initializers %dir /srv/www/obs/api/config/environments %dir /srv/www/obs/api/files %dir /srv/www/obs/api/db /srv/www/obs/api/db/checker.rb /srv/www/obs/api/Gemfile /srv/www/obs/api/Gemfile.lock /srv/www/obs/api/config.ru /srv/www/obs/api/config/application.rb /srv/www/obs/api/config/clock.rb %config(noreplace) /etc/logrotate.d/obs-api /etc/init.d/obsapidelayed /etc/init.d/obsapisetup /usr/sbin/rcobsapisetup /usr/sbin/rcobsapidelayed /srv/www/obs/api/app %attr(-,%{apache_user},%{apache_group}) /srv/www/obs/api/db/structure.sql /srv/www/obs/api/db/attribute_descriptions.rb /srv/www/obs/api/db/data /srv/www/obs/api/db/migrate /srv/www/obs/api/db/seeds.rb /srv/www/obs/api/files/wizardtemplate.spec /srv/www/obs/api/lib /srv/www/obs/api/public /srv/www/obs/api/Rakefile /srv/www/obs/api/script /srv/www/obs/api/bin /srv/www/obs/api/test /srv/www/obs/docs /srv/www/obs/api/config/locales /srv/www/obs/api/vendor /srv/www/obs/api/vendor/diststats # # some files below config actually are _not_ config files # so here we go, file by file # /srv/www/obs/api/config/boot.rb /srv/www/obs/api/config/routes.rb /srv/www/obs/api/config/environments/development.rb /srv/www/obs/api/config/unicorn %attr(0640,root,%apache_group) %config(noreplace) %verify(md5) /srv/www/obs/api/config/database.yml %attr(0640,root,%apache_group) /srv/www/obs/api/config/database.yml.example %attr(0644,root,root) %config(noreplace) %verify(md5) /srv/www/obs/api/config/options.yml %attr(0644,root,root) /srv/www/obs/api/config/options.yml.example %dir %attr(0755,%apache_user,%apache_group) /srv/www/obs/api/db/sphinx %dir %attr(0755,%apache_user,%apache_group) /srv/www/obs/api/db/sphinx/production /srv/www/obs/api/.bundle %config /srv/www/obs/api/config/environment.rb %config /srv/www/obs/api/config/environments/production.rb %config 
/srv/www/obs/api/config/environments/test.rb %config /srv/www/obs/api/config/environments/stage.rb %dir %attr(-,%{apache_user},%{apache_group}) /srv/www/obs/api/log %attr(-,%{apache_user},%{apache_group}) /srv/www/obs/api/tmp # these dirs primarily belong to apache2: %dir /etc/apache2 %dir /etc/apache2/vhosts.d %config(noreplace) /etc/apache2/vhosts.d/obs.conf %defattr(0644,wwwrun,www) %ghost /srv/www/obs/api/log/access.log %ghost /srv/www/obs/api/log/backend_access.log %ghost /srv/www/obs/api/log/delayed_job.log %ghost /srv/www/obs/api/log/error.log %ghost /srv/www/obs/api/log/lastevents.access.log %ghost /srv/www/obs/api/log/production.log %ghost %attr(0640,root,www) %secret_key_file %files -n obs-common %defattr(-,root,root) /var/adm/fillup-templates/sysconfig.obs-server /usr/lib/obs/server/setup-appliance.sh /etc/init.d/obsstoragesetup /usr/sbin/rcobsstoragesetup %files -n obs-utils %defattr(-,root,root) /usr/sbin/obs_project_update %files -n obs-devel %defattr(-,root,root) %dir %_docdir/obs-devel %_docdir/obs-devel/README.devel %files -n obs-tests-appliance %defattr(-,root,root) %dir /usr/lib/obs/tests/ %dir /usr/lib/obs/tests/appliance /usr/lib/obs/tests/appliance/* %files -n obs-cloud-uploader %defattr(-,root,root) /etc/init.d/obsclouduploadworker /etc/init.d/obsclouduploadserver /usr/sbin/rcobsclouduploadworker /usr/sbin/rcobsclouduploadserver /usr/lib/obs/server/bs_clouduploadserver /usr/lib/obs/server/bs_clouduploadworker %{_bindir}/clouduploader %dir /etc/obs %dir /etc/obs/cloudupload %dir /etc/obs/cloudupload/.aws %config(noreplace) /etc/obs/cloudupload/.aws/credentials %config /etc/obs/cloudupload/.ec2utils.conf %package -n obs-container-registry Summary: The Open Build Service -- container registry %if 0%{?suse_version} < 1210 && 0%{?suse_version:1} Group: Productivity/Networking/Web/Utilities %endif Requires: docker-distribution-registry %description -n obs-container-registry The OBS Container Registry, based on the docker registry, which allows * anonymous pulls from anywhere * anonymous pushes from localhost. This is done by proxying access to the registry through apache and restricting any other http method than GET and HEAD to localhost. %files -n obs-container-registry %defattr(-,root,root) %dir /srv/www/obs/container-registry %dir /srv/www/obs/container-registry/log %dir /srv/www/obs/container-registry/htdocs %config /etc/apache2/vhosts.d/obs-container-registry.conf %changelog open-build-service-2.9.4/dist/obs-signd-conf.tar.bz2000066400000000000000000000245531332555733200222200ustar00rootroot00000000000000[binary bzip2 data omitted] open-build-service-2.9.4/dist/obs-source_service.logrotate000066400000000000000000000001561332555733200237220ustar00rootroot00000000000000/srv/obs/service/log/*.log { compress dateext rotate 2 daily missingok copytruncate } open-build-service-2.9.4/dist/obs.repo_server.reg000066400000000000000000000005351332555733200220140ustar00rootroot00000000000000############################################################################# # # OpenSLP registration file # # register OBS repository server # ############################################################################# service:obs.repo_server:http://$HOSTNAME:5252,en,65535 watch-port-tcp=5252 description=Open Build Service Repository Server open-build-service-2.9.4/dist/obs.source_server.reg000066400000000000000000000005271332555733200223500ustar00rootroot00000000000000############################################################################# # # OpenSLP registration file # # register OBS source server #
############################################################################# service:obs.source_server:http://$HOSTNAME:5352,en,65535 watch-port-tcp=5352 description=Open Build Service Source Server open-build-service-2.9.4/dist/obs_admin000077500000000000000000000000631332555733200200550ustar00rootroot00000000000000#!/bin/bash exec /usr/lib/obs/server/bs_admin "$@" open-build-service-2.9.4/dist/obs_api_delayed_jobs_monitor.cron000077500000000000000000000001451332555733200247520ustar00rootroot00000000000000*/10 * * * * /usr/bin/bundle.ruby2.5 exec /srv/www/obs/api/script/delayed_job_monitor.rb > /dev/null open-build-service-2.9.4/dist/obs_import_srcdebtree000077500000000000000000000145501332555733200225070ustar00rootroot00000000000000#!/bin/bash # 2009, dl9pf@gmx.de - initial version # ########################################################## #Setup Vars export debug=false # usage="usage: $0 target obs = http(s)://your-api/ target project = test:import baseurl = http://source-repo (without dist/pool) Sources.bz2 = http://source-repo/.../Sources.bz2 (Sources/Sources.gz/Sources.bz2) " targetobs=$1 targetprj=$2 baseurl=$3 urlbase=$4 if test x"$4" == x""; then echo "$usage" exit 0 fi # empty stack popd 2>&1 > /dev/null # # create tmp and fetch sources{.gz,.bz2} ######################################## mkdir -p tmp && pushd tmp && rm -f Sources && wget -nd "$urlbase" || ( popd && echo "error on fetching $urlbase" && exit 1 ) if [ "$(echo $urlbase | grep .bz2)" ]; then # extract bz2 bunzip2 Sources.bz2 || exit 1 elif [ "$(echo $urlbase | grep .gz)" ]; then # extract gzip gunzip Sources.gz || exit 1 fi popd # generate list of packages on local server ########################################### osc -A $targetobs ls $targetprj 2>/dev/null | sort >tmp/.ls # generate list of remote packages out of Sources file ###################################################### cat tmp/Sources | grep "Package:" | cut -d":" -f2 | sed -e "s/ //g" | sort | uniq > tmp/.pkglist pkglist=$(cat tmp/.pkglist) lslist=$(cat tmp/.ls ) echo "$0 $targetobs $targetprj $baseurl $urlbase" echo "Urllist:" echo "$pkglist" echo echo "Lslist:" echo "$lslist" echo echo "Difflist:" diff tmp/.ls tmp/.pkglist echo ########################################################## # Check out target prj if $debug; then echo "debug" # skip on debug else ( pushd tmp if [ x"$lslist" != x"" ]; then # checkout only 1st pkg (others will follow on-the-fly) osc -A $targetobs co "$targetprj" `echo $lslist | cut -d" " -f1 | head -n 1` >& /dev/null else echo "Checking out empty project."
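# (added note) Nothing exists on the target yet, so only the project itself is
# checked out here; package working copies are created on the fly further down,
# via 'osc mkpac' for new packages and 'osc co' for packages that already exist.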
osc -A $targetobs co $targetprj >& /dev/null fi popd ) fi ############################################################# # sync if true; then newlist="" # if we did run already, then if test -f .last; then regexp=`cat .last` newlist=`echo "$pkglist" | sed -n -e "/$regexp/,$ {/$regexp/d;p}"` export pkglist="$newlist" fi for f in $pkglist do ( pushd tmp && ( if [ x$(echo $f | egrep "_[0-9]") != x"" ]; then pkgname=${f%%-[0-9].[*-[0-9]*-[0-9]*.*} else pkgname=${f%%-[0-9]*-[0-9]*.*} fi # folder on the server myurlbase=`cat Sources | grep "Directory:" | grep "/$f\$" | cut -d":" -f2 | sed -e "s/ //g" | uniq` # highest version of a .dsc file -> critical section # * " $(echo $f)_" to grep the pkgname_ without false positive from lib_ # * we only look for the .dsc first and get the filenames lateron from that file # * either sort -nr or sort -gr - both have some false corner-cases myversion=`cat Sources | grep " $(echo $f)_" | grep .dsc | sed -e "s/_//g" | sed -e "s/.dsc//g" | sed -e "s/.*$(echo $f)//g" | sort -nr | head -n1 ` # .dsc to fetch and inspect togetdsc=`cat Sources | grep " $(echo $f)_" | grep "_$(echo $myversion)" | grep dsc | cut -d" " -f4` (set -x ; wget -c -q $baseurl/$myurlbase/$togetdsc || wget -c -q $baseurl/$myurlbase/$togetdsc || wget -c -q $baseurl/$myurlbase/$togetdsc || wget -c -q $baseurl/$myurlbase/$togetdsc || exit 1) export downloadme="" if test -f $togetdsc; then export downloadme=`cat $(echo \$togetdsc) | grep "$(echo $f)_" | cut -d" " -f4 ` else echo "Failed to download .dsc" exit 1 fi if $debug; then echo "myurlbase: $myurlbase" echo "myversion: $myversion" echo "togetdsc: $togetdsc" echo "toget: $downloadme" # exit 1 else for j in $downloadme; do echo "download: $j" (set -x ; wget -c -q $baseurl/$myurlbase/$j || wget -c -q $baseurl/$myurlbase/$j || wget -c -q $baseurl/$myurlbase/$j || wget -c -q $baseurl/$myurlbase/$j || exit 1) done # if pkg doesn't exists on local server if [ x"$(osc -A $targetobs ls $targetprj/$pkgname 2>/dev/null)" == x"" ]; then ( pushd $targetprj osc -A $targetobs mkpac $f pushd $pkgname for i in $downloadme $togetdsc ; do cp ../../$i . ; done osc -A $targetobs addremove 2>/dev/null; osc -A $targetobs ci -m "imported: $urlbase/$f @ $targetobs/$pkgname" 2>/dev/null popd popd ) # update existing pkg else ( pushd $targetprj if ! test -d $pkgname ; then osc -A $targetobs co $pkgname 2>/dev/null else pushd $pkgname osc -A $targetobs up popd fi pushd $pkgname && rm ./* for i in $downloadme $togetdsc ; do cp ../../$i . ; done osc -A $targetobs addremove 2>/dev/null; osc -A $targetobs ci -m "updated: $urlbase/$f @ $targetobs/$pkgname" 2>/dev/null popd popd ) fi fi ) cd .. 
echo "$f" > .last ) done fi rm -f .lastopen-build-service-2.9.4/dist/obs_import_srcrpmtree000077500000000000000000000046231332555733200225530ustar00rootroot00000000000000#!/bin/bash ########################################################## #Setup Vars # usage="usage: $0 " targetobs=$1 targetprj=$2 urlbase=$3 mkdir -p tmp && curl -s -l $urlbase >& tmp/.listing osc -A $targetobs ls $targetprj 2>/dev/null | sort >tmp/.ls if [ x"$(grep '' tmp/.listing)" != x"" ] then cat tmp/.listing | awk '{print $2}' | grep 'src.rpm' | sed -e "s/^href=\"//g" | awk -F'"' '{print $1}' | sort >tmp/.ftplist else cat tmp/.listing | sort >tmp/.ftplist fi ftplist=$(cat tmp/.ftplist) lslist=$(cat tmp/.ls) rm -f tmp/.ftpls for f in $ftplist do if [ x$(echo $f | egrep "_[0-9]") != x"" ] then pkgname=${f%%-[0-9].[*-[0-9]*-[0-9]*.*} else pkgname=${f%%-[0-9]*-[0-9]*.*} fi echo $pkgname >>tmp/.ftpls done echo "$0 $targetobs $targetprj $urlbase" echo "Urllist:" echo "$ftplist" echo echo "Lslist:" echo "$lslist" echo echo "Difflist:" diff tmp/.ls tmp/.ftpls echo ########################################################## # Check out target packages ( cd tmp && rm -rf $targetprj if [ x"$lslist" != x"" ] then for f in $lslist do osc -A $targetobs co $targetprj/$f >& /dev/null break done else osc -A $targetobs co $targetprj >& /dev/null fi ) for f in $ftplist do (cd tmp && ( if [ x$(echo $f | egrep "_[0-9]") != x"" ] then pkgname=${f%%-[0-9].[*-[0-9]*-[0-9]*.*} else pkgname=${f%%-[0-9]*-[0-9]*.*} fi (set -x ; wget -c -q $urlbase/$f || wget -c -q $urlbase/$f || wget -c -q $urlbase/$f || wget -c -q $urlbase/$f) if [ x"$(osc -A $targetobs ls $targetprj/$pkgname 2>/dev/null)" == x"" ] then ( rm -rf $targetprj/$pkgname && osc -A $targetobs rdelete $targetprj $pkgname >& /dev/null cd $targetprj && osc -A $targetobs importsrcpkg -n $pkgname ../$f 2>/dev/null cd $pkgname && (osc -A $targetobs addremove 2> /dev/null ; osc -A $targetobs ci -m "imported: $urlbase/$f @ $targetobs/$pkgname" 2>/dev/null) ) else ( osc -A $targetobs co $targetprj/$pkgname 2>/dev/null && rm -rf $targetprj/$pkgname/* >& /dev/null cd $targetprj/$pkgname && rpm2cpio ../../$f | cpio -ivd osc -A $targetobs addremove 2>/dev/null; osc -A $targetobs ci -m "imported: $urlbase/$f @ $targetobs/$pkgname" 2>/dev/null ) fi ) ) done open-build-service-2.9.4/dist/obs_mirror_project000077500000000000000000000207301332555733200220300ustar00rootroot00000000000000#!/usr/bin/ruby # This script mirrors a base distribution from the opensuse.org Build Service. # You can use it to create initial base projects in your build service to build for. # # This script does mirror only build packages, not the sources. # require 'optparse' # This hash will hold all of the options # parsed from the command-line by OptionParser. options = {} optparse = OptionParser.new do |opts| # Set a banner, displayed at the top of the help screen. opts.banner = "------------------------------------------------------------------------------------------- Usage: obs_mirror_project.rb -p PROJECT -r REPOSITORY [-a ARCHITECTURE] [-d DESTINATION] [-A APIURL] [-t] [-v] Example: (mirror openSUSE 13.1 as base distro) obs_mirror_project -p openSUSE:13.1 -r standard -a i586,x86_64 ------------------------------------------------------------------------------------------- Options help: " # Define the options, and what they do options[:proj] = "" opts.on( '-p', '--proj PROJECT', "Project Name: eg. openSUSE:13.1,Ubuntu:14.04,etc." 
) do |f| options[:proj] = f end options[:repo] = "" opts.on( '-r', '--repo REPOSITORY', "Repository Name: eg. standard,qemu,etc." ) do |f| options[:repo] = f end options[:arch] = "" opts.on( '-a', '--arch ARCHITECTURE', "Architecture Name: eg. i586,x86_64,etc.") do |f| options[:arch] = f end options[:dest] = "" opts.on( '-d', '--dest DESTINATION', "Destination Path: eg. /obs Default: PWD (current working directory)" ) do |f| options[:dest] = f end options[:apiurl] = "" opts.on( '-A', '--api APIURL', "OSC API URL: Default: https://api.opensuse.org" ) do |f| options[:apiurl] = f end options[:trialrun] = false opts.on( '-t', '--trialrun', "Trial run: not executing actions" ) do options[:trialrun] = true end options[:verbose] = false opts.on( '-v', '--verbose', "Verbose" ) do options[:verbose] = true end # This displays the help screen, all programs are # assumed to have this option. opts.on( '-h', '--help', 'Display this screen' ) do puts opts exit end end # Parse the command-line. Remember there are two forms of the parse method: # 1. 'parse' method simply parses ARGV, # 2. 'parse!' method parses ARGV and removes any options found there, # as well as any parameters for the options. # What's left is not used by this script. optparse.parse! proj = options[:proj] repo = options[:repo] arch = options[:arch] dest = options[:dest].empty? ? Dir.pwd : options[:dest] apiurl = options[:apiurl].empty? ? "https://api.opensuse.org" : options[:apiurl] trialrun = options[:trialrun] verbose = options[:verbose] if verbose puts "Options: #{options}" puts "ARGV: #{ARGV}" puts "proj: #{proj}" puts "repo: #{repo}" puts "arch: #{arch}" puts "dest: #{dest}" puts "apiurl: #{apiurl}" puts "trialrun: #{trialrun}" puts "verbose: #{verbose}" end puts " ##################### # Data Verification # #####################" # proj, repo and arch are mandatory if proj.empty? || repo.empty? || arch.empty? puts "ERROR! Missing mandatory options: '-p', '-r' or '-a'"
puts "Options: #{options}" puts optparse.help() exit(1) end # verify apiurl puts "\nVerify API URL '#{apiurl}': osc -A #{apiurl} api -m GET /about > /dev/null" if verbose if !system("osc -A #{apiurl} api -m GET /about > /dev/null") puts "Verify API URL '#{apiurl}': failed" exit(1) else puts "Verify API URL '#{apiurl}': ok" end # verify proj puts "\nVerify proj '#{proj}': osc -A #{apiurl} api -m GET /build/#{proj} > /dev/null" if verbose if !system("osc -A #{apiurl} api -m GET /build/#{proj} > /dev/null") puts "Verify proj '#{proj}': failed" exit(1) else puts "Verify proj '#{proj}': ok" end # verify repo puts "\nVerify repo '#{repo}': osc -A #{apiurl} api -m GET /build/#{proj}/#{repo} > /dev/null" if verbose if !system("osc -A #{apiurl} api -m GET /build/#{proj}/#{repo} > /dev/null") puts "Verify repo '#{repo}': failed" exit(1) else puts "Verify repo '#{repo}': ok" end # verify arch puts "\nVerify arch '#{arch}': osc -A #{apiurl} api -m GET /build/#{proj}/#{repo}/#{arch} > /dev/null" if verbose if !system("osc -A #{apiurl} api -m GET /build/#{proj}/#{repo}/#{arch} > /dev/null") puts "Verify arch '#{arch}': failed" exit(1) else puts "Verify arch '#{arch}': ok" end # method to verify a directory def directory_verify (path, name, verbose, trialrun) puts "\nVerify #{name} directory '#{path}':" if verbose if !FileTest.directory?("#{path}") puts "Creating #{name} directory: #{path}" system("mkdir -p #{path}") if !trialrun end if FileTest.directory?("#{path}") && FileTest.writable?("#{path}") puts "Verify #{name} directory '#{path}': ok" else puts "Verify #{name} directory '#{path}': failed" exit(1) if !trialrun end return end # verify dest dir directory_verify("#{dest}", "dest", verbose, trialrun) # verify project dir directory_verify("#{dest}/projects", "projects", verbose, trialrun) # verify :full dir directory_verify("#{dest}/build/#{proj}/#{repo}/#{arch}/:full", ":full", verbose, trialrun) puts " ################ # Project meta # ################" # retrieve project meta and configuration data puts "\nRetrieve project meta data: osc -A #{apiurl} meta prj #{proj} > #{dest}/projects/#{proj}.xml" if verbose system(" osc -A #{apiurl} meta prj #{proj} > #{dest}/projects/#{proj}.xml") if !trialrun puts "\nRetrieve project configuration data: osc -A #{apiurl} meta prjconf #{proj} > #{dest}/projects/#{proj}.conf" if verbose system(" osc -A #{apiurl} meta prjconf #{proj} > #{dest}/projects/#{proj}.conf") if !trialrun puts " #################### # Project binaries # ####################" require 'rexml/document' include REXML # retrieve full binary lists if !trialrun puts "\nRetrieve full binary list: osc -A #{apiurl} api -m GET /build/#{proj}/#{repo}/#{arch}/_repository?view=names > #{dest}/build/#{proj}/#{repo}/#{arch}/binarylist.lst" system(" osc -A #{apiurl} api -m GET /build/#{proj}/#{repo}/#{arch}/_repository?view=names > #{dest}/build/#{proj}/#{repo}/#{arch}/binarylist.lst") end # open full binary list file if File.file?("#{dest}/build/#{proj}/#{repo}/#{arch}/binarylist.lst") puts "\nOpen full binary list file: File::open(\"#{dest}/build/#{proj}/#{repo}/#{arch}/binarylist.lst\", \"r\")" process = File::open("#{dest}/build/#{proj}/#{repo}/#{arch}/binarylist.lst", "r") else puts "\nOpen full binary list file: File::popen(\"osc -A #{apiurl} api -m GET /build/#{proj}/#{repo}/#{arch}/_repository?view=names\", \"r\")" process = File::popen("osc -A #{apiurl} api -m GET /build/#{proj}/#{repo}/#{arch}/_repository?view=names", "r") end # process full binary list file
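# The binary list parsed below looks roughly like this (shape inferred from the
# filename/size/mtime attributes read by the loop; not a verbatim server response):
#   <binarylist>
#     <binary filename="foo-1.0-1.2.x86_64.rpm" size="12345" mtime="1331234567"/>
#   </binarylist>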
list file puts "" filelist = Document.new(process) filelist.elements.each("binarylist/binary") { |binary| fname = binary.attributes["filename"] fsize = binary.attributes["size"] fmtime = binary.attributes["mtime"] puts "Process: #{fname} (#{fsize})" # skip src, nosrc, debuginfo, and debugsource packages if fname[-7..-1]=="src.rpm" || fname.include?("-debuginfo") || fname.include?("-debugsource") puts " skip debug: #{fname}" next end # check for existing files if File.file?("#{dest}/build/#{proj}/#{repo}/#{arch}/:full/#{fname}") puts " #{dest}/build/#{proj}/#{repo}/#{arch}/:full/#{fname} already exists!" tsize = File.size?("#{dest}/build/#{proj}/#{repo}/#{arch}/:full/#{fname}") puts " size from existing target: #{tsize}" if verbose puts " size from original source: #{fsize}" if verbose if tsize.to_i == fsize.to_i puts " skip download: size identical" next else puts " remove: size differnt" puts " remove: #{dest}/build/#{proj}/#{repo}/#{arch}/:full/#{fname}" File.delete("#{dest}/build/#{proj}/#{repo}/#{arch}/:full/#{fname}") if !trialrun end end puts " download: osc -A #{apiurl} api -m GET /build/#{proj}/#{repo}/#{arch}/_repository/#{fname} > #{dest}/build/#{proj}/#{repo}/#{arch}/:full/#{fname}" if verbose if trialrun puts " trialrun: skip download" else # re-try download 3 times for i in 0..2 unless system("osc -A #{apiurl} api -m GET /build/#{proj}/#{repo}/#{arch}/_repository/#{fname} > #{dest}/build/#{proj}/#{repo}/#{arch}/:full/#{fname}") puts " retry #{i}: download failure" if verbose puts " failure: download failure" if i == 2 next else puts " success: download finished" if verbose break end end end } open-build-service-2.9.4/dist/obs_productconvert000077500000000000000000000000741332555733200220500ustar00rootroot00000000000000#!/bin/bash exec /usr/lib/obs/server/bs_productconvert "$@" open-build-service-2.9.4/dist/obs_project_srcimport000077500000000000000000000022431332555733200225370ustar00rootroot00000000000000#!/bin/sh ########################################################## #Setup Vars ########################################################### # usage="usage: $0 [listfile]" targetobs=$1 targetprj=$2 urlbase=$3 listfile=$4 prjlist=$(cat $listfile) mkdir -p tmp && curl -s -l $urlbase >& tmp/.listing ftplist=$(cat tmp/.listing) echo "$0 $targetobs $targetprj $urlbase $listfile" echo "Projects:" echo "$prjlist" echo echo "Urllist:" echo "$ftplist" echo ########################################################## # Check out target packages ########################################################### (cd tmp && set -x ; rm -rf $targetprj/*) for f in $prjlist do (cd tmp && for j in $(grep "$f" .listing|sort -n -r) do (set -x && rm -rf $targetprj/$f && osc -A $targetobs deletepac $targetprj $f >& /dev/null osc -A $targetobs importsrcpkg --delete-old-files -n $f -p $targetprj $urlbase/$j >& /dev/null chmod -R u+rw $targetprj/$f && cd $targetprj/$f && osc -A $targetobs addremove && osc -A $targetobs ci -m"added: imported $f @ $(date) from $urlbase/$j" ) break done ) done #rm -rf tmp open-build-service-2.9.4/dist/obs_project_update000077500000000000000000000106221332555733200217770ustar00rootroot00000000000000#!/bin/sh ########################################################## #Setup Vars ########################################################### # script to copy update complete projects with the packages meta data from obs to obs # - uses "osc ci" to update target, so it generates a version history in the target obs for the packages # - packages in the target with are not in the source 
# - if the list contains packages, which are not in the source, they are ignored # - devel project/person userid entries are ignored # - files with name "ready" are ignored # - project-wide data/meta data like build repos are neither copied nor created # # example: # $ osc -A https://api.opensuse.org ls Base:build | obs_project_update https://api.opensuse.org openSUSE:Factory https://api.yourdomain.ext openSUSE:Factory # $ echo "kiwi deb" | obs_project_update -u https://api.opensuse.org OBS:Server:2.4 https://api.yourdomain.ext OBS:Server:2.4 # usage="usage: $0 {-e | -u} <sourceobs> <sourceprj> <targetobs> <targetprj> [listfile]" srcexpand='-e' dstexpand='-u' if [ X"$1" == "X-u" ]; then srcexpand='-u' shift elif [ X"$1" == "X-e" ]; then shift fi sourceobs=$1 sourceprj=$2 targetobs=$3 targetprj=$4 listfile=$5 prjlist=$(cat $listfile | sort | uniq) echo "$0 $srcexpand $sourceobs $sourceprj $targetobs $targetprj $listfile" echo "Projects:" echo "$prjlist" echo ########################################################## # Check out source packages ########################################################### rm -rf S && mkdir S ( cd S && for f in $prjlist do (set -x && (rm -rf $sourceprj/$f; osc -A $sourceobs co $srcexpand $sourceprj/$f) || (rm -rf $sourceprj/$f; osc -A $sourceobs co $srcexpand $sourceprj/$f) || (rm -rf $sourceprj/$f; osc -A $sourceobs co $srcexpand $sourceprj/$f) || (rm -rf $sourceprj/$f; osc -A $sourceobs co $srcexpand $sourceprj/$f) || (rm -rf $sourceprj/$f; osc -A $sourceobs co $srcexpand $sourceprj/$f) || (rm -rf $sourceprj/$f; osc -A $sourceobs co $srcexpand $sourceprj/$f) ) done ) ########################################################## # Check out target packages ########################################################### rm -rf T && mkdir T ( cd T && for f in $prjlist do # Copy pkg meta info before checking out target package (set -x && (osc -A $sourceobs meta pkg $sourceprj/$f | grep -v " existing in (cd M/$targetprj && for f in * do DATE=$(date) (set -x && cd $f && osc -A $targetobs addremove && osc -A $targetobs ci -m "updated: on $DATE from -A $sourceobs $sourceprj/$f to $targetprj/$f" ) done ) #rm -rf S T M open-build-service-2.9.4/dist/obs_rebuild_db000077500000000000000000000025341332555733200210650ustar00rootroot00000000000000#!/bin/bash echo "This script rebuilds the DB for an OBS system from an existing backend" echo "since OBS api does not support user / group operations, you have to restore users by hand first" echo "you have to 1. set up a working OBS 2. restore all backend data, incl. projects dir" echo "3. copy the data from projects (.xml, .conf, .pkg files and dirs) to the . dir" echo "call this script from . with your api server as parameter" echo "since the backend xml data also contain the revision control info, you also get the commit history back" echo ".conf files for a project are not stored in the DB either, so you get them back with this method too" echo echo "last but not least, be careful with project dependencies, they are not resolved with this script" echo date echo APISERVER="$1" test -n "$APISERVER" || { echo "usage: $0 <apiserver>, e.g. $0 https://api.opensuse.org"; exit 1; }
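# Each <project>.xml in the current directory is fed back below via "osc meta prj -F",
# and each <project>.pkg directory restores its package metadata the same way;
# the backend stores the revision data alongside, so the commit history comes back with it.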
$0 https://api.opensuse.org" echo "OBS recreating projects for api server "$APISERVER"" echo for f in *.xml do base=${f%%.xml} (set -x && osc -A "$APISERVER" meta prj -F $f $base ) done echo echo "OBS recreating packages" echo for f in *.pkg do base=${f%%.pkg} echo echo "OBS recreating packages for project $f" echo ( cd $f for g in *.xml do pkgbase=${g%%.xml} (set -x && osc -A "$APISERVER" meta pkg -F $g $base $pkgbase ) done ) done echo date echo open-build-service-2.9.4/dist/obs_serverstatus000077500000000000000000000000721332555733200215370ustar00rootroot00000000000000#!/bin/bash exec /usr/lib/obs/server/bs_serverstatus "$@" open-build-service-2.9.4/dist/obsapidelayed000077500000000000000000000117271332555733200207400ustar00rootroot00000000000000#! /bin/bash # Copyright (c) 2009 SUSE Linux AG, Nuernberg, Germany. # All rights reserved. # # Author: Adrian Schroeter # Please send feedback to http://www.suse.de/feedback/ # # /etc/init.d/obsapidelayed # and its symbolic link # /(usr/)sbin/rcobsapidelayed # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. # ### BEGIN INIT INFO # Provides: obsapidelayed # Required-Start: $network $remote_fs $syslog # Required-Stop: $remote_fs $syslog # Should-Start: obsapisetup # Should-Stop: $none # Default-Start: 3 5 # Default-Stop: 0 1 2 6 # Short-Description: OBS api delayed jobs # Description: Start the delayed job handler for OBS api ### END INIT INFO . 
/etc/rc.status API_ROOT=/srv/www/obs/api CLOCKWORKD=/usr/bin/clockworkd.ruby2.5 # If you are using newrelic_rpm you need this # to fix detection of our delay_job.api daemons export NEW_RELIC_DISPATCHER=delayed_job function run_in_api { export RAILS_ENV="production" # startproc only works for classical daemons chroot --userspec=wwwrun:www / /bin/bash -c "cd $API_ROOT && /usr/bin/bundle.ruby2.5 exec $*" } # number of parallel delayed jobs NUM=3 # Reset status of this service rc_reset case "$1" in start) echo -n "Starting OBS api delayed job handler " run_in_api script/delayed_job.api.rb --queue=quick start -n $NUM run_in_api script/delayed_job.api.rb --queue=releasetracking start -i 1000 run_in_api script/delayed_job.api.rb --queue=issuetracking start -i 1010 run_in_api script/delayed_job.api.rb --queue=mailers start -i 1020 # The default queue used by ActiveJob (jobs scheduled with .perform_later) run_in_api script/delayed_job.api.rb --queue=default start -i 1030 run_in_api script/delayed_job.api.rb --queue=project_log_rotate start -i 1040 run_in_api script/delayed_job.api.rb --queue=consistency_check start -i 1050 rc_status -v echo -n "Starting OBS searchd daemon " FILE_SIZE=`stat -c '%s' /srv/www/obs/api/config/production.sphinx.conf` if [ $FILE_SIZE -eq 0 ];then run_in_api rails.ruby2.5 ts:rebuild else run_in_api rails.ruby2.5 ts:start fi rc_status -v echo -n "Starting OBS api clock daemon " run_in_api $CLOCKWORKD --log-dir=log -l -c config/clock.rb start rc_status -v ;; stop) echo -n "Shutting down OBS api delayed job handler " run_in_api script/delayed_job.api.rb --queue=quick stop -n $NUM run_in_api script/delayed_job.api.rb --queue=releasetracking stop -i 1000 run_in_api script/delayed_job.api.rb --queue=issuetracking stop -i 1010 run_in_api script/delayed_job.api.rb --queue=mailers stop -i 1020 run_in_api script/delayed_job.api.rb --queue=default stop -i 1030 run_in_api script/delayed_job.api.rb --queue=project_log_rotate stop -i 1040 run_in_api script/delayed_job.api.rb --queue=consistency_check stop -i 1050 rc_status -v echo -n "Shutting down OBS api clock daemon " run_in_api $CLOCKWORKD -l -c config/clock.rb stop rc_status -v echo -n "Shutting down OBS searchd daemon " run_in_api rails.ruby2.5 ts:stop rc_status -v ;; try-restart|condrestart) if test "$1" = "condrestart"; then echo "${attn} Use try-restart ${done}(LSB)${attn} rather than condrestart ${warn}(RH)${norm}" fi $0 status if test $? = 0; then $0 restart else rc_reset # Not running is not a failure. fi rc_status ;; clean-restart) $0 stop rm -f $API_ROOT/db/sphinx/production/* $0 start run_in_api rails.ruby2.5 ts:index rc_status ;; restart) $0 stop $0 start rc_status ;; force-reload) echo -n "Reload service OBS api delayed jobs " $0 try-restart rc_status ;; reload) ## Not supported rc_failed 3 rc_status -v ;; status) echo -n "Checking for service delayed OBS api jobs " checkproc delayed_job.0 [ $? == $NUM ] rc_status -v ;; *) echo "Usage: $0 {start|stop|status|try-restart|restart|force-reload|reload}" exit 1 ;; esac rc_exit open-build-service-2.9.4/dist/obsapisetup000077500000000000000000000025511332555733200204640ustar00rootroot00000000000000#! /bin/sh # Copyright (c) 2010, Novell Inc. 
# # Author: adrian@suse.de # # /etc/init.d/obsapisetup # and its symbolic link # /usr/sbin/rcobsapisetup # ### BEGIN INIT INFO # Provides: obsapisetup # X-Start-Before: apache2 # Should-Start: obsstoragesetup obssrcserver obsrepserver # Should-Stop: $none # Required-Start: mysql # Required-Stop: $null # Default-Start: 3 5 # Default-Stop: 0 1 2 4 6 # Description: Initialize and update api database, only used in OBS Appliance ### END INIT INFO . /etc/rc.status # make parsed output predictable export LC_ALL=C # package or appliance defaults if [ -e /etc/sysconfig/obs-server ]; then source /etc/sysconfig/obs-server fi if [ "$OBS_API_AUTOSETUP" != "yes" ]; then echo "OBS API Autosetup is not enabled in sysconfig, skipping!" exit 0 fi rc_reset case "$1" in start) [[ $HOME == '' ]] && export HOME=/root /usr/lib/obs/server/setup-appliance.sh --non-interactive --setup-only rc_status -v ;; stop) # nothing to do rc_status -v ;; restart) # nothing to do rc_status ;; try-restart) # nothing to do rc_status ;; reload) # nothing to do rc_status ;; status) # nothing to do rc_status -v ;; *) echo "Usage: $0 {start|stop|status|try-restart|restart|reload}" exit 1 ;; esac rc_exit open-build-service-2.9.4/dist/obsclouduploadserver000077500000000000000000000041531332555733200223740ustar00rootroot00000000000000#! /bin/sh # Copyright (c) 2007, Novell Inc. # # Author: adrian@suse.de # # /etc/init.d/obsclouduploadserver # and its symbolic link # /usr/sbin/rcobsclouduploadserver # ### BEGIN INIT INFO # Provides: obsclouduploadserver # Required-Start: $time $syslog # Required-Stop: $null # Should-Start: $none # Should-Stop: $none # Default-Start: 3 5 # Default-Stop: 0 1 2 4 6 # Description: open build service cloud upload server ### END INIT INFO . /etc/rc.status . /etc/sysconfig/obs-server # Determine the base and follow a runlevel link name. base=${0##*/} link=${base#*[SK][0-9][0-9]} if [ -z "$OBS_RUN_DIR" ]; then OBS_RUN_DIR="/srv/obs/run" fi if [ -z "$OBS_LOG_DIR" ]; then OBS_LOG_DIR="/srv/obs/log" fi if [ -n "$OBS_BACKENDCODE_DIR" ]; then obsdir="$OBS_BACKENDCODE_DIR" else obsdir=/usr/lib/obs/server fi rundir="$OBS_RUN_DIR" logdir="$OBS_LOG_DIR" rundir_perm() { # make sure rundir is group writable test "$(stat -c "%A" "$rundir" | cut -c6)" = "-" && chmod 0775 "$rundir" } rc_reset case "$1" in start) echo -n "Initializing obsclouduploadserver" mkdir -p "$rundir" "$logdir" rundir_perm chown obsrun:obsrun "$logdir" "$rundir" startproc -f -l "$logdir"/clouduploadserver.log "$obsdir"/bs_clouduploadserver rc_status -v ;; stop) echo -n "Shutting down obsclouduploadserver" "$obsdir"/bs_clouduploadserver --stop rc_status -v ;; restart) ## If first returns OK call the second, if first or ## second command fails, set echo return value. rundir_perm "$obsdir"/bs_clouduploadserver --restart rc_status ;; try-restart|reload) $0 status if test $? = 0; then rundir_perm "$obsdir"/bs_clouduploadserver --restart else rc_reset # Not running is not a failure. fi # Remember status and be quiet rc_status ;; status) echo -n "Checking for obsclouduploadserver and running processes: " "$obsdir"/bs_serverstatus "$OBS_RUN_DIR"/bs_clouduploadserver.status checkproc "$obsdir"/bs_clouduploadserver rc_status -v ;; *) echo "Usage: $0 {start|stop|status|try-restart|restart|reload}" exit 1 ;; esac rc_exit open-build-service-2.9.4/dist/obsclouduploadworker000077500000000000000000000040401332555733200223720ustar00rootroot00000000000000#! /bin/sh # Copyright (c) 2007, Novell Inc.
# # Author: adrian@suse.de # # /etc/init.d/obsclouduploadworker # and its symbolic link # /usr/sbin/rcobsclouduploadworker # ### BEGIN INIT INFO # Provides: obsclouduploadworker # Required-Start: $time $syslog # Required-Stop: $null # Should-Start: $none # Should-Stop: $none # Default-Start: 3 5 # Default-Stop: 0 1 2 4 6 # Description: open build service cloud upload worker ### END INIT INFO . /etc/rc.status . /etc/sysconfig/obs-server # Determine the base and follow a runlevel link name. base=${0##*/} link=${base#*[SK][0-9][0-9]} if [ -z "$OBS_RUN_DIR" ]; then OBS_RUN_DIR="/srv/obs/run" fi if [ -z "$OBS_LOG_DIR" ]; then OBS_LOG_DIR="/srv/obs/log" fi if [ -n "$OBS_BACKENDCODE_DIR" ]; then obsdir="$OBS_BACKENDCODE_DIR" else obsdir=/usr/lib/obs/server fi rundir="$OBS_RUN_DIR" logdir="$OBS_LOG_DIR" rundir_perm() { # make sure rundir is group writable test "$(stat -c "%A" "$rundir" | cut -c6)" = "-" && chmod 0775 "$rundir" } rc_reset case "$1" in start) echo -n "Initializing obsclouduploadworker" mkdir -p "$rundir" "$logdir" rundir_perm chown obsrun:obsrun "$logdir" "$rundir" startproc -f -l "$logdir"/clouduploadworker.log "$obsdir"/bs_clouduploadworker rc_status -v ;; stop) echo -n "Shutting down obsclouduploadworker" "$obsdir"/bs_clouduploadworker --stop rc_status -v ;; restart) ## If first returns OK call the second, if first or ## second command fails, set echo return value. rundir_perm "$obsdir"/bs_clouduploadworker --restart rc_status ;; try-restart|reload) $0 status if test $? = 0; then rundir_perm "$obsdir"/bs_clouduploadworker --restart else rc_reset # Not running is not a failure. fi # Remember status and be quiet rc_status ;; status) echo -n "Checking for obsclouduploadworker and running processes: " checkproc "$obsdir"/bs_clouduploadworker rc_status -v ;; *) echo "Usage: $0 {start|stop|status|try-restart|restart|reload}" exit 1 ;; esac rc_exit open-build-service-2.9.4/dist/obsdeltastore000077500000000000000000000041761332555733200210010ustar00rootroot00000000000000#! /bin/sh # Copyright (c) 2016, SUSE LLC # # Author: mls@suse.de # # /etc/init.d/obsdeltastore # and its symbolic link # /usr/sbin/rcobsdeltastore # ### BEGIN INIT INFO # Provides: obsdeltastore # Required-Start: $time $syslog # Required-Stop: $null # Should-Start: obsstoragesetup # Should-Stop: $none # Default-Start: 3 5 # Default-Stop: 0 1 2 4 6 # Description: open build service source delta generator ### END INIT INFO . /etc/rc.status . /etc/sysconfig/obs-server # Determine the base and follow a runlevel link name.
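# (e.g. when invoked via a runlevel link such as /etc/init.d/rc5.d/S20obsdeltastore,
#  base becomes "S20obsdeltastore" and link strips the "S20" prefix, leaving "obsdeltastore")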
base=${0##*/} link=${base#*[SK][0-9][0-9]} if [ -z "$OBS_RUN_DIR" ]; then OBS_RUN_DIR="/srv/obs/run" fi if [ -z "$OBS_LOG_DIR" ]; then OBS_LOG_DIR="/srv/obs/log" fi if [ -n "$OBS_BACKENDCODE_DIR" ]; then obsdir="$OBS_BACKENDCODE_DIR" else obsdir=/usr/lib/obs/server fi rundir="$OBS_RUN_DIR" logdir="$OBS_LOG_DIR" rc_reset case "$1" in start) echo -n "Initializing obsdeltastore" # need logdir before startproc and doing chown to prevent other daemons sharing logdir # from failing because of permissions problems mkdir -p $logdir bsuser=`perl -I/usr/lib/obs/server -MBSConfig -e 'print ( $BSConfig::bsuser || "obsrun" )'` bsgroup=`perl -I/usr/lib/obs/server -MBSConfig -e 'print ( $BSConfig::bsgroup || "obsrun" )'` chown $bsuser:$bsgroup "$logdir" # FIXME: not nice, this should receive a proper daemon handling, # including real logging, pid file and startproc startproc -f -l "$logdir"/deltastore.log \ "$obsdir"/bs_deltastore rc_status -v ;; stop) echo -n "Shutting down obsdeltastore" "$obsdir"/bs_deltastore --stop rc_status -v ;; restart) "$obsdir"/bs_deltastore --restart rc_status ;; try-restart|reload) $0 status if test $? = 0; then "$obsdir"/bs_deltastore --restart else rc_reset # Not running is not a failure. fi # Remember status and be quiet rc_status ;; status) echo -n "Checking for obsdeltastore: " checkproc "$obsdir"/bs_deltastore rc_status -v ;; *) echo "Usage: $0 {start|stop|status|try-restart|restart|reload}" exit 1 ;; esac rc_exit open-build-service-2.9.4/dist/obsdispatcher000077500000000000000000000035731332555733200207650ustar00rootroot00000000000000#! /bin/sh # Copyright (c) 2007, Novell Inc. # # Author: adrian@suse.de # Martin Mohring # Susanne Oberhauser # # /etc/init.d/obsdispatcher # and its symbolic link # /usr/sbin/rcobsdispatcher # ### BEGIN INIT INFO # Provides: obsdispatcher # Required-Start: $time $syslog obsrepserver # Should-Start: obssrcserver obsscheduler $remote_fs # Should-Stop: $none # Required-Stop: $null # Default-Start: 3 5 # Default-Stop: 0 1 2 4 6 # Description: open build service job dispatcher ### END INIT INFO . /etc/rc.status . /etc/sysconfig/obs-server # Determine the base and follow a runlevel link name. base=${0##*/} link=${base#*[SK][0-9][0-9]} if [ -z "$OBS_RUN_DIR" ]; then OBS_RUN_DIR="/srv/obs/run" fi if [ -z "$OBS_LOG_DIR" ]; then OBS_LOG_DIR="/srv/obs/log" fi if [ -n "$OBS_BACKENDCODE_DIR" ]; then obsdir="$OBS_BACKENDCODE_DIR" else obsdir=/usr/lib/obs/server fi rundir="$OBS_RUN_DIR" logdir="$OBS_LOG_DIR" rc_reset case "$1" in start) echo -n "Initializing obsdispatcher" # FIXME: not nice, this should receive a proper daemon handling, # including real logging, pid file and startproc startproc -f -l "$logdir"/dispatcher.log "$obsdir"/bs_dispatch rc_status -v ;; stop) echo -n "Shutting down obsdispatcher" "$obsdir"/bs_dispatch --stop rc_status -v ;; restart) "$obsdir"/bs_dispatch --restart rc_status ;; try-restart|reload) $0 status if test $? = 0; then "$obsdir"/bs_dispatch --restart else rc_reset # Not running is not a failure. fi # Remember status and be quiet rc_status ;; status) echo -n "Checking for obsdispatcher:" checkproc "$obsdir"/bs_dispatch rc_status -v ;; *) echo "Usage: $0 {start|stop|status|try-restart|restart|reload}" exit 1 ;; esac rc_exit open-build-service-2.9.4/dist/obsdodup000077500000000000000000000034531332555733200177470ustar00rootroot00000000000000#! 
/bin/sh # Copyright (c) 2015, SUSE LLC # # Author: mls@suse.com # # /etc/init.d/obsdodup # and its symbolic link # /usr/sbin/rcobsdodup # ### BEGIN INIT INFO # Provides: obsdodup # Required-Start: $time $syslog # Required-Stop: obssrcserver $null # Should-Start: obsstoragesetup # Should-Stop: $none # Default-Start: 3 5 # Default-Stop: 0 1 2 4 6 # Description: open build service dodup, updates download on demand metadata ### END INIT INFO . /etc/rc.status . /etc/sysconfig/obs-server # Determine the base and follow a runlevel link name. base=${0##*/} link=${base#*[SK][0-9][0-9]} if [ -z "$OBS_RUN_DIR" ]; then OBS_RUN_DIR="/srv/obs/run" fi if [ -z "$OBS_LOG_DIR" ]; then OBS_LOG_DIR="/srv/obs/log" fi if [ -n "$OBS_BACKENDCODE_DIR" ]; then obsdir="$OBS_BACKENDCODE_DIR" else obsdir=/usr/lib/obs/server fi rundir="$OBS_RUN_DIR" logdir="$OBS_LOG_DIR" rc_reset case "$1" in start) echo -n "Initializing obsdodup" mkdir -p $rundir $logdir chown obsrun:obsrun "$logdir" "$rundir" # FIXME: not nice, this should receive a proper daemon handling, # including real logging, pid file and startproc startproc -f -l "$logdir"/dodup.log \ "$obsdir"/bs_dodup rc_status -v ;; stop) echo -n "Shutting down obsdodup" "$obsdir"/bs_dodup --stop rc_status -v ;; restart) "$obsdir"/bs_dodup --restart rc_status ;; try-restart|reload) $0 status if test $? = 0; then "$obsdir"/bs_dodup --restart else rc_reset # Not running is not a failure. fi # Remember status and be quiet rc_status ;; status) echo -n "Checking for obsdodup: " checkproc "$obsdir"/bs_dodup rc_status -v ;; *) echo "Usage: $0 {start|stop|status|try-restart|restart|reload}" exit 1 ;; esac rc_exit open-build-service-2.9.4/dist/obspublisher000077500000000000000000000036121332555733200206260ustar00rootroot00000000000000#! /bin/sh # Copyright (c) 2007, Novell Inc. # # Author: Adrian Schroeter # Martin Mohring # Susanne Oberhauser # # /etc/init.d/obspublisher # and its symbolic link # /usr/sbin/rcobspublisher # ### BEGIN INIT INFO # Provides: obspublisher # Required-Start: $time $syslog # Should-Start: obssrcserver obsrepserver obsscheduler # Should-Stop: $none # Required-Stop: $null # Default-Start: 3 5 # Default-Stop: 0 1 2 4 6 # Description: open build service repository publisher ### END INIT INFO . /etc/rc.status . /etc/sysconfig/obs-server # Determine the base and follow a runlevel link name. base=${0##*/} link=${base#*[SK][0-9][0-9]} if [ -z "$OBS_RUN_DIR" ]; then OBS_RUN_DIR="/srv/obs/run" fi if [ -z "$OBS_LOG_DIR" ]; then OBS_LOG_DIR="/srv/obs/log" fi if [ -n "$OBS_BACKENDCODE_DIR" ]; then obsdir="$OBS_BACKENDCODE_DIR" else obsdir=/usr/lib/obs/server fi rundir="$OBS_RUN_DIR" logdir="$OBS_LOG_DIR" rc_reset case "$1" in start) echo -n "Initializing obspublisher" # FIXME: not nice, this should receive a proper daemon handling, # including real logging, pid file and startproc startproc -f -l "$logdir"/publisher.log \ "$obsdir"/bs_publish rc_status -v ;; stop) echo -n "Shutting down obspublisher" "$obsdir"/bs_publish --stop rc_status -v ;; restart) "$obsdir"/bs_publish --restart rc_status ;; try-restart|reload) $0 status if test $? = 0; then "$obsdir"/bs_publish --restart else rc_reset # Not running is not a failure. fi # Remember status and be quiet rc_status ;; status) echo -n "Checking for obspublisher: " checkproc "$obsdir"/bs_publish rc_status -v ;; *) echo "Usage: $0 {start|stop|status|try-restart|restart|reload}" exit 1 ;; esac rc_exit open-build-service-2.9.4/dist/obsrepserver000077500000000000000000000036731332555733200206550ustar00rootroot00000000000000#! 
/bin/sh # Copyright (c) 2007, Novell Inc. # # Author: adrian@suse.de # # /etc/init.d/obsrepserver # and its symbolic link # /usr/sbin/rcobsrepserver # ### BEGIN INIT INFO # Provides: obsrepserver # Required-Start: $time $syslog $network $remote_fs # Should-Start: obssigner obsstoragesetup slpd # Should-Stop: $none # Required-Stop: $null # Default-Start: 3 5 # Default-Stop: 0 1 2 4 6 # Description: open build service repository server ### END INIT INFO . /etc/rc.status . /etc/sysconfig/obs-server # Determine the base and follow a runlevel link name. base=${0##*/} link=${base#*[SK][0-9][0-9]} if [ -z "$OBS_RUN_DIR" ]; then OBS_RUN_DIR="/srv/obs/run" fi if [ -z "$OBS_LOG_DIR" ]; then OBS_LOG_DIR="/srv/obs/log" fi if [ -n "$OBS_BACKENDCODE_DIR" ]; then obsdir="$OBS_BACKENDCODE_DIR" else obsdir=/usr/lib/obs/server fi rundir="$OBS_RUN_DIR" logdir="$OBS_LOG_DIR" rc_reset case "$1" in start) echo -n "Initializing obsrepserver" mkdir -p "$rundir" "$logdir" chown obsrun:obsrun "$logdir" "$rundir" # FIXME: not nice, this should receive a proper daemon handling, # including real logging, pid file and startproc startproc -f -l "$logdir"/rep_server.log "$obsdir"/bs_repserver rc_status -v ;; stop) echo -n "Shutting down obsrepserver" "$obsdir"/bs_repserver --stop rc_status -v ;; restart) "$obsdir"/bs_repserver --restart rc_status ;; try-restart|reload) $0 status if test $? = 0; then "$obsdir"/bs_repserver --restart else rc_reset # Not running is not a failure. fi # Remember status and be quiet rc_status ;; status) echo "Checking for obsrepserver and running processes: " "$obsdir"/bs_serverstatus "$OBS_RUN_DIR"/bs_repserver.status checkproc "$obsdir"/bs_repserver rc_status -v ;; *) echo "Usage: $0 {start|stop|status|try-restart|restart|reload}" exit 1 ;; esac rc_exit open-build-service-2.9.4/dist/obsscheduler000077500000000000000000000053131332555733200206070ustar00rootroot00000000000000#! /bin/sh # Copyright (c) 2007, Novell Inc. # # Author: adrian@suse.de # # /etc/init.d/obsscheduler # and its symbolic link # /usr/sbin/rcobsscheduler # ### BEGIN INIT INFO # Provides: obsscheduler # Required-Start: $time $syslog obsrepserver # Should-Start: obssrcserver $network $remote_fs obsapisetup # Should-Stop: $none # Required-Stop: $null # Default-Start: 3 5 # Default-Stop: 0 1 2 4 6 # Description: open build service job scheduler ### END INIT INFO . /etc/rc.status . /etc/sysconfig/obs-server # Determine the base and follow a runlevel link name. 
base=${0##*/} link=${base#*[SK][0-9][0-9]} if [ -z "$OBS_RUN_DIR" ]; then OBS_RUN_DIR="/srv/obs/run" fi if [ -z "$OBS_LOG_DIR" ]; then OBS_LOG_DIR="/srv/obs/log" fi if [ -n "$OBS_BACKENDCODE_DIR" ]; then obsdir="$OBS_BACKENDCODE_DIR" else obsdir=/usr/lib/obs/server fi rundir="$OBS_RUN_DIR" logdir="$OBS_LOG_DIR" OBS_NEW_SCHEDULER_ARCHITECTURES=`$obsdir/bs_admin --show-scheduler-architectures 2>/dev/null` if test -n "$OBS_NEW_SCHEDULER_ARCHITECTURES" ; then if test -n "$OBS_SCHEDULER_ARCHITECTURES" ; then echo "WARNING: ignoring old scheduler architectures configuration from /etc/sysconfig/obs-server" fi OBS_SCHEDULER_ARCHITECTURES="$OBS_NEW_SCHEDULER_ARCHITECTURES" fi if test -z "$OBS_SCHEDULER_ARCHITECTURES" ; then echo "WARNING: no scheduler architectures configured, falling back to i586" OBS_SCHEDULER_ARCHITECTURES=i586 fi rc_reset case "$1" in start) echo -n "Initializing obsscheduler" mkdir -p "$rundir" "$logdir" chown obsrun:obsrun "$logdir" "$rundir" cd "$obsdir" # FIXME: not nice, this should receive a proper daemon handling, # including real logging, pid file and startproc for i in $OBS_SCHEDULER_ARCHITECTURES; do ./bs_sched $i >> "$logdir"/scheduler_$i.log 2>&1 & done rc_status -v ;; stop|shutdown) echo -n "Shutting down obsscheduler" if checkproc bs_sched; then for i in $OBS_SCHEDULER_ARCHITECTURES; do $obsdir/bs_admin --shutdown-scheduler "$i" done for i in `seq 600`; do checkproc bs_sched || break sleep 1 done if checkproc bs_sched ; then killall bs_sched fi fi rc_status -v ;; restart) $0 stop $0 start rc_status ;; try-restart|reload) $0 status if test $? = 0; then $0 restart else rc_reset # Not running is not a failure. fi # Remember status and be quiet rc_status ;; status) echo -n "Checking for obsscheduler: " # FIXME: needs proper checking for all invoked schedulers checkproc bs_sched rc_status -v ;; *) echo "Usage: $0 {start|stop|status|try-restart|restart|reload}" exit 1 ;; esac rc_exit open-build-service-2.9.4/dist/obsservice000077500000000000000000000040011332555733200202620ustar00rootroot00000000000000#! /bin/sh # Copyright (c) 2007, Novell Inc. # # Author: adrian@suse.de # # /etc/init.d/obsservice # and its symbolic link # /usr/sbin/rcobsservice # ### BEGIN INIT INFO # Provides: obsservice # Required-Start: $time $syslog # Required-Stop: $null # Should-Start: obssrcserver # Should-Stop: $none # Default-Start: 3 5 # Default-Stop: 0 1 2 4 6 # Description: open build service source service server ### END INIT INFO . /etc/rc.status . /etc/sysconfig/obs-server # Determine the base and follow a runlevel link name. base=${0##*/} link=${base#*[SK][0-9][0-9]} if [ -z "$OBS_RUN_DIR" ]; then OBS_RUN_DIR="/srv/obs/run" fi if [ -z "$OBS_LOG_DIR" ]; then OBS_LOG_DIR="/srv/obs/log" fi if [ -n "$OBS_BACKENDCODE_DIR" ]; then obsdir="$OBS_BACKENDCODE_DIR" else obsdir=/usr/lib/obs/server fi rundir="$OBS_RUN_DIR" logdir="$OBS_LOG_DIR" rundir_perm() { # make sure rundir is group writable test "$(stat -c "%A" "$rundir" | cut -c6)" = "-" && chmod 0775 "$rundir" } rc_reset case "$1" in start) echo -n "Initializing obsservice" mkdir -p "$rundir" "$logdir" rundir_perm chown obsrun:obsrun "$logdir" "$rundir" startproc -f -l "$logdir"/src_service.log "$obsdir"/bs_service rc_status -v ;; stop) echo -n "Shutting down obsservice" "$obsdir"/bs_service --stop rc_status -v ;; restart) ## If first returns OK call the second, if first or ## second command fails, set echo return value. rundir_perm "$obsdir"/bs_service --restart rc_status ;; try-restart|reload) $0 status if test $? 
= 0; then rundir_perm "$obsdir"/bs_service --restart else rc_reset # Not running is not a failure. fi # Remember status and be quiet rc_status ;; status) echo -n "Checking for obsservice and running processes: " "$obsdir"/bs_serverstatus "$OBS_RUN_DIR"/bs_service.status checkproc "$obsdir"/bs_service rc_status -v ;; *) echo "Usage: $0 {start|stop|status|try-restart|restart|reload}" exit 1 ;; esac rc_exit open-build-service-2.9.4/dist/obsservicedispatch000077500000000000000000000042751332555733200220170ustar00rootroot00000000000000#! /bin/sh # Copyright (c) 2016, SUSE LLC # # Author: mls@suse.de # # /etc/init.d/obsservicedispatch # and its symbolic link # /usr/sbin/rcobsservicedispatch # ### BEGIN INIT INFO # Provides: obsservicedispatch # Required-Start: $time $syslog # Required-Stop: $null # Should-Start: obsstoragesetup # Should-Stop: $none # Default-Start: 3 5 # Default-Stop: 0 1 2 4 6 # Description: open build service source service dispatcher ### END INIT INFO . /etc/rc.status . /etc/sysconfig/obs-server # Determine the base and follow a runlevel link name. base=${0##*/} link=${base#*[SK][0-9][0-9]} if [ -z "$OBS_RUN_DIR" ]; then OBS_RUN_DIR="/srv/obs/run" fi if [ -z "$OBS_LOG_DIR" ]; then OBS_LOG_DIR="/srv/obs/log" fi if [ -n "$OBS_BACKENDCODE_DIR" ]; then obsdir="$OBS_BACKENDCODE_DIR" else obsdir=/usr/lib/obs/server fi rundir="$OBS_RUN_DIR" logdir="$OBS_LOG_DIR" rc_reset case "$1" in start) echo -n "Initializing obsservicedispatch" # need logdir before startproc and doing chown to prevent other daemons sharing logdir # from failing because of permissions problems mkdir -p $logdir bsuser=`perl -I/usr/lib/obs/server -MBSConfig -e 'print ( $BSConfig::bsuser || "obsrun" )'` bsgroup=`perl -I/usr/lib/obs/server -MBSConfig -e 'print ( $BSConfig::bsgroup || "obsrun" )'` chown $bsuser:$bsgroup "$logdir" # FIXME: not nice, this should receive a proper daemon handling, # including real logging, pid file and startproc startproc -f -l "$logdir"/servicedispatch.log \ "$obsdir"/bs_servicedispatch rc_status -v ;; stop) echo -n "Shutting down obsservicedispatch" "$obsdir"/bs_servicedispatch --stop rc_status -v ;; restart) "$obsdir"/bs_servicedispatch --restart rc_status ;; try-restart|reload) $0 status if test $? = 0; then "$obsdir"/bs_servicedispatch --restart else rc_reset # Not running is not a failure. fi # Remember status and be quiet rc_status ;; status) echo -n "Checking for obsservicedispatch: " checkproc "$obsdir"/bs_servicedispatch rc_status -v ;; *) echo "Usage: $0 {start|stop|status|try-restart|restart|reload}" exit 1 ;; esac rc_exit open-build-service-2.9.4/dist/obssigner000077500000000000000000000035711332555733200201240ustar00rootroot00000000000000#! /bin/sh # Copyright (c) 2007, Novell Inc. # # Author: adrian@suse.de # Martin Mohring # Susanne Oberhauser # # /etc/init.d/obssigner # and its symbolic link # /usr/sbin/rcobssigner # ### BEGIN INIT INFO # Provides: obssigner # Required-Start: $time $syslog # Required-Stop: $null # Should-Start: obsstoragesetup signd # Should-Stop: $none # Default-Start: 3 5 # Default-Stop: 0 1 2 4 6 # Description: open build service signer ### END INIT INFO . /etc/rc.status . /etc/sysconfig/obs-server # Determine the base and follow a runlevel link name. 
base=${0##*/} link=${base#*[SK][0-9][0-9]} if [ -z "$OBS_RUN_DIR" ]; then OBS_RUN_DIR="/srv/obs/run" fi if [ -z "$OBS_LOG_DIR" ]; then OBS_LOG_DIR="/srv/obs/log" fi if [ -n "$OBS_BACKENDCODE_DIR" ]; then obsdir="$OBS_BACKENDCODE_DIR" else obsdir=/usr/lib/obs/server fi rundir="$OBS_RUN_DIR" logdir="$OBS_LOG_DIR" rc_reset case "$1" in start) echo -n "Initializing obssigner" mkdir -p "$rundir" "$logdir" chown obsrun:obsrun "$logdir" "$rundir" # FIXME: not nice, this should receive a proper daemon handling, # including real logging, pid file and startproc startproc -f -l "$logdir"/signer.log \ "$obsdir"/bs_signer rc_status -v ;; stop) echo -n "Shutting down obssigner" "$obsdir"/bs_signer --stop rc_status -v ;; restart) "$obsdir"/bs_signer --restart rc_status ;; try-restart|reload) $0 status if test $? = 0; then "$obsdir"/bs_signer --restart else rc_reset # Not running is not a failure. fi # Remember status and be quiet rc_status ;; status) echo -n "Checking for obssigner: " checkproc "$obsdir"/bs_signer rc_status -v ;; *) echo "Usage: $0 {start|stop|status|try-restart|restart|reload}" exit 1 ;; esac rc_exit open-build-service-2.9.4/dist/obssrcserver000077500000000000000000000043731332555733200206540ustar00rootroot00000000000000#! /bin/sh # Copyright (c) 2007, Novell Inc. # # Author: adrian@suse.de # # /etc/init.d/obssrcserver # and its symbolic link # /usr/sbin/rcobssrcserver # ### BEGIN INIT INFO # Provides: obssrcserver # Required-Start: $time $syslog # Required-Stop: $null # Should-Start: $network $remote_fs obsstoragesetup slpd signd # Should-Stop: $none # Default-Start: 3 5 # Default-Stop: 0 1 2 4 6 # Description: open build service source repository server ### END INIT INFO . /etc/rc.status . /etc/sysconfig/obs-server # Determine the base and follow a runlevel link name. base=${0##*/} link=${base#*[SK][0-9][0-9]} if [ -z "$OBS_RUN_DIR" ]; then OBS_RUN_DIR="/srv/obs/run" fi if [ -z "$OBS_LOG_DIR" ]; then OBS_LOG_DIR="/srv/obs/log" fi if [ -n "$OBS_BACKENDCODE_DIR" ]; then obsdir="$OBS_BACKENDCODE_DIR" else obsdir=/usr/lib/obs/server fi rundir="$OBS_RUN_DIR" logdir="$OBS_LOG_DIR" rc_reset case "$1" in start) echo -n "Initializing obssrcserver" mkdir -p "$rundir" "$logdir" chown obsrun:obsrun "$logdir" "$rundir" # FIXME: not nice, this should receive a proper daemon handling, # including real logging, pid file and startproc startproc -f -l "$logdir"/src_server.log "$obsdir"/bs_srcserver rc_status -v # FIXME: # We'd need to wait for the sockets to come up, or for # some other interaction between the server and the # start script, or the server should fork itself into # background _after_ the sockets are up and the server # is reachable. # until then a little ugly nap does the trick... sleep 4 rc_status -v ;; stop) echo -n "Shutting down obssrcserver" "$obsdir"/bs_srcserver --stop rc_status -v ;; restart) "$obsdir"/bs_srcserver --restart rc_status ;; try-restart|reload) $0 status if test $? = 0; then "$obsdir"/bs_srcserver --restart else rc_reset # Not running is not a failure. fi # Remember status and be quiet rc_status ;; status) echo "Checking for obssrcserver and running processes: " "$obsdir"/bs_serverstatus "$OBS_RUN_DIR"/bs_srcserver.status checkproc "$obsdir"/bs_srcserver rc_status -v ;; *) echo "Usage: $0 {start|stop|status|try-restart|restart|reload}" exit 1 ;; esac rc_exit open-build-service-2.9.4/dist/obsstoragesetup000077500000000000000000000434651332555733200213700ustar00rootroot00000000000000#! /bin/sh # Copyright (c) 2010-12, SUSE Inc. 
# # Author: adrian@suse.de # # /etc/init.d/obsstoragesetup # and its symbolic link # /usr/sbin/rcobsstoragesetup # ### BEGIN INIT INFO # Provides: obsstoragesetup # X-Start-Before: mysql sshd obsapisetup # Should-Start: xendomains haveged # Should-Stop: $none # Required-Start: $network # Required-Stop: $null # Default-Start: 3 5 # Default-Stop: 0 1 2 4 6 # Description: Finds the storage device to be used for OBS server and/or worker ### END INIT INFO . /etc/rc.status # BOOTSTRAP_TEST_MODE prevents setup-appliance.sh execution # this is also sourcing /etc/sysconfig/obs-server export BOOTSTRAP_TEST_MODE=1 source /usr/lib/obs/server/setup-appliance.sh unset BOOTSTRAP_TEST_MODE # instance defaults if [ -e /etc/buildhost.config ]; then source /etc/buildhost.config fi if [ "$OBS_STORAGE_AUTOSETUP" != "yes" ]; then echo "OBS Storage Autosetup is not enabled in sysconfig, skipping!" exit 0 fi # Determine the base and follow a runlevel link name. base=${0##*/} link=${base#*[SK][0-9][0-9]} if [ -z "$OBS_BASE_DIR" ]; then backenddir=/srv/obs else backenddir="$OBS_BASE_DIR" fi if [ -z "$OBS_WORKER_DIRECTORY" ]; then OBS_WORKER_DIRECTORY="/var/cache/obs/worker" fi rc_reset case "$1" in start) round_down_to_pe() # round_down_to_pe # # Round the supplied size to align with the nearest lower # PE count and print the result { local ARG_SIZE=$1 local ARG_PE=$2 echo $(( ( ( $ARG_SIZE * 1024 ) - ( ( $ARG_SIZE * 1024 ) % $ARG_PE ) ) / 1024 )) } # configure sshd if wanted if [ ! -e /root/.ssh/authorized_keys ]; then if [ -n "$OBS_ROOT_SSHD_KEY_URL" ]; then echo "Enabling sshd as requested" [ -e /root/.ssh ] || mkdir /root/.ssh curl $OBS_ROOT_SSHD_KEY_URL > /root/.ssh/authorized_keys insserv sshd # avoid dead lock in systemctl export SYSTEMCTL_OPTIONS=--ignore-dependencies rcsshd start fi fi # support usage of lvm on md devices if [ -x /sbin/mdadm -a -x /etc/init.d/boot.lvm ]; then /sbin/mdadm --assemble --scan /etc/init.d/boot.lvm start fi if [ "$OBS_SETUP_WORKER_PARTITIONS" = "take_all" ]; then if [ -e /dev/OBS/server ]; then echo "ERROR: An OBS server partition exists, aborting, do not take all space!" else echo "Collect all LVM partitions for the worker" # remove everything first vgreduce --removemissing --force OBS vgremove -ff OBS pvremove -ff `pvdisplay | grep "PV Name" | awk '{ print $3 }'` # Find unpartitioned disks and create LVM partition on them for disk in `hwinfo --disk | grep "Device File:" |\ cut -f2 -d: | cut -f2 -d" " | grep -v /dev/ram`;do count=0 used= for i in `sfdisk -l $disk 2>/dev/null | tr -d '*' | grep ^/ |\ sed -e s"@\s\+@:@g" | cut -f1,5,6 -d:`;do blocks=`echo $i | cut -f2 -d:` if [ $blocks = "0" ];then count=`expr $count + 1` continue fi used=1 done if [ $count -eq 4 ] || [ -z "$used" ];then echo ",,8e,-" | sfdisk $disk >/dev/null 2>&1 fi done # Collect all LVM partitions DEVICES="" for i in `sfdisk -l 2>/dev/null | tr -d '*' | grep ^/ |\ sed -e s"@\s\+@:@g" | cut -f1,5,6 -d:`;do device=`echo $i | cut -f1 -d:` blocks=`echo $i | cut -f2 -d:` partid=`echo $i | cut -f3 -d:` if [ $blocks = "0" ];then continue fi if [ $partid = "8e" ];then # metadata size is needed to align PV inside of partition pvcreate --metadatasize 499k $device && DEVICES="$DEVICES $device" fi done vgcreate OBS $DEVICES vgscan fi elif [ "$OBS_SETUP_WORKER_PARTITIONS" = "use_obs_vg" ]; then echo "Remove all LVM cache and worker partitions in VG OBS." # Cannot test on existence of "/dev/OBS", in case no LVs in VG OBS have been created!
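# (vgdisplay -c prints one colon-separated record per volume group, so empty
#  output below means no volume group named "OBS" exists at all)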
if [ -n "$(vgdisplay -c OBS 2>/dev/null)" ]; then vgchange -ay OBS lvremove -f /dev/OBS/worker_* /dev/OBS/cache 2>/dev/null else echo "WARNING: The LVM volume group 'OBS' can not be used." echo " Please create one to get worker partitions configured" fi fi echo "Looking for existing OBS Server LVM Volume" [ -d "$backenddir" ] || mkdir -p "$backenddir" if [ -e /dev/OBS/server ]; then mount /dev/OBS/server "$backenddir" if [ -e /usr/lib/obs/server/BSConfig.pm ];then echo "Found BSConfig.pm" if [ -d "$backenddir/run" ]; then echo "Directory '$backenddir/run' exists" # make sure that directories needed by mostly all other daemons are created now # and set proper ownership bsuser=`perl -I/usr/lib/obs/server -MBSConfig -e 'print ( $BSConfig::bsuser || "obsrun" )'` bsgroup=`perl -I/usr/lib/obs/server -MBSConfig -e 'print ( $BSConfig::bsgroup || "obsrun" )'` owner_rundir=`stat -L --printf="%U" "$backenddir/run"` if [ "$owner_rundir" != "root" ];then echo "owner of '$backenddir/run' is $owner_rundir" if [ "$owner_rundir" != "$bsuser" ];then echo "owner $owner_rundir is not configured bsuser '$bsuser'" echo "changing ownership" uid_rundir=`stat -L --printf="%u" "$backenddir/run"` find $backenddir -uid $uid_rundir -exec chown $bsuser:$bsgroup {} \; else echo "Owner of '$backenddir/run' is '$bsuser'. Nothing to fix!" fi else echo "Owner of '$backenddir/run' is root. Not changing anything" fi else echo "Directory '$backenddir/run' not found" fi else echo "No BSConfig found while setting up /dev/OBS/server" fi obs_server_size=$(( $(lvdisplay -c OBS/server 2>/dev/null | cut -d: -f7) / 2 / 1024 )) else echo "No logical volume found under /dev/OBS/server" fi # setup signer and signd if possible prepare_obssigner # Force mysql to update database. FIXME: only on version update [ -d "$backenddir"/MySQL/ ] && touch "$backenddir"/MySQL/.run-mysql_upgrade if [ "$OBS_SETUP_WORKER_PARTITIONS" = "take_all" -o "$OBS_SETUP_WORKER_PARTITIONS" = "use_obs_vg" ]; then if [ 0"$OBS_WORKER_INSTANCES" -gt 0 ]; then # got config setting from sysconfig or PXE server NUM="$OBS_WORKER_INSTANCES" else # auto detect max possible instances # start one build backend per CPU by default NUM=`ls -d /sys/devices/system/cpu/cpu[0-9]* | wc -l` # but be sure that we have at least 512MB per instance if [ -e /sys/hypervisor/type ] && grep -q xen /sys/hypervisor/type; then MEMORY=`xm info | sed -n 's/^max_free_memory[ ]*:[ ]*\(.*\)$/\1/p'` MEMORY=$(( $MEMORY * 1024 )) else MEMORY=`sed -n 's/^MemTotal:[ ]*\(.*\).kB$/\1/p' /proc/meminfo` fi if [ $MEMORY -lt $(( $NUM * 512 * 1024 )) ]; then NUM=$(( $MEMORY / ( 1024 * 512 ) )) NUM=$(( $NUM - 1 )) # for Dom0 fi fi if [ ! "0$NUM" -gt 0 ]; then echo "WARNING: OBS worker instances are 0, either misconfiguration or not enough resources" exit 0 fi # Look for PV devices in OBS VG pvs="" for i in `vgdisplay -v OBS 2>/dev/null | grep "PV Name" | awk '{ print $3 }' | sort`; do if [ -L $i ]; then pvs="$pvs `readlink -f $i`" else pvs="$pvs $i" fi done disks_per_instance=1 pvs=( $pvs ) pv_count=${#pvs[@]} PE_SIZE=`vgdisplay -c OBS | cut -d: -f13` if [ "0$PE_SIZE" -gt 0 ]; then if [ -z "$OBS_WORKER_CACHE_SIZE" ]; then # 25 GB sounds like a good default for cache. 
OBS_WORKER_CACHE_SIZE=$(( 25 * 1024 )) fi OBS_WORKER_CACHE_SIZE=`round_down_to_pe $OBS_WORKER_CACHE_SIZE $PE_SIZE` if [ -z "$OBS_WORKER_SWAP_SIZE" ]; then OBS_WORKER_SWAP_SIZE=512 fi OBS_WORKER_SWAP_SIZE=`round_down_to_pe $OBS_WORKER_SWAP_SIZE $PE_SIZE` if [ -z "$OBS_WORKER_ROOT_SIZE" ]; then VG_SIZE=`vgdisplay -c OBS | cut -d: -f16` PE_SIZE_IN_MB=$(( $PE_SIZE / 1024 )) VG_SIZE=$(( $VG_SIZE * $PE_SIZE_IN_MB )) OBS_SERVER_SIZE=${obs_server_size:-0} TOTAL_SWAP_SIZE=$(( $NUM * $OBS_WORKER_SWAP_SIZE )) FINAL_VG_SIZE=$(( $VG_SIZE - $OBS_SERVER_SIZE - $OBS_WORKER_CACHE_SIZE - $TOTAL_SWAP_SIZE )) OBS_WORKER_ROOT_SIZE=$(( $FINAL_VG_SIZE / $NUM )) MIN_WORKER_ROOT_SIZE=$(( 4 * 1024 )) if test $OBS_WORKER_ROOT_SIZE -lt $(( 4 * 1024 )); then echo "ERROR: Not enough space for worker root LVs, just $OBS_WORKER_ROOT_SIZE MB, but at least 4 GB needed." echo "NUM=$NUM" echo "VG_SIZE=$VG_SIZE" echo "PE_SIZE=$PE_SIZE" echo "PE_SIZE_IN_MB=$PE_SIZE_IN_MB" echo "OBS_SERVER_SIZE=$OBS_SERVER_SIZE" echo "TOTAL_SWAP_SIZE=$TOTAL_SWAP_SIZE" echo "FINAL_VG_SIZE = $VG_SIZE - $OBS_SERVER_SIZE - $OBS_WORKER_CACHE_SIZE - $TOTAL_SWAP_SIZE" echo "FINAL_VG_SIZE=$FINAL_VG_SIZE"; echo "OBS_WORKER_ROOT_SIZE=$OBS_WORKER_ROOT_SIZE" echo "MIN_WORKER_ROOT_SIZE=$MIN_WORKER_ROOT_SIZE" exit 1 fi fi OBS_WORKER_ROOT_SIZE=`round_down_to_pe $OBS_WORKER_ROOT_SIZE $PE_SIZE` if test "$NUM" -ge "$pv_count"; then # As many or more build instances than disks # create LVs and try to distribute them over the PVs as well as possible o1=0 o2=1 # MAGIC AT WORK! we append the first items here for the code later to find a 2nd and 3rd offset in the loop pvs=( ${pvs[*]} ${pvs[0]} ${pvs[0]}) pv_idx=0 I="0" while test "$NUM" -gt "$I"; do I=$(( $I + 1 )) lverr=$(mktemp) if ! lvcreate --wipesignatures y --zero y -n worker_root_${I} -L ${OBS_WORKER_ROOT_SIZE}M OBS ${pvs[$(( $pv_idx + $o1 ))]} 2> $lverr; then if [ $? -gt 0 ];then echo "An error occurred while creating LV" cat $lverr else echo "Creation of worker_root_${I} succeeded" fi if grep "Insufficient free space" $lverr; then I=$(( $I - 1 )) else cat $lverr >&2 exit fi else lvcreate --wipesignatures y --zero y -n worker_swap_${I} -L ${OBS_WORKER_SWAP_SIZE}M OBS ${pvs[$(( $pv_idx + $o2 ))]} || exit if [ $? -gt 0 ];then echo "An error occurred while creating LV worker_swap_${I} " exit 1 else echo "Creation of worker_swap_${I} succeeded" fi fi rm -f $lverr pv_idx=$(( $pv_idx + 2 )) if [ $pv_idx -eq $pv_count ]; then pv_idx=0 # swap offset, so that swap and root partitions are not on same device a=$o1 o1=$o2 o2=$a elif [ $pv_idx -gt $pv_count ]; then pv_idx=1 fi done else # More disks than build instances # Use striping to boost IO performance disks_per_instance=$pv_count I="0" while test "$NUM" -gt "$I"; do I=$(( $I + 1 )) DEVS="" J="0" while test "$disks_per_instance" -lt "$J"; do J=$(( $J + 1 )) DEVS="$DEVS ${pvs[$(( $I * $disks_per_instance + $J ))]}" done lvcreate --wipesignatures y --zero y -n worker_root_${I} -i $disks_per_instance -L ${OBS_WORKER_ROOT_SIZE}M OBS $DEVS || exit lvcreate --wipesignatures y --zero y -n worker_swap_${I} -i $disks_per_instance -L ${OBS_WORKER_SWAP_SIZE}M OBS $DEVS || exit done fi # Create cache partition on remaining space #lvcreate -n cache -i $disks_per_instance -l 100%FREE OBS || exit lvcreate --wipesignatures y --zero y -n cache -i $disks_per_instance -L "${OBS_WORKER_CACHE_SIZE}M" OBS || exit mkfs -text4 /dev/OBS/cache || exit fi fi
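# (guard: nothing more to set up when no worker instances exist and no custom
#  worker script is configured)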
"0$NUM" -gt 0 -a -z "$OBS_WORKER_SCRIPT_URL" ]; then exit 0 fi echo "Looking for OBS Worker Cache LVM Volume" if [ -e /dev/OBS/cache ]; then mkdir -p $OBS_WORKER_DIRECTORY mount /dev/OBS/cache $OBS_WORKER_DIRECTORY mkdir -p $OBS_WORKER_DIRECTORY/cache fi echo "Setting up OBS Workers according to LVM Volumes" if [ ! -e /etc/buildhost.config.presets ]; then mv /etc/buildhost.config /etc/buildhost.config.presets fi echo "### autoconfigured values by obsstoragesetup init script" > /etc/buildhost.config echo "OBS_WORKER_DIRECTORY=\"$OBS_WORKER_DIRECTORY\"" >> /etc/buildhost.config echo "OBS_CACHE_DIR=\"$OBS_WORKER_DIRECTORY/cache\"" >> /etc/buildhost.config CACHEMB=`df -m /$OBS_CACHE_DIR | tail -n 1 | sed -n 's,^/dev/[^ ]*[ ]*\([^ ]*\).*,\1,p'` [ -z "$CACHEMB" ] && CACHEMB=`df -m /$OBS_CACHE_DIR | tail -n 1 | sed -n 's,[^ ]*[ ]*\([^ ]*\).*,\1,p'` [ -n "$CACHEMB" ] && echo "OBS_CACHE_SIZE=\"$(( $CACHEMB / 2 ))\"" >> /etc/buildhost.config if [ -e /sys/hypervisor/type ] && grep -q xen /sys/hypervisor/type; then echo "Found XEN virtualization" RUN_VIRT=1 else if grep ^flags /proc/cpuinfo | egrep -q " (svm|vmx) " && modprobe kvm; then echo "Found KVM virtualization" RUN_VIRT=1 # support virtio else echo "*** NO virtualization found, BUILDING IN UNSECURE ENVIROMENT ***" unset RUN_VIRT fi fi OBS_WORKER_INSTANCES="0" for i in /dev/OBS/worker_root* ; do name="${i##*/worker_}" swap="/dev/OBS/worker_swap${i#/dev/OBS/worker_root}" [ -e $swap ] || continue if [ -n "$RUN_VIRT" ]; then #prepare xen or kvm setup mkdir -p "$OBS_WORKER_DIRECTORY/$name" ln -sf "$i" "$OBS_WORKER_DIRECTORY/$name/root" ln -sf "$swap" "$OBS_WORKER_DIRECTORY/$name/swap" else #plain chroot build mkdir -p "$OBS_WORKER_DIRECTORY/$name" mkfs -text4 $i || exit mount $i "$OBS_WORKER_DIRECTORY/$name" mkswap -f "$swap" swapon "$swap" fi OBS_WORKER_INSTANCES=$(( $OBS_WORKER_INSTANCES + 1 )) done if [ "$OBS_WORKER_INSTANCES" -gt 0 ]; then echo "OBS_WORKER_INSTANCES=\"$OBS_WORKER_INSTANCES\"" >> /etc/buildhost.config # How many parallel jobs make sense ? NUM=`ls -d /sys/devices/system/cpu/cpu[0-9]* | wc -l` MYJOBS=1 if [ "$OBS_WORKER_INSTANCES" -gt 1 ]; then MYJOBS=$(( $NUM / ( $OBS_WORKER_INSTANCES - 1 ) )) # catch e.g. 1/2 and bad NUM to OBS_WORKER_INSTANCES ratio if [ "$MYJOBS" == "0" ] ; then export MYJOBS=1 fi fi echo "OBS_WORKER_JOBS=\"$MYJOBS\"" >> /etc/buildhost.config if [ -z "$OBS_INSTANCE_MEMORY" ]; then # Guess how much memory can be used TOTALMEM=$(( `free | sed -n 's/^Mem:[ ]*\([^ ]*\).*/\1/p'` / 1024 )) OBS_INSTANCE_MEMORY=$(( $TOTALMEM / ( 2 * $OBS_WORKER_INSTANCES ) )) echo "OBS_INSTANCE_MEMORY=\"$OBS_INSTANCE_MEMORY\"" >> /etc/buildhost.config fi if grep ^flags /proc/cpuinfo | egrep -q " (svm|vmx) " && test -n "$RUN_VIRT"; then # try to use hugetlb on kvm mkdir -p /dev/hugetlbfs # systemd may have mounted it already HUGETLBINSTANCEMEM=$(( ($OBS_INSTANCE_MEMORY * 512) / 1024 )) # 2M page sizes HUGETLMEM=$(( $HUGETLBINSTANCEMEM * $OBS_WORKER_INSTANCES )) # register huge table memory pages echo "$HUGETLMEM" > /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages # enable it if it was successful if [ "$HUGETLMEM" == `cat /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages` ]; then grep -q \ /dev/hugetlbfs /proc/mounts || \ mount hugetlbfs /dev/hugetlbfs -t hugetlbfs grep -q \ /dev/hugetlbfs /proc/mounts && \ echo "OBS_VM_USE_HUGETLBFS=\"/dev/hugetlbfs\"" >> /etc/buildhost.config else echo "WARNING: registration of huge table memory pages failed!" 
echo "Just `cat /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages` of $HUGETLMEM registered, resetting ..." echo "0" > /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages fi fi # append user presets echo "" >> /etc/buildhost.config echo "### preconfigured values from /etc/buildhost.config.presets" >> /etc/buildhost.config if [ -e /etc/buildhost.config.presets ]; then cat /etc/buildhost.config.presets >> /etc/buildhost.config fi else echo "WARNING: No OBS workers are configured on this system, no package will built." if [ -e /etc/buildhost.config.presets ]; then mv /etc/buildhost.config.presets /etc/buildhost.config fi fi if [ -e /usr/lib/obs/server -a -e /usr/lib/obs/server/BSConfig.pm ]; then # make sure that directories needed by mostly all other daemons are created now # and set proper ownership bsuser=`perl -I/usr/lib/obs/server -MBSConfig -e 'print ( $BSConfig::bsuser || "obsrun" )'` bsgroup=`perl -I/usr/lib/obs/server -MBSConfig -e 'print ( $BSConfig::bsgroup || "obsrun" )'` [ -d $backenddir/run ] || mkdir -p $backenddir/run [ -d $backenddir/log ] || mkdir -p $backenddir/log # fix ownership chown $bsuser:$bsgroup $backenddir fi # offer hook to make random special things in your setup if [ -n "$OBS_WORKER_SCRIPT_URL" ]; then echo "Running special script for this worker from $OBS_WORKER_SCRIPT_URL" curl $OBS_WORKER_SCRIPT_URL > /tmp/obsworkerscript.$$ chmod 0755 /tmp/obsworkerscript.$$ export OBS_WORKER_DIRECTORY export OBS_WORKER_DISK /tmp/obsworkerscript.$$ rm /tmp/obsworkerscript.$$ fi rc_status -v ;; stop) # nothing to do rc_status -v ;; restart) # nothing to do rc_status ;; try-restart) # nothing to do rc_status ;; reload) # nothing to do rc_status ;; status) # nothing to do rc_status -v ;; *) echo "Usage: $0 {start|stop|status|try-restart|restart|reload}" exit 1 ;; esac rc_exit open-build-service-2.9.4/dist/obswarden000077500000000000000000000035321332555733200201120ustar00rootroot00000000000000#! /bin/sh # Copyright (c) 2007, Novell Inc. # # Author: adrian@suse.de # Martin Mohring # Susanne Oberhauser # # /etc/init.d/obswarden # and its symbolic link # /usr/sbin/rcobswarden # ### BEGIN INIT INFO # Provides: obswarden # Required-Start: $time $syslog obsrepserver # Required-Stop: obssrcserver $null # Should-Start: obsstoragesetup # Should-Stop: $none # Default-Start: 3 5 # Default-Stop: 0 1 2 4 6 # Description: open build service warden, monitors the workers ### END INIT INFO . /etc/rc.status . /etc/sysconfig/obs-server # Determine the base and follow a runlevel link name. base=${0##*/} link=${base#*[SK][0-9][0-9]} if [ -z "$OBS_RUN_DIR" ]; then OBS_RUN_DIR="/srv/obs/run" fi if [ -z "$OBS_LOG_DIR" ]; then OBS_LOG_DIR="/srv/obs/log" fi if [ -n "$OBS_BACKENDCODE_DIR" ]; then obsdir="$OBS_BACKENDCODE_DIR" else obsdir=/usr/lib/obs/server fi rundir="$OBS_RUN_DIR" logdir="$OBS_LOG_DIR" rc_reset case "$1" in start) echo -n "Initializing obswarden" # FIXME: not nice, this should receive a proper daemon handling, # including real logging, pid file and startproc startproc -f -l "$logdir"/warden.log \ "$obsdir"/bs_warden rc_status -v ;; stop) echo -n "Shutting down obswarden" "$obsdir"/bs_warden --stop rc_status -v ;; restart) "$obsdir"/bs_warden --restart rc_status ;; try-restart|reload) $0 status if test $? = 0; then "$obsdir"/bs_warden --restart else rc_reset # Not running is not a failure. 
fi # Remember status and be quiet rc_status ;; status) echo -n "Checking for obswarden: " checkproc "$obsdir"/bs_warden rc_status -v ;; *) echo "Usage: $0 {start|stop|status|try-restart|restart|reload}" exit 1 ;; esac rc_exit open-build-service-2.9.4/dist/obsworker000077500000000000000000000422421332555733200201430ustar00rootroot00000000000000#! /bin/sh # Copyright (c) 2007-2011, SUSE Inc. # # Author: adrian@suse.de # # /etc/init.d/obsworker # and its symbolic link # /usr/sbin/rcobsworker # ### BEGIN INIT INFO # Provides: obsworker # Required-Start: $time $network $syslog # Required-Stop: $time $network $syslog # Should-Start: $remote_fs obsstoragesetup obssrcserver obsrepserver xendomains # Should-Stop: $none # Default-Start: 3 5 # Default-Stop: 0 1 2 4 6 # Description: Open Build Service worker ### END INIT INFO . /etc/rc.status # Is the worker running on Fedora >= 17 ? # If so, it lacks the complete path needed to build other distros bash_bin=`type -p bash` if [ "$bash_bin" != "${bash_bin#/usr}" ]; then export PATH="/bin:$PATH" fi if test -e /etc/sysconfig/proxy; then . /etc/sysconfig/proxy export http_proxy="$HTTP_PROXY" export HTTPS_PROXY export NO_PROXY fi if test -e /etc/sysconfig/obs-server; then # optional on workers . /etc/sysconfig/obs-server fi # This file may still exist from OBS 2.1 and before. if test -e /etc/sysconfig/obs-worker; then . /etc/sysconfig/obs-worker fi # Determine the base and follow a runlevel link name. base=${0##*/} link=${base#*[SK][0-9][0-9]} # Preconfigured by obsstoragesetup runlevel setup script if [ -f /etc/buildhost.config ];then . /etc/buildhost.config fi if [ -z "$OBS_WORKER_DIRECTORY" ]; then OBS_WORKER_DIRECTORY="/var/cache/obs/worker" fi mkdir -p "$OBS_WORKER_DIRECTORY" if [ -z "$OBS_RUN_DIR" ]; then OBS_RUN_DIR="/var/run/obs" fi if [ -z "$OBS_LOG_DIR" ]; then OBS_LOG_DIR="/var/log/obs" fi if [ -z "$OBS_REPO_SERVERS" ]; then OBS_REPO_SERVERS="localhost:5252" fi if [ -n "$OBS_WORKER_TEST_MODE" ]; then OBS_TEST="--test" fi if [ -n "$OBS_WORKER_JOBS" ]; then OBS_JOBS="--jobs $OBS_WORKER_JOBS" fi if [ -n "$OBS_WORKER_THREADS" ]; then OBS_THREADS="--threads $OBS_WORKER_THREADS" fi if [ -n "$OBS_WORKER_NICE_LEVEL" ]; then OBS_NICE=$OBS_WORKER_NICE_LEVEL else OBS_NICE=18 fi if [ -n "$OBS_WORKER_CLEANUP_CHROOT" ]; then OBS_CLEANUP_CHROOT="--cleanup-chroot" fi if [ -n "$OBS_WORKER_WIPE_AFTER_BUILD" ]; then OBS_WIPE_AFTER_BUILD="--wipeafterbuild" fi if [ -n "$OBS_WORKER_SECURITY_LEVEL" ]; then OBS_WORKER_HOSTLABELS="OBS_WORKER_SECURITY_LEVEL_${OBS_WORKER_SECURITY_LEVEL} $OBS_WORKER_HOSTLABELS" fi REPO_PARAM= for i in $OBS_REPO_SERVERS; do REPO_PARAM="$REPO_PARAM --reposerver http://$i" WORKER_CODE="http://$i" done obsrundir="$OBS_RUN_DIR" workerdir="$obsrundir"/worker workerbootdir="$workerdir"/boot screenrc="$workerdir"/boot/screenrc OBS_WORKER_OPT="" if [ -n "$OBS_CACHE_SIZE" -a -n "$OBS_CACHE_DIR" ]; then OBS_WORKER_OPT="--cachedir $OBS_CACHE_DIR" mkdir -p $OBS_CACHE_DIR OBS_WORKER_OPT="$OBS_WORKER_OPT --cachesize $OBS_CACHE_SIZE" fi if [ -n "$OBS_VM_KERNEL" -a "$OBS_VM_KERNEL" != "none" -a "$OBS_VM_TYPE" != "openstack" ] ; then OBS_WORKER_OPT="$OBS_WORKER_OPT --vm-kernel $OBS_VM_KERNEL" fi if [ -n "$OBS_VM_INITRD" -a "$OBS_VM_INITRD" != "none" ] ; then OBS_WORKER_OPT="$OBS_WORKER_OPT --vm-initrd $OBS_VM_INITRD" fi if [ -n "$OBS_VM_CUSTOM_OPTION" -a "$OBS_VM_CUSTOM_OPTION" != "none" ] ; then OBS_WORKER_OPT="$OBS_WORKER_OPT --vm-custom-option \"$OBS_VM_CUSTOM_OPTION\"" fi if [ -n "$OBS_WORKER_LOCALKIWI_DIRECTORY" ]; then OBS_WORKER_OPT="$OBS_WORKER_OPT --localkiwi
$OBS_WORKER_LOCALKIWI_DIRECTORY --arch local" fi if [ -n "$OBS_WORKER_BINARIES_PROXY" ]; then OBS_WORKER_OPT="$OBS_WORKER_OPT --getbinariesproxy $OBS_WORKER_BINARIES_PROXY" fi if [ -n "$OBS_VM_ENABLE_CONSOLE" ]; then OBS_WORKER_OPT="$OBS_WORKER_OPT --vm-enable-console" fi [ -z "$OBS_INSTANCE_MEMORY" ] && OBS_INSTANCE_MEMORY=512 vmopt= ARCH="" EMULATOR="" if [ -n "$OBS_VM_TYPE" -a "$OBS_VM_TYPE" != "auto" ] ; then if [ "${OBS_VM_TYPE#emulator:}" != "$OBS_VM_TYPE" ] ; then vmopt="--emulator" options=(${OBS_VM_TYPE//:/ }) ARCH="--arch ${options[1]}" [ -n "${options[2]}" ] && EMULATOR="--emulator-script ${options[2]}" elif [ "$OBS_VM_TYPE" != "none" ] ; then vmopt="--$OBS_VM_TYPE" fi elif [ -e /dev/kvm -a -x /usr/bin/qemu-kvm ] ; then vmopt=--kvm OBS_VM_TYPE="kvm" elif [ -e /sys/hypervisor/type ] && [ -x /usr/sbin/xl -o -x /usr/sbin/xm ] && grep -q xen /sys/hypervisor/type; then vmopt=--xen OBS_VM_TYPE="xen" fi if [ "$OBS_VM_TYPE" = "zvm" ]; then # for z/VM, the disks are already setup with the workers. if [ -n "$OBS_VM_DISK_AUTOSETUP_FILESYSTEM" ]; then VMDISK_FILESYSTEM="--vmdisk-filesystem ${OBS_VM_DISK_AUTOSETUP_FILESYSTEM}" fi if [ -n "$OBS_VM_DISK_AUTOSETUP_MOUNT_OPTIONS" ]; then VMDISK_MOUNT_OPTIONS="--vmdisk-mount-options ${OBS_VM_DISK_AUTOSETUP_MOUNT_OPTIONS}" fi if [ -n "$OBS_VM_DISK_CLEAN" ];then VMDISK_CLEAN="--vmdisk-clean" fi fi if [ "$OBS_VM_TYPE" = "xen" -o "$OBS_VM_TYPE" = "kvm" -o "${OBS_VM_TYPE#emulator:}" != "$OBS_VM_TYPE" ] ; then # we start up in VM mode, check for the worker disk options if [ -n "$OBS_VM_DISK_AUTOSETUP_ROOT_FILESIZE" ]; then VMDISK_ROOT_FILESIZE="--vmdisk-rootsize ${OBS_VM_DISK_AUTOSETUP_ROOT_FILESIZE}" if [ -n "$OBS_VM_DISK_AUTOSETUP_SWAP_FILESIZE" ]; then VMDISK_SWAP_FILESIZE="--vmdisk-swapsize ${OBS_VM_DISK_AUTOSETUP_SWAP_FILESIZE}" fi if [ -n "$OBS_VM_DISK_AUTOSETUP_FILESYSTEM" ]; then VMDISK_FILESYSTEM="--vmdisk-filesystem ${OBS_VM_DISK_AUTOSETUP_FILESYSTEM}" fi if [ -n "$OBS_VM_DISK_AUTOSETUP_MOUNT_OPTIONS" ]; then VMDISK_MOUNT_OPTIONS="--vmdisk-mount-options ${OBS_VM_DISK_AUTOSETUP_MOUNT_OPTIONS}" fi if [ -n "$OBS_VM_DISK_CLEAN" ];then VMDISK_CLEAN="--vmdisk-clean" fi fi fi if [ "$OBS_VM_TYPE" = "openstack" ]; then # if [ -z "$OBS_WORKER_CONTROL_INSTANCE" ];then echo "Please specify OBS_WORKER_CONTROL_INSTANCE in /etc/sysconfig/obs-server!" exit 1 fi # if [ -z "$OBS_WORKER_OS_FLAVOR" ];then echo "Please specify OBS_WORKER_OS_FLAVOR in /etc/sysconfig/obs-server!" exit 1 fi # if [ "$OBS_WORKER_INSTANCES" -lt 1 ]; then OBS_WORKER_INSTANCES=1 fi # ENVIRONMENT variables for openstack clients # like nova, cinder, glance, etc. # Checking, setting and exporting of defaults # * OS_INTERFACE if [ -z "$OS_INTERFACE" ];then OS_INTERFACE=public fi export OS_INTERFACE # * OS_IDENTITY_API_VERSION if [ -z "$OS_IDENTITY_API_VERSION" ];then OS_IDENTITY_API_VERSION=3 fi export OS_IDENTITY_API_VERSION # Checking and exporting of required variables # * OS_AUTH_URL if [ -z "$OS_AUTH_URL" ];then echo "ERROR: OBS_VM_TYPE=openstack needs OS_AUTH_URL to be set! Exiting" exit 1 else export OS_AUTH_URL fi # * OS_PROJECT_ID if [ -z "$OS_PROJECT_ID" ];then echo "ERROR: OBS_VM_TYPE=openstack needs OS_PROJECT_ID to be set! Exiting" exit 1 else export OS_PROJECT_ID fi # * OS_PROJECT_NAME if [ -z "$OS_PROJECT_NAME" ];then echo "ERROR: OBS_VM_TYPE=openstack needs OS_PROJECT_NAME to be set! Exiting" exit 1 else export OS_PROJECT_NAME fi # * OS_USER_DOMAIN_NAME if [ -z "$OS_USER_DOMAIN_NAME" ];then echo "ERROR: OBS_VM_TYPE=openstack needs OS_USER_DOMAIN_NAME to be set! 
Exiting" exit 1 else export OS_USER_DOMAIN_NAME fi # * OS_USERNAME if [ -z "$OS_USERNAME" ];then echo "ERROR: OBS_VM_TYPE=openstack needs OS_USERNAME to be set! Exiting" exit 1 else export OS_USERNAME fi # * OS_PASSWORD if [ -z "$OS_PASSWORD" ];then echo "ERROR: OBS_VM_TYPE=openstack needs OS_PASSWORD to be set! Exiting" exit 1 else export OS_PASSWORD fi if [ -n "$OBS_OPENSTACK_DISK_SIZE" ];then VMDISK_ROOT_FILESIZE="--vmdisk-rootsize ${OBS_OPENSTACK_DISK_SIZE}" fi if [ -n "$OBS_OPENSTACK_SWAP_SIZE" ];then VMDISK_SWAP_FILESIZE="--vmdisk-swapsize ${OBS_OPENSTACK_SWAP_SIZE}" fi if [ -n "$OBS_OPENSTACK_MEMORY_SIZE" ];then MEMORY="--vm-memory ${OBS_OPENSTACK_MEMORY_SIZE}" fi fi check_vmcp() { # try to load the kernel module modprobe vmcp 2> /dev/null || : # run a vmcp command that always works from within z/VM vmcp q privclass } create_initrd() { # $1 name of kernel # $2 name of initrd # 0150 is already included from the local guest if test -z "$2" ; then echo "Please define a name for the new initrd in /etc/sysconfig/obs-server" return 1 else # create initrd with system scripts mkinitrd -k $1 -i $2 -m "xfs reiserfs ext3 ext4 fat vfat" -B -S TEMPDIR=$(mktemp -d /tmp/initrd.XXX) pushd $TEMPDIR # unpack initrd to add some extra files zcat $2 | cpio -i cat - > etc/udev/rules.d/51-dasd-0.0.0250.rules < etc/udev/rules.d/51-dasd-0.0.0${device}.rules < $2 popd rm -rf $TEMPDIR fi } rc_reset case "$1" in start) # reset screenrc mkdir -p "$obsrundir" chown obsrun:obsrun "$obsrundir" rm -rf "$workerdir" mkdir -p "$workerbootdir" echo "zombie on" > $screenrc echo "defscrollback 10000" >> $screenrc echo 'caption always "%3n %t%? [%h]%?"' >> $screenrc if [ 0"$OBS_WORKER_INSTANCES" -gt 0 ]; then NUM="$OBS_WORKER_INSTANCES" else # start one build backend per CPU NUM=`ls -d /sys/devices/system/cpu/cpu[0-9]* | wc -l` fi if [ "--zvm" == "$vmopt" ]; then check_vmcp || rc_status -v create_initrd $OBS_VM_KERNEL $OBS_VM_INITRD || rc_status -v if [ -n "$OBS_WORKER_INSTANCE_NAMES" ]; then WORKERS=($OBS_WORKER_INSTANCE_NAMES) NUM=${#WORKERS[*]} fi fi if [ -n "$OBS_WORKER_OWNER" ]; then HOSTOWNER="--owner $OBS_WORKER_OWNER" fi # print some config data echo "Run $NUM obsworker using $OBS_WORKER_DIRECTORY" echo -n "Type of obsworker is " if [ "--kvm" == "$vmopt" ]; then echo "KVM virtual machine" elif [ "--xen" == "$vmopt" ]; then echo "XEN virtual machine" elif [ "--zvm" == "$vmopt" ]; then echo "z/VM virtual machine" elif [ "--pvm" == "$vmopt" ]; then echo "PowerVM LPAR" elif [ "--emulator" == "$vmopt" ]; then echo "System emulated virtual machine" elif [ "--lxc" == "$vmopt" ]; then echo "LXC container" elif [ "openstack" == "$OBS_VM_TYPE" ]; then echo "OpenStack virtual machine" else echo "chroot" fi # find SLP announced OBS servers if [ "$OBS_USE_SLP" == "yes" ]; then for i in `slptool findsrvs service:obs.repo_server | sed -n 's/service:obs.repo_server:\([^,]*\),.*/\1/p'`; do [ "${i#http://localhost}" != "$i" ] && continue [ "${i#http://127.}" != "$i" ] && continue REPO_PARAM="$REPO_PARAM --reposerver $i" # any of them should be okay WORKER_CODE="$i" done fi # fetch worker sources from server echo "Fetching initial worker code from $WORKER_CODE/getworkercode" mkdir -p "$workerbootdir" pushd "$workerbootdir" > /dev/null I=0 while ! curl -s "$WORKER_CODE"/getworkercode | cpio --quiet --extract ; do # we need to wait for rep server maybe echo >&2 "WARNING: Could not reach rep server $WORKER_CODE. Trying again." I=$(( $I + 1 )) if test "10" -lt "$I"; then echo >&2 "ERROR: Unable to reach rep server $WORKER_CODE!" 
exit 1 fi sleep 10 done ln -s . XML chmod 755 bs_worker popd > /dev/null for i in $OBS_WORKER_HOSTLABELS; do HOSTLABELS="$HOSTLABELS --hostlabel $i" done OBS_WORKER_OPT1="$OBS_WORKER_OPT" I=0 [ -z "$OBS_WORKER_PREFIX" ] && OBS_WORKER_PREFIX=worker while test "$NUM" -gt "$I"; do if [ 0"$OBS_WORKER_PORTBASE" -gt 0 ]; then port="--port $((OBS_WORKER_PORTBASE + I))" else port="" fi I_INDEX=$I I=$(( $I + 1 )) if [ "$OBS_VM_TYPE" = 'zvm' ]; then WORKERID="${HOSTNAME}:${WORKERS[$I_INDEX]}" else WORKERID="${HOSTNAME}:$I" fi R=$OBS_WORKER_DIRECTORY/root_$I # prepare obsworker startup in screen... TMPFS= if [ "$OBS_VM_TYPE" = "xen" -o "$OBS_VM_TYPE" = "kvm" -o "${OBS_VM_TYPE#emulator:}" != "$OBS_VM_TYPE" ] ; then mkdir -p $R DEVICE="$OBS_WORKER_DIRECTORY/root_$I/root" SWAP="$OBS_WORKER_DIRECTORY/root_$I/swap" if [ -n "$OBS_VM_DISK_AUTOSETUP_ROOT_FILESIZE" ]; then OBS_WORKER_OPT="$OBS_WORKER_OPT1 $VMDISK_AUTOSETUP $VMDISK_ROOT_FILESIZE $VMDISK_SWAP_FILESIZE $VMDISK_FILESYSTEM $VMDISK_MOUNT_OPTIONS $VMDISK_CLEAN" elif [ ! -e "$DEVICE" ]; then echo "ERROR: worker is configured to use a VM, but the root device does not exist: $DEVICE" exit 1 fi if [ -n "$OBS_VM_USE_TMPFS" ]; then TMPFS="--tmpfs" fi DEVICE="--device $DEVICE" SWAP="--swap $SWAP" MEMORY="--vm-memory $OBS_INSTANCE_MEMORY" if [ -n "$OBS_VM_USE_HUGETLBFS" ]; then HUGETLBFS="--hugetlbfs $OBS_VM_USE_HUGETLBFS" fi elif [ "$OBS_VM_TYPE" = 'openstack' ]; then mkdir -p $R # Without a worker being defined, we would not be in this loop. VM_SERVER="--vm-server $OBS_WORKER_CONTROL_INSTANCE" VM_FLAVOR="--openstack-flavor $OBS_WORKER_OS_FLAVOR" WORKER="--vm-worker $OBS_WORKER_PREFIX$I" VM_KERNEL="--vm-kernel $OBS_WORKER_PREFIX$I-grub-image" VM_DISK="--device $OBS_WORKER_PREFIX$I-root" VM_SWAP="--swap $OBS_WORKER_PREFIX$I-swap" OBS_WORKER_OPT="$OBS_WORKER_OPT1 $WORKER $VM_KERNEL $VM_DISK $VM_SWAP $VM_SERVER $VM_FLAVOR $VMDISK_ROOT_FILESIZE $VMDISK_SWAP_FILESIZE " elif [ -n "$vmopt" -a "$OBS_VM_TYPE" = 'zvm' ]; then mkdir -p $R # Without a worker being defined, we would not be in this loop. WORKER="--vm-worker ${WORKERS[$I_INDEX]}" WORKER_NR="--vm-worker-nr $I" OBS_WORKER_OPT="$OBS_WORKER_OPT1 $WORKER $WORKER_NR $VMDISK_FILESYSTEM $VMDISK_MOUNT_OPTIONS $VMDISK_CLEAN" else mkdir -p $R DEVICE= SWAP= MEMORY= fi echo "screen -t $WORKERID nice -n $OBS_NICE ./bs_worker --hardstatus $vmopt $port --root $R" \ "--statedir $workerdir/$I --id $WORKERID $REPO_PARAM $HUGETLBFS $HOSTLABELS" \ "$HOSTOWNER $OBS_JOBS $OBS_THREADS $OBS_TEST $OBS_WORKER_OPT $TMPFS $DEVICE $SWAP $MEMORY" \ "$OBS_CLEANUP_CHROOT $OBS_WIPE_AFTER_BUILD $ARCH $EMULATOR" \ >> $screenrc mkdir -p $workerdir/$I done pushd "$workerbootdir" > /dev/null screen -S obsworker -m -d -c $screenrc popd > /dev/null ;; stop) echo -n "Shutting down obsworker" for I in "$workerdir"/*; do test -d "$I" || continue test -e "$I/state" || continue pushd "$workerbootdir" > /dev/null ./bs_worker --statedir "$I" --exit & popd > /dev/null done wait killall bs_worker 2>/dev/null sleep 2 killall -s 9 bs_worker 2>/dev/null screen -S obsworker -X quit rc_status -v ;; restart) ## If first returns OK call the second, if first or ## second command fails, set echo return value. $0 stop $0 start rc_status ;; try-restart|reload) $0 status if test $? = 0; then $0 restart else rc_reset # Not running is not a failure.
fi # Remember status and be quiet rc_status ;; status) echo -n "Checking for obsworker: " checkproc bs_worker rc_status -v ;; *) echo "Usage: $0 {start|stop|status|try-restart|restart|reload}" exit 1 ;; esac rc_exit open-build-service-2.9.4/dist/openQA_mail_notification.rb000066400000000000000000000050131332555733200234560ustar00rootroot00000000000000#!/usr/bin/ruby require 'net/https' require 'net/smtp' require 'uri' require 'json' require 'mail' require 'yaml/store' FROM = 'obs-admin@opensuse.org' TO = 'obs-tests@opensuse.org' SMTP_SERVER = '' OPEN_QA = 'https://openqa.opensuse.org/' DISTRIBUTION = 'obs' VERSION = 'Unstable' GROUP = '17' def get_build_information begin uri = URI.parse("#{OPEN_QA}api/v1/jobs?distri=#{DISTRIBUTION}&version=#{VERSION}") http = Net::HTTP.new(uri.host, uri.port) http.use_ssl = true http.verify_mode = OpenSSL::SSL::VERIFY_NONE request = Net::HTTP::Get.new(uri.request_uri) response = http.request(request) JSON.parse(response.body)['jobs'].last rescue Exception => ex $stderr.puts "Error while fetching openQA data: #{ex.inspect}" abort end end def modules_to_sentence(modules) modules.map { |m| "#{m['name']} #{m['result']}" } end def build_message(build, successful_modules, failed_modules) < ex $stderr.puts "#{SMTP_SERVER}: #{ex.inspect}" abort end end build = get_build_information store = YAML::Store.new('builds.yml') last_build = store.transaction { store[:name] } result = last_build <=> build['name'] if result != 0 && build['state'] == 'done' modules = build['modules'] successful_modules = modules.select { |m| m['result'] == 'passed' } failed_modules = modules.select { |m| m['result'] == 'failed' } successful_modules = modules_to_sentence(successful_modules) failed_modules = modules_to_sentence(failed_modules) subject = "Build #{build['result']} in openQA: #{build['name']}" message = build_message(build['settings']['BUILD'], successful_modules, failed_modules) send_notification(FROM, TO, subject, message) store.transaction do store[:name] = build['name'] store[:last_run] = build['t_finished'] end end open-build-service-2.9.4/dist/openstack/000077500000000000000000000000001332555733200201645ustar00rootroot00000000000000open-build-service-2.9.4/dist/openstack/README.txt000066400000000000000000000072371332555733200216710ustar00rootroot00000000000000* Create a separate project, e.g.
"OBS" * Create flavors * nova flavor-create obs-server auto 4096 32 4 * nova flavor-create obs-flavor auto 1024 32 1 * Create security groups * dist/openstack/create-secgroups.sh # OBS-Server * Download obs-server and upload it to openstack "OBS" Project * wget http://download.opensuse.org/repositories/OBS:/Server:/Unstable/images/obs-server.x86_64.qcow2 * glance image-create --name obs-server-2.7.52 --file obs-server.x86_64.qcow2 --progress --container-format bare --disk-format qcow2 * Create volume from obs-server image * cinder create --image obs-server-2.7.52 --name obs-server-root 32 * Make volume bootable * cinder set-bootable $VOL_ID true * Start instance with create volume * nova boot --flavor obs-server --boot-volume $VOL_ID --nic net-name=fixed obs-server * Associate floating ip * FIXEDIPADDR=`nova show obs-server|grep "fixed network"|cut -f 3 -d\| |cut -f 1 -d,|perl -p -e 's/\s//g'` * PORT_ID=`neutron port-list |grep $FIXEDIPADDR|cut -f2 -d\| | perl -p -e 's/\s//g'` + neutron floatingip-list * neutron floatingip-associate $PORT_ID * Set security groups for obs-server * nova remove-secgroup obs-server default * nova add-secgroup obs-server obs-server * Login on console, set password and start sshd * passwd root * systemctl start sshd * systemctl enable sshd * Login and stop worker * systemctl stop obsworker * systemctl disable obsworker * DONT FORGET TO CHECK YOUR GRUB CONFIG # OBS-Worker * Download JeOS image and upload it to openstack "OBS" Project * wget http://download.opensuse.org/repositories/openSUSE:/infrastructure:/Images:/openSUSE_Leap_42.2/images/admin-openSUSE-Leap-42.2.x86_64-0.1.0-Build9.35.raw.xz * xzcat admin-openSUSE-Leap-42.2.x86_64-0.1.0-Build9.35.raw.xz > admin-openSUSE-Leap-42.2.x86_64-0.1.0-Build9.35.raw * glance image-create --name admin-openSUSE-Leap-42.2 --file admin-openSUSE-Leap-42.2.x86_64-0.1.0-Build9.35.raw --progress --container-format bare --disk-format raw * Create volume from JeOS image * cinder create --image admin-openSUSE-Leap-42.2 --name obs-worker-root 16 * Make volume bootable * cinder set-bootable $VOL_ID true * Start instance from uploaded image * nova boot --flavor obs-worker --boot-volume $VOL_ID --nic net-name=fixed obs-worker * Associate IP * configure hostname and dhcp * SSHD (optional) * passwd root * systemctl start sshd * systemctl enable sshd * Add O:S:U * zypper -n ar http://download.opensuse.org/repositories/OBS:/Server:/Unstable/openSUSE_42.2/OBS:Server:Unstable.repo * zypper ar http://download.opensuse.org/update/leap/42.2/oss/ repo-update * zypper ar -t yast http://download.opensuse.org/distribution/leap/42.2/repo/oss/ repo-oss * zypper ref -s * install obs-worker and openstack related packages * zypper -n in obs-worker python-websocket-client python-glanceclient python-cinderclient python-novaclient python-neutronclient * get access settings (OBS-openrc.sh) * source OBS-openrc.sh * Create and upload grub-image (dist/openstack/create-grub-image.sh) * Configure OBS worker in /etc/sysconfig/obs-server * You have to configure at least the following variables * OBS_SRC_SERVER="$OBSSERVER_IP:5352" * OBS_REPO_SERVERS="$OBSSERVER_IP:5252" * OBS_VM_TYPE="openstack" * OBS_WORKER_CONTROL_INSTANCE= * OBS_WORKER_OS_FLAVOR= * OS_AUTH_URL= * OS_PROJECT_ID= * OS_PROJECT_NAME= * OS_USER_DOMAIN_NAME= * OS_USERNAME= * OS_PASSWORD= * OS_REGION_NAME= * Configure OpenStack settings (in /etc/sysconfig/obs-server) * create Volumes (boot/root/swap) for NUM of workers (dist/openstack/create-vm-volumes.sh) * Configure SecGroup for access to worker * 
open-build-service-2.9.4/dist/openstack/create-grub-image.sh000077500000000000000000000022041332555733200240010ustar00rootroot00000000000000#!/bin/bash OUTFILE=./obs-worker-grub-image.raw # SIZE in MB SIZE=10 MOUNT=/mnt LOOP_DEV=/dev/loop0 PART_DEV=/dev/mapper/loop0p1 OS_IMAGE_NAME=obs-worker-grub-image dd if=/dev/zero of=$OUTFILE bs=1M count=$SIZE fdisk $OUTFILE < $MOUNT/boot/grub2/grub.cfg insmod part_msdos insmod ext2 set root='hd0,msdos1' set default=1 set timeout=0 serial --unit=0 --speed=115200 terminal_input serial terminal_output serial menuentry 'OBS Build' { insmod gzio insmod part_msdos insmod ext2 search --label obsrootfs --no-floppy --set=root #set root='hd1' echo 'Loading Linux ...' linux /.build.kernel.kvm root=LABEL=obsrootfs console=ttyS0 init=/sbin/init echo 'Loading initial ramdisk ...' initrd /.build.initrd.kvm } EOF grub2-install --boot-directory $MOUNT/boot $LOOP_DEV umount $MOUNT kpartx -dv $LOOP_DEV losetup -d $LOOP_DEV glance image-create --name $OS_IMAGE_NAME --file $OUTFILE --container-format bare --disk-format raw --progress exit 0 open-build-service-2.9.4/dist/openstack/create-secgroups.sh000077500000000000000000000043261332555733200240030ustar00rootroot00000000000000#!/bin/bash NEUTRONCLIENT=`type -p neutron` if [ -z "$NEUTRONCLIENT" ];then echo "Please install neutron client!" exit 1 fi if [ -z "$1" ];then echo "Usage: "`basename $0`" " exit 1 else OBSSERVER_IP=$1 fi if [ -n "$2" -a -n "$3" ];then SEC_GROUP_NAME=$2 CREATE_SEC_GROUP=$3 else CREATE_SEC_GROUP="server worker" fi function create_secgroup_obsserver { if [ -z "$SEC_GROUP_NAME" ];then SEC_GROUP_NAME=obs-server fi neutron security-group-create $SEC_GROUP_NAME SEC_GROUP_ID=$(neutron security-group-list -f csv -F id -F name | grep $SEC_GROUP_NAME | cut -f1 -d',' | tr -d '"') neutron security-group-rule-create --direction ingress --ethertype IPv4 --port-range-min 22 --port-range-max 22 --protocol tcp $SEC_GROUP_ID neutron security-group-rule-create --direction ingress --ethertype IPv4 --port-range-min 80 --port-range-max 80 --protocol tcp $SEC_GROUP_ID neutron security-group-rule-create --direction ingress --ethertype IPv4 --port-range-min 443 --port-range-max 443 --protocol tcp $SEC_GROUP_ID neutron security-group-rule-create --direction ingress --ethertype IPv4 --port-range-min 5252 --port-range-max 5252 --protocol tcp $SEC_GROUP_ID neutron security-group-rule-create --direction ingress --ethertype IPv4 --port-range-min 5352 --port-range-max 5352 --protocol tcp $SEC_GROUP_ID neutron security-group-rule-create --direction ingress --ethertype IPv4 --port-range-min 427 --port-range-max 427 --protocol udp $SEC_GROUP_ID } function create_secgroup_obsworker { if [ -z "$SEC_GROUP_NAME" ];then SEC_GROUP_NAME=obs-worker fi neutron security-group-create $SEC_GROUP_NAME SEC_GROUP_ID=$(neutron security-group-list -f csv -F id -F name | grep $SEC_GROUP_NAME | cut -f1 -d',' | tr -d '"') neutron security-group-rule-create --direction ingress --ethertype IPv4 --port-range-min 1 --port-range-max 65535 --protocol tcp --remote-ip-prefix $OBSSERVER_IP/32 $SEC_GROUP_ID neutron security-group-rule-create --direction ingress --ethertype IPv4 --port-range-min 1 --port-range-max 65535 --protocol udp --remote-ip-prefix $OBSSERVER_IP/32 $SEC_GROUP_ID # TODO: Restrict egress also to $OBSSERVER } for i in $CREATE_SEC_GROUP do FNAME=create_secgroup_obs$i $FNAME done open-build-service-2.9.4/dist/openstack/create-vm-volumes.sh000077500000000000000000000015501332555733200240770ustar00rootroot00000000000000#!/bin/bash 
WORKER_PREFIX='worker' NUM=2 VM_BOOT_IMAGE_NAME=obs-worker-grub-image VM_BOOT_SIZE=1 VM_ROOT_SIZE=4 VM_SWAP_SIZE=1 while [ $1 ];do OPT=$1 shift case $OPT in -p|--prefix) WORKER_PREFIX=$1;shift;; -r|--root-size) VM_ROOT_SIZE=$1;shift;; -b|--boot-size) VM_BOOT_SIZE=$1;shift;; -s|--swap-size) VM_SWAP_SIZE=$1;shift;; -n|--number) NUM=$1;shift;; -t|--template) VM_BOOT_IMAGE_NAME=$1;shift;; esac done for i in $(seq 1 $NUM) do echo "Creating volumes for $WORKER_PREFIX$i" VM_BOOT_NAME=$WORKER_PREFIX$i\-grub-image VM_ROOT_NAME=$WORKER_PREFIX$i\-root VM_SWAP_NAME=$WORKER_PREFIX$i\-swap cinder create --image $VM_BOOT_IMAGE_NAME --name $VM_BOOT_NAME $VM_BOOT_SIZE cinder create --name $VM_ROOT_NAME $VM_ROOT_SIZE cinder create --name $VM_SWAP_NAME $VM_SWAP_SIZE cinder set-bootable $VM_BOOT_NAME true done open-build-service-2.9.4/dist/overview.html.TEMPLATE000066400000000000000000000011761332555733200221500ustar00rootroot00000000000000

The OBS web interface can be used to set up OBS, browse the content and build packages. It is also the API URL to be used with clients like osc.

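For example (a sketch, where HOSTNAME is a placeholder for this server's name): osc -A https://HOSTNAME ls lists the projects hosted on this instance.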
The OBS repositories contain the build results, the repositories can be added to package managers like zypper or apt.

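For example (a sketch; HOSTNAME, PROJECT and REPOSITORY are placeholders, and published repositories are typically served on port 82 of an OBS appliance): zypper ar http://HOSTNAME:82/PROJECT/REPOSITORY/ obs-repo adds such a repository on openSUSE.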
The Open Build Service project can be found at www.openbuildservice.org, and the openSUSE project hosts a public instance of OBS at build.opensuse.org which is not limited to SUSE distributions.

open-build-service-2.9.4/dist/rails.include000066400000000000000000000017171332555733200206610ustar00rootroot00000000000000# # usage: # $HTTP["host"] == "someapp.opensuse.org" { # rails_app = "someapp" # rails_root = "/srv/www/opensuse/someapp" # rails_procs = 4 # # production/development are typical values here # rails_mode = "production" # include "vhosts.d/rails.inc" # } url.rewrite += ("^/apidocs(/|)$" => "/apidocs/html/index.html") magnet.attract-physical-path-to = ( conf_dir + "/cleanurl-v5.lua" ) accesslog.filename = log_root + "/obs-" + rails_app + "-access.log" rails_tmp = rails_root + "/tmp" server.document-root = rails_root + "/public" fastcgi.server = ( ".fcgi" => ( rails_app => ( "socket" => rails_tmp + "/sockets/fcgi.socket", "bin-path" => server.document-root + "/dispatch.fcgi", "bin-environment" => ( "RAILS_ENV" => rails_mode, "TMP" => rails_tmp ), "max-procs" => rails_procs, "idle-timeout" => 3600, ) ) ) open-build-service-2.9.4/dist/schedule-obs.sh000077500000000000000000000015601332555733200211130ustar00rootroot00000000000000#! /bin/sh set -e unset OPENQA_CONFIG function trigger_run { OBS_VERSION="$1" FULL_URL="http://download.opensuse.org/repositories/OBS:/$2/" filename=`curl -s $FULL_URL | grep "obs-server.x86_64-.*qcow2" | head -n1 | sed -e 's,.*href=",,; s,".*,,; s,\.mirrorlist,,'` last_obs_filename="/tmp/.last.obs_$OBS_VERSION" ofilename=`cat $last_obs_filename || touch $last_obs_filename` if test "x$ofilename" != "x$filename"; then /usr/share/openqa/script/client isos post --host https://openqa.opensuse.org HDD_1_URL=$FULL_URL$filename DISTRI=obs ARCH=x86_64 VERSION=$OBS_VERSION BUILD=`echo $filename | sed -e 's,obs-server.x86_64-,,; s,Build,,; s,\.qcow2,,'` FLAVOR=Appliance > /dev/null echo $filename > $last_obs_filename fi } trigger_run Unstable Server:/Unstable/images trigger_run 2.8 Server:/2.8:/Staging/images trigger_run 2.9 Server:/2.9:/Staging/images open-build-service-2.9.4/dist/setup-appliance.sh000077500000000000000000000571421332555733200216330ustar00rootroot00000000000000#!/bin/bash ############################################################################### # # DEFINITION OF FUNCTIONS # ############################################################################### function execute_silently { $@ > /dev/null 2>&1 return $? } ############################################################################### function logline { [[ $BOOTSTRAP_TEST_MODE == 1 ]] && return echo $@ } ############################################################################### function check_service { srv=$1 service_critical=$2 [[ $SETUP_ONLY == 1 ]] && return echo "Checking service $srv ..." logline "Enabling $srv" execute_silently systemctl enable $srv\.service if [[ $? -gt 0 ]];then logline "WARNING: Enabling $srv daemon failed." fi STATUS=`systemctl is-active $srv\.service 2>/dev/null` if [[ "$STATUS" == "inactive" ]];then echo "$srv daemon not started. Trying to start" execute_silently systemctl start $srv\.service if [[ $? -gt 0 ]];then echo -n "Starting $srv daemon failed." if [[ $service_critical == 1 ]];then echo " Exiting ..." exit 1 fi fi fi } ############################################################################### function check_server_cert { # Create the directory if it does not exist # Useful on testing systems where no obs-server rpm is installed [ -d $backenddir/certs/ ] || mkdir -p $backenddir/certs/ if [[ ! -e $backenddir/certs/server.${FQHOSTNAME}.created || !
-e $backenddir/certs/server.${FQHOSTNAME}.crt ]]; then # setup ssl certificates (NOT protected with a passphrase) logline "Creating a default SSL certificate for the server" logline "Please replace it with your version in $backenddir/certs directory..." DETECTED_CERT_CHANGE=1 # hostname specific certs - survive intermediate hostname changes if [ ! -e $backenddir/certs/server.${FQHOSTNAME}.crt ] ; then # This is just a dummy SSL certificate, but it has a valid hostname. # The admin can replace it with their own version. create_selfsigned_certificate echo "$OPENSSL_CONFIG" | openssl req -new -nodes -config /dev/stdin \ -x509 -days 365 -batch \ -key $backenddir/certs/server.key \ -out $backenddir/certs/server.${FQHOSTNAME}.crt if [[ $? == 0 ]];then echo "Do not remove this file or new SSL CAs will get created." > $backenddir/certs/server.${FQHOSTNAME}.created fi else echo "ERROR: SSL CAs in $backenddir/certs exist, but were not created for your hostname" exit 1 fi fi } ############################################################################### function create_selfsigned_certificate() { cert_outdir=$backenddir/certs COUNTER=0 DNS_NAMES="" for name in $PROPOSED_DNS_NAMES;do DNS_NAMES="$DNS_NAMES DNS.$COUNTER = $name" COUNTER=$(($COUNTER + 1 )) done logline "Creating crt/key in $cert_outdir" OPENSSL_CONFIG="prompt = no distinguished_name = req_distinguished_name [req_distinguished_name] countryName = CC stateOrProvinceName = OBS Autogen State or Province localityName = OBS Autogen Locality organizationName = OBS Autogen Organisation organizationalUnitName = OBS Autogen Organizational Unit commonName = $FQHOSTNAME emailAddress = test@email.address [req] req_extensions = v3_req distinguished_name = req_distinguished_name attributes = req_attributes x509_extensions = v3_ca [req_attributes] [ v3_req ] basicConstraints = CA:FALSE keyUsage = nonRepudiation, digitalSignature, keyEncipherment [ v3_ca ] subjectKeyIdentifier=hash authorityKeyIdentifier=keyid:always,issuer basicConstraints = CA:true [ v3_req ] # Extensions to add to a certificate request basicConstraints = critical,CA:FALSE keyUsage = digitalSignature, keyEncipherment subjectAltName = @alt_names [alt_names] $DNS_NAMES [ v3_ca ] basicConstraints = CA:TRUE subjectAltName = @alt_names " } ############################################################################### function get_hostname { if [[ $1 && $BOOTSTRAP_TEST_MODE == 1 ]];then FQHOSTNAME=$1 else FQHOSTNAME=`hostname -f 2>/dev/null` fi if type -p ec2-public-hostname; then FQHOSTNAME=`ec2-public-hostname` fi if [ "$FQHOSTNAME" = "" ]; then ask "Please enter the full qualified hostname!" FQHOSTNAME=$rv fi # fallback in non-interactive mode if [ "$FQHOSTNAME" = "" ]; then # Prefer interface with default route if one exists DEFAULT_ROUTE_INTERFACE=`LANG=C ip route show|perl -e '$_=<>; ( m/^default via.*dev\s+([\w]+)\s.*/ ) && print $1'` # Fallback to IP of the VM/host FQHOSTNAME=`LANG=C ip addr show $DEFAULT_ROUTE_INTERFACE| perl -lne '( m#^\s+inet\s+([0-9\.]+)(/\d+)?\s+.*# ) && print $1' | grep -v ^127. | head -n 1` if [ "$?" != "0" -o "$FQHOSTNAME" = "" ]; then echo " Can't determine hostname or IP - Network setup failed!" echo " Check if networking is up and dhcp is working!" echo " Using 'localhost' as FQHOSTNAME." FQHOSTNAME="localhost" fi USEIP=$FQHOSTNAME fi if [[ -z $USEIP ]];then DOMAINNAME="" if [[ $FQHOSTNAME =~ '.'
]];then DOMAINNAME=$(echo $FQHOSTNAME | perl -pe 's/^[\w\-_]*\.(.*)/$1/') SHORTHOSTNAME=$(echo $FQHOSTNAME | perl -pe 's/^([\w\-_]*)\..*/$1/') else SHORTHOSTNAME=$FQHOSTNAME fi fi } ############################################################################### function generate_proposed_dnsnames { if [[ ! $FQHOSTNAME ]];then get_hostname fi if [[ $FQHOSTNAME != 'localhost' ]];then LOCAL_HOST="localhost" fi if [[ $FQHOSTNAME == $SHORTHOSTNAME ]];then DNSNAMES="$SHORTHOSTNAME $LOCAL_HOST" else DNSNAMES="$SHORTHOSTNAME $FQHOSTNAME $LOCAL_HOST" fi ask "Proposed DNS names: " "$DNSNAMES" PROPOSED_DNS_NAMES=$rv } ############################################################################### function adjust_api_config { echo "Adjust configuration for this hostname" # use local host to avoid SSL verification between webui and api api_options_yml=$apidir/config/options.yml sed -i 's,^frontend_host: .*,frontend_host: "localhost",' $api_options_yml sed -i 's,^frontend_port: .*,frontend_port: 443,' $api_options_yml sed -i 's,^frontend_protocol: .*,frontend_protocol: "'"https"'",' $api_options_yml sed -i 's,^external_frontend_host: .*,external_frontend_host: "'"$FQHOSTNAME"'",' $api_options_yml sed -i 's,^external_frontend_port: .*,external_frontend_port: 443,' $api_options_yml sed -i 's,^external_frontend_protocol: .*,external_frontend_protocol: "'"https"'",' $api_options_yml } ############################################################################### function adapt_worker_jobs { #changed IP means also that leftover jobs are invalid - cope with that echo "Adapting present worker jobs" sed -i "s,server=\"http://[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*:5352,server=\"http://$FQHOSTNAME:5352,g" \ $backenddir/jobs/*/* 2> /dev/null sed -i "s,server=\"http://[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*:5252,server=\"http://$FQHOSTNAME:5252,g" \ $backenddir/jobs/*/* 2> /dev/null #remove old workers status and idling/building markers rm -f $backenddir/jobs/*/*status 2> /dev/null rm -f $backenddir/workers/*/* 2> /dev/null # create repo directory or apache fails when nothing got published mkdir -p $backenddir/repos chown obsrun.obsrun $backenddir/repos } ############################################################################### function prepare_database_setup { cd /srv/www/obs/api RAILS_ENV=production rails.ruby2.5 db:migrate:status > /dev/null if [[ $? > 0 ]];then echo "Initialize MySQL databases (first time only)" echo " - reconfiguring /etc/my.cnf" perl -p -i -e 's#.*datadir\s*=\s*/var/lib/mysql$#datadir= /srv/obs/MySQL#' /etc/my.cnf echo " - installing to new datadir" mysql_install_db echo " - changing ownership for new datadir" chown mysql:mysql -R /srv/obs/MySQL echo " - restarting mysql" systemctl restart mysql echo " - setting new password for user root in mysql" mysqladmin -u root password "opensuse" if [[ $? > 0 ]];then echo "ERROR: Your mysql setup doesn't fit your rails setup" echo "Please check your database settings for mysql and rails" exit 1 fi RUN_INITIAL_SETUP="true" fi RAKE_COMMANDS="" if [ -n "$RUN_INITIAL_SETUP" ]; then logline "Initialize OBS api database (first time only)" cd $apidir RAKE_COMMANDS="db:create db:setup writeconfiguration" else logline "Migrate OBS api database" cd $apidir RAKE_COMMANDS="db:migrate:with_data" echo fi logline "Setting ownership of '$backenddir' to obsrun" chown obsrun.obsrun $backenddir logline "Setting up rails environment" for cmd in $RAKE_COMMANDS do logline " - Doing 'rails.ruby2.5 $cmd'" RAILS_ENV=production bundle exec rails.ruby2.5 $cmd >> $apidir/log/db_migrate.log if [[ $?
> 0 ]];then (>&2 echo "Command $cmd FAILED") exit 1 fi done if [ -n "$RUN_INITIAL_SETUP" ]; then if [[ ! "$SETUP_ONLY" ]];then `systemctl restart obsscheduler.service` fi fi } ############################################################################### function add_login_info_to_issue { cat >> /etc/issue < /srv/www/obs/overview/index.html cat < /etc/issue ******************************************************************************* ** NETWORK SETUP FAILED ** ** ** ** OBS is not usable. A working DNS resolution for your host is required! ** ** You can check this with 'hostname -f'. ** ** This often happens in virtualization environments like e.g. VirtualBox. ** ** ** ** You also could run ** ** ** ** /usr/lib/obs/server/setup-appliance.sh ** ** ** ** for interactive configuration ** ** ** ******************************************************************************* EOF } ############################################################################### function check_server_key { # reuse signing key even if hostname changed if [ ! -e $backenddir/certs/server.key ]; then install -d -m 0700 $backenddir/certs openssl genrsa -out $backenddir/certs/server.key 1024 2>/dev/null fi } ############################################################################### function import_ca_cert { # apache has to trust the api ssl certificate if [ ! -e /etc/ssl/certs/server.${FQHOSTNAME}.crt ]; then cp $backenddir/certs/server.${FQHOSTNAME}.crt \ /usr/share/pki/trust/anchors/server.${FQHOSTNAME}.pem update-ca-certificates fi } ############################################################################### function relink_server_cert { if [[ $DETECTED_CERT_CHANGE == 1 ]];then CERT_LINK_FILE=$backenddir/certs/server.crt # check if CERT_LINK_FILE not exists or is symbolic link because we don't # want to remove real files if [ ! -e $CERT_LINK_FILE -o -L $CERT_LINK_FILE ];then # change links for certs according to hostnames cd $backenddir/certs rm -f server.crt ln -sf server.${FQHOSTNAME}.crt server.crt cd - >/dev/null fi fi } ############################################################################### function fix_permissions { cd $apidir chown -R wwwrun.www $apidir/log } ############################################################################### function create_issue_file { echo "Recreating /etc/issue" # create base version of /etc/issues cat > /etc/issue <> /etc/issue < /srv/www/obs/overview/index.html } ############################################################################### function ask { logline $1 if [[ $NON_INTERACTIVE == 1 ]];then rv=$2 logline "Using default value '$rv' in non-interactive mode" return fi echo "Default: $2" read rv if [[ ! $rv ]];then rv=$2 fi } ############################################################################### function check_required_backend_services { [[ $SETUP_ONLY == 1 ]] && return REQUIRED_SERVICES="obsrepserver obssrcserver obsscheduler obsdispatcher obspublisher" for srv in $REQUIRED_SERVICES ;do ENABLED=`systemctl is-enabled $srv` ACTIVE=`systemctl is-active $srv` [[ "$ENABLED" == "enabled" ]] || systemctl enable $srv [[ "$ACTIVE" == "active" ]] || systemctl start $srv done } ############################################################################### function check_recommended_backend_services { [[ $SETUP_ONLY == 1 ]] && return RECOMMENDED_SERVICES="obsdodup obsdeltastore obssigner obssignd obsservicedispatch" for srv in $RECOMMENDED_SERVICES;do STATE=$(chkconfig $srv|awk '{print $2}') if [[ $STATE != on ]];then ask "Service $srv is not enabled. 
Would you like to enable it? [Yn]" "y" case $rv in y|yes|Y|YES) systemctl enable $srv systemctl start $srv ;; esac fi done } ############################################################################### function check_optional_backend_services { DEFAULT_ANSWER="n" if [[ $ENABLE_OPTIONAL_SERVICES ]];then DEFAULT_ANSWER="y" fi [[ $SETUP_ONLY == 1 ]] && return OPTIONAL_SERVICES="obswarden obsapisetup obsstoragesetup obsworker obsservice obsservicedispatch" for srv in $OPTIONAL_SERVICES;do STATE=$(chkconfig $srv|awk '{print $2}') if [[ $STATE != on ]];then ask "Service $srv is not enabled. Would you like to enable it? [yN]" $DEFAULT_ANSWER case $rv in y|yes|Y|YES) systemctl enable $srv systemctl start $srv ;; esac fi done } ############################################################################### function prepare_apache2 { [[ $SETUP_ONLY == 1 ]] && return PACKAGES="apache2 apache2-mod_xforward rubygem-passenger-apache2 memcached" PKG2INST="" for pkg in $PACKAGES;do rpm -q $pkg >/dev/null || PKG2INST="$PKG2INST $pkg" done if [[ -n $PKG2INST ]];then zypper --non-interactive install $PKG2INST >/dev/null fi MODULES="passenger rewrite proxy proxy_http xforward headers socache_shmcb" for mod in $MODULES;do a2enmod -q $mod || a2enmod $mod done FLAGS=SSL for flag in $FLAGS;do a2enflag $flag >/dev/null done } ############################################################################### function prepare_passenger { perl -p -i -e \ 's#^(\s*)PassengerRuby "/usr/bin/ruby"#$1\PassengerRuby "/usr/bin/ruby.ruby2.5"#' \ /etc/apache2/conf.d/mod_passenger.conf } ############################################################################### function prepare_obssigner { # Only used if there is a local BSConfig if [ -e /usr/lib/obs/server/BSConfig.pm ]; then # signing setup perl -p -i -e 's,^\s*#\s*our \$gpg_standard_key.*,our \$gpg_standard_key = "/srv/obs/obs-default-gpg.asc";,' /usr/lib/obs/server/BSConfig.pm perl -p -i -e 's,^\s*#\s*our \$keyfile.*,our \$keyfile = "/srv/obs/obs-default-gpg.asc";,' /usr/lib/obs/server/BSConfig.pm perl -p -i -e 's,^\s*#\s*our \$sign = .*,our \$sign = "/usr/bin/sign";,' /usr/lib/obs/server/BSConfig.pm perl -p -i -e 's,^\s*#\s*our \$forceprojectkeys.*,our \$forceprojectkeys = 1;,' /usr/lib/obs/server/BSConfig.pm chmod 4755 /usr/bin/sign # create default gpg key if not existing if [ ! -e "$backenddir"/obs-default-gpg.asc ] && grep -q "^our \$keyfile.*/obs-default-gpg.asc.;$" /usr/lib/obs/server/BSConfig.pm; then GPG_KEY_CREATED=1 echo -n Generating OBS default GPG key .... mkdir -p "$backenddir"/gnupg/phrases chmod -R 0700 "$backenddir"/gnupg cat >/tmp/obs-gpg.$$ < "$backenddir"/obs-default-gpg.asc # empty file just for accepting the key touch "$backenddir/gnupg/phrases/defaultkey@localobs" fi # to update sign.conf also after an appliance update if [ -e "$backenddir"/obs-default-gpg.asc ] && ! grep -q "^user" /etc/sign.conf; then # extend signd config echo "user: defaultkey@localobs" >> /etc/sign.conf echo "server: 127.0.0.1" >> /etc/sign.conf echo "allowuser: obsrun" >> /etc/sign.conf echo "allow: 127.0.0.1" >> /etc/sign.conf echo "phrases: $backenddir/gnupg/phrases" >> /etc/sign.conf echo done rm /tmp/obs-gpg.$$ sed -i 's,^# \(our $sign =.*\),\1,' /usr/lib/obs/server/BSConfig.pm sed -i 's,^# \(our $forceprojectkeys =.*\),\1,' /usr/lib/obs/server/BSConfig.pm fi if [ ! 
-e "$backenddir"/obs-default-gpg.asc ] ; then sed -i 's,^\(our $sign =.*\),# \1,' /usr/lib/obs/server/BSConfig.pm sed -i 's,^\(our $forceprojectkeys =.*\),# \1,' /usr/lib/obs/server/BSConfig.pm fi fi } function setup_registry { # check if docker registry is installed or return logline "Starting container registry setup!" rpm -q --quiet obs-container-registry if [ $? -gt 0 ];then logline "Package 'obs-container-registry' not found. Skipping registry setup!" return fi # check if $container_registries already configured in BSConfig and return grep -q -P '^\s*our\s+\$container_registries\s*=' /usr/lib/obs/server/BSConfig.pm if [ $? -lt 1 ];then logline "Configuration for container_registries already active in BSConfig. Skipping registry setup!" return fi # check if $publish_containers already configured in BSConfig and return grep -q -P '^\s*our\s+\$publish_containers\s*=' /usr/lib/obs/server/BSConfig.pm if [ $? -lt 1 ];then logline "Configuration for publish_containers already active in BSConfig. Skipping registry setup!" return fi # reconfigure docker registry only to be accessible via apache proxy logline "Bind registry to loopback interface only" perl -p -i -e "s/0.0.0.0:5000/127.0.0.1:5000 # config changed by $0/" /etc/registry/config.yml # restart registry to reread confi if already started logline "Activating registry startup" systemctl status registry && systemctl restart registry systemctl enable registry systemctl start registry # configure $container_registries and $publish_containers # in BSConfig logline "Configuring local container registry in BSConfig" cat <> /usr/lib/obs/server/BSConfig.pm ### Configuration added by $0 our \$container_registries = { 'localhost' => { server => 'https://localhost:444', user => 'ignored', password => 'ignored', # Please be aware of the trailing slash repository_base => '/', } }; our \$publish_containers = [ '.*' => ['localhost'], ]; ### 1; EOF # check obspublisher and restart if needed logline "Checking obspublisher and restart if required." systemctl status obspublisher && systemctl restart obspublisher logline "Finished container registry setup!" } ############################################################################### # # MAIN # ############################################################################### export LC_ALL=C ENABLE_OPTIONAL_SERVICES=0 # package or appliance defaults if [ -e /etc/sysconfig/obs-server ]; then source /etc/sysconfig/obs-server fi # Set default directories apidir=/srv/www/obs/api backenddir=/srv/obs # Overwrite directory defaults with settings in # config file /etc/sysconfig/obs-server if [ -n "$OBS_BASE_DIR" ]; then backenddir="$OBS_BASE_DIR" fi if [[ ! $BOOTSTRAP_TEST_MODE == 1 && $0 != "-bash" ]];then NON_INTERACTIVE=0 while [[ $1 ]];do case $1 in --non-interactive) NON_INTERACTIVE=1;; --setup-only) SETUP_ONLY=1;; --enable-optional-services) ENABLE_OPTIONAL_SERVICES=1;; esac shift done # prepare configuration for obssigner before any other backend service # is started, because obssigner configuration might affect other services # too GPG_KEY_CREATED=0 prepare_obssigner if [[ $GPG_KEY_CREATED == 1 ]];then pushd . # avoid systemctl cd /etc/init.d ./obssrcserver reload ./obsrepserver reload popd fi check_required_backend_services check_recommended_backend_services check_optional_backend_services check_service mysql 1 get_hostname ### In case of the appliance, we never know where we boot up ! 
OLDFQHOSTNAME="NOTHING" if [ -e $backenddir/.oldfqhostname ]; then OLDFQHOSTNAME=`cat $backenddir/.oldfqhostname` fi DETECTED_HOSTNAME_CHANGE=0 if [ "$FQHOSTNAME" != "$OLDFQHOSTNAME" ]; then echo "Appliance hostname changed from $OLDFQHOSTNAME to $FQHOSTNAME !" DETECTED_HOSTNAME_CHANGE=1 fi if [[ $DETECTED_HOSTNAME_CHANGE == 1 ]];then adapt_worker_jobs adjust_api_config fi echo "$FQHOSTNAME" > $backenddir/.oldfqhostname OBSVERSION=`rpm -q --qf '%{VERSION}' obs-server` OS=`head -n 1 /etc/SuSE-release` RUN_INITIAL_SETUP="" prepare_database_setup check_server_key generate_proposed_dnsnames DNS_NAMES="$rv" DETECTED_CERT_CHANGE=0 check_server_cert import_ca_cert relink_server_cert fix_permissions prepare_apache2 prepare_passenger check_service apache2 check_service memcached # make sure that apache gets restarted after cert change if [[ $DETECTED_CERT_CHANGE && ! $SETUP_ONLY ]];then systemctl reload apache2 fi check_service obsapidelayed create_issue_file setup_registry if [ -n "$FQHOSTNAME" ]; then create_overview_html add_login_info_to_issue else network_failure_warning fi exit 0 fi open-build-service-2.9.4/dist/setup_source_service_docker.sh000077500000000000000000000105051332555733200243240ustar00rootroot00000000000000#!/bin/bash DAEMON_UID=2 DAEMON_GID=2 SUSE=$( . /etc/os-release; echo $ID ) VERSION=$( . /etc/os-release; echo $VERSION ) docker_image="suse/obs-source-service:latest" if [ $UID != 0 ];then echo "You must be root!" exit 1 fi if [ "$SUSE" == "opensuse" ]; then SUSE=openSUSE downloadserver="http://download.opensuse.org" echo "Adding repository for docker containers" zypper ar --refresh -n --no-gpg-checks $downloadserver/repositories/Virtualization:/containers/$SUSE"_Leap_"$VERSION/Virtualization:containers.repo echo "Adding repository for source service containment" zypper ar --refresh -n --no-gpg-checks -t rpm-md $downloadserver/repositories/OBS:/Server:/Unstable/containment/ Containment echo "Adding main obs repository" zypper ar --refresh -n --no-gpg-checks $downloadserver/repositories/OBS:/Server:/Unstable/$SUSE\_$VERSION/OBS:Server:Unstable.repo docker_install="obs-source-service-docker-image" elif [ "$SUSE" == "SLE" -o "$SUSE" == "sles" ]; then SUSE=SLE_ VERSION=${VERSION/-/_} echo "Adding repository for source service containment" zypper ar --refresh -n --no-gpg-checks -t rpm-md $downloadserver/repositories/OBS:/Server:/Unstable/containment/ Containment docker_install="obs-source-service-docker-image" # If we are on a SLE the download server and repositories must be different. # Must be clarified. Until clarification the installer only works on openSUSE Distributions. else echo "Something wrong with the OS brand. Must be SLE or opensuse ($SUSE)." exit fi function check_vg_opts { VG_NAME="`echo $HOSTNAME|perl -p -e 's/-/_/g'`_docker" if [ -z "$PV_DEVICE" ];then echo "Please enter pv device" read PV_DEVICE fi if [ -z "$NON_INTERACTIVE_MODE" ];then echo "PV device $PV_DEVICE is used" echo "VG name $VG_NAME is used." 
echo "Please confirm with 'yes'" read YES else YES=yes fi if [[ $YES != 'yes' ]];then exit 1 fi } function install_docker_vg { # setup is now done by docker daemon cat < /etc/docker/daemon.json { "storage-driver": "devicemapper", "storage-opts": [ "dm.directlvm_device=$PV_DEVICE", "dm.thinp_percent=95", "dm.thinp_metapercent=1", "dm.thinp_autoextend_threshold=80", "dm.thinp_autoextend_percent=20", "dm.directlvm_device_force=false" ] } EOF } echo "Refreshing repositories" zypper -n --gpg-auto-import-keys ref -s # Install the software we need echo "Installing required software" zypper -n remove kernel-default-base zypper -n install kernel-default zypper -n install obs-server obs-source_service perl-XML-Structured acl zypper -n install docker sle2docker echo "creating run directory for service" mkdir -p /srv/obs/run chown obsrun:obsrun /srv/obs/run echo "set extended acl feature on /srv/obs/run/" setfacl -m u:obsservicerun:rwx /srv/obs/run/ echo "Setting correct bsservicegroup" if grep -q "^our \$bsservicegroup =.*" /usr/lib/obs/server/BSConfig.pm; then echo "Changing service group ..." sed -i "s,^our \$bsservicegroup =.*,our \$bsservicegroup = 'docker';," /usr/lib/obs/server/BSConfig.pm fi echo "Setting service wrapper" if ! grep -q "^our \$service_wrapper =.*" /usr/lib/obs/server/BSConfig.pm; then echo "No service wrapper defined. We must create one..." echo "our \$service_wrapper = { '*' => '/usr/lib/obs/server/call-service-in-docker.sh' };1;" >> /usr/lib/obs/server/BSConfig.pm fi echo "Setting docker image" if ! grep -q "^our \$docker_image =.*" /usr/lib/obs/server/BSConfig.pm; then echo "No docker image configured. Will configure the docker image..." echo "our \$docker_image = 'suse/obs-source-service:latest';1;" >> /usr/lib/obs/server/BSConfig.pm fi echo "Altering default docker opts" sed -i 's,DOCKER_OPTS=.*, DOCKER_OPTS="--userns-remap=obsservicerun:obsrun",' /etc/sysconfig/docker echo "creating subuid and subgid" grep -q -P '^obsservicerun:' /etc/subuid || \ echo "obsservicerun:"$(($( id -u obsservicerun ) - $DAEMON_UID ))":65536" >> /etc/subuid grep -q -P '^obsrun:' /etc/subgid || \ echo "obsrun:"$(($( id -g obsservicerun ) - $DAEMON_GID ))":65536" >> /etc/subgid check_vg_opts install_docker_vg echo "enable and start docker" systemctl enable docker systemctl restart docker echo "enable and start obsservice" systemctl enable obsservice systemctl restart obsservice echo "install and register containment" zypper -n install --replacefiles $docker_install open-build-service-2.9.4/dist/stable_project_meta.xml.example000066400000000000000000000026361332555733200243660ustar00rootroot00000000000000 $VERSION version of the Open Build Service Server This is the maintained version $VERSION of OBS. x86_64 x86_64 x86_64 x86_64 x86_64 open-build-service-2.9.4/dist/staging_project_meta.xml.example000066400000000000000000000035451332555733200245500ustar00rootroot00000000000000 $VERSION version of the Open Build Service Server This is the maintained version $VERSION of OBS. x86_64 x86_64 x86_64 x86_64 x86_64 open-build-service-2.9.4/dist/sysconfig.obs-server000066400000000000000000000257511332555733200222240ustar00rootroot00000000000000# # NOTE: all these options can be also declared in /etc/buildhost.config on each worker differently. 
# ## Path: Applications/OBS ## Description: The OBS backend code directory ## Type: string ## Default: "" ## Config: OBS # # An empty dir will lead to the fall back directory, typically /usr/lib/obs/server/ # OBS_BACKENDCODE_DIR="" ## Path: Applications/OBS ## Description: The base for OBS communication directory ## Type: string ## Default: "" ## Config: OBS # # An empty dir will lead to the fall back directory, typically /srv/obs/run # OBS_RUN_DIR="" ## Path: Applications/OBS ## Description: The base for OBS logging directory ## Type: string ## Default: "" ## Config: OBS # # An empty dir will lead to the fall back directory, typically /srv/obs/log # OBS_LOG_DIR="" ## Path: Applications/OBS ## Description: The base directory for OBS ## Type: string ## Default: "" ## Config: OBS # # An empty dir will lead to the fall back directory, typically /srv/obs # OBS_BASE_DIR="" ## Path: Applications/OBS ## Description: Automatically setup api and webui for OBS server, be warned, this will replace config files ! ## Type: ("yes" | "no") ## Default: "no" ## Config: OBS # # This is usually only enabled on the OBS Appliance # OBS_API_AUTOSETUP="no" # # NOTE: all these options can also be declared in /etc/buildhost.config on each worker differently. # ## Path: Applications/OBS ## Description: define source server host to be used ## Type: string ## Default: "" ## Config: OBS # # An empty setting will point to localhost:5352 by default # OBS_SRC_SERVER="" ## Path: Applications/OBS ## Description: define repository server host to be used ## Type: string ## Default: "" ## Config: OBS # # An empty setting will point to localhost:5252 by default # OBS_REPO_SERVERS="" ## Path: Applications/OBS ## Description: define number of build instances ## Type: integer ## Default: 0 ## Config: OBS # # 0 instances will automatically use the number of CPUs # OBS_WORKER_INSTANCES="0" ## Path: Applications/OBS ## Description: define names of build instances for z/VM ## Type: string ## Default: "" ## Config: OBS # # The names of the workers as defined in z/VM. These must have two minidisks # assigned, and have a secondary console configured to the local machine: # 0150 is the root device # 0250 is the swap device # #OBS_WORKER_INSTANCE_NAMES="LINUX075 LINUX076 LINUX077" OBS_WORKER_INSTANCE_NAMES="" ## Path: Applications/OBS ## Description: The base directory, where sub directories for each worker will get created ## Type: string ## Default: "" ## Config: OBS # # OBS_WORKER_DIRECTORY="" ## Path: Applications/OBS ## Description: The base for port numbers used by worker instances ## Type: integer ## Default: "0" ## Config: OBS # # 0 means let the operating system assign a port number # OBS_WORKER_PORTBASE="0" ## Path: Applications/OBS ## Description: Number of parallel compile jobs per worker ## Type: integer ## Default: "1" ## Config: OBS # # this maps usually to "make -j1" during build # OBS_WORKER_JOBS="1" ## Path: Applications/OBS ## Description: Run in test mode (build results will be ignored, no job blocking) ## Type: ("yes" | "") ## Default: "" ## Config: OBS # OBS_WORKER_TEST_MODE="" ## Path: Applications/OBS ## Description: define one or more labels for the build host. ## Type: string ## Default: "" ## Config: OBS # # A label can be used to build specific packages only on dedicated hosts. # For example for benchmarking.
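# Example (hypothetical label names): OBS_WORKER_HOSTLABELS="benchmark bigmem"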
# OBS_WORKER_HOSTLABELS="" ## Path: Applications/OBS ## Description: can be used to define a security level of the worker ## Type: string ## Default: "" ## Config: OBS # # This will extend the hostlabels and can be used to limit the workers # to the hosts which have all security fixes deployed. # OBS_WORKER_SECURITY_LEVEL="" ## Path: Applications/OBS ## Description: Register in SLP server ## Type: ("yes" | "no") ## Default: "yes" ## Config: OBS # # OBS_USE_SLP="yes" ## Path: Applications/OBS ## Description: Use a common cache directory for downloaded packages ## Type: string ## Default: "" ## Config: OBS # # Enable caching requires a given directory here. Be warned, content will be # removed there ! # OBS_CACHE_DIR="" ## Path: Applications/OBS ## Description: Defines the package cache size ## Type: size in MB ## Default: "" ## Config: OBS # # Set the size to 50% of the maximum usable size of this partition # OBS_CACHE_SIZE="" ## Path: Applications/OBS ## Description: Defines the nice level of running workers ## Type: integer ## Default: 18 ## Config: OBS # # Nicenesses range from -20 (most favorable scheduling) to 19 (least # favorable). # Default to 18 as some testsuites depend on being able to switch to # one priority below (19) _and_ having changed the numeric level # (so going from 19->19 makes them fail). # OBS_WORKER_NICE_LEVEL=18 ## Path: Applications/OBS ## Description: Set used VM type by worker ## Type: ("auto" | "xen" | "kvm" | "lxc" | "zvm" | "emulator:$arch" | "emulator:$arch:$script" | "none" | "openstack") ## Default: "auto" ## Config: OBS # # OBS_VM_TYPE="auto" ## Path: Applications/OBS ## Description: Set kernel used by worker (kvm) ## Type: ("none" | "/boot/vmlinuz" | "/foo/bar/vmlinuz) ## Default: "none" ## Config: OBS # # For z/VM this is normally /boot/image # OBS_VM_KERNEL="none" ## Path: Applications/OBS ## Description: Set initrd used by worker (kvm) ## Type: ("none" | "/boot/initrd" | "/foo/bar/initrd-foo) ## Default: "none" ## Config: OBS # # for KVM, you have to create with (example for openSUSE 11.2): # # export rootfstype="ext4" # mkinitrd -d /dev/null -m "ext4 binfmt_misc virtio_pci virtio_blk" -k vmlinuz-2.6.31.12-0.2-default -i initrd-2.6.31.12-0.2-default-obs_worker # # a working initrd file which includes virtio and binfmt_misc for OBS in order to work fine # # for z/VM, the build script will create a initrd at the given location if # it does not yet exist. # OBS_VM_INITRD="none" ## Path: Applications/OBS ## Description: Autosetup for XEN/KVM/TMPFS disk (root) - Filesize in MB ## Type: integer ## Default: "4096" ## Config: OBS # # OBS_VM_DISK_AUTOSETUP_ROOT_FILESIZE="4096" ## Path: Applications/OBS ## Description: Autosetup for XEN/KVM disk (swap) - Filesize in MB ## Type: integer ## Default: "1024" ## Config: OBS # # OBS_VM_DISK_AUTOSETUP_SWAP_FILESIZE="1024" ## Path: Applications/OBS ## Description: Filesystem to use for autosetup {none,ext4}=ext4, ext3=ext3 ## Type: string ## Default: "ext3" ## Config: OBS # # OBS_VM_DISK_AUTOSETUP_FILESYSTEM="ext3" ## Path: Applications/OBS ## Description: Filesystem mount options to use for autosetup ## Type: string ## Default: "" ## Config: OBS # # OBS_VM_DISK_AUTOSETUP_MOUNT_OPTIONS="" ## Path: Applications/OBS ## Description: Enable build in memory ## Type: ("yes" | "") ## Default: "" ## Config: OBS # # WARNING: this requires much memory! 
# OBS_VM_USE_TMPFS="" ## Path: Applications/OBS ## Description: Specify custom options for VM handler ## Type: string ## Default: "" ## Config: OBS # # Can be used to workaround problems with VM handler and should not be needed usually # OBS_VM_CUSTOM_OPTION="" ## Path: Applications/OBS ## Description: Memory allocated for each VM (512) if not set ## Type: integer ## Default: "" ## Config: OBS # # OBS_INSTANCE_MEMORY="" ## Path: Applications/OBS ## Description: Enable storage auto configuration ## Type: ("yes" | "") ## Default: "" ## Config: OBS # # WARNING: this may destroy data on your hard disk ! # This is usually only used on mass deployed worker instances # OBS_STORAGE_AUTOSETUP="" ## Path: Applications/OBS ## Description: Setup LVM via obsstoragesetup ## Type: ("take_all" | "use_obs_vg" | "none") ## Default: "use_obs_vg" ## Config: OBS # # take_all: WARNING: all LVM partitions will be used and all data erased ! # use_obs_vg: A lvm volume group named "OBS" will be re-setup for the workers. # OBS_SETUP_WORKER_PARTITIONS="use_obs_vg" ## Path: Applications/OBS ## Description: Size in MB when creating LVM partition for cache partition ## Type: integer ## Default: "" ## Config: OBS # # OBS_WORKER_CACHE_SIZE="" ## Path: Applications/OBS ## Description: Size in MB when creating LVM partition for each worker root partition ## Type: integer ## Default: "" ## Config: OBS # # OBS_WORKER_ROOT_SIZE="" ## Path: Applications/OBS ## Description: Size in MB when creating LVM partition for each worker swap partition ## Type: integer ## Default: "" ## Config: OBS # # OBS_WORKER_SWAP_SIZE="" ## Path: Applications/OBS ## Description: URL to a proxy service for caching binaries used by worker ## Type: string ## Default: "" ## Config: OBS # # OBS_WORKER_BINARIES_PROXY="" ## Path: Applications/OBS ## Description: URL to a ssh pub key to allow root user login ## Type: string ## Default: "" ## Config: OBS # # This is usually used on mass (PXE) deployed workers) # OBS_ROOT_SSHD_KEY_URL="" ## Path: Applications/OBS ## Description: URL to a script to be downloaded and executed ## Type: string ## Default: "" ## Config: OBS # # This is a hook for doing special things in your setup at boot time # OBS_WORKER_SCRIPT_URL="" ## Path: Applications/OBS ## Description: If chroot/lxc is used for build, empty it after build is finished ## Type: ("yes" | "") ## Default: "" ## Config: OBS # # OBS_WORKER_CLEANUP_CHROOT="" ##Path: Application/OBS ## Description: wipes the build environment of the worker after the build ## Type: ("yes" | "") ## Default: "" ## Config: OBS # # OBS_WORKER_WIPE_AFTER_BUILD="" ##Path: Application/OBS ## Description: name or id of openstack instance that controls the worker (building) instances ## Type: ("yes" | "") ## Default: "" ## Config: OBS # # OBS_WORKER_CONTROL_INSTANCE="" ##Path: Application/OBS ## Description: name or id flavor to create openstack worker (building) instance ## Type: ("yes" | "") ## Default: "" ## Config: OBS # # OBS_WORKER_OS_FLAVOR="" ##Path: Application/OBS ## Description: openstack environment variables. 
Only used when OBS_VM_TYPE=openstack ## Type: string ## Default: "" ## Config: OBS # # OS_AUTH_URL="" OS_PROJECT_ID="" OS_PROJECT_NAME="" OS_USER_DOMAIN_NAME="" OS_USERNAME="" OS_PASSWORD="" OS_REGION_NAME="" OBS_WORKER_PREFIX="" OBS_OPENSTACK_DISK_SIZE="" OBS_OPENSTACK_SWAP_SIZE="" OBS_OPENSTACK_MEMORY_SIZE="" open-build-service-2.9.4/dist/systemd/000077500000000000000000000000001332555733200176655ustar00rootroot00000000000000open-build-service-2.9.4/dist/systemd/obsapidelayed.service000066400000000000000000000046001332555733200240540ustar00rootroot00000000000000[Unit] Description=OBS API Delayed server After=network.target [Service] EnvironmentFile=/etc/sysconfig/obs-server ExecStart=/bin/bash -c "/usr/bin/echo -n 'Starting OBS api delayed job handler'" ExecStart=chroot --userspec=www-data:www-data / /bin/bash -c "cd /srv/www/obs/api && /usr/bin/bundle exec script/delayed_job.api.rb --queue=quick start -n 3" ExecStart=chroot --userspec=www-data:www-data / /bin/bash -c "cd /srv/www/obs/api && /usr/bin/bundle exec script/delayed_job.api.rb --queue=releasetracking start -i 1000" ExecStart=chroot --userspec=www-data:www-data / /bin/bash -c "cd /srv/www/obs/api && /usr/bin/bundle exec script/delayed_job.api.rb --queue=issuetracking start -i 1010" ExecStart=chroot --userspec=www-data:www-data / /bin/bash -c "cd /srv/www/obs/api && /usr/bin/bundle exec script/delayed_job.api.rb --queue=mailers start -i 1020" ExecStart=chroot --userspec=www-data:www-data / /bin/bash -c "cd /srv/www/obs/api && /usr/bin/bundle exec script/delayed_job.api.rb start -i 1030" ExecStart=/bin/bash -c "/usr/bin/echo -n 'Starting OBS api clock daemon'" ExecStart=chroot --userspec=www-data:www-data / /bin/bash -c "cd /srv/www/obs/api && /usr/bin/bundle exec /usr/bin/clockworkd --log-dir=log -l -c config/clock.rb start" ExecStop=/bin/bash -c "/usr/bin/echo -n 'Shutting down OBS api delayed job handler'" ExecStop=chroot --userspec=www-data:www-data / /bin/bash -c "cd /srv/www/obs/api && /usr/bin/bundle exec script/delayed_job.api.rb --queue=quick stop -n 3" ExecStop=chroot --userspec=www-data:www-data / /bin/bash -c "cd /srv/www/obs/api && /usr/bin/bundle exec script/delayed_job.api.rb --queue=releasetracking stop -i 1000" ExecStop=chroot --userspec=www-data:www-data / /bin/bash -c "cd /srv/www/obs/api && /usr/bin/bundle exec script/delayed_job.api.rb --queue=issuetracking stop -i 1010" ExecStop=chroot --userspec=www-data:www-data / /bin/bash -c "cd /srv/www/obs/api && /usr/bin/bundle exec script/delayed_job.api.rb --queue=mailers stop -i 1020" ExecStop=chroot --userspec=www-data:www-data / /bin/bash -c "cd /srv/www/obs/api && /usr/bin/bundle exec script/delayed_job.api.rb stop -i 1030" ExecStop=/bin/bash -c "/usr/bin/echo -n 'Shutting down OBS api clock daemon'" ExecStop=chroot --userspec=www-data:www-data / /bin/bash -c "cd /srv/www/obs/api && /usr/bin/bundle exec /usr/bin/clockworkd --log-dir=log -l -c config/clock.rb stop" KillMode=process [Install] WantedBy=multi-user.target open-build-service-2.9.4/dist/systemd/obsdeltastore.service000066400000000000000000000005371332555733200241260ustar00rootroot00000000000000[Unit] Description=OBS deltastore daemon After=network.target obsstoragesetup.service [Service] EnvironmentFile=/etc/sysconfig/obs-server ExecStart=/usr/lib/obs/server/bs_deltastore --logfile deltastore.log ExecStop=/usr/lib/obs/server/bs_deltastore --stop ExecReload=/usr/lib/obs/server/bs_deltastore --restart [Install] WantedBy=multi-user.target
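The unit files in dist/systemd all follow the same pattern: an EnvironmentFile pointing at the /etc/sysconfig/obs-server settings documented above, an ExecStart for the daemon, and a stop or reload hook. A minimal sketch of driving such a unit by hand, assuming the obs-server package installed it (the chosen service is only an example):

# enable and start the deltastore daemon
systemctl enable --now obsdeltastore.service
# trigger its ExecReload after editing /etc/sysconfig/obs-server
systemctl reload obsdeltastore.service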
open-build-service-2.9.4/dist/systemd/obsdispatcher.service000066400000000000000000000003431332555733200241010ustar00rootroot00000000000000[Unit] Description=OBS job dispatcher After=network.target [Service] EnvironmentFile=/etc/sysconfig/obs-server ExecStart=/usr/lib/obs/server/bs_dispatch ExecReload=/bin/kill -HUP $MAINPID [Install] WantedBy=multi-user.target open-build-service-2.9.4/dist/systemd/obsdodup.service000066400000000000000000000004701332555733200230670ustar00rootroot00000000000000[Unit] Description=OBS dodup, updates download on demand metadata After=network.target [Service] EnvironmentFile=/etc/sysconfig/obs-server ExecStart=/usr/lib/obs/server/bs_dodup ExecStop=/usr/lib/obs/server/bs_dodup --stop ExecReload=/usr/lib/obs/server/bs_dodup --restart [Install] WantedBy=multi-user.target open-build-service-2.9.4/dist/systemd/obspublisher.service000066400000000000000000000003501332555733200237460ustar00rootroot00000000000000[Unit] Description=OBS repository publisher After=network.target [Service] EnvironmentFile=/etc/sysconfig/obs-server ExecStart=/usr/lib/obs/server/bs_publish ExecReload=/bin/kill -HUP $MAINPID [Install] WantedBy=multi-user.target open-build-service-2.9.4/dist/systemd/obsrepserver.service000066400000000000000000000003471332555733200237740ustar00rootroot00000000000000[Unit] Description=OBS repository server After=network.target [Service] EnvironmentFile=/etc/sysconfig/obs-server ExecStart=/usr/lib/obs/server/bs_repserver ExecReload=/bin/kill -HUP $MAINPID [Install] WantedBy=multi-user.target open-build-service-2.9.4/dist/systemd/obsscheduler@.service000066400000000000000000000004101332555733200240240ustar00rootroot00000000000000[Unit] Description=OBS scheduler service for %I After=network.target [Service] EnvironmentFile=/etc/sysconfig/obs-server ExecStart=/usr/lib/obs/server/bs_sched %i ExecStop=/usr/lib/obs/server/bs_admin --shutdown-scheduler %i [Install] WantedBy=multi-user.target open-build-service-2.9.4/dist/systemd/obsservice.service000066400000000000000000000003511332555733200234120ustar00rootroot00000000000000[Unit] Description=OBS source service server After=network.target [Service] EnvironmentFile=/etc/sysconfig/obs-server ExecStart=/usr/lib/obs/server/bs_service ExecReload=/bin/kill -HUP $MAINPID [Install] WantedBy=multi-user.target open-build-service-2.9.4/dist/systemd/obssigner.service000066400000000000000000000003411332555733200232400ustar00rootroot00000000000000[Unit] Description=OBS signer service After=network.target [Service] EnvironmentFile=/etc/sysconfig/obs-server ExecStart=/usr/lib/obs/server/bs_signer ExecReload=/bin/kill -HUP $MAINPID [Install] WantedBy=multi-user.target open-build-service-2.9.4/dist/systemd/obssrcserver.service000066400000000000000000000003561332555733200237750ustar00rootroot00000000000000[Unit] Description=OBS source repository server After=network.target [Service] EnvironmentFile=/etc/sysconfig/obs-server ExecStart=/usr/lib/obs/server/bs_srcserver ExecReload=/bin/kill -HUP $MAINPID [Install] WantedBy=multi-user.target open-build-service-2.9.4/dist/systemd/obswarden.service000066400000000000000000000003571332555733200232400ustar00rootroot00000000000000[Unit] Description=OBS warden, monitors the workers After=network.target [Service] EnvironmentFile=/etc/sysconfig/obs-server ExecStart=/usr/lib/obs/server/bs_warden ExecReload=/bin/kill -HUP $MAINPID [Install] WantedBy=multi-user.target 
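obsscheduler@.service above is a systemd template unit: systemd substitutes the instance name for %i, so one scheduler per build architecture can be started from the same file. A sketch, assuming schedulers for x86_64 and i586 are wanted (the architecture list is illustrative):

systemctl enable --now obsscheduler@x86_64.service obsscheduler@i586.service
systemctl status 'obsscheduler@*'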
open-build-service-2.9.4/dist/systemd/obsworker@.service000066400000000000000000000013001332555733200233660ustar00rootroot00000000000000[Unit] Description=OBS worker for %i After=network.target [Service] EnvironmentFile=/etc/sysconfig/obs-server ExecStartPre=/bin/sh -c "/bin/systemctl set-environment HOSTNAME=`hostname`" ExecStartPre=/bin/mkdir -p /var/cache/build/root_%i ExecStartPre=/bin/mkdir -p /var/cache/build/state_%i ExecStartPre=/bin/chmod 755 /var/cache/build/root_%i ExecStartPre=/bin/chmod 755 /var/cache/build/state_%i ExecStart=/usr/lib/obs/server/bs_worker --hardstatus --root /var/cache/build/root_%i --statedir /var/cache/build/state_%i --id ${HOSTNAME}:%i --reposerver http://${OBS_REPO_SERVERS} --jobs $OBS_WORKER_JOBS ExecReload=/bin/kill -HUP $MAINPID [Install] WantedBy=multi-user.target open-build-service-2.9.4/dist/t/000077500000000000000000000000001332555733200164405ustar00rootroot00000000000000open-build-service-2.9.4/dist/t/.gitignore000066400000000000000000000000041332555733200204220ustar00rootroot00000000000000tmp open-build-service-2.9.4/dist/t/0000-check_users_and_group.ts000066400000000000000000000005731332555733200237260ustar00rootroot00000000000000#!/bin/bash export BASH_TAP_ROOT=$(dirname $0) . $(dirname $0)/bash-tap-bootstrap plan tests 3 for group in obsrun;do result_group=$(getent group $group | cut -f1 -d:) is "$result_group" "$group" "Checking group $group" done for user in obsrun obsservicerun;do result_user=$(getent passwd $user | cut -f1 -d:) is "$result_user" "$user" "Checking user $user" done open-build-service-2.9.4/dist/t/0010-obs-bootstrap-api.t000066400000000000000000000053721332555733200225570ustar00rootroot00000000000000#!/bin/bash export BOOTSTRAP_TEST_MODE=1 export NON_INTERACTIVE=1 export BASH_TAP_ROOT=$(dirname $0) . $(dirname $0)/bash-tap-bootstrap if [ -f $(dirname $0)/../setup-appliance.sh ]; then . $(dirname $0)/../setup-appliance.sh else if [ -f /usr/lib/obs/server/setup-appliance.sh ];then . 
/usr/lib/obs/server/setup-appliance.sh else BAIL_OUT "Could not find setup-appliance.sh" fi fi plan tests 12 ################################################################################ # Cleanup temporary files rm -rf $(dirname $0)/tmp get_hostname localhost is "$FQHOSTNAME" "localhost" "Checking FQHOSTNAME without domain" is "$DOMAINNAME" "" "Checking with empty DOMAINNAME" is "$SHORTHOSTNAME" "localhost" "Checking SHORTHOSTNAME localhost" generate_proposed_dnsnames is "$rv" 'localhost ' "Checking proposed dns names without domain" get_hostname foobar.suse.de is "foobar.suse.de" $FQHOSTNAME "Checking FQHOSTNAME " is "$SHORTHOSTNAME" "foobar" "Checking SHORTHOSTNAME foobar" generate_proposed_dnsnames is "$rv" 'foobar foobar.suse.de localhost' "Checking proposed dns names" # CHECKING CERT TEMPLATE create_selfsigned_certificate is "$OPENSSL_CONFIG" 'prompt = no distinguished_name = req_distinguished_name [req_distinguished_name] countryName = CC stateOrProvinceName = OBS Autogen State or Province localityName = OBS Autogen Locality organizationName = OBS Autogen Organisation organizationalUnitName = OBS Autogen Organizational Unit commonName = foobar.suse.de emailAddress = test@email.address [req] req_extensions = v3_req distinguished_name = req_distinguished_name attributes = req_attributes x509_extensions = v3_ca [req_attributes] [ v3_req ] basicConstraints = CA:FALSE keyUsage = nonRepudiation, digitalSignature, keyEncipherment [ v3_ca ] subjectKeyIdentifier=hash authorityKeyIdentifier=keyid:always,issuer basicConstraints = CA:true [ v3_req ] # Extensions to add to a certificate request basicConstraints = critical,CA:FALSE keyUsage = digitalSignature, keyEncipherment subjectAltName = @alt_names [alt_names] DNS.0 = foobar DNS.1 = foobar.suse.de DNS.2 = localhost [ v3_ca ] basicConstraints = CA:TRUE subjectAltName = @alt_names ' export backenddir=$(dirname $0)/tmp/ mkdir -p $backenddir/certs check_server_key key_file=$backenddir/certs/server.key [ -e $key_file ] is 0 $? "Checking if key file ($key_file) exists" check_server_cert for ext in crt created do file=server.foobar.suse.de.$ext; [ -e $backenddir/certs/$file ] is $? 0 "Checking file $file" done relink_server_cert SUBJ=$(openssl x509 -text -noout -in $backenddir/certs/server.crt |grep DNS) is \ "$SUBJ" \ ' DNS:foobar, DNS:foobar.suse.de, DNS:localhost'\ "Checking openssl certificate subject" open-build-service-2.9.4/dist/t/0030-installed-files.t000066400000000000000000000010171332555733200222630ustar00rootroot00000000000000#!/bin/bash # export BOOTSTRAP_TEST_MODE=1 export NON_INTERACTIVE=1 export BASH_TAP_ROOT=$(dirname $0) # . $(dirname $0)/bash-tap-bootstrap # plan tests 7 for i in \ $DESTDIR/etc/logrotate.d/obs-server\ $DESTDIR/etc/init.d/obssrcserver\ $DESTDIR/etc/init.d/obsdodup\ $DESTDIR/usr/sbin/obs_admin\ $DESTDIR/usr/sbin/obs_serverstatus do [[ -e $i ]] is $? 0 "Checking $i" done for i in \ $DESTDIR/usr/sbin/rcobssrcserver\ $DESTDIR/usr/sbin/rcobsdodup do [[ -L $i ]] is $? 0 "Checking $i" done open-build-service-2.9.4/dist/t/0050-test-appliance.ta000066400000000000000000000016211332555733200222610ustar00rootroot00000000000000#!/bin/bash export BOOTSTRAP_TEST_MODE=1 export NON_INTERACTIVE=1 export BASH_TAP_ROOT=$(dirname $0) . $(dirname $0)/bash-tap-bootstrap plan tests 2 for i in $(dirname $0)/../setup-appliance.sh /usr/lib/obs/server/setup-appliance.sh;do [[ -f $i && -z $SETUP_APPLIANCE ]] && SETUP_APPLIANCE=$i done if [[ -z $SETUP_APPLIANCE ]];then BAIL_OUT "Could not find setup appliance" fi . 
$SETUP_APPLIANCE MAX_WAIT=300 tmpcount=$MAX_WAIT # Service enabled and started for srv in \ obsapisetup do STATE=` systemctl is-enabled $srv\.service 2>/dev/null` is "$STATE" "enabled" "Checking $srv enabled" ACTIVE=`systemctl is-active $srv\.service` while [[ $ACTIVE != 'active' ]];do tmpcount=$(( $tmpcount - 1 )) ACTIVE=`systemctl is-active $srv\.service` if [[ $tmpcount -le 0 ]];then ACTIVE='timeout' break fi sleep 1 done is "$ACTIVE" "active" "Checking $srv status" done open-build-service-2.9.4/dist/t/0060-check_required_services.ts000066400000000000000000000015341332555733200242560ustar00rootroot00000000000000#!/usr/bin/env perl use strict; use warnings; use Test::More 'tests' => 16; my $max_wait = 300; my @daemons = qw/obsapidelayed obsdispatcher obspublisher obsrepserver obsscheduler obssrcserver apache2 mysql/; foreach my $srv (@daemons) { my @state=`systemctl is-enabled $srv\.service 2>/dev/null`; chomp($state[-1]); is($state[-1],"enabled","Checking if required service $srv is enabled"); } my %srv_state=(); while ($max_wait > 0) { foreach my $srv (@daemons) { my @state=`systemctl is-active $srv\.service 2>/dev/null`; chomp($state[0]); if ( $state[0] eq 'active') { $srv_state{$srv} = 'active'; } } if ( keys(%srv_state) == scalar(@daemons) ) { last; } $max_wait--; sleep 1; } foreach my $srv ( @daemons ) { is($srv_state{$srv} || 'timeout','active',"Checking required service '$srv' status"); } exit 0; open-build-service-2.9.4/dist/t/0070-check_recommended_services.ts000066400000000000000000000014451332555733200247220ustar00rootroot00000000000000#!/usr/bin/env perl use strict; use warnings; use Test::More 'tests' => 8; my $max_wait = 300; my @daemons = qw/obsdodup obssigner obsdeltastore obsservicedispatch/; foreach my $srv (@daemons) { my @state=`systemctl is-enabled $srv\.service 2>/dev/null`; chomp($state[-1]); is($state[-1],"enabled","Checking if recommended service $srv is enabled"); } my %srv_state=(); while ($max_wait > 0) { foreach my $srv (@daemons) { my @state=`systemctl is-active $srv\.service 2>/dev/null`; chomp($state[0]); if ( $state[0] eq 'active') { $srv_state{$srv} = 'active'; } } if ( keys(%srv_state) == scalar(@daemons) ) { last; } $max_wait--; sleep 1; } foreach my $srv ( @daemons ) { is($srv_state{$srv} || 'timeout','active',"Checking recommended service '$srv' status"); } exit 0; open-build-service-2.9.4/dist/t/0080-check_ssl_certs.ts000066400000000000000000000012641332555733200225360ustar00rootroot00000000000000#!/bin/bash export BOOTSTRAP_TEST_MODE=1 export NON_INTERACTIVE=1 export BASH_TAP_ROOT=$(dirname $0) . $(dirname $0)/bash-tap-bootstrap plan tests 5 for i in $(dirname $0)/../setup-appliance.sh /usr/lib/obs/server/setup-appliance.sh;do [[ -f $i && -z $SETUP_APPLIANCE ]] && SETUP_APPLIANCE=$i done if [[ -z $SETUP_APPLIANCE ]];then BAIL_OUT "Could not find setup appliance" fi . $SETUP_APPLIANCE get_hostname FQHN=$FQHOSTNAME for file in \ server.crt \ server.key \ server.$FQHN\.created \ server.$FQHN\.crt do [ -e /srv/obs/certs/$file ] is "$?" 0 "Checking file $file" done curl https://localhost &>/dev/null is "$?" 0 "Checking https://localhost for SSL Certificate Errors" open-build-service-2.9.4/dist/t/0090-check_database.ts000066400000000000000000000010101332555733200222730ustar00rootroot00000000000000#!/bin/bash export BASH_TAP_ROOT=$(dirname $0) . 
$(dirname $0)/bash-tap-bootstrap plan tests 3 DB_NAME=api_production DB_EXISTS=$(mysql -e "show databases"|grep $DB_NAME) is "$DB_EXISTS" "$DB_NAME" "Checking if database exists" TABLES_IN_DB=$(mysql -e "show tables" $DB_NAME) [[ $TABLES_IN_DB ]] is "$?" 0 "Checking if tables in database $DB_NAME" D=`ps -ef|grep "mysqld .* --datadir=/srv/obs/MySQL"|wc -l` [ $D -gt 1 -o -f /srv/obs/MySQL/*.pid ] is "$?" 0 "Checking if database is started under /srv/obs/MySQL" open-build-service-2.9.4/dist/t/0100-check_webserver_and_api.ts000066400000000000000000000016401332555733200242030ustar00rootroot00000000000000#!/bin/bash export BASH_TAP_ROOT=$(dirname $0) . $(dirname $0)/bash-tap-bootstrap plan tests 4 if [ ! -f $HOME/.oscrc ];then cat <<EOF > $HOME/.oscrc [general] apiurl = https://localhost [https://localhost] user = Admin pass = opensuse EOF fi API_VERSION=$(osc api about|grep revision|perl -p -e 's#.*<revision>(.*)</revision>.*#$1#') RPM_VERSION=$(rpm -q --qf "%{version}\n" obs-server) is $API_VERSION $RPM_VERSION "Checking api about version" OSC_UNAUTHORIZED=$(osc -A https://localhost ls 2>&1|grep 401) [ -z "$OSC_UNAUTHORIZED" ] is "$?" 0 "Checking authorization for osc" # test /apidocs HTTP_OK=$(curl -ik https://localhost/apidocs/index 2>/dev/null |grep "200 OK") [ -n "$HTTP_OK" ] is $? 0 "Checking for https://localhost/apidocs/index" STATUS_CODE_200=$(curl -I http://localhost 2>/dev/null|head -1|grep -w 200) [[ -n $STATUS_CODE_200 ]] is "$?" 0 "Checking http://localhost for http status code 200" open-build-service-2.9.4/dist/t/Gemfile000066400000000000000000000002151332555733200177310ustar00rootroot00000000000000source 'https://rubygems.org' gem 'capybara' gem 'rspec-core' gem 'rspec-expectations' # as driver for capybara gem 'poltergeist', '>= 1.4' open-build-service-2.9.4/dist/t/Gemfile.lock000066400000000000000000000021251332555733200206620ustar00rootroot00000000000000GEM remote: https://rubygems.org/ specs: addressable (2.5.0) public_suffix (~> 2.0, >= 2.0.2) capybara (2.13.0) addressable mime-types (>= 1.16) nokogiri (>= 1.3.3) rack (>= 1.0.0) rack-test (>= 0.5.4) xpath (~> 2.0) cliver (0.3.2) diff-lcs (1.3) mime-types (3.1) mime-types-data (~> 3.2015) mime-types-data (3.2016.0521) mini_portile2 (2.1.0) nokogiri (1.7.1) mini_portile2 (~> 2.1.0) poltergeist (1.14.0) capybara (~> 2.1) cliver (~> 0.3.1) websocket-driver (>= 0.2.0) public_suffix (2.0.5) rack (2.0.1) rack-test (0.6.3) rack (>= 1.0) rspec-core (3.5.4) rspec-support (~> 3.5.0) rspec-expectations (3.5.0) diff-lcs (>= 1.2.0, < 2.0) rspec-support (~> 3.5.0) rspec-support (3.5.0) websocket-driver (0.6.5) websocket-extensions (>= 0.1.0) websocket-extensions (0.1.2) xpath (2.0.0) nokogiri (~> 1.3) PLATFORMS ruby DEPENDENCIES capybara poltergeist (>= 1.4) rspec-core rspec-expectations BUNDLED WITH 1.13.7 open-build-service-2.9.4/dist/t/README.md000066400000000000000000000015341332555733200177220ustar00rootroot00000000000000# Open Build Service Appliance QA Suite This is a test suite based on perl's [prove](http://perldoc.perl.org/prove.html) and [RSpec](http://rspec.info/). We are testing the following: * The appliance boots and all OBS servers start * Sign Up & Log In via the frontend works * Building a simple package works ## Running the suite This test suite runs [automatically](https://github.com/os-autoinst/os-autoinst-distri-obs) against new appliances that got built from our [OBS:Server:Unstable](https://build.opensuse.org/project/show/OBS:Server:Unstable) on [openQA](https://openqa.opensuse.org/). 
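The same checks can also be run by hand against a local appliance; a minimal sketch, assuming perl's `prove` and the gems from the `Gemfile` above are installed (the paths are illustrative):

```
cd dist/t
prove -v *.ts *.t        # TAP-based shell and perl checks
bundle exec rspec spec   # Capybara-based browser specs
```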
## QA for package updates Additionally our [test instance](https://build-test.opensuse.org/) rebuilds all packages in [OBS:Server:Unstable](https://build.opensuse.org/project/show/OBS:Server:Unstable) and its publisher calls "zypper up" to update itself. open-build-service-2.9.4/dist/t/bash-tap000066400000000000000000000171651332555733200200720ustar00rootroot00000000000000#!/bin/bash bash_tap_version='1.0.2' # Our state. _bt_plan='' _bt_expected_tests=0 _bt_plan_output=0 _bt_current_test=0 _bt_tap_output='' _bt_has_output_plan=0 _bt_done_testing=0 _bt_output_capture=0 # Our test results so far unset _bt_test_ok unset _bt_test_actual_ok unset _bt_test_name unset _bt_test_type unset _bt_test_reason # Cleanup stuff. declare -a _bt_on_exit_cmds trap "_bt_on_exit" EXIT # Planning functions. function _bt_output_plan() { local num_tests="$1" local directive="$2" local reason="$3" if [ "$_bt_has_output_plan" = 1 ]; then _bt_caller_error "The plan was already output" fi _bt_clear_out _bt_out "1..$num_tests" if [ -n "$directive" ]; then _bt_out " # $directive" fi if [ -n "$reason" ]; then _bt_out " $reason" fi _bt_print_out _bt_has_output_plan=1 } function plan() { local plan="$1" case "$plan" in no_plan) no_plan ;; skip_all) skip_all "$2" ;; tests) expected_tests "$2" ;; *) _bt_die "Unknown or missing plan: '$plan'" ;; esac } function expected_tests() { local num="$1" if [ -z "$num" ]; then echo $_bt_expected_tests else if [ -n "$_bt_plan" ]; then _bt_caller_error "Plan is already defined" fi # TODO: validate _bt_plan="$num" _bt_expected_tests="$num" _bt_output_plan "$_bt_expected_tests" fi } function no_plan() { if [ -n "$_bt_plan" ]; then _bt_caller_error "Plan is already defined" fi _bt_plan="no plan" } function done_testing() { local num_tests="$1" if [ -z "$num_tests" ]; then num_tests="$_bt_current_test" fi if [ "$_bt_done_testing" = 1 ]; then _bt_caller_error "done_testing was already called" fi if [ "$_bt_expected_tests" != 0 -a "$num_tests" != "$_bt_expected_tests" ]; then ok 0 "planned to run $_bt_expected_tests but done_testing expects $num_tests" else _bt_expected_tests="$num_tests" fi if [ "$_bt_has_output_plan" = 0 ]; then _bt_plan="done testing" _bt_output_plan "$num_tests" fi } function has_plan() { test -n "$_bt_plan" } function skip_all() { local reason="${*:?}" _bt_output_plan 0 SKIP "$reason" } # Test functions. 
function ok() { local result="$1" local name="$2" _bt_current_test=$((_bt_current_test + 1)) # TODO: validate $name if [ -z "$name" ]; then name='unnamed test' fi name="${name//#/\\#}" _bt_clear_out if [ "$result" = 0 ]; then _bt_out "not ok" if [ -n "$TODO" ]; then _bt_test_ok[$_bt_current_test]=1 else _bt_test_ok[$_bt_current_test]=0 fi _bt_test_actual_ok[$_bt_current_test]=0 else _bt_out "ok" _bt_test_ok[$_bt_current_test]=1 _bt_test_actual_ok[$_bt_current_test]="$result" fi _bt_out " $_bt_current_test - $name" _bt_test_name[$_bt_current_test]="$name" if [ -n "$TODO" ]; then _bt_out " # TODO $TODO" _bt_test_reason[$_bt_current_test]="$TODO" _bt_test_type[$_bt_current_test]="todo" else _bt_test_reason[$_bt_current_test]='' _bt_test_type[$_bt_current_test]='' fi _bt_print_out } function _is_diag() { local result="$1" local expected="$2" diag " got: '$result'" diag " expected: '$expected'" } function is() { local result="$1" local expected="$2" local name="$3" if [ "$result" = "$expected" ]; then ok 1 "$name" else ok 0 "$name" _is_diag "$result" "$expected" fi } function _isnt_diag() { local result="$1" local expected="$2" diag " got: '$result'" diag " expected: anything else" } function isnt() { local result="$1" local expected="$2" local name="$3" if [ "$result" != "$expected" ]; then ok 1 "$name" else ok 0 "$name" _isnt_diag "$result" "$expected" fi } function like() { local result="$1" local pattern="$2" local name="$3" # NOTE: leave $pattern unquoted, see http://stackoverflow.com/a/218217/870000 if [[ "$result" =~ $pattern ]]; then ok 1 "$name" else ok 0 "$name" diag " got: '$result'" diag " expected: match for '$pattern'" fi } function unlike() { local result="$1" local pattern="$2" local name="$3" # NOTE: leave $pattern unquoted, see http://stackoverflow.com/a/218217/870000 if [[ ! "$result" =~ $pattern ]]; then ok 1 "$name" else ok 0 "$name" diag " got: '$result'" diag " expected: no match for '$pattern'" fi } function cmp_ok() { echo TODO } # Other helper functions function BAIL_OUT() { echo "Bail out! 
$@" exit 255 } function skip() { echo TODO } function todo_skip() { echo TODO } function todo_start() { echo TODO } function todo_end() { echo TODO } # Output function diag() { local message="$1" if [ -n "$message" ]; then _bt_escaped_echo "# $message" fi } # Util functions for output capture within current shell function start_output_capture() { if [ $_bt_output_capture = 1 ]; then finish_output_capture _bt_caller_error "Can't start output capture while already active" fi local stdout_tmpfile="/tmp/bash-itunes-test-out.$$" local stderr_tmpfile="/tmp/bash-itunes-test-err.$$" _bt_add_on_exit_cmd "rm -f '$stdout_tmpfile' '$stderr_tmpfile'" _bt_output_capture=1 exec 3>&1 >$stdout_tmpfile 4>&2 2>$stderr_tmpfile } function finish_output_capture() { local capture_stdout_varname="$1" local capture_stderr_varname="$2" if [ $_bt_output_capture != 1 ]; then _bt_caller_error "Can't finish output capture when it wasn't started" fi exec 1>&3 3>&- 2>&4 4>&- _bt_output_capture=0 if [ -n "$capture_stdout_varname" ]; then local stdout_tmpfile="/tmp/bash-itunes-test-out.$$" eval "$capture_stdout_varname=\$(< $stdout_tmpfile)" fi if [ -n "$capture_stderr_varname" ]; then local stderr_tmpfile="/tmp/bash-itunes-test-err.$$" eval "$capture_stderr_varname=\$(< $stderr_tmpfile)" fi } # Internals function _bt_stdout() { echo "$@" } function _bt_stderr() { echo "$@" >&2 } function _bt_die() { _bt_stderr "$@" exit 255 } # Report an error from the POV of the first calling point outside this file function _bt_caller_error() { local message="$*" local thisfile="${BASH_SOURCE[0]}" local file="$thisfile" local frame_num=2 until [ "$file" != "$thisfile" ]; do frame=$(caller "$frame_num") IFS=' ' read line func file <<<"$frame" done _bt_die "Error: $message, on line $line of $file" } # Echo the supplied message with lines after the # first escaped as TAP comments. function _bt_escaped_echo() { local message="$*" local output='' while IFS= read -r line; do output="$output\n# $line" done <<<"$message" echo -e "${output:4}" } function _bt_clear_out() { _bt_tap_output="" } function _bt_out() { _bt_tap_output="$_bt_tap_output$*" } function _bt_print_out() { _bt_escaped_echo "$_bt_tap_output" } # Cleanup stuff function _bt_add_on_exit_cmd() { _bt_on_exit_cmds[${#_bt_on_exit_cmds[*]}]="$*" } function _bt_on_exit() { if [ $_bt_output_capture = 1 ]; then finish_output_capture fi for exit_cmd in "${_bt_on_exit_cmds[@]}"; do diag "cleanup: $exit_cmd" eval "$exit_cmd" done # TODO: check that we've output a plan/results } open-build-service-2.9.4/dist/t/bash-tap-bootstrap000066400000000000000000000015411332555733200220760ustar00rootroot00000000000000#!/bin/bash # # Bash TAP Bootstrap: # Copy this file into your project tests dir and source it # from each test file with: # . $(dirname $0)/bash-tap-bootstrap # It takes care of finding bash-tap or outputing a usage message. # bash_tap_bootstrap_version='1.0.2' if [ "${BASH_SOURCE[0]}" = "$0" ]; then # Being run directly, probably by test harness running entire dir. echo "1..0 # SKIP bash-tap-bootstrap isn't a test file" exit 0 fi if [ -z "$BASH_TAP_ROOT" ]; then # TODO: search likely locations. BASH_TAP_ROOT="$(dirname ${BASH_SOURCE[0]})/../../bash-tap" fi if [ -f "$BASH_TAP_ROOT/bash-tap" ]; then . "$BASH_TAP_ROOT/bash-tap" else echo "Bail out! Unable to find bash-tap. Install from https://github.com/illusori/bash-tap or set \$BASH_TAP_ROOT if you have it installed somewhere unusual." 
exit 255 fi open-build-service-2.9.4/dist/t/osc/000077500000000000000000000000001332555733200172245ustar00rootroot00000000000000open-build-service-2.9.4/dist/t/osc/0000_configure_osc.ts000066400000000000000000000005171332555733200230630ustar00rootroot00000000000000#!/usr/bin/env perl use strict; use warnings; use Test::More tests => 1; eval { my $oscrc="$::ENV{HOME}/.oscrc"; open(OSCRC,'>',$oscrc) || die "Could not open $oscrc: $!"; print OSCRC "[general] apiurl = https://localhost [https://localhost] user=Admin pass=opensuse "; close OSCRC; }; ok(!$@,"Configuring oscrc"); exit 0 open-build-service-2.9.4/dist/t/osc/0010_create_interconnect.ts000066400000000000000000000003261332555733200242530ustar00rootroot00000000000000#!/usr/bin/env perl use strict; use warnings; use Test::More tests => 1; use FindBin; system("osc meta prj openSUSE.org -F $FindBin::Bin/fixtures/openSUSE.org.xml"); ok(!$?,"Configuring interconnect"); exit 0; open-build-service-2.9.4/dist/t/osc/0020_create_home:Admin.ts000066400000000000000000000003401332555733200235500ustar00rootroot00000000000000#!/usr/bin/env perl use strict; use warnings; use Test::More tests => 1; use FindBin; system("osc meta prj home:Admin -F $FindBin::Bin/fixtures/home:Admin.xml"); ok(!$?,"Checking creation of home:Admin project"); exit 0 open-build-service-2.9.4/dist/t/osc/0030_create_and_build_package.ts000066400000000000000000000047531332555733200251660ustar00rootroot00000000000000#!/usr/bin/env perl use strict; use warnings; use Test::More tests => 4; use FindBin; use File::Path qw(make_path remove_tree); use File::Copy; use Cwd; my $RCODE=0; my $TMP_DIR="$FindBin::Bin/tmp"; # prepare TMP_DIR remove_tree($TMP_DIR); make_path($TMP_DIR); chdir($TMP_DIR); # checkout home:Admin system("osc co home:Admin"); ok(!$?,"Checking preparation of project"); # prepare package eval { chdir("home:Admin") || die "Could not change to directory 'home:Admin': $!"; mkdir("obs-testpackage") || die "Could not create directory 'obs-testpackage':$!"; system("osc add obs-testpackage"); die "Could not add package 'obs-testpackage' via osc" if ($?); chdir("obs-testpackage") || die "Could not change to directory '".cwd()."/obs-testpackage': $!"; my $src="$FindBin::Bin/fixtures/obs-testpackage._service"; my $dst="./_service"; copy($src,$dst) or die "Copy '$src' -> '$dst' failed: $!"; system("osc ar"); die "Could not add files to package via osc!" if ($?); }; ok(!$@,"Checking preparation of package"); # commit package system('osc ci -m "initial version"'); ok(!$?,"Checking initial commit of package obs-testpackage"); # wait for building results my $time_out = 60 * 60; # wait for at least an hour my $start_time = time(); my $retry_timeout = 5; # retry after X seconds my $succeed; while (1) { my $states = { broken => 0, scheduled => 0, succeeded => 0, building => 0, failed => 0, signing => 0, finished => 0, unresolvable => 0 }; my $re = join('|',keys(%$states)); my $recalculation = 0; my @result = `osc r`; for my $line (@result) { if ( $line =~ /($re)(\*)?$/) { if (($2 ||'') eq '*'){ $recalculation = 1; } else { $states->{$1}++; } } } # test reached timeout (e.g. stuck while signing) last if (($start_time + $time_out) < time()); if (! 
$recalculation) { # if all have succeeded and no recalculation is needed the test succeeds $succeed = 1 if ($states->{succeeded} == @result); # if any of the results is failed/broken the whole test is failed my $bad_results = $states->{failed} + $states->{broken} + $states->{unresolvable}; if ($bad_results > 0) { $succeed = 0; print STDERR "@result"; } } last if (defined($succeed)); sleep($retry_timeout); } my $r = ok($succeed,"Checking if build succeeded"); if (! $r) { open(F,">","$TMP_DIR/.SKIP") || die "Error while touching $TMP_DIR/.SKIP: $!"; close(F); } exit 0 open-build-service-2.9.4/dist/t/osc/0040_delete_package_build.ts000066400000000000000000000011141332555733200243300ustar00rootroot00000000000000#!/usr/bin/env perl use strict; use warnings; use Test::More; use FindBin; use File::Path qw(make_path remove_tree); my $TMP_DIR="$FindBin::Bin/tmp/"; if ( -f "$TMP_DIR/.SKIP" ) { plan skip_all => "Previous tests failed - keeping results"; } else { plan tests => 2; chdir("$TMP_DIR/home:Admin"); system("osc delete obs-testpackage"); ok(!$?,"Deleting package obs-testpackage"); system("osc ci -m \"removed package obs-testpackage\""); ok(!$?,"Committing deleted package obs-testpackage"); # cleanup TMP_DIR chdir($FindBin::Bin); remove_tree($TMP_DIR); } exit 0; open-build-service-2.9.4/dist/t/osc/0200-check_docker_registry.ts000066400000000000000000000035211332555733200245100ustar00rootroot00000000000000#!/usr/bin/perl use strict; use warnings; use Test::More tests => 2; # These tests need a lot of resources, so they should be # skippable SKIP: { skip "tests disabled by default. To enable set ENABLE_DOCKER_REGISTRY_TESTS=1", 2 unless $ENV{ENABLE_DOCKER_REGISTRY_TESTS}; `osc rdelete -m "testing deleted it" -rf BaseContainer 2>&1`; `rm -rf /tmp/BaseContainer`; `osc branch openSUSE.org:openSUSE:Templates:Images:42.3:Base openSUSE-Leap-Container-Base BaseContainer`; chdir("/tmp"); `osc co BaseContainer/openSUSE-Leap-Container-Base`; chdir("/tmp/BaseContainer/openSUSE-Leap-Container-Base"); `osc meta prjconf openSUSE.org:openSUSE:Templates:Images:42.3:Base |osc meta prjconf -F -`; open(my $fh, "<", "/tmp/BaseContainer/openSUSE-Leap-Container-Base/config.kiwi")||die $!; my $result; while (<$fh>) { s#obs://#obs://openSUSE.org:#; $result .= $_; } close($fh); open(my $of, ">", "/tmp/BaseContainer/openSUSE-Leap-Container-Base/config.kiwi")||die $!; print $of $result; close($of); `osc ci -m "reconfigured 'source path' elements to use 'openSUSE.org:' as prefix in config.kiwi"`; `osc r -w`; ok($? 
== 0,"Checking result code"); my $last_upload=""; my $timeout=1800; # waiting for publishing to start sleep 10; # Waiting for publishing while (1) { my $found=""; $timeout--; open(my $fh, "<", "/srv/obs/log/publisher.log"); while (<$fh>) { $last_upload = $1 if ( $_ =~ /Decompressing.*(\/tmp\/\w*)/ ); } close($fh); open($fh, "<", "/srv/obs/log/publisher.log"); while (<$fh>) { $found = $1 if ( $_ =~ /Deleting ($last_upload)/); } close($fh); if ( $found ) { ok(1,"Checking for upload"); last; } else { if ($timeout < 0 ) { ok(0,"Checking for upload"); last; } } sleep 1; } } exit 0; open-build-service-2.9.4/dist/t/osc/Makefile000066400000000000000000000001121332555733200206560ustar00rootroot00000000000000check: @for i in *.ts; do perl -c $$i; done test_system: prove -v *.ts open-build-service-2.9.4/dist/t/osc/fixtures/000077500000000000000000000000001332555733200210755ustar00rootroot00000000000000open-build-service-2.9.4/dist/t/osc/fixtures/home:Admin.xml000066400000000000000000000006471332555733200236210ustar00rootroot00000000000000 <description/> <person userid="Admin" role="maintainer"/> <repository name="openSUSE_Tumbleweed"> <path project="openSUSE.org:openSUSE:Factory" repository="snapshot"/> <arch>i586</arch> </repository> <repository name="openSUSE_Leap_42.2"> <path project="openSUSE.org:openSUSE:Leap:42.2" repository="standard"/> <arch>x86_64</arch> </repository> </project> �����������������������������������������������������������������������������������������open-build-service-2.9.4/dist/t/osc/fixtures/obs-testpackage._service�������������������������������0000664�0000000�0000000�00000000766�13325557332�0025703�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������<services> <service name="obs_scm"> <param name="versionformat">%ad</param> <param name="url">git://github.com/M0ses/obs-testpackage.git</param> <param name="scm">git</param> <param name="extract">dist/obs-testpackage.spec</param> </service> <service name="tar" mode="buildtime"/> <service name="recompress" mode="buildtime"> <param name="compression">xz</param> <param name="file">*.tar</param> </service> <service name="set_version" mode="buildtime"/> </services> ����������open-build-service-2.9.4/dist/t/osc/fixtures/openSUSE.org.xml���������������������������������������0000664�0000000�0000000�00000001705�13325557332�0024051�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������<project name="openSUSE.org"> <title>Remote OBS instance This project is representing a remote build service instance. 
https://api.opensuse.org/public</remoteurl> </project> open-build-service-2.9.4/dist/t/spec/000077500000000000000000000000001332555733200173725ustar00rootroot00000000000000open-build-service-2.9.4/dist/t/spec/features/000077500000000000000000000000001332555733200212105ustar00rootroot00000000000000open-build-service-2.9.4/dist/t/spec/features/0010_authentication_spec.rb000066400000000000000000000011131332555733200262220ustar00rootroot00000000000000require "spec_helper" RSpec.describe "Authentication" do after(:example) do logout end it "should be able to sign up" do visit "/" fill_in 'login', with: 'test_user' fill_in 'email', with: 'test_user@openqa.com' fill_in 'pwd', with: 'opensuse' fill_in 'pwd_confirmation', with: 'opensuse' click_button('Sign Up') expect(page).to have_content("The account 'test_user' is now active.") expect(page).to have_link('link-to-user-home') end it "should be able to login" do login expect(page).to have_link('link-to-user-home') end end open-build-service-2.9.4/dist/t/spec/features/0020_interconnect_spec.rb000066400000000000000000000007341332555733200257070ustar00rootroot00000000000000require "spec_helper" RSpec.describe "Interconnect" do before(:context) do login end after(:context) do logout end it "should be able to create link" do visit "/configuration/interconnect" # Don't wait for the javascript text replacement... page.execute_script("$('input[type=\"submit\"]').prop('disabled', false)") click_button('Save changes') expect(page).to have_content("Project 'openSUSE.org' was created successfully") end end open-build-service-2.9.4/dist/t/spec/features/0030_project_spec.rb000066400000000000000000000012441332555733200246600ustar00rootroot00000000000000require "spec_helper" RSpec.describe "Project" do before(:context) do login end after(:context) do logout end it "should be able to create" do within("div#subheader") do click_link('Create Home') end click_button('Create Project') expect(page).to have_content("Project 'home:Admin' was created successfully") end it "should be able to add repositories" do within("div#subheader") do click_link('Home Project') end click_link('Repositories') click_link('Add repositories') check('repo_openSUSE_Leap_42_3') expect(page).to have_content("Successfully added repository 'openSUSE_Leap_42.3'") end end open-build-service-2.9.4/dist/t/spec/features/0040_package_spec.rb000066400000000000000000000043061332555733200246100ustar00rootroot00000000000000require "spec_helper" RSpec.describe "Package" do before(:context) do login end after(:context) do logout end it "should be able to create new" do within("div#subheader") do click_link('Home Project') end click_link('Create package') fill_in 'name', with: 'ctris' fill_in 'title', with: 'ctris' fill_in 'description', with: 'ctris' click_button('Save changes') expect(page).to have_content("Package 'ctris' was created successfully") end it "should be able to upload files" do within("div#subheader") do click_link('Home Project') end click_link('ctris') click_link('Add file') attach_file("file", File.expand_path('../../fixtures/ctris.spec', __FILE__)) click_button('Save changes') expect(page).to have_content("The file 'ctris.spec' has been successfully saved.") # second line of defense ;-) click_link('Add file') attach_file("file", File.expand_path('../../fixtures/ctris-0.42.tar.bz2', __FILE__)) click_button('Save changes') expect(page).to have_content("The file 'ctris-0.42.tar.bz2' has been successfully saved.") end it "should be able to branch" do within("div#subheader") do click_link('Home Project') end
click_link('Branch existing package') fill_in 'linked_project', with: 'openSUSE.org:openSUSE:Tools' fill_in 'linked_package', with: 'build' # Do not wait for autocomplete page.execute_script("$('input[type=\"submit\"]').prop('disabled', false)") click_button('Create Branch') expect(page).to have_content('build.spec') end it 'should be able to delete' do click_link('Delete package') expect(page).to have_content('Do you really want to delete this package?') click_button('Ok') expect(page).to have_content('Package was successfully removed.') end it "should be able to successfully build" do 100.downto(1) do |counter| visit("/package/show/home:Admin/ctris") # wait for the build results ajax call sleep(5) puts "Refreshed build results, #{counter} retries left." succeed_build = page.all('td', class: 'status_succeeded') if succeed_build.length == 1 break end end end end open-build-service-2.9.4/dist/t/spec/fixtures/000077500000000000000000000000001332555733200212435ustar00rootroot00000000000000open-build-service-2.9.4/dist/t/spec/fixtures/ctris-0.42.tar.bz2000066400000000000000000000331021332555733200241530ustar00rootroot00000000000000BZh91AY&SYͷ./_}B~}s>^*t'knEӻT\kN&v}U l Ԁw;=v)OmKl)WHLCE@U֡IB%@ Rh`̡Es1"F22be6CI&CSe!y 2Ѡhi@zh !#$SƩG 4 Q zOP4iS~a=MMQ=&D`dOSGzG=F'PIMQ)fHɣF 21h4 !STdh4ib @h$ @ hěMO(ɣFF@ 4 C@X%o)[ "0ZJY$`PwA'* !#V6gx&Q&q$JmGX^ՍuvqB}t PY+"ьDHA"E AG`̜$#&b 8O宱q -hW04f_ $A=tQ"2$R,ȉ"2 Ub, A*0"$H$bAQ` *ADAbDVZ0HH#Q"#c (QB"BD  >; ĭm3 L{6Oj38pXMM *4̆C$؀l(ʛt߲֙>'mLb 1Z0QV Ѡ3OtнZcn1@DX,o73nG",V)QRх@`M+biDm֬| , EWX,Ubs$=#o' "PD':/_K=ֻC,b(i*pv ev)K!ۢG 3ٜ~Oy `ӦUMg,HURx̥Au!_H!oaǔ}\f:VovhowdOt@$+2jp6p&34|w@"T(<OT"@;ƠCoMyl= {ٞ z q!j7h;J]V+˩xu3$'G[ue**ڕ$I&9Z-m^So顛+i:5߬ .kkE~6c}[,~ .}8 :)t ٶ}: *A$I)HUUb7Msq| عxhVUdx_[0hbndXR ֭7r<:G)Z.׀Cylx1Da{w7!Ix֊AC"1S΍uJ}N>?]Q!aW/ uP c(y2è! h@2@rόanQQ֪:mM>YǏYfs.G A6'`69ָY5f"CƻQ:GsHl#Fh5@U u5C^fxFG:=məfw)a7rܝ.Eg "(:k#EO[k#|ahU0=kާe_-&mW7p/ ZՔZ̫ W]V'NGs<2Qq}GR_泍T tBkB%AQ9#NXvi,W*JMfVt¼5()AEb(BʻdP)(P wΜQY1܊BT&mw`B6]]%V5$S"v.zX$DIetuʤd&))%gi,2m0 Xݥ-C aC9Ҏɐk2z_ca3hᖙZK"񲸅 Fz,L[љ~A7)ڑ\ٚwWp1"<]Fݰ"P% |vc֬@ QQÉ#F@nJ9NK Ys w뼪lks ĢuY|)m}N/*ZvV~/%uZUw(la|͙(c|E%gKf[`TV,\{ab4׬T&*t]0u8]d [:mvT4 8-YH+fq"Zk]鮴i#}aXƢ;^X56'b&:ERh[D $=XX}UfX۳],v̘.d, 0b+yrX/ cY6&TaFtreW-]R o)'YpӚ4x(i]pG@SBXpBn5#vK-48CeL-P(9жUOuP=Y‰#,ײVcnY4L8ZX^(1;4f 笊6H7gS<@_n%qLq]{uszX">0}˛x={h <'RɫB #kC=H߰.^KSɬss5ujUkm˖Փ[f$ Ҋ|o* ϐ;Zh3Uhc4R.E0OlNX]PҤ}h6@;-k>o[\BWvxeR%;m"(#2UI E%ؚM nN9V[2aUĄ =ItbP**6fjPleT8,&V9VML2Atluo9~|<(מߺtO 8/a%5¾`d!Rq[)%i!aP!h :oAl2Z""~VOQ1$!4PAx>m-r90į_*ɧ>l ݥYrkWT_\4 @P5,;KBi]!`'A ↙K4̳1XHއtb0ݍʕ:O29m3ΨN{5BUGT^%fι̫c>*Q* ,ElՠcĐ J tzH@6}ʷ$pWHXfr/taN^3X[gE/" НΟE[|\ja˛Hw8-KaXslI (ѻoX~98Cٛ`{0t%( yD/.VZR`ܲX"dQ59;G4:opU@}utM~mlѨQuUN($ {!6݌)H? !alR  jRi~yV=g wn|/=[8r|5͵˦\oW9*{Y9GOsm(K22@{p@H%yL(>x,==v )$(Mq&9@R-v[`': 79c}xq0f.B]h-P9`8E4ymADPA`8X\I/9T!.+fSD0(_ eQvyu_{u3oo6̖kL-֣`|G; Y2\7(im.,ӫ5P,('7V)V͔ Lq,2 Q&Q慤^!8Vh랔DANRC("!!<׷kNmI݁0ϢXni; CgVS` 5. 
q@& Ti b0ס~~Wܒ@9_I~7gW $m+S$Āq2IFc6STh,J!a# (ʧxARpJ>HLT%$'w:qAzM1yi xj^ "B1{$AzRє\K "(<yP1I uDzm> f*«Y^)àhSID?À_ Ӱ0-K<)22Szr<ɚ0dJ|Ֆ|8ide7em501E,PX",XZhwO wfm50뽑]XĈAXSI2dn..Ú `n2Wzptk/cvӴ@ ;=-'/5)]9DY7۷ޮZZUs[axf:R2!~ohń;%K ;(Na?9]F tC nv@Ĕv|<àdʒcKfwO@sKMwsC:\O/o'bY,2 YT >MřB6|x@>CSse 'ubgǖU Qv:ߺ}G~}6 vGc"^`#9.a6=rU]/\cqGr:oOI9uX̆3A #6@dO|'h`ty$>= ݜE{[_`g̝6a G*<`nL_ع蛠 vEՐ?Eq&=qb騁 /Cpm96I(nܥˊ;]W4dffdhXI97ϝL gg^@܆ :y&Us6h4NqW@ xly:u_>K&y|MВmGS!FHO}?kZ3yU!>B1f\zȆ6*vÿͅdp` Xa=(v}N!'c}ߕluJ d81K\>/{`lzMQn,ߒ;@!p`>O:y]<>@ɞa t (pL܏<:wgxv 00h"JIpG2"KI'=!$YHI_PA,UtoWlf Ti30F@c'3#;] :KarCb@(%s;STA+< zzz =zs^$ok HׄGvk=gֳxGDDe;W3.&Q%(FCYt%'7rLjL!%Ck"@}6^$Y ޠ.n 7}\7?|N{^\Dll1@H"2υTU=Xk9qmh  S^bFP$Fq̌iT5CXBi| $T8a v3fj uKfkⵅXa{KAaCy>^j d l ~0AKffr`t12)h27|kL3hٶ9ܠ/鼺 ,KꙕV_6C [ceV긝+PP62NV)Q|[0u > Q6a]V#BX&l4p9p ʩGEU$xeAf|Cc)WQ#SY|b6|*x[`>u1܍r 1DwB!ul.=CIDžͬІZڦsq5RH YKMupY,b0b0lD*(-9DBIZdUi\ژCf[P4ճtɋL]`%0(aKA8uڀ3^_/q5 Ʒ !\&ferO[XD&2c=([%Jc6dň߅6[^C\Q1Qo! T<M䔾6oG 3e z Ϻt+D,C:\;gNO`EqQBßG^>nЛ$Vm$l& 4Lrl(f&%΋4D I&>?B@"I$a;7snL wp cqc'.fB#8p;VLKRD%Bnsa+Wwu<;QZwvI--]Yw`m23s/MhH|BQ|*]Ț޾BOV!M@pWM%m;ieB\ B.Z$~Mq "Û_3 잖ǁNO7ZZ玚 1׷kFcM23&tMM6\!Q]Ob YA`#!(ʃۖ!P 3u̲N q0pEn,TI2usrһ$$1H$q xT c̶7\[/pRb'i7L49;GtדKn8`AU ׳j&"^A}w $˭(e^F #(&KW D %lfK+=T@,bY@aD0eєag Z г* Gz{P.@(L"paa.j*k@e\(d8d9\\# p@J6y%vXfᵆ^2lŹSAQ#Íw7VYf@aSmbU٤P"%D6 "FED$V@E ߤ7 -pYœic$%s3 oÚy= Ɔp$T+C%;;a0pCNwՋ6ĭGV#8K7UV. ˻:dB@$P`^ nBud2K2 XB YXkP'LuLZ N\.E U 8P5)52"oqIҤ9xfaoIb N"C·qbm6jbNcO`nxhS6 vCQ~y`l[|`Z$6Z`yVH :yWiz:cil>gr١vm])xbmyBv^Y9>CHO^dYaƑ$;v#n8evc6\- b33#gQe( %re|vĹc:)_5{Tgqsi.UM{+cEJ*q*^?Fk@(%JB"(Hfۗzopen-build-service-2.9.4/dist/t/spec/fixtures/ctris.spec000066400000000000000000000012521332555733200232430ustar00rootroot00000000000000Name: ctris Summary: Console based tetris clone URL: http://www.hackl.dhs.org/ctris/ Group: Amusements/Games/Action/Arcade License: GPL Version: 0.42 Release: 1 Source0: %{name}-%{version}.tar.bz2 BuildRequires: ncurses-devel BuildRoot: %{_tmppath}/%{name}-%{version}-build %description ctris is a colorized, small and flexible Tetris(TM)-clone for the console. Go play! %prep %setup -q %build make CFLAGS="$RPM_OPT_FLAGS" %install make install DESTDIR=$RPM_BUILD_ROOT %clean rm -rf $RPM_BUILD_ROOT; %files %defattr (-,root,root) %doc AUTHORS COPYING README TODO %doc %{_mandir}/man6/ctris.6.gz /usr/games/ctris open-build-service-2.9.4/dist/t/spec/spec_helper.rb000066400000000000000000000024571332555733200222200ustar00rootroot00000000000000# OBS Appliance spec helper. 
# # for capybara rspec support require 'support/capybara' SCREENSHOT_DIR = "/tmp/rspec_screens" RSpec.configure do |config| config.before(:suite) do FileUtils.rm_rf(SCREENSHOT_DIR) FileUtils.mkdir_p(SCREENSHOT_DIR) end config.after(:each) do |example| if example.exception take_screenshot(example) dump_page(example) end end config.fail_fast = 1 end def dump_page(example) filename = File.basename(example.metadata[:file_path]) line_number = example.metadata[:line_number] dump_name = "dump-#{filename}-#{line_number}.html" dump_path = File.join(SCREENSHOT_DIR, dump_name) page.save_page(dump_path) end def take_screenshot(example) filename = File.basename(example.metadata[:file_path]) line_number = example.metadata[:line_number] screenshot_name = "screenshot-#{filename}-#{line_number}.png" screenshot_path = File.join(SCREENSHOT_DIR, screenshot_name) page.save_screenshot(screenshot_path) end def login visit "/user/login" fill_in 'user-login', with: 'Admin' fill_in 'user-password', with: 'opensuse' click_button('log-in-button') expect(page).to have_link('link-to-user-home') end def logout within("div#subheader") do click_link('Logout') end expect(page).to have_no_link('link-to-user-home') end open-build-service-2.9.4/dist/t/spec/support/000077500000000000000000000000001332555733200211065ustar00rootroot00000000000000open-build-service-2.9.4/dist/t/spec/support/capybara.rb000066400000000000000000000013621332555733200232170ustar00rootroot00000000000000require 'capybara' require 'capybara/dsl' require 'capybara/poltergeist' require 'socket' Capybara.register_driver :poltergeist do |app| Capybara::Poltergeist::Driver.new(app, debug: false, timeout: 60) end Capybara.default_driver = :poltergeist Capybara.javascript_driver = :poltergeist Capybara.save_path = '/tmp/rspec_screens' # Set hostname begin hostname = Socket.gethostbyname(Socket.gethostname).first rescue SocketError hostname = "" end ipaddress = Socket.ip_address_list.find { |ai| ai.ipv4? && !ai.ipv4_loopback? }.ip_address if hostname.empty? hostname = ipaddress end Capybara.app_host = ENV['SMOKETEST_HOST'].nil? ? 
"https://#{hostname}" : "http://localhost:3000" RSpec.configure do |config| config.include Capybara::DSL end open-build-service-2.9.4/docker-compose.ci.yml000066400000000000000000000006051332555733200212620ustar00rootroot00000000000000version: "2" services: rspec: image: openbuildservice/frontend volumes: - .:/obs depends_on: - db command: /obs/contrib/start_rspec db: image: openbuildservice/mariadb volumes: - mysql_vol:/var/lib/mysql_tmpfs/ - .:/obs command: /obs/contrib/start_test_db volumes: mysql_vol: driver_opts: type: tmpfs device: tmpfs open-build-service-2.9.4/docker-compose.ci_old.yml000066400000000000000000000006711332555733200221230ustar00rootroot00000000000000version: "2" services: old-test-suite: image: openbuildservice/old-test-suite volumes: - .:/obs_readonly:ro privileged: true depends_on: - db command: /obs/contrib/start_old_tests db: image: openbuildservice/mariadb volumes: - mysql_vol:/var/lib/mysql_tmpfs/ - .:/obs command: /obs/contrib/start_test_db volumes: mysql_vol: driver_opts: type: tmpfs device: tmpfs open-build-service-2.9.4/docker-compose.yml000066400000000000000000000011751332555733200206730ustar00rootroot00000000000000version: "2" services: db: image: openbuildservice/mariadb ports: - "3306:3306" cache: image: openbuildservice/memcached ports: - "11211:11211" backend: image: openbuildservice/backend volumes: - .:/obs worker: image: openbuildservice/backend volumes: - .:/obs privileged: true depends_on: - backend command: /obs/contrib/start_development_worker frontend: image: openbuildservice/frontend build: context: . volumes: - .:/obs ports: - "3000:3000" depends_on: - db - cache - backend - worker open-build-service-2.9.4/docs/000077500000000000000000000000001332555733200161625ustar00rootroot00000000000000open-build-service-2.9.4/docs/ReleaseNotes-0.9000066400000000000000000000025601332555733200210050ustar00rootroot000000000000001. Inter Buildservice Connectivity ---------------------------------- To make the projects of another buildservice available at http://remote.buildservice.net available, create a project in your build service containing a "remoteurl" element. Example: The openSUSE Build Service https://api.opensuse.org/public This allows access to the sources and repositories of the remote service: - repository pathes can contain remote repository elements - source links can refer to remote sources - aggregations can refer to remote packages Refering to a remote project works by prepending the project name of the project containing the remoteurl to the remote project name. For example, project openSUSE:Tools from the openSUSE build service would be available by using "openSUSE.org:openSUSE:Tools". Automatic rebuild triggering also works for remote resources. You must be the build service administrator to create projects containing the remoteurl element. Limitations (aka bugs to be fixed): - each build service can only be referenced once in your project tree - remote projects cannot itself be remote on the other service - remote aggregates must specify which packages to take the binaries from - failing builds because of server unavaiability will not automatically be retried Hava a lot of fun! open-build-service-2.9.4/docs/ReleaseNotes-0.9.1000066400000000000000000000004771332555733200211510ustar00rootroot00000000000000# # OBS Bugfix release 0.9.1 # Following fixes compared to 0.9: * fix osc local build with remote projects. * fix https support for remote build service. * fix ignore flags suppression in project configuration. 
* Increased timeouts for Inter Build Service connect -> 0.9.1 is now required to use api.opensuse.org! open-build-service-2.9.4/docs/ReleaseNotes-1.0000066400000000000000000000015471332555733200207770ustar00rootroot00000000000000# # Release Notes for openSUSE Build Service 1.0.0 # Please read the README.SETUP file for initial installation instructions. README.UPDATE has information for updaters. Main new features are: * Submission request handling. It is possible to create, show, accept or decline a package submission from one project to another. * The source handling has improved for source links. Merged sources are checked out by default and the server creates a source link with changes at submission time. * The branch request creates a branch project from another one. This project builds against the branched one and has package(s) with source links inside. Important Bugfixes: * Flag handling has been fixed for the debuginfo flag in the api and web frontend. * The rpmlint call got enabled if configured in the project config. * The publisher works out of the box now. open-build-service-2.9.4/docs/ReleaseNotes-1.5000066400000000000000000000006331332555733200210010ustar00rootroot00000000000000# # Release Notes for openSUSE Build Service 1.5 # Please read the README.SETUP file for initial installation instructions. README.UPDATE has information for updaters. Main new features are: * Product Build Support http://en.opensuse.org/Build_Service/Concepts/Product_Definition * kiwi based image building in general * Enhanced Download on Demand * Embedded build support (EXPERIMENTAL STATUS) open-build-service-2.9.4/docs/ReleaseNotes-1.6000066400000000000000000000004711332555733200210020ustar00rootroot00000000000000# # Release Notes for openSUSE Build Service 1.6 # Please read the README.SETUP file for initial installation instructions. README.UPDATE has information for updaters. Main new features are: * Cross architecture build support via qemu builds * build compare support for detecting unchanged package builds open-build-service-2.9.4/docs/ReleaseNotes-1.7000066400000000000000000000042371332555733200210070ustar00rootroot00000000000000# # Release Notes for openSUSE Build Service 1.7 (PRE-RELEASE) # Please read the README.SETUP file for initial installation instructions. README.UPDATE has information for updaters. Please note that use of the new SLP service discovery support is recommended. bs_worker instances will find all OBS server instances in the entire network. You can disable this via sysconfig settings: /etc/sysconfig/obs-server /etc/sysconfig/obs-worker The component formerly called "frontend" is now called "api". The former "webclient" is now called "webui". New processes are * bs_signerd Needed when a signing key is configured in BSConfig.pm * bs_warden Optionally, detects hanging build hosts * delayed_jobs Needed to run regular jobs on the api instance which create cache content. * Cron jobs api and webui require cronjobs to log the worker status and to generate the load graphs. These are active by default in the obs-server rpm package. Otherwise templates for the cronjobs can be found in the dist/ directory of the git tree. Main new features are: * Rewritten dependency solver in the scheduler which gave a significant speed-up. It uses the SAT solver via the perl-BSSolv module. * Speedup in the webui by using a new XML parser * Attribute system to store all kinds of information for projects, packages or subpackages as defined by the server admin. 
* Extended request handling - new request types (delete or change_devel) - new review mechanism * clean product build support (inside of chroot environment) * Improved source handling of branched packages (conflicts are created less often). The branch command now creates full copies with an additional _link * webui is themeable now * Build trigger reporting, the reason why a package got triggered to build is now stored and can be requested. * Scheduler status reporting for each repository Regression: * The Download on Demand feature has not yet been re-implemented to support the new SAT-based scheduler. Preview features are: * patchinfo (aka maintenance) handling; this is not complete yet. * source service handler to let server and client side modules prepare sources in the same way. open-build-service-2.9.4/docs/ReleaseNotes-1.8000066400000000000000000000021551332555733200210050ustar00rootroot00000000000000 # # OBS Release 1.8 # This release is primarily driven by the parties of the MeeGo project. It will offer a 1.7 code base with the following additional features. * ACL read permission check * Permission support for groups (backport from master) * Download on Demand (backport from master) * KVM Autosetup (backport from master) * CB{pre}install (backport from master) An api database migration is needed after updating from the 1.7 release with packages or plain git checkout. Please read the README.UPDATERS file in that case. OBS Appliance users get all needed update steps automatically after upgrading their appliance. This can be done either via package upgrade or via replacing the appliance image. Changes: * Support new XML structures of OBS 2.0 * backend logging includes time stamps now Fixes: * Fix for calling the patch command, upstream changed it in an incompatible way * webui layout fixes needed after changes on static.opensuse.org * minor stability fixes in backend code * scheduler performance optimization if using remote build service instances. * fixed syntax error in obsworker init script with manual VM configuration open-build-service-2.9.4/docs/ReleaseNotes-2.0000066400000000000000000000060321332555733200207740ustar00rootroot00000000000000# # openSUSE Build Service 2.0 # Please read the README.SETUP file for initial installation instructions or use the OBS Appliance from http://en.opensuse.org/Build_Service/OBS-Appliance README.UPDATERS file has information for updaters. OBS Appliance users who have set up their LVM can just replace their appliance image without data loss. The migration will happen automatically. Known Regressions: ================== * read access feature enhancements of the MeeGo branch (named 1.8 in git) are not included with this release. Do *NOT* update from 1.8 to this release. We will merge these branches in a later release. * API calls which got deprecated with the 1.0 release got removed: /platform (use /build or /search/repository_id instead) /repository (use /build or /search/repository_id instead) /project (use /source instead) /package (use /source instead) * The OBS Appliance comes with package signing enabled by default now. As a consequence some operations like project creation may take a long time if your hardware has no hardware random number generator. (gpg key creation needs a safe random generator source). Features: ========= * Revised webui, new layout with more functionality.
* Anonymous access mode for webui => to be configured in api config/options.yml * Improved permission system: - User group support for roles and requests * Improved request system: - Review mechanism - New "reviewer" role to set default reviewers for a package or project. These can be users or groups. http://en.opensuse.org/Build_Service/Concepts/Review - New requests types for - set bugowner - add role * Project link mechanism http://en.opensuse.org/Build_Service/Concepts/ProjectLinking * Build job scheduling strategies became configurable http://en.opensuse.org/Build_Service/Concepts/BuildSchedulingStrategies * The new fair build job dispatcher now distributes the jobs based on importance and previously used build times. * Build cycle dependencies are accessible via api and webui now. * History for meta files like _meta and _prjconf * Project/Package undelete feature * Source services are stabilized and enabled by default. - default services for downloading and verifying files are available. - basic support in webui for source services * Lots of smaller improvements and bugfixes Changes: ======== * xml files are validated by the api * "expansion error" state got renamed to "unresolved". * New "signing" state. * For cross architecture compiling support: CBpreinstall and CBinstall prjconf directives added. - CBPreinstall: add to preinstall section if emulator_arch - CBinstall: add to install section if emulator_arch - as with VMinstall, there's no dependency resolution. Add all needed dependencies manually. Requirements: ============= * The OBS server should not run on a system with less than 2GB memory. 4GB is recommended, esp. when the same system is also building packages. * Use osc 0.127 or later to get access to the new features. open-build-service-2.9.4/docs/ReleaseNotes-2.1000066400000000000000000000064341332555733200210030ustar00rootroot00000000000000# # openSUSE Build Service 2.1 # Please read the README.SETUP file for initial installation instructions or use the OBS Appliance from http://en.opensuse.org/Build_Service/OBS-Appliance There is also an install medium which installs OBS on hard disk now. README.UPDATERS file has information for updaters. OBS Appliance users who have set up their LVM can just replace their appliance image without data loss. The migration will happen automatically. Known Regressions to 2.0: ========================= - Listing of deleted projects is only allowed for the admin Features: ========= * web interface improvements: - Linked projects and packages are shown if existing. - Source Service Editor can be used to add or remove source services. Also to edit each service's parameters and to trigger a run. - Merged sources of linked packages can be shown and the merged files can be edited. - New submit requests can be created. - Existing repository configuration can be edited. Add or remove paths or architectures. - Additional reviews by users or groups can be added to requests. The reviews can be processed as well in the web interface. - Displayed load diagrams can be configured to any architecture now. - Source history and commit view has been added. - Interface for new user registration. * api - Support cloning and superseding of existing requests. "osc rq clone" can be used to clone packages from an existing request. When submitting these cloned packages the original request gets superseded. - api: Improved LDAP support updating user information from LDAP server (This functionality has been provided by Intel) - Read access control for package sources.
Newly created projects or packages can get the "" flag to hide any access to the sources of a package. This includes access to the source files, source and debug packages and build log. (This functionality has been provided by the LinuxFoundation) - the administrator can configure new user registration in the config/options.yml file to deny it or to allow it only after approval * backend: - Source services to check out from external SCM repositories have been added. This also includes the necessary source services to compress tarballs, use spec/dsc files out of them or to update the version in spec/dsc files. - Accepted submit requests store the exact merged versions to allow later tracking of the requests. osc and the web interface can now show the diffs of accepted requests as a result. - aggregates allow skipping source packages now. (Functionality has been provided by Nokia). - File provides can be mapped to packages now in prjconf - GPG sign key can get modified with an increased expiration date - scheduler kills building jobs when switching to blocked/excluded/disabled state - Cross Build support for MIPS architecture (Functionality has been provided by LinuxFoundation) Changes: ======== * It is recommended to switch to a MySQL database for the webui. Please see README.UPDATERS for details. Requirements: ============= * The OBS server should not run on a system with less than 2GB memory. 4GB is recommended, esp. when the same system is also building packages. * Use osc 0.129 or later to get access to the new features. open-build-service-2.9.4/docs/ReleaseNotes-2.4000066400000000000000000000140101332555733200207730ustar00rootroot00000000000000 # # Open Build Service 2.4 # Please read the README.SETUP file for initial installation instructions or use the OBS Appliance from http://openbuildservice.org/download/ There is also an install medium which installs OBS on hard disk now. dist/README.UPDATERS file has information for updaters. OBS Appliance users who have set up their LVM can just replace their appliance image without data loss. The migration will happen automatically. Regressions: ============ * LDAP support is not tested and in an unknown state (not part of test suite) (A rewrite of it is WIP) * source service editor in webui is not available anymore Changes on purpose: =================== * Migration to Ruby 1.9 and Rails 3.2 * More database constraints have been introduced to avoid inconsistent databases. As a result this may cause problems during update if the database is already inconsistent. Please use the check_database script to fix these kinds of issues: # cd /srv/www/obs/api # RAILS_ENV=production ./script/check_database It may return a number of SQL commands to make the database consistent again. * The request database got moved from the backend to the api. Directly after update all requests seem to have vanished, but a delayed job is importing them again. This can take some hours depending on the amount of requests. * The package copy command does not copy relationships and devel definitions in the package meta anymore * Requests do require a description now in any case * webui configurations got moved from environments/*.rb to config/options.yml. * Package conflicts and obsoletes are taken into account when resolving dependencies for build jobs. This improves build consistency but may also lead to new "unresolvable" errors, since they were ignored so far.
The old behaviour can be restored by adding the following line into the project configuration: ExpandFlags: ignoreconflicts * cross architecture build directives in project configuration CBInstall and CBPreinstall are not supported anymore. Use standard "Install" and "Preinstall" directives instead. => all architectures can be used as hosts for cross builds now. * The api file/distribution.xml is obsolete, the config gets stored in the SQL database now. Existing file gets imported during migration. The list from remote instances can be displayed and updated automatically now. * The api has no html pages anymore. The user and group management went into the standard webui configuration module. Features ======== * Arch Linux package format support got added. * Build Job Constraints handling. It can be used to define build instance requirements for certain build jobs. Either for defined package sources or for all repositories. Documentation about this can be found here: http://openbuildservice.org/help/manuals/obs-reference-guide/cha.obs.build_job_constraints.html * Support of preinstall images, which can be used to speed up build jobs. They need to be built manually, but get automatically used for creating a build instance base in a faster way than just with installing packages. * Static links (without version and release numbers) for binary results in the published repositories can be configured now. This can be done via the "staticlinks" keyword in Repotype: definition. * Build Prerequires can also be used with the FileProvides feature in the build configuration now. * Regular users can copy entire projects (without binary results) now. * Upload of binaries via api is allowed for administrators now. * Data about used resources of build jobs gets collected. However, it is not yet used for build job assignment * Requests can also be listed for given groups now. * Checkout of deleted packages does not require a srcmd5 sum anymore. * Delete Requests can be used to request deletion of repositories from a project. * Support for creating application data xml which can be used by app stores for application-centric browsing versus package browsing. * Support hugetable memory usage for kvm workers. * Support building cross build formats, e.g. building an rpm with a kiwi build description or a deb package via a .spec file. * Efficient transport of product build results from worker to backend. * Secure Boot signing support * aarch64 architecture added (aka armv8 or arm64) * New generic "emulator" virtualization support. Can be used to build inside of system emulators (for foreign hardware for example) * New s390 z/VM virtualization support. * Asynchronous mode for handling interconnects is available. This will avoid scheduler hangups with a slow or broken network to a remote instance. Enable this via $sched_asyncmode = 1; in the BSConfig.pm file. It is the default for new OBS installations. Deprecated: =========== The following calls have been marked as deprecated; they will get removed in OBS 3.0 * api - /person/$LOGIN/group -> use /group?login=$LOGIN instead - /search/project_id -> use /search/project/id instead - /search/package_id -> use /search/package/id instead * The deprecated support to copy qemu binaries from an external build host into the build environment has been removed. This also includes the CBInstall and CBPreinstall project configuration directives. Notes for systems using systemd: ================================ OBS is using init scripts, not systemd style .service files.
This will stay until we switch from SLES 11 to SLES 12 as default production platform. openSUSE installations may use systemd (instead of sysvinit) and it should work in general. Usages like the following do not work: # rcobssrcserver status You will only get the systemd status here. Also stopping services may not kill all processes, which leads to obscure errors when trying to restart them. We have also heard about trashed MySQL databases when using systemd. To avoid these problems you need to switch into the init directory to avoid the systemd mapper: # cd /etc/init.d # ./obssrcserver status|stop|start open-build-service-2.9.4/docs/ReleaseNotes-2.5000066400000000000000000000103221332555733200207760ustar00rootroot00000000000000 # # Open Build Service 2.5 # Please read the README.SETUP file for initial installation instructions or use the OBS Appliance from http://openbuildservice.org/download/ There is also an install medium which installs OBS on hard disk now. dist/README.UPDATERS file has information for updaters. OBS Appliance users who have set up their LVM can just replace their appliance image without data loss. The migration will happen automatically. Features ======== * Support multiple binary backends in one OBS instance. This means that multiple separate servers can host a defined part of the binary results. * The api can be configured to protect access to all newly created projects by default * configuration data which is used by multiple obs parts is stored in the /configuration path. This is the configuration.xml file in the backend. It contains - enabled scheduler architectures - OBS instance name and description * kiwi image building understands "obsrepositories:/" as repository. OBS will expand the repositories as defined in the local project definition. This allows moving kiwi files around without the need to modify them. * Auto cleanup of user ~:branches: projects can be enabled via /configuration * Build constraints can be defined per architecture or per package now * Integrated Notification system. Administrators can activate it globally in the configuration settings. Users can modify these settings on their home page. * Integrated comment system for projects, packages and requests. * Product tracking and product update channel support. * Groups can have email addresses now. * Support usage of kernels from OBS repositories inside of kvm and xen builds * Support ppc64le and m68k architectures * api provides a token system for a secure handling of pre-defined actions. This can be used to update sources of a specific package in the name of a user without the need to expose their credentials. Find details here: http://openbuildservice.org/help/manuals/obs-reference-guide/cha.obs.authorization.token.html * Definition of release channel support (can deliver product specific channels from a pool of build packages). * Manual release mechanism, to release sources and binaries without rebuild. Target definition in the repositories configuration of projects. Changes on purpose: =================== * webui and api have merged into a single rails process. Details are described in the README.Update file. * api is accessible without credentials if the anonymous mode is enabled. * The scheduler states in /build/_workerstates were moved into a partition structure to support multiple backends running in one instance. * Submit requests for entire projects will skip packages which do not contain source changes relative to their linked packages. * Notification system has been moved from the backend to the api.
The backend system is still available but declared deprecated. * The /about api route is publicly accessible now for system live checks. Incompatible changes: ===================== * The /architectures route no longer provides the attributes "recommended" and "available". It just provides a fixed list of all known architectures in general. => Use the /configuration route to configure the available architectures for your instance. This is also used by the backend now. => Define the used architectures for default targets directly in /distribution. => Modification of architectures is not supported in this route anymore. Notes for systems using systemd: ================================ OBS is using init scripts, not systemd style .service files. This will stay until we switch from SLES 11 to SLES 12 as default production platform. openSUSE installations may use systemd (instead of sysvinit) and it should work in general. Usages like the following do not work: # rcobssrcserver status You will only get the systemd status here. Also stopping services may not kill all processes, which leads to obscure errors when trying to restart them. We have also heard about trashed MySQL databases when using systemd. To avoid these problems you need to switch into the init directory to avoid the systemd mapper: # cd /etc/init.d # ./obssrcserver status|stop|start open-build-service-2.9.4/docs/ReleaseNotes-2.6000066400000000000000000000057601332555733200210110ustar00rootroot00000000000000 # # Open Build Service 2.6 # WARNING: WARNING: This is a development release, not for production usage! WARNING: Please read the README.SETUP file for initial installation instructions or use the OBS Appliance from http://openbuildservice.org/download/ There is also an install medium which installs OBS on hard disk now. dist/README.UPDATERS file has information for updaters. OBS Appliance users who have set up their LVM can just replace their appliance image without data loss. The migration will happen automatically. Features ======== * Debian live build image generation support (thanks to Jan Blunck) * webui handles package sources supporting project links. * prioritization system for requests * optional binary package tracking, can be used to track released packages. Especially important for maintained products. * Groups can have maintainers now, who can modify the member list * Requests can have a priority now. Request search sorts by priority first and by request age second. * The request history system got a big overhaul - reviews have a history too now - more fine-grained history elements which can be displayed more flexibly * EXPERIMENTAL: Entire projects can be linked now with frozen source revisions. Changes on purpose: =================== * some delayed jobs run in separate queues now to avoid blocking other jobs Incompatible changes: ===================== * You also get an update of sphinx searchd. The on-disk db files became incompatible, so please remove all files in the following directory to enforce a re-index to avoid problems: /srv/www/obs/api/db/sphinx/production/ * maintenance tag handling via _channel files got replaced with an option to re-define the updateinfo scheme template. We are only aware of one instance using this feature; in case it troubles someone else, please speak up and we will introduce some compatibility handling. * Reworked request history: - We do not support importing history elements of requests anymore. => do not update OBS instances older than 2.4 directly to OBS 2.6!
(we do support updates from 2.5 only anyway) - Also old clients will not show history anymore by default. Use osc 0.148 for full new support of request histories. Notes for systems using systemd: ================================ OBS is using init scripts, not systemd style .service files. This will stay until we switch from SLES 11 to SLES 12 as default production platform. openSUSE installations may use systemd (instead of sysvinit) and it should work in general. Usages like the following do not work: # rcobssrcserver status You will only get the systemd status here. Also stopping services may not kill all processes, which leads to obscure errors when trying to restart them. We have also heard about trashed MySQL databases when using systemd. To avoid these problems you need to switch into the init directory to avoid the systemd mapper: # cd /etc/init.d # ./obssrcserver status|stop|start open-build-service-2.9.4/docs/ReleaseNotes-2.7000066400000000000000000000151301332555733200210020ustar00rootroot00000000000000 # # Open Build Service 2.7 # Please read the README.md file for initial installation instructions or use the OBS Appliance from http://openbuildservice.org/download/ If you already have a running OBS installation and want to update it, please also read the README.UPDATERS file about the necessary steps. Please note that you need to have at least OBS 2.5 to be able to update to the OBS 2.7 release. Migrations from older OBS versions will fail. OBS Appliance users who have set up their LVM according to our documentation can just replace their appliance image without data loss. The migration will happen automatically. OBS 2.7 will be the last release supporting SUSE Linux Enterprise Server 11 as operating system for the server installation. Changes On Purpose ================== * Repository meta data and packages are now signed with sha256 instead of sha1 if RSA is used * Force login for all API calls (Exceptions are the /public, /trigger and /about routes). * The experimental Download On Demand configurations are not supported anymore. A new, fully supported implementation is in place (see below). * Adding git repositories is using the new "obs_scm" service now. * Users are allowed to add themselves in their home project, even when they got removed. Admins can set the user state to "locked" when they want to block them. * The request "addrevision" option uses the unexpanded revision when the "updatelink" option is used * package search for bug issues in attributes is done via an own attribute_issue xpath attribute. The search api used to match source and attribute issues via the same xpath attribute before General Features ================ * Incremental storage of sources from upstream SCM systems * cpio archives are stored incrementally and offered to the build job as a directory. * Download on demand repositories * support changing source repos now * can be configured by the administrator via api or webui * add support for peer fingerprint verification (using gpg or SSL) * Source services can be used during build time * they get executed before the packaging tool (eg. rpm-build) gets started and can be used for creating needed archives.
* New hardware architectures are supported: k1om, aarch64_ilp32 * Many improvements in the maintenance support features * Allow non-obs urls in kiwi build descriptions, map to obs urls if possible * Support embargo of projects (#316776) * Instantiate package api call for projects building service packs * Support tracking of github issues * Groups can have an email address for notifications and contact * New option to avoid usage of defined repositories when branching a package via the OBS:BranchSkipRepositories attribute * Allow setting or modifying the accept_at time in a request; this can also be used to pre-approve requests with open reviews * The frontend will warn you about inconsistencies between the api database and the backend via email * Fix to work when hosted in a subdirectory of the server directory. This change allows OBS API and WebUI to be installed on a web server in a place other than document root, so its URL prefix has a non-empty pathname (like https://example.com/obs instead of https://obs.example.com). Backend Features ================ * make the new repository handling the default now. * bs_admin: * give the admin a chance to prefer publish events with --prefer-publish-event * Basic support for simpleimage Adding support for squashfs and tar.gz binaries and for simpleimage build type (request for build update to support it sent separately) * support buildtime source services * skip them on service server * add dependencies to build env * Improved scheduler speed * collax package format support * Support virt-builder repository meta data * New waitservice and mergeservice commands to handle _service files Worker Features =============== * --vm-enable-console to allow user input for debugging * Support to use docker as virtualization engine Webui Features ================= * Support to add remote repositories * use new obs_scm service for git source URLs * Make it possible to enforce removal of projects which others depend on * kiwi import feature Supports import of standardized kiwi archives (kiwi.txz suffix) * Show all open requests of a project in the UI * Support unlock of projects * All requests tab at user's home with search and order. FIXES: #928 * Restrict supersede requests to same source project * Allow picking requests to supersede on submit requests * Break project names in watchlist Currently long project names are shortened in the watchlist. This makes it quite hard to identify projects in that list. This commit will instead add html zero-width spaces after every colon, which allows HTML to break them if necessary. * Don't truncate project or package names on search results. * Send `Content-Disposition: inline` for text files Without this fix, text files such as _log are served with `Content-Disposition: attachment`, which makes Firefox prompt the user to download the file rather than simply showing the text within the browser. * Improve load time when loading open requests for a user on profile page. Notes for systems using systemd: ================================ OBS is using init scripts, not systemd style .service files. This will stay until we switch from SLES 11 to SLES 12 as default production platform. openSUSE installations may use systemd (instead of sysvinit) and it should work in general. Usages like the following do not work: # rcobssrcserver status You will only get the systemd status here and not the service status. Also stopping services may not terminate all processes, which leads to obscure errors when trying to restart them.
To avoid these problems you need to switch into the init directory to by-pass systemd: # cd /etc/init.d # ./obssrcserver status|stop|start Other small changes =================== * experimental deep_check_dependent_projects_on_macro_change option * experimental view=order mode for _builddepinfo * bs_serverstatus now works on ajax socket * allow getting buildinfo for remote projects * improved scheduling of inter-project dependent repositories * use sign time from old signature when re-signing * support view=versrel in _result query * experimental expandflags:preinstallexpand support * support "repotag" extension to set repository tags * support "singleexport" option to use/publish only the best version * support "simpleimage" build type * support switching the build type from kiwi to spec in followup builds * support triggerservicerun on _product * add --rebuild-full-tree option to bs_admin * implement view=cpioheaders for packages * allow an array ref for $BSConfig::notification_plugin open-build-service-2.9.4/docs/ReleaseNotes-2.8000066400000000000000000000066651332555733200210160ustar00rootroot00000000000000 # # Open Build Service 2.8 # WARNING: WARNING: This is a development release, not for production usage! WARNING: Please read the README.SETUP file for initial installation instructions or use the OBS Appliance from http://openbuildservice.org/download/ There is also an install medium which installs OBS on hard disk now. dist/README.UPDATERS file has information for updaters. OBS Appliance users who have set up their LVM can just replace their appliance image without data loss. The migration will happen automatically. Features ======== UI: * Allow triggering services from the UI. * Show a hint to project maintainers when they are not a package maintainer of the target package of a request * Main projects list is now filtered based on a configurable (by the admin) regular expression * Users can download the public key and SSL certificate for a project via the project home page * import of kiwi build descriptions is supported (obs-service-kiwi_import) API: * Allow admins to lock or delete users and their home projects via a new command * Users can be declared as sub accounts of other users. Useful for automated scripts. * New API route to get public key and SSL certificate: GET /source/:project_name/_keyinfo * New feature toggle config file. Use config/feature.yml to enable/disable features in the OBS. Backend: * multibuild: allow building multiple jobs from one source package without the need to create local links * experimental support of snap package format * workers are now also tracked when they go away (new states "down", "away" and "dead") * worker capabilities can be requested * usable workers can be requested with uncommitted constraints * functionality to remove published packages (osc unpublish) * New obsservicedispatch service to handle source service runs in a queue and asynchronously. * preinstall images can be used for local building * improved speed of diffing sources * Support caching of pulled git sources Shipment: * optional docker container to run source services is provided Wanted changes: =============== * kiwi builds: build configuration changes from the project where the kiwi file is stored always have an effect now. * maintenance_release requests are locking only the source packages on creation now. They don't lock the patchinfos. The project gets locked on release now.
* service wrapper script for LXC got replaced by a docker alternative Other changes ============= * Server side pagination on user show page to improve performance. * The way to identify spiders got changed. A separate configuration via apache is no longer required. See the Administration Guide. * The frontend stack is using ruby 2.4 and rails 5.0.1 now Notes for systems using systemd: ================================ OBS is using init scripts, not systemd style .service files. This will stay until we switch from SLES 11 to SLES 12 as default production platform. openSUSE installations may use systemd (instead of sysvinit) and it should work in general. Usages like the following do not work: # rcobssrcserver status You will only get the systemd status here. Also stopping services may not kill all processes, which leads to obscure errors when trying to restart them. We have also heard about trashed MySQL databases when using systemd. To avoid these problems you need to switch into the init directory to avoid the systemd mapper: # cd /etc/init.d # ./obssrcserver status|stop|start open-build-service-2.9.4/docs/api/000077500000000000000000000000001332555733200167335ustar00rootroot00000000000000open-build-service-2.9.4/docs/api/Makefile000066400000000000000000000006341332555733200203760ustar00rootroot00000000000000apidocs: cd api; make apidocs doc: apidocs xsltproc xs3p.xsl package.xsd >package.html xsltproc xs3p.xsl project.xsd >project.html xsltproc xs3p.xsl platform.xsd >platform.html xsltproc xs3p.xsl projectresult.xsd >projectresult.html xsltproc xs3p.xsl packageresult.xsd >packageresult.html xsltproc xs3p.xsl status.xsd >status.html validate: python validate_xml.py api/ python validate_xml.py backend/ open-build-service-2.9.4/docs/api/api/000077500000000000000000000000001332555733200175045ustar00rootroot00000000000000open-build-service-2.9.4/docs/api/api/Makefile000066400000000000000000000003031332555733200211400ustar00rootroot00000000000000all: @echo "Targets: test apidocs" test: ../restility/bin/rest_test api.txt apidocs: BUNDLE_GEMFILE=../../../src/api/Gemfile bundle exec ../restility/bin/rest_doc api.txt --html -o ../html open-build-service-2.9.4/docs/api/api/about.xml000066400000000000000000000003761332555733200213460ustar00rootroot00000000000000 Open Build Service API API to the Open Build Service 0.1 open-build-service-2.9.4/docs/api/api/about.xsd000066400000000000000000000033121332555733200213350ustar00rootroot00000000000000 This schema describes the format of the about information for the openSUSE API. Top level element for about data. Title of the API. Description of purpose of API. Revision of API. The revision is incremented when the API changes in an incompatible way. Link to documentation of API. open-build-service-2.9.4/docs/api/api/activity.xml000066400000000000000000000002101332555733200220550ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/activity.xsd000066400000000000000000000020441332555733200220600ustar00rootroot00000000000000 Package or project with activity value. open-build-service-2.9.4/docs/api/api/added_timestamp.xml000066400000000000000000000002101332555733200233450ustar00rootroot00000000000000
open-build-service-2.9.4/docs/api/api/aggregate.xml000066400000000000000000000011171332555733200221540ustar00rootroot00000000000000 glibc libz gcc gcc-c++ gcc glibc open-build-service-2.9.4/docs/api/api/aggregate.xsd000066400000000000000000000021211332555733200221460ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/api.txt000066400000000000000000001271421332555733200210250ustar00rootroot00000000000000= Open Build Service API Version: 2.9 Only authenticated users are allowed to access the API. Authentication is done by sending a Basic HTTP Authorisation header. Do NOT simply add api calls here without discussion before. The main base routes are: /source : To handle all source submissions and project setups /build : To access build results and their states /published : Read access to published repositories /search : Access to search interface == Table of Contents Contents: == About GET /about Get information about API. XmlResult: about == Architectures GET /architectures Get a list of all known architectures known to OBS in general. This is not the list of architectures provided by this instance, go to /configuration for this XmlResult: architecture list == Issue Trackers GET /issue_trackers Get the list of available issue trackers XmlResult: issue_trackers : Issue tracker name GET /issue_trackers/ Read issue tracker data. XmlResult: issue_tracker PUT /issue_tracker/ Update issue tracker data. XmlResult: status POST /issue_tracker/ Create new issue tracker. XmlResult: issue_tracker DELETE /issue_tracker/ Delete issue tracker. XmlResult: status GET /issue_trackers/show_url_for Parameters: issue: attribute used for issue search (example: 'bnc#123456') == Distribution List GET /distributions Get the list of base distributions hosted on this OBS instance XmlResult: distributions PUT /distributions Write the list of base distributions hosted on this OBS instance XmlResult: distributions GET /distributions/ Get data of one base distributions hosted on this OBS instance XmlResult: status POST /distributions/ Modifies one base distribution entry. Distro must be hosted on the OBS instance XmlResult: status GET /distributions/include_remotes Get the list of base distributions hosted on this OBS instance and on all used remote instances XmlResult: distributions == User data : Id of user Id of group GET /person/ Read user data. XmlResult: user PUT /person/ Write user data. XmlBody: user XmlResult: status POST /person?cmd=register Can be used to register a new user, if OBS instance is allowing this. POST /person/ cmd=change_password to post the new password in the request body. Just the first line gets used. XmlBody: password XmlResult: status Parameters: cmd: change_password cmd: lock # will lock the user and his home projects cmd: delete # will mark the user as deleted and remove her home projects GET /person//token Lists all authentication token for a user XmlResult: tokenlist POST /person//token Create a new authentication token for this user. It may be limited for a specific package via optional project&package parameters. cmd: create project: create a token for a specific project package: create a token for a specific package XmlResult: status DELETE /person//token/ Delete a specific authentication token. The is listed in GET call. XmlResult: status == Group data GET /group List available groups XmlResult: directory Parameters: login: List available groups of this user. GET /group/ Read group data. XmlResult: group PUT /group/ Write group data. 
POST /group/ Modify group data. Multiple commands for processing a group. add_user: add a user to a group remove_user: remove a user from a group set_email: set email address of group. Parameters: userid: user login name, required for add_user and remove_user command email: email address for set_email command. The group's email gets removed if not defined XmlResult: status DELETE /group/ Delete a group. XmlResult: status == Sources === Projects GET /source/ Read list of projects. XmlResult: directory Parameters: deleted: show deleted projects instead of existing ones POST /source Commands on processing sources globally. Possible commands are branch: branch a set of packages based on attributes or on an existing request orderkiwirepos: sort the repositories inside of a kiwi file according to path relationships createmaintenanceincident: create maintenance incident projects based on attribute search Parameters: linkrev: linked revision, optional (for linkrev=base) attribute: attribute used for package search, default is OBS:Maintained update_project_attribute: attribute name used to find out possible existing update projects of a package request: branch by request, branch all packages in the actions of a request for superseding it noaccess: the newly created project will be read protected target_project: project which will get used or created GET /source//_meta Read project meta file. XmlResult: project PUT /source//_meta Write project meta file. Parameters: comment: comment, optional XmlBody: project XmlResult: status DELETE /source/ Deletes the specified project. All packages of this project are deleted as if a DELETE request were issued for each package. Parameters: force: If force = 1, the project is deleted even if repositories of other projects include a path to a repository from this project. The path in the other repository is replaced by one pointing to 'deleted/standard', preventing the build and publishing of the other repository. comment: comment, optional XmlResult: status GET /source//_project List project files Parameters: meta: switch for _meta files, optional rev: revision, optional XmlResult: project directory.xsd GET /source//_project/ Read a project file. Parameters: meta: switch for _meta files, optional rev: revision, optional GET /source//_attribute/ Get all attributes or a specific one XmlBody: attribute POST /source//_attribute/ Modifies a specific attribute as in body Parameters: comment: comment, optional XmlResult: status DELETE /source//_attribute/ Removes a specific attribute Parameters: comment: comment, optional XmlResult: status GET /source//_config Read project configuration Parameters: rev: revision, mandatory Result: configuration as text/plain PUT /source//_config Change project configuration Parameters: comment: comment, optional XmlResult: status GET /source//_pattern Get list of all patterns set up for this project XmlResult: pattern GET /source//_pattern/ Get pattern XmlResult: pattern PUT /source//_pattern/ Write pattern XmlBody: pattern XmlResult: status DELETE /source//_pattern/ Remove pattern XmlResult: status GET /source//_pubkey Get project GPG key. If the project has no key of its own (default), it uses the first available one in the namespace hierarchy, ending at the global buildservice key. Result: gpgkey DELETE /source//_pubkey Removes the current gpg key. Has no effect if no key is set. XmlResult: status
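Example (illustrative only): reading and re-writing a project description via the /_meta route above, again a hedged sketch in Python; instance URL, credentials and project name are placeholders.

    import requests

    API = "https://api.example.test"      # hypothetical OBS instance
    auth = ("alice", "secret")            # placeholder credentials
    prj = "home:alice"                    # placeholder project

    # GET /source/<project>/_meta returns the "project" XML document
    meta = requests.get(f"{API}/source/{prj}/_meta", auth=auth).text

    # PUT /source/<project>/_meta writes it back; the optional "comment"
    # parameter ends up in the history
    r = requests.put(f"{API}/source/{prj}/_meta",
                     params={"comment": "re-uploaded unchanged"},
                     data=meta, auth=auth)
    print(r.status_code)                  # a "status" XML document is returned
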
POST /source/ Multiple commands for processing sources in a project. Possible commands are createkey: Generate a new gpg key. If the project already has its own gpg key, the old key is discarded. extendkey: Extend the expiration date of gpg keys. undelete: undelete the project and all packages that existed when the project got removed. freezelink: create/update a freeze of a project link showlinked: List all projects linking to this one copy: copy the entire project move: ADMIN ONLY. Schedulers need to be stopped. Moves all sources and binaries over from oproject. createmaintenanceincident: create a single maintenance incident project as a sub project createpatchinfo: create a new patchinfo package collecting all mentioned issues in sources addchannels: add channel packages and repositories for matching packages in project modifychannels: modify existing channels by specified mode set_flag: change a defined flag, requires at least flag and status parameters remove_flag: remove a defined flag, requires at least flag and status parameters lock: lock a project unlock: unlock a locked project release: release sources and binaries according to release target specification Parameters: noaccess: the newly created project will be read protected repository: set_flag for given repository or release just this repository (optional) arch: set_flag for given arch (optional) flag: modify this flag (build/publish/..) for set_flag command status: enable or disable for set_flag command comment: description for the history # for modifychannels and addchannels command only: mode: how to deal with disabled channels: add_disabled (default), skip_disabled, enable_all # for copy command only: oproject: origin project name (required) resign: resign all binaries with new target project key makeolder: make target older, the source vrev is bumped by two numbers and the target by one makeoriginolder: make origin older, the source vrev is extended and target is guaranteed to be newer withhistory: copies sources with history on copy command withbinaries: also copies binaries on copy command noservice: do not run source services setrelease: define a specific release tag when used with "release" command. Setting it to "-" strips the release string. Note: this modifies only the filename. === Packages GET /source/ Read list of packages. XmlResult: package directory.xsd Parameters: deleted: show deleted packages instead of existing ones expand: include also packages from linked projects view: issues, can be used to show all tracked issues for all packages in project productlist, shows all containing products, unifies result when used with expand verboseproductlist, same as productlist, but with detailed information about the product GET /source// Package source listing Parameters: rev: revision of new package, optional linkrev: linked revision, optional emptylink: bool, optional expand: bool, expand links, optional meta: bool, switch to meta files, optional view: The "info" view will show data like source version, md5sums and build description files. May be used together with parse, arch or repository parameter, optional "issues" can be used to show all tracked issues for all packages in project, optional "products" shows all products of a package (works only on "_product" packages) extension: filter for file extension, optional lastworking: bool, show the last mergeable sources in case of conflicting changes, optional withlinked: bool, show all used package containers (in case of multiple link indirections) in linkinfo information, optional deleted: bool, show content of deleted package instance parse: bool, for view=info: take build description into account, optional arch: string, for view=info: parse buildinfo for this architecture, optional repository: string, for view=info: parse buildinfo for this repository, optional product: string, limit the product view to a given product XmlResult: package directory.xsd
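Example (illustrative only): listing package sources with the parameters described above; project and package names are placeholders.

    import requests

    API = "https://api.example.test"      # hypothetical OBS instance
    auth = ("alice", "secret")            # placeholder credentials

    # GET /source/<project>/<package> with expanded links
    r = requests.get(f"{API}/source/home:alice/hello",
                     params={"expand": "1"}, auth=auth)
    print(r.text)                         # directory listing of source files

    # The same route with view=info also parses the build description
    r = requests.get(f"{API}/source/home:alice/hello",
                     params={"view": "info", "parse": "1"}, auth=auth)
    print(r.text)
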
GET /source///_meta Read package meta data. Parameters: rev: revision of new package, optional XmlResult: package PUT /source///_meta Write package meta data. Writing the package meta data commits the package to the build backend. Parameters: comment: comment, optional XmlBody: package XmlResult: status DELETE /source// Deletes the specified package including all source files Parameters: comment: comment, optional XmlResult: status GET /source///_attribute/ Get all attributes or a specific one XmlBody: attribute POST /source///_attribute/ Modifies a specific attribute as in body Parameters: comment: comment, optional XmlResult: status DELETE /source///_attribute/ Removes a specific attribute Parameters: comment: comment, optional XmlResult: status GET /source///_history Get package commit history XmlResult: revisionlist POST /source//?cmd=showlinked List all package instances linking to this one. Result: package list POST /source//?cmd=diff Create a source diff Parameters: rev: revision of new package, optional oproject: old project, optional opackage: old package, optional orev: old revision, optional Result: diff as text/plain POST /source//?cmd=instantiate Instantiate a package container, which is available via project links only so far. Parameters: makeoriginolder: optional, can be used to modify source and target to guarantee that the new version stays older, also on future updates in the older code stream XmlResult: status POST /source//?cmd=release Releases sources and binaries of that package. This requires a release target set in the repository definitions of the project. Also, the trigger must be set to "manual" Parameters: comment: description for the history repository: limit the release to the specified repository target_repository: specify the target repository name (together with target_project) target_project: overwrites the target definition from project meta (repository and target_repository parameter required as well) XmlResult: status POST /source//?cmd=unlock Unlocks a locked package Parameters: comment: description for the history XmlResult: status POST /source//?cmd=branch Create a source link from a package of an existing project to a new subproject of the requester's home project. The default target is home::branches:/ A possibly defined devel project in the package meta data gets ignored. Parameters: ignoredevel: bool, optional target_project: target project name, optional target_package: target package name, optional noaccess: the newly created project will be read protected, bool, optional missingok: the target package does not exist newinstance: the target package exists only via project links, but the link should point to the given project add_repositories: bool, optional, adds repositories based on the source project (default for new projects) update_path_elements: bool, optional, update all path elements if needed (used repositories depend on each other) extend_package_names: bool, optional, extends package and repository names to allow multiple instances of the same package add_repositories_rebuild: use defined rebuild policy for new repos ("transitive", "direct" or "local") or copy it from the source project ("copy") add_repositories_block: use defined block policy for new repos XmlResult: status
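Example (illustrative only): branching a package via cmd=branch as described above; without target parameters the server picks the default home:<login>:branches: target. All names are placeholders.

    import requests

    API = "https://api.example.test"      # hypothetical OBS instance
    auth = ("alice", "secret")            # placeholder credentials

    # POST /source/<project>/<package>?cmd=branch
    r = requests.post(f"{API}/source/openSUSE:Factory/hello",
                      params={"cmd": "branch"}, auth=auth)
    print(r.status_code, r.text)          # "status" XML naming the created target
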
POST /source//?cmd=set_flag Modify or set a defined flag for a package Parameters: repository: set_flag for given repository (optional) arch: set_flag for given arch (optional) flag: modify this flag (build/publish/..) for set_flag command status: enable or disable for set_flag command XmlResult: status POST /source//?cmd=createSpecFileTemplate Create template for RPM SPEC file. Returns an error if the SPEC file already exists. XmlResult: status POST /source//?cmd=commit Commits package changes to the build service Parameters: rev: revision, mandatory comment: comment, optional XmlResult: status POST /source//?cmd=collectbuildenv Creates _buildenv files based on origin package builds. This can be used to re-use the exact older build environment even when further new binary packages got added. For example, to re-build an old maintenance update in the same way. Parameters: oproject: Origin project to copy from (required) opackage: Origin package to copy from (required) comment: Optional comment by the user XmlResult: status POST /source//?cmd=importchannel Import a kiwi channel file for OBS. Project names will be set to update projects if defined. Parameters: target_project: optional target project to set target_repository: optional target repository to set XmlResult: status POST /source//?deleteuploadrev Removes all changes made to the upload revision and reverts to the last revision Parameters: none XmlResult: status === Source files : File name GET /source// Get directory listing of all source files in the package Parameters: rev: package source revision, optional linkrev: linked revision, optional expand: expand links, optional meta: switch to meta files lastworking: auto detect last working link revision, optional view: The "cpio" view will stream all files as cpio, optional extension: filter for file extension, optional GET /source/// Read source file. Result: Content of file Parameters: meta: switch to meta files PUT /source/// Write source file. Parameters: rev: if set to 'upload', multiple files can be uploaded one by one in one commit, before finishing the commit with cmd=commit (see below), optional comment: comment, optional keeplink: bool, optional meta: switch to meta files Body: Content of file XmlResult: status DELETE /source/// Delete source file. XmlResult: status Parameters: meta: switch to meta files POST /source// Multiple commands for processing sources in a package.
Possible commands are diff: for server side diff linkdiff: for server side diff of a linked or branched package servicediff: shows the changes of the service run commit: commit files in upload revision commitfilelist: commit defined files in upload revision deleteuploadrev: delete all uploaded sources which are not committed yet copy: copy package sources from another package undelete: undelete the package unlock: unlock a package with lock enabled. A comment is required. release: release sources and binaries according to release target specification branch: branch a package into another one linktobranch: convert a plain source link into a full branch enablechannel: adds repositories and enables this channel package updatepatchinfo: update the _patchinfo file, especially the issues list remove_flag: remove a specific flag from meta (flag must be defined, optionally arch and repository) set_flag: set a specific flag in meta (flag must be defined, optionally arch and repository) showlinked: show all source packages linking to this one deleteuploadrev: delete all uploaded, but not yet committed files. rebuild: rebuild all builds getprojectservices: list all services defined in project space for this package. runservice: trigger run of defined services in _service file waitservice: returns when all services have finished, code 200 when service run was successful mergeservice: drops the _service file and commits all server side generated files time: set the time on the undelete operation (admin-only operation) wipe: wipe all build results of this package Parameters: rev: package source revision, optional linkrev: linked revision, optional orev: origin package source revision as defined in opackage/project, optional olinkrev: origin linked revision, optional oproject: origin project, used as base project opackage: origin package, used as base package requestid: log the requestid in source history, optional (copy and commitfilelist only) expand: expand links, optional keeplink: keep link on source commit, optional repairlink: repair link on source commit, optional dontupdatesource: Do not update origin package, optional (copy only) noservice: do not run source services comment: comment for history, optional meta: switch to meta files arch: architecture when using flag modifying command repository: repository when using flag modifying command view: may be "xml" for a structured answer (for diff commands) withissues: set to get issues parsed from changelogs (for diff commands) onlyissues: used to limit to issues (for diff commands) setrelease: define a specific release tag when used with "release" command. Setting it to "-" strips the release string. withvalidate: activate sha validation code withvrev: copy also the vrev counter of the revision GET /source////_attribute/ Get all attributes or a specific one XmlBody: attribute POST /source////_attribute/ Modifies a specific attribute as in body Parameters: comment: comment, optional XmlResult: status DELETE /source////_attribute/ Removes a specific attribute Parameters: comment: comment, optional XmlResult: status == Requests GET /request Get a list of requests. When using "view=collection", you also need to filter either by user, project or package. Parameters: view: collection, return a collection of requests instead of directory listing user: filter for given user, includes all target projects and packages where the user is maintainer and also open review requests project: limit the result to the defined target project or review requests package: limit the result to the defined target package or review requests states: filter for given request state, multiple matches can be added as comma separated list (eg states=new,review) types: filter for given action types (comma separated) roles: filter for given roles (creator, maintainer, reviewer, source or target) withhistory: includes the request history in result withfullhistory: includes the request and review history in result limit: to limit the result to the given number of requests XmlResult: collection
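Example (illustrative only): fetching a request collection with the filters described above; the project name is a placeholder.

    import requests

    API = "https://api.example.test"      # hypothetical OBS instance
    auth = ("alice", "secret")            # placeholder credentials

    # GET /request?view=collection, filtered by target project and states
    r = requests.get(f"{API}/request",
                     params={"view": "collection",
                             "project": "home:alice",
                             "states": "new,review"}, auth=auth)
    print(r.text)                         # "collection" XML of matching requests
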
GET /request/ Get a request Parameters: withhistory: includes the request history in result withfullhistory: includes the request and review history in result XmlResult: request POST /request Create a new request XmlResult: request Commands on processing requests create: to create a new request Parameters: addrevision: ask the server to add revisions of current sources to the request PUT /request/ Modify a request. NOTE: Only admins can change all parts of a request. XmlResult: request POST /request/?cmd=diff Shows the diff of all affected packages. Result: diff as text/plain POST /request/ Modify a request XmlResult: request Commands on processing requests addreview: Adds a review to a request assignreview: Adds a review for a user and accepts it for the used group changestate: Modifies the state of a request changereviewstate: Modifies the state of a review inside a request setpriority: Modifies the priority of a request setincident: Change the target incident for maintenance_incident actions setacceptat: Set or modify the accept_at time. Either specified by time parameter or now. Parameters: newstate: Define the new state priority: Define the new priority by_user: Specify the user of the new review by_group: Specify the group of the new review by_project: Specify the project of the new review by_package: Specify the package of the new review incident: Specify incident number for setincident time: Specify time for setacceptat
== Attribute definition api GET /attribute/ List all attribute namespaces XmlResult: directory GET /attribute// List all attributes under given namespace XmlResult: directory GET /attribute//_meta shows namespace setup XmlResult: attribute_namespace_meta DELETE /attribute//_meta Delete an attribute namespace and all attributes below XmlResult: status PUT /attribute//_meta change attribute namespace meta XmlBody: attribute_namespace_meta_data XmlResult: status GET /attribute///_meta shows attribute setup XmlResult: attribute_meta DELETE /attribute///_meta Delete an attribute and all its values in projects or packages XmlResult: status PUT /attribute///_meta change attribute meta XmlBody: attribute_meta_data XmlResult: status == Comments api GET /comments/request/:id GET /comments/project/:name GET /comments/package/:project/:pname Get all comments for the object XmlResult: directory POST /comments/request/:id?parent_id= POST /comments/project/:name?parent_id= POST /comments/package/:project/:pname?parent_id= Create a comment for the object XmlResult: status DELETE /comment/:id Delete a comment specified by the comment id XmlResult: status == Build Results : Build repository GET /build/ List all repositories XmlResult: directory GET /build/ List all repositories of the specified project XmlResult: directory GET /build// List all architectures of the specified project repository XmlResult: directory GET /build// List all packages used in this project repository for the given architecture. XmlResult: directory === Binaries GET /build//// Get list of binaries built by the sources of the given package Result: binarylist GET /build///// Get single binary from build results of given package Result: binary file GET /build/////?view=fileinfo GET /build/////?view=fileinfo_ext Get information about the binary from build results of given package Result: fileinfo GET /build////_builddepinfo POST /build////_builddepinfo Shows all build dependencies of one or more packages; a change in any of them will trigger a build. A changed dependency can be posted to let the server recalculate the order including the local dependency changes. Parameters: package= filter package container, can be used multiple times view=pkgnames show package names instead of binary names view=revpkgnames show which packages will be triggered if the package is changed view=order sort packages ordered by dependencies Result: build dependencies GET /build////_jobhistory?package=&code=succeeded&limit=10 Get the build log of all finished builds in this repository, including time and trigger reason. Optional filtering for one or more packages/codes is possible. Result: jobhistory GET /build////_repository Get list of binaries in given repository (binaries produced by all packages of the project) Result: binarylist POST /build////_repository?match= Uploads binaries to a given repository. ADMIN ONLY Result: status PUT /build////_repository/ Uploads binaries into the repository. ADMIN ONLY. Result: status
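Example (illustrative only): listing the binaries a package produced and downloading one of them; project, repository, architecture, package and file names are all placeholders.

    import requests

    API = "https://api.example.test"      # hypothetical OBS instance
    auth = ("alice", "secret")            # placeholder credentials
    base = f"{API}/build/home:alice/openSUSE_Leap/x86_64/hello"

    # GET /build/<project>/<repository>/<arch>/<package> -> "binarylist" XML
    print(requests.get(base, auth=auth).text)

    # GET .../<binary> streams the binary file itself
    r = requests.get(f"{base}/hello-1.0-1.x86_64.rpm", auth=auth, stream=True)
    with open("hello-1.0-1.x86_64.rpm", "wb") as f:
        for chunk in r.iter_content(64 * 1024):
            f.write(chunk)
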
GET /build////_repository/
  Get single binary from the given repository
  Result: binary file

=== Status

GET /build//_result
  Return build results for the packages, architectures and repositories
  specified by the parameters. If no parameters are given, all results for
  the project are returned. The view parameter specifies which sections are
  included in the results. view=summary includes the summary of the status
  values. view=status includes detailed status information. view=binarylist
  includes the list of generated binary files. If no view parameter is given,
  view=status is assumed. To combine views the parameter can be given
  multiple times.
  Parameters:
    package: package name, optional, multiple
    arch: architecture, optional, multiple
    repository: name of repository, optional, multiple
    view: summary | status | binarylist
    lastbuild: bool, show last build result (avoiding current building job state)
    localbuild: bool, include build results from packages with project local links
    multibuild: bool, include build results from _multibuild definitions
  XmlResult: buildresult

GET /build/////_history
  Get build history
  XmlResult: buildhistory

GET /build/////_reason
  Detailed reason why the last build got triggered. This may be caused by a
  source change, a meta change (a binary package below changed) or a release
  number sync. A user-triggered build will show up as source change.
  XmlResult: buildreason

GET /build/////_jobstatus
  Get build status of a currently running build job, or an empty result if no
  job is running.
  XmlResult: jobstatus

GET /build/////_status
  Get build status of the specified project/package/repo/arch combination
  XmlResult: buildstatus

GET /build/////_log
  Get build log.
  Parameters:
    nostream: do not hang if the build is currently running
    last: show log from last finished build
    start: start at a given number of bytes
    end: stop after the given number of bytes
    view: special view instead of the plain logfile. "entry" shows the size
          and mtime of the logfile.
  Result: Build log as text file.

=== Worker

GET /worker/_status
  Lists all running jobs, waiting jobs, status of the backend services and
  general statistics.
  XmlResult: workerstatus

GET /worker/
  Lists all capabilities of the worker. Can be used for constraints.
  XmlResult: workercapability

POST /worker?cmd=checkconstraints
  Calculates the possible workers for a given constraints rule. Further
  required parameters are:
  Parameters:
    project: project name
    package: package name
    arch: architecture
    repository: name of repository
  XmlResult: directory
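For the status routes above, a small sketch that checks the overall result of
a project and then fetches a finished build log without blocking on a running
build (all names after /build/ are placeholder assumptions):

  import requests

  API = "https://api.example.org"   # placeholder OBS API instance
  AUTH = ("user", "password")

  # Summary of all build results of a project
  res = requests.get(API + "/build/home:alice/_result",
                     params={"view": "summary"}, auth=AUTH)
  print(res.text)                   # XML buildresult document

  # Last finished build log; nostream avoids hanging on a running build
  log = requests.get(API + "/build/home:alice/openSUSE_Leap/x86_64/mypkg/_log",
                     params={"nostream": "1", "last": "1"}, auth=AUTH)
  print(log.text[-500:])            # tail of the plain text log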
=== Control

POST /build/?cmd=rebuild
  Triggers package rebuild for the repositories/architectures of the package
  specified by the parameters. If no parameters are given, all packages of
  the project are completely rebuilt. Possible values for the code parameter are:
    succeeded - build succeeded
    failed - build failed
    disabled - build is disabled in package config
    excluded - build is excluded in spec file
    scheduled - package is ready to be built
    building - package is building on a worker
    broken - package source is bad (i.e. no specfile)
    unresolvable - build needs unavailable binary packages
  Parameters:
    package: package name, optional, multiple
    arch: architecture, optional, multiple
    repository: name of repository, optional, multiple
    code: build status code, optional, multiple
  XmlResult: status

POST /build/?cmd=abortbuild
  Kill all running builds, marking them as failed
  Parameters: see cmd=rebuild

POST /build/?cmd=restartbuild
  Restart all running builds
  Parameters: see cmd=rebuild

POST /build/?cmd=unpublish
  Delete all binary packages from the publish area
  Parameters: see cmd=rebuild

POST /build/?cmd=wipe
  Delete all binary packages from the build area
  Parameters: see cmd=rebuild

=== Local Build

GET /build/////_buildinfo
  Get build information for local building
  XmlResult: buildinfo

POST /build/////_buildinfo
  Get build info for local building using the POSTed specfile. can be
  "_repository", if the designated package does not yet exist on the server.
  Useful for a local build test before committing the initial package.
  Body: specfile
  XmlResult: buildinfo

=== Trigger area

POST /trigger/runservice
  This call needs a token created via the "createtoken" command to identify
  the user and package which shall be updated via a source service. The token
  must be delivered as HTTP header: Authorization: Token
  Parameters:
    project: To be used together with "package" to identify the object.
    package:
  XmlResult: status

=== Repository Information

GET /build////_repository
  Returns list of binaries contained in the specified repository
  XmlResult: binarylist

GET /build////_repository/
  Returns binary
  Result: binary file

GET /build////
  Returns list of binaries contained in the specified repository
  XmlResult: binarylist

GET /build/////_buildinfo
  Build info according to the committed sources
  XmlResult: buildinfo

POST /build/////_buildinfo
  Build info according to the uploaded sources
  XmlResult: buildinfo

GET /build////_builddepinfo
  Returns dependency information of packages in the specified repository. One
  or more packages can be specified with the 'package' parameter. By default
  dependencies for all packages are returned.
  XmlResult: builddepinfo

GET /build///_buildconfig
  Build configuration for this repository, all base package requirements,
  mappings and macros.
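The trigger route described above authenticates with a token instead of basic
auth. A sketch, assuming a token string created earlier via the "createtoken"
command (the token value and object names are placeholders):

  import requests

  API = "https://api.example.org"       # placeholder OBS API instance
  TOKEN = "THETOKENSECRET"              # placeholder token string

  # Run the source services of the package the token was created for
  r = requests.post(API + "/trigger/runservice",
                    params={"project": "home:alice", "package": "mypkg"},
                    headers={"Authorization": "Token " + TOKEN})
  print(r.status_code, r.text)          # XML status document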
== Search

GET /search/project
  Searches for project metadata using xpath. An xpath predicate has to be
  specified using the match parameter. The predicate will be used in this
  expression: /project[]. Only complete meta files will be returned.
  Parameters:
    match: xpath predicate, mandatory
  XmlResult: collection

GET /search/project/id
  Searches for project metadata analogous to /search/project, only the root
  element is returned without any children.
  Parameters:
    match: xpath predicate, mandatory
  XmlResult: collection

GET /search/package
  Searches for package metadata using xpath. An xpath predicate has to be
  specified using the match parameter. The predicate will be used in this
  expression: /package[]. Only complete meta files will be returned.
  Parameters:
    match: xpath predicate, mandatory
  XmlResult: collection

GET /search/package/id
  Searches for package metadata analogous to /search/package, only the root
  element is returned without any children.
  Parameters:
    match: xpath predicate, mandatory
  XmlResult: collection

GET /search/published/binary/id
  Search for currently available binaries in the publish area.
  Parameters:
    match: xpath predicate, mandatory
  XmlResult: collection

GET /search/published/pattern/id
  Search for published patterns
  Parameters:
    match: xpath predicate, mandatory
  XmlResult: collection

GET /search/channel/binary
  Search for packages, sources or binaries which are referenced in channel
  files. Unlike the released/binary search, this already works after setting
  up a channel, even without releasing files.
  Parameters:
    match: xpath predicate, mandatory
  XmlResult: collection

GET /search/channel/binary/id
  Search for packages, sources or binaries which are referenced in channel
  files. Unlike the released/binary search, this already works after setting
  up a channel, even without releasing files. This is the short version
  without full release data information.
  Parameters:
    match: xpath predicate, mandatory
  XmlResult: collection

GET /search/released/binary
  Search for binaries which got released. This works only for binaries
  published via the release mechanism. It also includes binaries which got
  removed again. It is recommended to specify at least the @name binary
  package name or the @disturl.
  Parameters:
    match: xpath predicate, mandatory
  XmlResult: collection

GET /search/released/binary/id
  Search for binaries which got released. This works only for binaries
  published via the release mechanism. It also includes binaries which got
  removed again. This is the short version without full release data information.
  Parameters:
    match: xpath predicate, mandatory
  XmlResult: collection

GET /search/request
  Searches for requests using xpath. An xpath predicate has to be specified
  using the match parameter. The predicate will be used in this expression:
  /request[]. Only complete meta files will be returned.
  Parameters:
    match: xpath predicate, mandatory
    withhistory: includes the request history in result
    withfullhistory: includes the request and review history in result
  XmlResult: collection

GET /search/issue
  Searches for issue metadata using xpath. An xpath predicate has to be
  specified using the match parameter. The predicate will be used in this
  expression: /issue[]. Only complete issue information will be returned.
  Parameters:
    match: xpath predicate, mandatory
  XmlResult: collection

GET /search/owner
  Search for the default responsible person or group. With the binary
  parameter the lookup happens via a built binary name. With the user or
  group parameter all sources they are responsible for are listed. Either the
  binary, user or group parameter is required.
  Parameters:
    binary: specify the binary package to search for
    user: specify a user login name to list their packages
    group: specify a group title name to list their packages
    devel: true/false: include devel package definitions?
    limit: limit the number of results. Default is "1" single result. Use 0
           for all hits, -1 for deepest match. This works only when used
           together with the binary search; otherwise all items are always returned.
    project: specify project to search in
    filter: comma separated list of role names to be taken into account
    attribute: specify the attribute which is marking the default project(s).
               Default is OBS:OwnerRootProject
  XmlResult: collection

GET /search/missing_owner
  Search for missing definitions of a specific role. No parameter is required
  by default.
  Parameters:
    devel: true/false: include devel package definitions?
    limit: limit the number of results.
    project: specify project to search in
    filter: comma separated list of role names to be taken into account
    attribute: specify the attribute which is marking the default project(s).
               Default is OBS:OwnerRootProject
  XmlResult: collection
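A sketch of the xpath searches above; the predicate is an illustrative
assumption and is URL-encoded by the library:

  import requests

  API = "https://api.example.org"   # placeholder OBS API instance
  AUTH = ("user", "password")

  # All new requests targeting a given project (placeholder predicate)
  match = "state/@name='new' and action/target/@project='home:alice'"
  r = requests.get(API + "/search/request",
                   params={"match": match}, auth=AUTH)
  print(r.text)                     # XML collection of matching requests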
== Published binary package tree

GET /published
  List of published projects
  XmlResult: directory

GET /published/
  List of repositories of published projects
  XmlResult: directory

GET /published//
  List of published repositories for the given project/repo
  XmlResult: directory

GET /published///
  List of published binaries for the given project/repo/arch
  XmlResult: directory

GET /published////
  Download published binary
  NOTE: use this only if you absolutely have to, as it doesn't use the redirector.
  Result: binary

GET /published////?view=ymp
  Generate an ymp pattern that includes the needed repositories to install
  the given binary
  XmlResult: ymp

== Build Results (Legacy)

This section describes the obsolete API for build results. It will be
replaced by the API available under /build.

=== Build Results

GET /result///result
  Read project summary result.
  XmlResult: projectresult

GET /result////result
  Read package result.
  XmlResult: packageresult

GET /result/////log
  Read build log.
  Result: Build log as text file.

== Statistics

: limit count of results. optional, defaults to 10.
: group results by: project, package, repo or arch.
: can be projects or packages. optional, defaults to packages

GET /statistics/latest_added?limit=
  Get a list of packages and projects (mixed) latest added to the build
  service. All entries are sorted by creation time.
  XmlResult: latest_added

GET /statistics/added_timestamp//
  Get timestamp when project or package was added to the build service.
  XmlResult: added_timestamp

GET /statistics/latest_updated?limit=
  Get a list of packages and projects that were last updated. All entries are
  sorted by the update timestamp.
  XmlResult: latest_updated

GET /statistics/updated_timestamp//
  Get timestamp when project or package was last updated.
  XmlResult: updated_timestamp

GET /statistics/activity//
  Get activity in % of project or package.
  XmlResult: activity

GET /statistics/most_active?type=&limit=
  Get list of most active packages (type=packages) or projects
  (type=projects). Also returns count of updates since package was created
  when type=packages. Also returns count of packages that are in this project
  when type=projects.
  XmlResult: most_active

GET /statistics/highest_rated?limit=
  Get list of highest rated projects and packages. Results are sorted by
  score. Only items with more than 3 ratings will show up in this list.
  XmlResult: highest_rated

GET /statistics/rating//
  Get rating of a specific project or package. Also returns what score the
  logged-in user gave and how many ratings there are already for the
  specified object.
  XmlResult: rating

PUT /statistics/rating//
  Rate this project / package.
  XmlResult: rating

GET /statistics/download_counter?limit=
  Get download counters for top downloaded files including to which project,
  package, repository and architecture they belong.
  XmlResult: download_counter

GET /statistics/download_counter?group_by=&limit=
  Get summarized download counters for top downloaded projects, packages,
  repositories or architectures (by setting the group_by parameter to
  project, package, repo or arch) including the count of files that belong
  to the respective object.
  XmlResult: download_counter_summary

PUT /statistics/redirect_stats
  Send download statistics from the openSUSE download redirector to the build
  service api, to update the download_counter database. User needs to have
  appropriate permissions.
  XmlResult: redirect_stats

GET /statistics/newest_stats
  Get the timestamp of the newest stats in the build service.
This is useful for the create_stats_xml.rb script. Using this value it can import only those statistics that changed from the last import of statistics. If there are no statistics yet, returns "1970-01-01T01:00:00+01:00" XmlResult: newest_stats == Status Messages : limit count of messages. optional, defaults to unlimited. GET /status_message/?limit= Get a list of status messages. XmlResult: status_messages PUT /status_message/ Send a new status message to the build service. User needs to have appropriate permissions. XmlResult: status_message == Messages (for projects/packages) : message id : limit count of messages. optional, defaults to unlimited. GET /message/ Get (one) message specified by id. XmlResult: messages GET /message/?limit= Get a list of messages, independent of project or package. All entries are ordered by creation time (latest first). XmlResult: messages GET /message/?project= Get a list of messages for this package. All entries are ordered by creation time (latest first). XmlResult: messages GET /message/?project=&package= Get a list of messages for this package. All entries are ordered by creation time (latest first). XmlResult: messages PUT /message/?project=&package= Append message to the specified package (or project, if package parameter is omitted). XmlBody :message XmlResult: message == Internal only routes /public shall not be used in any tools, it is for OBS remote support only and may change or disappear at any time. The route is only working when anonymous mode is enabled. open-build-service-2.9.4/docs/api/api/architecture.rng000066400000000000000000000007731332555733200227050ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/architecture.xml000066400000000000000000000001061332555733200227050ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/attrib.xml000066400000000000000000000002521332555733200215120ustar00rootroot00000000000000 A B open-build-service-2.9.4/docs/api/api/attrib.xsd000066400000000000000000000017551332555733200215210ustar00rootroot00000000000000 This schema describes one attribute A attribute open-build-service-2.9.4/docs/api/api/attrib_type.rng000066400000000000000000000040201332555733200225360ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/attrib_type.xml000066400000000000000000000007341332555733200225600ustar00rootroot00000000000000 A long description of this attribute. A B A B C 2 open-build-service-2.9.4/docs/api/api/attribute_namespace_meta.xml000066400000000000000000000002371332555733200252550ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/attribute_namespace_meta.xsd000066400000000000000000000020351332555733200252510ustar00rootroot00000000000000 This schema describes the system wide attribute definitions for the OBS. To specify access lists of a namespace or attribute. 
Attribute namespace defintions open-build-service-2.9.4/docs/api/api/binary_released.rng000066400000000000000000000071521332555733200233510ustar00rootroot00000000000000 added modified open-build-service-2.9.4/docs/api/api/binary_released.xml000066400000000000000000000011641332555733200233600ustar00rootroot00000000000000 added l3 Iggy obs://build.opensuse.org/My:Maintenance:2793/openSUSE_13.1_Update/904dbf574823ac4ca7501a1f4dca0e68-package.openSUSE_13.1_Update open-build-service-2.9.4/docs/api/api/buildhistory.xml000066400000000000000000000031121332555733200227440ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/buildhistory.xsd000066400000000000000000000016561332555733200227550ustar00rootroot00000000000000 This schema describes the response data of a succesful buildhistory request to the buildservice API open-build-service-2.9.4/docs/api/api/buildresult.xml000066400000000000000000000024471332555733200225730ustar00rootroot00000000000000
RPM exit code -7
nothing provides libqt4-devel
RPM exit code -7
building on build27/1
nail
nothing provides libqt4-devel
open-build-service-2.9.4/docs/api/api/channel.rng000066400000000000000000000047401332555733200216310ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/channel.xml000066400000000000000000000007761332555733200216500ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/configuration.rng000066400000000000000000000171751332555733200230760ustar00rootroot00000000000000 on off on off on off on off on off allow confirmation deny on off on off on off on off on off on off open-build-service-2.9.4/docs/api/api/configuration.xml000066400000000000000000000005471332555733200231030ustar00rootroot00000000000000 MyText openSUSE Build Service ^home:.+ home projects i586 open-build-service-2.9.4/docs/api/api/constraints.rng000066400000000000000000000071311332555733200225650ustar00rootroot00000000000000 false true false true open-build-service-2.9.4/docs/api/api/constraints.xml000066400000000000000000000020311332555733200225710ustar00rootroot00000000000000 reference_benchmark_host chroot 3.42 1.0 default fpu mmx 2 1 1.5 512 i586 x86_64 kernel-default kernel-smp 8 2 open-build-service-2.9.4/docs/api/api/directory.xml000066400000000000000000000002351332555733200222320ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/directory.xsd000066400000000000000000000051301332555733200222270ustar00rootroot00000000000000 This schema specifies the format of a directory listing. Directory listing. One entry in the directory. It's identified by its name. Information about the source link. Information about source service run of last commit. open-build-service-2.9.4/docs/api/api/directory_view.xml000066400000000000000000000003321332555733200232620ustar00rootroot00000000000000 KDE4 for Factory KDE 4 for SLES 10 open-build-service-2.9.4/docs/api/api/directory_view.xsd000066400000000000000000000021311332555733200232570ustar00rootroot00000000000000 This schema specifies the format of a directory listing. Directory listing. One entry in the directory. It's identified by its name. Depending on the requested view it may contain additional data as subelements. open-build-service-2.9.4/docs/api/api/distributions.rng000066400000000000000000000036201332555733200231170ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/distributions.xml000066400000000000000000000027471332555733200231420ustar00rootroot00000000000000 openSUSE 11.0 openSUSE:11.0 standard openSUSE_11.0 i586 x86_64 ppc ppc64 http://www.opensuse.org openSUSE 10.3 openSUSE:10.3 openSUSE_10.3 standard http://www.opensuse.org Fedora 9 Fedora:9 Fedora_9 standard i586 x86_64 http://www.fedoraproject.org open-build-service-2.9.4/docs/api/api/download_counter.xml000066400000000000000000000037371332555733200236060ustar00rootroot00000000000000 5753 4773 4295 4240 4195 4186 4145 4096 4064 4036 open-build-service-2.9.4/docs/api/api/download_counter.xsd000066400000000000000000000043651332555733200236020ustar00rootroot00000000000000 Download counter - top build service downloads. Timestamp of first counted download (of all counters). Timestamp of last counted download (of all counters). Sum of all counted downloads of the selected objects (project/package/repo/arch). Sum of all counted downloads (of the whole build service). 
open-build-service-2.9.4/docs/api/api/download_counter_summary.xml000066400000000000000000000013141332555733200253500ustar00rootroot00000000000000 982023 104497 89723 44258 32326 20760 15245 13324 12865 12681 open-build-service-2.9.4/docs/api/api/download_counter_summary.xsd000066400000000000000000000036771332555733200253640ustar00rootroot00000000000000 Download counter - top build service downloads. Timestamp of first counted download (of all counters). Timestamp of last counted download (of all counters). Sum of all counted downloads (of the whole build service). Sum of all different files that are in this container (project, package, repo or arch). open-build-service-2.9.4/docs/api/api/download_stats.xml000066400000000000000000000011331332555733200232510ustar00rootroot00000000000000 438 17324 14724 21724 43458 4845 438 47346874658 4835465445 open-build-service-2.9.4/docs/api/api/download_stats.xsd000066400000000000000000000024231332555733200232520ustar00rootroot00000000000000 Download statistics from redirector. Project for which we have new download counters. It's identified by its name. Package for which we have a new download counter. It's identified by its name and has the count as value. open-build-service-2.9.4/docs/api/api/group.rng000066400000000000000000000017511332555733200213540ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/group.xml000066400000000000000000000003101332555733200213540ustar00rootroot00000000000000 review-team review@openbuildservice.org open-build-service-2.9.4/docs/api/api/highest_rated.xml000066400000000000000000000004651332555733200230450ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/highest_rated.xsd000066400000000000000000000035071332555733200230430ustar00rootroot00000000000000 List of the highest rated packages and projects of the openSUSE build service ordered by rating. Count of votes / ratings for this package. Count of votes / ratings for this project. 
open-build-service-2.9.4/docs/api/api/ichain.test000066400000000000000000000027121332555733200216420ustar00rootroot00000000000000@arg_project="FATE" @arg_package="FATE-stable" @arg_filename="fate.spec.spec" @arg_revision="1" @arg_userid="freitag" @arg_comment="Testcomment" @arg_platform="SLE10" @arg_arch="x86_64" @data_body="test" @show_passed = true @arg_rpmname = "fate-1.2.1-3.1.x86_64.rpm" alias_host "api.opensuse.org", "apitest.opensuse.org" request "GET /about", 401, false request "GET /source/", 401, false request "GET /source//_meta", 401, false request "PUT /source//_meta", 401, false request "GET /source///_meta", 401, false request "PUT /source///_meta", 401, false request "GET /source///", 401, false request "PUT /source///", 401, false request "DELETE /source///", 401, false request "POST /source//?cmd=createSpecFileTemplate", 401, false request "POST /source//?cmd=rebuild", 401, false request "POST /source//?cmd=commit&rev=&comment=&user=", 401, false request "GET /platform/", 401, false request "GET /person/", 401, false request "PUT /person/", 401, false request "GET /rpm/////", 401, false request "GET /result///result", 401, false request "GET /result////result", 401, false request "GET /result/////log", 401, false open-build-service-2.9.4/docs/api/api/issue.rng000066400000000000000000000024101332555733200213410ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/issue_tracker.rng000066400000000000000000000004431332555733200230600ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/issue_tracker.xml000066400000000000000000000005021332555733200230660ustar00rootroot00000000000000 Novell Bugzilla bugzilla bnc bnc#\d+ https://bugzilla.novell.com/show_bug.cgi?id=@@@ https://bugzilla.novell.com/ open-build-service-2.9.4/docs/api/api/issue_trackers.rng000066400000000000000000000007461332555733200232510ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/issue_trackers.xml000066400000000000000000000174611332555733200232650ustar00rootroot00000000000000 boost trac Boost Trac https://svn.boost.org/trac/boost/ https://svn.boost.org/trac/boost/ticket/@@@ boost#(\d+) false bco bugzilla Clutter Project Bugzilla http://bugzilla.clutter-project.org/ http://bugzilla.clutter-project.org/show_bug.cgi?id=@@@ bco#(\d+) false RT other CPAN Bugs https://rt.cpan.org/ http://rt.cpan.org/Public/Bug/Display.html?id=@@@ RT#(\d+) false deb bugzilla Debian Bugzilla http://bugs.debian.org/ http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=@@@ deb#(\d+) false fdo bugzilla Freedesktop.org Bugzilla https://bugs.freedesktop.org/ https://bugs.freedesktop.org/show_bug.cgi?id=@@@ fdo#(\d+) false GCC bugzilla GCC Bugzilla http://gcc.gnu.org/bugzilla/ http://gcc.gnu.org/bugzilla/show_bug.cgi?id=@@@ GCC#(\d+) false bgo bugzilla Gnome Bugzilla https://bugzilla.gnome.org/ https://bugzilla.gnome.org/show_bug.cgi?id=@@@ bgo#(\d+) false bio bugzilla Icculus.org Bugzilla https://bugzilla.icculus.org/ https://bugzilla.icculus.org/show_bug.cgi?id=@@@ bio#(\d+) false bko bugzilla Kernel.org Bugzilla https://bugzilla.kernel.org/ https://bugzilla.kernel.org/show_bug.cgi?id=@@@ (?:Kernel|K|bko)#(\d+) false kde bugzilla KDE Bugzilla https://bugs.kde.org/ https://bugs.kde.org/show_bug.cgi?id=@@@ kde#(\d+) false lp launchpad Launchpad.net Bugtracker https://bugs.launchpad.net/bugs/ https://bugs.launchpad.net/bugs/@@@ b?lp#(\d+) false Meego bugzilla Meego Bugs https://bugs.meego.com/ https://bugs.meego.com/show_bug.cgi?id=@@@ Meego#(\d+) false bmo bugzilla Mozilla Bugzilla 
https://bugzilla.mozilla.org/ https://bugzilla.mozilla.org/show_bug.cgi?id=@@@ bmo#(\d+) false bnc bugzilla Novell Bugzilla https://apibugzilla.novell.com https://bugzilla.novell.com/show_bug.cgi?id=@@@ (?:bnc|BNC)\s*[#:]\s*(\d+) true ITS other OpenLDAP Issue Tracker http://www.openldap.org/its/ http://www.openldap.org/its/index.cgi/Contrib?id=@@@ ITS#(\d+) false i bugzilla OpenOffice.org Bugzilla http://openoffice.org/bugzilla/ http://openoffice.org/bugzilla/show_bug.cgi?id=@@@ i#(\d+) false fate fate openSUSE Feature Database https://features.opensuse.org/ https://features.opensuse.org/@@@ (?:fate|Fate|FATE)\s*#\s*(\d+) false rh bugzilla RedHat Bugzilla https://bugzilla.redhat.com/ https://bugzilla.redhat.com/show_bug.cgi?id=@@@ rh#(\d+) false bso bugzilla Samba Bugzilla https://bugzilla.samba.org/ https://bugzilla.samba.org/show_bug.cgi?id=@@@ bso#(\d+) false sf sourceforge SourceForge.net Tracker http://sf.net/support/ http://sf.net/support/tracker.php?aid=@@@ sf#(\d+) false Xamarin bugzilla Xamarin Bugzilla http://bugzilla.xamarin.com/index.cgi http://bugzilla.xamarin.com/show_bug.cgi?id=@@@ Xamarin#(\d+) false cve cve CVE Numbers http://cve.mitre.org/ http://cve.mitre.org/cgi-bin/cvename.cgi?name=@@@ (CVE-\d\d\d\d-\d+) false bxo bugzilla XFCE Bugzilla https://bugzilla.xfce.org/ https://bugzilla.xfce.org/show_bug.cgi?id=@@@ bxo#(\d+) false open-build-service-2.9.4/docs/api/api/latest_added.xml000066400000000000000000000044101332555733200226420ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/latest_added.xsd000066400000000000000000000022141332555733200226400ustar00rootroot00000000000000 List of packages and projects latest added to the openSUSE build service ordered by creation timestamp. open-build-service-2.9.4/docs/api/api/latest_updated.xml000066400000000000000000000043311332555733200232310ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/latest_updated.xsd000066400000000000000000000022231332555733200232250ustar00rootroot00000000000000 List of packages and projects latest updated in the openSUSE build service ordered by the updated timestamp. open-build-service-2.9.4/docs/api/api/link.rng000066400000000000000000000044631332555733200211600ustar00rootroot00000000000000 patches branch topadd link true open-build-service-2.9.4/docs/api/api/link.xml000066400000000000000000000006741332555733200211720ustar00rootroot00000000000000 # This will be inserted at the top of the spec file open-build-service-2.9.4/docs/api/api/message.xml000066400000000000000000000001721332555733200216520ustar00rootroot00000000000000 sample message... open-build-service-2.9.4/docs/api/api/message.xsd000066400000000000000000000023771332555733200216610ustar00rootroot00000000000000 This schema describes the format of a single message, that can be sent to the build service via PUT request. Display message only to involved users. Additionally send message as email to all involved users or not. Severity of this message. Used to display in different colors. open-build-service-2.9.4/docs/api/api/messages.xml000066400000000000000000000011171332555733200220350ustar00rootroot00000000000000 sample message... sample message... sample message... open-build-service-2.9.4/docs/api/api/messages.xsd000066400000000000000000000057751332555733200220510ustar00rootroot00000000000000 This schema describes the format for messages in the opensuse build service. 
Messages can be appended to projects and packages, to give other developers and users of the build service additional information - for example that development of the package will be continued later, because the packager has no time for it atm. Count of returned messages. Name of the project, if messages are filtered by project or package. Name of the package, if messages are filtered by package. Severity of this message. Used to display in different colors. Additionally send message as email to all involved users. Date when email was sent, if send_mail flag is true. If email was not (yet) sent, this attribute does not exist. Display message only to involved users. open-build-service-2.9.4/docs/api/api/most_active.xml000066400000000000000000000017731332555733200225530ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/most_active.xsd000066400000000000000000000036101332555733200225410ustar00rootroot00000000000000 List of most active packages or projects in the openSUSE build service ordered activity. Count of updates a package already had. Count of packages this project has. open-build-service-2.9.4/docs/api/api/multibuild.rng000066400000000000000000000007671332555733200224000ustar00rootroot00000000000000 multibuild package open-build-service-2.9.4/docs/api/api/multibuild.xml000066400000000000000000000002021332555733200223720ustar00rootroot00000000000000 gcc gcc-testsuite open-build-service-2.9.4/docs/api/api/newest_stats.xml000066400000000000000000000001401332555733200227440ustar00rootroot00000000000000 2007-03-11T16:43:51+01:00 open-build-service-2.9.4/docs/api/api/newest_stats.xsd000066400000000000000000000007111332555733200227460ustar00rootroot00000000000000 This schema describes the format for transferring newest statistics imported into opensuse build service from the redirect server (by redirect_stats). open-build-service-2.9.4/docs/api/api/obs.rng000066400000000000000000000261711332555733200210060ustar00rootroot00000000000000 noarch aarch64 aarch64_ilp32 armv4l armv5l armv6l armv7l armv5el armv6el armv7el armv7hl armv8el hppa m68k i386 i486 i586 i686 athlon ia64 k1om mips mipsel mips32 mips64 mips64el ppc ppc64 ppc64p7 ppc64le riscv64 s390 s390x sh4 sparc sparc64 sparc64v sparcv8 sparcv9 sparcv9v x86_64 local transitive direct local all local never trylocal localonly serveronly buildtime disabled off localdep all manual maintenance l3 l2 l1 acc unsupported maintainer bugowner reviewer downloader reader Admin User chroot lxc kvm xen secure fpu mmx sse sse2 sse3 ssse3 K M G T low moderate important critical open-build-service-2.9.4/docs/api/api/package.rng000066400000000000000000000060201332555733200216050ustar00rootroot00000000000000 package open-build-service-2.9.4/docs/api/api/package.xml000066400000000000000000000006531332555733200216250ustar00rootroot00000000000000 KDE libraries Base libraries of the KDE framework open-build-service-2.9.4/docs/api/api/packageresult.xml000066400000000000000000000016171332555733200230650ustar00rootroot00000000000000 20060127T134047Z Building. 1 0 Succesfully built. Building. Failed.
Compiler failed with error 42.
open-build-service-2.9.4/docs/api/api/packageresult.xsd000066400000000000000000000071261332555733200230640ustar00rootroot00000000000000 This schema specifies the format for results of the build of a package by the openSUSE build service. Top level element for project build result. Date and time the result was generated. Status information. Summary of the status. Information about build errors. Build error details. Build result summary for a certain architecture of a target platform. Name of resulting RPM. Number of packages in a given state. Hardware architecture. open-build-service-2.9.4/docs/api/api/patchinfo.rng000066400000000000000000000063151332555733200221740ustar00rootroot00000000000000 true false security recommended optional feature open-build-service-2.9.4/docs/api/api/patchinfo.xml000066400000000000000000000012531332555733200222020ustar00rootroot00000000000000 build obs-server obs-api build-initvm Add syntax check Fixes remote root hole Fixes remote root hole security low obs-api security update Just a description in multiline adrian@suse.de open-build-service-2.9.4/docs/api/api/pattern.rng000066400000000000000000000052601332555733200216740ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/primary.rng000066400000000000000000000114111332555733200216750ustar00rootroot00000000000000 rpm noarch athlon i386 i486 i586 i686 ia64 ppc ppc64 s390 s390x x86_64 src md5 sha YES NO open-build-service-2.9.4/docs/api/api/productlist.xml000066400000000000000000000003151332555733200226010ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/productlist.xsd000066400000000000000000000024101332555733200225750ustar00rootroot00000000000000 This schema specifies the format of product listing. Product listing. One entry in the listing. open-build-service-2.9.4/docs/api/api/project.rng000066400000000000000000000075261332555733200216740ustar00rootroot00000000000000 project standard maintenance maintenance_incident maintenance_release open-build-service-2.9.4/docs/api/api/project.xml000066400000000000000000000017641332555733200217040ustar00rootroot00000000000000 SuperKDE SuperKDE is a heavily tuned version of KDE. i386 x86_64 i386 i386 open-build-service-2.9.4/docs/api/api/projects.xml000066400000000000000000000001661332555733200220620ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/rating.xml000066400000000000000000000001761332555733200215160ustar00rootroot00000000000000 2.3 open-build-service-2.9.4/docs/api/api/rating.xsd000066400000000000000000000024311332555733200215100ustar00rootroot00000000000000 This schema describes the format for ratings of objects (packages/projects) of the opensuse build service. This element contains the rating score and some (optional) attributes to identify the rated object. This is the value which the currently logged in user gave this project/package. 
open-build-service-2.9.4/docs/api/api/redirect_stats.xml000066400000000000000000001750321332555733200232550ustar00rootroot00000000000000 0 0 0 0 106 0 0 0 0 0 0 0 0 0 0 0 2 1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 1 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 1 0 9 5 9 9 36 110 9 9 11 11 10 23 10 10 9 10 10 10 12 11 13 10 9 9 42 25 38 27 13 143 63 138 75 27 12 11 9 10 9 8 33 17 35 24 12 1 1 1 1 1 1 1 1 9 9 9 8 10 13 9 9 10 10 10 24 10 10 9 9 12 9 9 8 45 16 8 150 33 10 9 9 9 9 15 12 10 10 11 9 9 34 13 9 9 10 9 9 10 28 9 9 10 10 9 12 open-build-service-2.9.4/docs/api/api/redirect_stats.xsd000066400000000000000000000040661332555733200232510ustar00rootroot00000000000000 Detailed download statistics from redirector. open-build-service-2.9.4/docs/api/api/request.rng000066400000000000000000000210621332555733200217050ustar00rootroot00000000000000 review new accepted declined revoked superseded deleted submit delete change_devel add_role set_bugowner maintenance_incident maintenance_release group update noupdate cleanup true false true false open-build-service-2.9.4/docs/api/api/request.xml000066400000000000000000000052701332555733200217220ustar00rootroot00000000000000 moderate noupdate true Forgetit! Looks great! Review got accepted I don't want to make Klaas my enemy... Review got approved I don't want to make Klaas my enemy... 2009-12-22T23:00:00 Kraft Kraft is KDE software for craftsmen and should urgently be added to the openSUSE distribution. The version from kraft.old can be deleted and will not be used as devel package anymore. open-build-service-2.9.4/docs/api/api/revisionlist.xml000066400000000000000000000017771332555733200227740ustar00rootroot00000000000000 3bcf28c526fe93afc1674f2c20333ceb unknown saschpe df58f7222e36caffde066dc5ede1c280 unknown saschpe test checkin. ab382eb3dfb9a9e82ee3753b7d52689c unknown saschpe Reverted test commit. 8812fc3965a59a9478c37e7c4d4e089f unknown saschpe ab382eb3dfb9a9e82ee3753b7d52689c unknown saschpe open-build-service-2.9.4/docs/api/api/rpm-ns.rng000066400000000000000000000166601332555733200214410ustar00rootroot00000000000000 package patch script message product atom EQ LE GE LT GT 1 0 dir ghost open-build-service-2.9.4/docs/api/api/service.rng000066400000000000000000000020001332555733200216440ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/service.xml000066400000000000000000000004341332555733200216670ustar00rootroot00000000000000 12312313212 foo bar open-build-service-2.9.4/docs/api/api/status.xml000066400000000000000000000003121332555733200215450ustar00rootroot00000000000000 Ok
Operation successful.
home:kfreitag:Factory
open-build-service-2.9.4/docs/api/api/status.xsd000066400000000000000000000036611332555733200215550ustar00rootroot00000000000000 This schema specifies the format for status information which is returned in response to PUT requests and in case they fail to GET requests. Status information. Summary of the status. Detailed, human readable information about the status. Additional data tag that can be processed by the client. Machine readable. open-build-service-2.9.4/docs/api/api/status_message.xml000066400000000000000000000004161332555733200232560ustar00rootroot00000000000000 a single sample message. contains some text ... attributes are optional and ignored when sent (put) to the api. the format of this example will be used as a convenient way to put a message. open-build-service-2.9.4/docs/api/api/status_messages.xml000066400000000000000000000010661332555733200234430ustar00rootroot00000000000000 testmessage number one ... the format of this file will be used for getting messages from the api. it can also be used to send (put) multiple messages to the api at once. in this case the optional attributes will be ignored. open-build-service-2.9.4/docs/api/api/status_messages.xsd000066400000000000000000000031151332555733200234360ustar00rootroot00000000000000 This schema describes the format for status messages of the opensuse build service status. Status messages are used to give a hint to the users what's going on on the build service, for example if build hosts are having problems. This element contains the actual message with some attributes. The value for the attribute 'severity' (0-3) has the following meaning: 0: informal message 1: status changed to good with this message (green traffic lights displayed) 2: status changed to mid critical (yellow traffic lights displayed) 3: status changed to critical (red traffic lights displayed) open-build-service-2.9.4/docs/api/api/suse-primary.rng000066400000000000000000000056071332555733200226640ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/updated_timestamp.xml000066400000000000000000000002141332555733200237340ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/updated_timestamp.xsd000066400000000000000000000020741332555733200237400ustar00rootroot00000000000000 Package or project with timestamp of the last update. open-build-service-2.9.4/docs/api/api/user.rng000066400000000000000000000025461332555733200212010ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/api/user.xml000066400000000000000000000004701332555733200212050ustar00rootroot00000000000000 cinderella cindy@example.com Cinderella confirmed Admin open-build-service-2.9.4/docs/api/api/workerstatus.xml000066400000000000000000000032751332555733200230120ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/backend/000077500000000000000000000000001332555733200203225ustar00rootroot00000000000000open-build-service-2.9.4/docs/api/backend/Makefile000066400000000000000000000003121332555733200217560ustar00rootroot00000000000000all: @echo "Targets: test apidocs" test: RUBYLIB=../restility/lib ../restility/bin/rest_test -I .. api.txt apidocs: RUBYLIB=../restility/lib ../restility/bin/rest_doc -I .. api.txt --html -o html open-build-service-2.9.4/docs/api/backend/api.txt000066400000000000000000000061571332555733200216450ustar00rootroot00000000000000= openSUSE Package Repository Interface Version: 0.1 Only authenticated users are allowed to acces the API. Authentication is done by sending a Basic HTTP Authorisation header. 
: Project name : Package name : Platform name All names aren't allowed to contain spaces, slashes or colons. == Table of Contents Contents: == Sources Host: backend-source.opensuse.org === Project meta data GET /source/ GET /project/ Read list of projects. XmlResult: frontend/projects frontend/directory.xsd GET /source//_meta GET /project/ Read project meta file. XmlResult: frontend/project PUT /source//_meta PUT /project/ Write project meta file. XmlBody: frontend/project XmlResult: frontend/project === Package meta data GET /source///_meta GET /package// Read project meta data. XmlResult: package PUT /source///_meta PUT /package// Write project meta data. Writing of the project meta data commits the packages contained in the project to the build backend. XmlBody: package XmlResult: frontend/package GET /package///history Read package change history XmlResult: revisionhistory === Other source files : File name GET /source/// Read source file. Result: Content of file PUT /source/// Write source file. Body: Content of file XmlResult: fileputresult == Repository data Host: backend-source.opensuse.org GET /repository/ List all projects. XmlResult: frontend/directory GET /repository/ List all repositories for a project. XmlResult: frontend/directory GET /repository// List a specific repository of a project. XmlResult: repository == RPMs Host: backend-repository.opensuse.org GET /rpm//// Get list of RPMs. GET /rpm//// Get RPM. Result: RPM == Build Status Summaries Host: backend-source.opensuse.org GET /status/ Read project summary result. XmlResult: packstatuslistlist GET /status///:all Read repository summary result. XmlResult: statussumlist GET /status/// Read package result. XmlResult: statussumlist == Build Results Packages Host: backend-repository.opensuse.org GET /rpm/////logfile Read build log. Result: Build log as text file. GET /rpm/////status Read package build status. XmlResult: packagestatus GET /rpm/////history Read package build history. 
XmlResult: buildhistory == rebuild trigger Host: backend-repository.opensuse.org DELETE /rpm/////status == build client status Host: backend-repository.opensuse.org GET /workerstatus XmlResult: workerstatus == build client job history Host: backend-repository.opensuse.org GET /info//jobhistory XmlResult: jobhistlist open-build-service-2.9.4/docs/api/backend/api.txt.test000066400000000000000000000021441332555733200226130ustar00rootroot00000000000000@arg_project = "testing" @arg_package = "test1" @arg_filename = "test1.spec" @arg_arch = "i586" @arg_repository = "SUSE_Factory" @arg_rpmname = "mydummy-1.0-1.1.i586.rpm" request "GET /source/" response = request "GET /source//_meta" if ( response ) @data_body = response.body request "PUT /source//_meta" end response = request "GET /source///_meta" if ( response ) @data_body = response.body # request "PUT /source///_meta" end response = request "GET /source///" if ( response ) @data_body = response.body request "PUT /source///" end request "GET /platform/" request "GET /platform/" request "GET /rpm////" request "GET /rpm////" request "GET /status//:all/:all" request "GET /status///:all" request "GET /status///" request "GET /rpm/////logfile" request "GET /rpm/////status" open-build-service-2.9.4/docs/api/backend/fileputresult.xml000066400000000000000000000002761332555733200237600ustar00rootroot00000000000000 0d59d4904e9d6605fb7298038bbbb239 1.0 unknown open-build-service-2.9.4/docs/api/backend/fileputresult.xsd000066400000000000000000000034411332555733200237530ustar00rootroot00000000000000 This schema describes the response of a PUT request on a file which is part of a package. Information about the written file. TODO: Unify this with status and package schemas. MD5 sum of all files of the package. ??? ??? Version of package. ??? open-build-service-2.9.4/docs/api/backend/package.xml000066400000000000000000000004021332555733200224330ustar00rootroot00000000000000 Test 1 First test package. open-build-service-2.9.4/docs/api/backend/package.xsd000066400000000000000000000037641332555733200224470ustar00rootroot00000000000000 This schema describes a format for describing an openSUSE package for the build service. Top level element for package data. Title of the package. Description of the package. A person which is associated with the package. disable package to be built for a specific repository or architecture (or both). open-build-service-2.9.4/docs/api/backend/packagestatus.xml000066400000000000000000000000351332555733200237010ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/backend/packagestatus.xsd000066400000000000000000000011441332555733200237010ustar00rootroot00000000000000 This schema describes the format for describing the build status of an individual package. Top level element for status data. open-build-service-2.9.4/docs/api/backend/project.xml000066400000000000000000000004311332555733200225100ustar00rootroot00000000000000 testing Testing Project i586 open-build-service-2.9.4/docs/api/backend/quota.xml000066400000000000000000000004721332555733200222000ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/backend/statussumlist.xml000066400000000000000000000003751332555733200240150ustar00rootroot00000000000000 open-build-service-2.9.4/docs/api/backend/statussumlist.xsd000066400000000000000000000023571332555733200240150ustar00rootroot00000000000000 This schema describes build status summary lists. Container for status sums. Status summary. 
open-build-service-2.9.4/docs/api/restility/000077500000000000000000000000001332555733200207635ustar00rootroot00000000000000open-build-service-2.9.4/docs/api/restility/History.txt000066400000000000000000000001001332555733200231540ustar00rootroot00000000000000== 0.0.1 2007-09-11 * 1 major enhancement: * Initial release open-build-service-2.9.4/docs/api/restility/License.txt000066400000000000000000000431131332555733200231100ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 51 Franklin Steet, Fifth Floor, Boston, MA 02111-1307 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. 
The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. 
In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. 
You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1307 USA Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. 
<signature of Ty Coon>, 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. open-build-service-2.9.4/docs/api/restility/Manifest.txt000066400000000000000000000005641332555733200232770ustar00rootroot00000000000000History.txt License.txt Manifest.txt README.iChaintest README.rest_test README.txt Rakefile bin/rest_doc bin/rest_test config/hoe.rb config/requirements.rb lib/rest.rb lib/rest_htmlprinter.rb lib/rest_test.rb script/destroy script/generate setup.rb tasks/deployment.rake tasks/environment.rake tasks/website.rake test/test_helper.rb test/test_restility.rb validate_xml.rb open-build-service-2.9.4/docs/api/restility/README.iChaintest000066400000000000000000000012111332555733200237300ustar00rootroot00000000000000The iChain integration can be tested with the test tool rest_test. For that, it needs to be called with the proper configuration files; it then tests the entire API protected behind iChain. From the tools directory, it is called like this: ./rest_test --test=../frontend/ichain.test ../frontend/api.txt where api.txt is a general description of the API to test and the ichain.test file is a listing of the requests that should be tested, together with some additional attributes. Consider creating an ichain.test.config file that contains valid credentials. More information can be found in the README.rest_test file. Klaas Freitag open-build-service-2.9.4/docs/api/restility/README.rest_test000066400000000000000000000051661332555733200236640ustar00rootroot00000000000000rest_test ========= rest_test is a tool for testing REST-style APIs. It uses the same API specification as is used by rest_doc for generating API docs and executes a list of user-defined tests exercising the API. "rest_test --help" shows the usage and available command line options of the test tool. On the first run rest_test creates a test file as a template. It has the same name as the API specification file suffixed by ".test". The test file specifies which tests are executed. By default one test for each API function is created. The test file is executed by a Ruby interpreter in a special environment. This means you can use normal Ruby statements to control the flow of the tests, use variables, etc. The test environment provides some special functions which can be used in the test file: request <apicall>, [expected_http_result] This function makes a call to the API and checks that the returned result adheres to the specification. If the result is an XML file, it is checked that the result validates against the schema. By default an HTTP return code of 2xx is treated as success. This can be overridden by specifying a different return code as the second argument to the request function. The apicall argument is a string specifying which kind of request is executed. It has to be the same as in the API specification. Variables which are contained in the apicall string in the form of "<name>" are replaced by the value of a corresponding variable defined in the test file. The name of the Ruby variable has to be "@arg_name", where the "name" part has to be identical to the "name" part of the argument in the apicall string. 
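For illustration only, a test file for a hypothetical API might look like this (the apicall strings and the @arg_project variable are made up and have to match the requests in your own API specification):

  @arg_project = "home:user"
  request "GET /source/<project>"
  request "GET /source/<project>/_meta", 404

The first request expects a 2xx code and validates an XML result against its schema, the second one expects the given return code instead of 2xx.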
If the apicall is a PUT request, the body which is transferred with the request is read from the variable "@data_body", which has to be specified in the test file. alias_host original_name, aliased_name This function sets an alias name which is used instead of the original host name from the API specification. This makes it possible to use the same test for different installations of the API. If a file with the name of the test file suffixed by ".config" exists, it is executed in the same environment as the test file before the test file is run. This can be used to do specific setups which shouldn't be part of the general test file, e.g. some host aliasing or setup of login data. If the test or the config file sets the variables "@user" and "@password", these are transmitted with all succeeding API calls as authentication information. Usually these values should be set up in the config file. open-build-service-2.9.4/docs/api/restility/README.txt000066400000000000000000000002621332555733200224610ustar00rootroot00000000000000This is Restility, a set of tools for writing REST-style web services. Feel free to contact the author Cornelius Schumacher <cschum@suse.de>, if you have questions or comments. open-build-service-2.9.4/docs/api/restility/Rakefile000066400000000000000000000002131332555733200224240ustar00rootroot00000000000000require 'config/requirements' require 'config/hoe' # setup Hoe + all gem configuration Dir['tasks/**/*.rake'].each { |rake| load rake }open-build-service-2.9.4/docs/api/restility/bin/000077500000000000000000000000001332555733200215335ustar00rootroot00000000000000open-build-service-2.9.4/docs/api/restility/bin/rest_doc000077500000000000000000000035701332555733200232700ustar00rootroot00000000000000#!/usr/bin/env ruby.ruby2.5 require 'optparse' require 'rubygems' require 'active_support' require 'builder' require 'erb' include ERB::Util $LOAD_PATH << "#{File.dirname(__FILE__)}/../lib" require "rest" require "rest_htmlprinter" class Options attr_accessor :verbose, :filename, :html, :outline, :output_dir, :include_dir def initialize verbose = false html = false end def self.parse( args ) options = Options.new opt = OptionParser.new opt.banner = "Usage: rest_doc [options] filename" opt.on( "-h", "--help", "Print this message" ) do puts opt exit end opt.on( "-v", "--verbose", "Verbose mode" ) do options.verbose = true end opt.on( "--html", "Create HTML output" ) do options.html = true end opt.on( "--outline", "Create outline output" ) do options.outline = true end opt.on( "-o", "=DIRECTORY", "Output directory" ) do |val| options.output_dir = val end opt.on( "-I", "=DIRECTORY", "Include directory as search path for XML files") do |val| options.include_dir = val end begin opt.parse!( args ) rescue OptionParser::InvalidOption puts $! puts opt exit end if ( ARGV.size > 1 ) puts "Too many arguments" puts opt exit elsif ( ARGV.size < 1 ) puts "Too few arguments" puts opt exit end options.filename = ARGV[0] options end end options = Options.parse( ARGV ) XmlFile.include_dir = options.include_dir begin document = Document.new document.parse_args if ( options.html ) printer = HtmlPrinter.new if ( options.output_dir ) printer.output_dir = options.output_dir end elsif ( options.outline ) printer = OutlinePrinter.new else printer = TextPrinter.new end printer.print document rescue Errno::ENOENT puts $! 
end open-build-service-2.9.4/docs/api/restility/bin/rest_test000077500000000000000000000063571332555733200235060ustar00rootroot00000000000000#!/usr/bin/ruby.ruby2.5 require 'optparse' require 'rubygems' require 'active_support' require 'erb' include ERB::Util require "fileutils" $LOAD_PATH << File.dirname( $0 ) require "rest.rb" require "rest_test.rb" class Options attr_accessor :verbose, :filename, :html, :outline, :output_dir, :create_template, :testfile, :force, :request, :show_all, :include_dir, :output_html def initialize verbose = false html = false end def self.parse( args ) options = Options.new opt = OptionParser.new opt.banner = "Usage: rest_test [options] filename" opt.on( "-h", "--help", "Print this message" ) do puts opt exit end opt.on( "-v", "--verbose", "Verbose mode" ) do options.verbose = true end opt.on( "--create-test", "Create test template" ) do options.create_template = true end opt.on( "--test=testfile", "Select test file" ) do |val| options.testfile = val end opt.on( "--force", "Force overwriting of test file" ) do options.force = true end opt.on( "--request=name", "Select request to be tested" ) do |val| options.request = val end opt.on( "--show-all", "Show all requests, not only non-passed." ) do options.show_all = true end opt.on( "--output-html", "Put out results formatted as HTML." ) do options.output_html = true end opt.on( "-I", "=DIRECTORY", "Include directory as search path for XML files") do |val| options.include_dir = val end begin opt.parse!( args ) rescue OptionParser::InvalidOption STDERR.puts $! STDERR.puts opt exit 1 end if ( ARGV.size > 1 ) STDERR.puts "Too many arguments" STDERR.puts opt exit 1 elsif ( ARGV.size < 1 ) STDERR.puts "Too few arguments" STDERR.puts opt exit 1 end options.filename = ARGV[0] options end end options = Options.parse( ARGV ) begin XmlFile.include_dir = options.include_dir if ( !options.testfile ) options.testfile = options.filename + ".test" end document = Document.new document.parse_args @requests = document.all_children Request if ( !File.exists?( options.testfile ) || options.create_template ) if ( File.exists?( options.testfile ) ) if ( !options.force ) STDERR.puts "Test file '#{options.testfile}' already exists." + " Use '--force' to overwrite it. Exiting." return end puts "Overwriting test file '#{options.testfile}'." else puts "Creating test file '#{options.testfile}'." end File.open( options.testfile, "w" ) do |file| @requests.each do |r| file.puts "request \"#{r}\"" end end end puts "Working on testfile #{options.testfile}." runner = TestRunner.new @requests configfile = options.testfile + ".config" if ( File.exists? configfile ) File.open configfile do |file| eval file.read, runner.context.get_binding end end if ( options.verbose ) runner.context.show_xmlbody = true end runner.context.show_passed = options.show_all runner.context.request_filter = options.request runner.context.output_html = options.output_html runner.run options.testfile rescue Errno::ENOENT puts $! end open-build-service-2.9.4/docs/api/restility/config/000077500000000000000000000000001332555733200222305ustar00rootroot00000000000000open-build-service-2.9.4/docs/api/restility/config/hoe.rb000066400000000000000000000045771332555733200233430ustar00rootroot00000000000000#require 'restility/version' AUTHOR = 'Cornelius Schumacher' # can also be an array of Authors EMAIL = "cschum@suse.de" DESCRIPTION = "Utilities for writing REST web services." 
GEM_NAME = 'restility' # what ppl will type to install your gem RUBYFORGE_PROJECT = 'restility' # The unix name for your project HOMEPATH = "http://#{RUBYFORGE_PROJECT}.rubyforge.org" DOWNLOAD_PATH = "http://rubyforge.org/projects/#{RUBYFORGE_PROJECT}" @config_file = "~/.rubyforge/user-config.yml" @config = nil RUBYFORGE_USERNAME = "unknown" def rubyforge_username unless @config begin @config = YAML.load(File.read(File.expand_path(@config_file))) rescue puts <<-EOS ERROR: No rubyforge config file found: #{@config_file} Run 'rubyforge setup' to prepare your env for access to Rubyforge - See http://newgem.rubyforge.org/rubyforge.html for more details EOS exit end end RUBYFORGE_USERNAME.replace @config["username"] end REV = nil # UNCOMMENT IF REQUIRED: # REV = `svn info`.each {|line| if line =~ /^Revision:/ then k,v = line.split(': '); break v.chomp; else next; end} rescue nil VERS = "0.0.1" + (REV ? ".#{REV}" : "") RDOC_OPTS = ['--quiet', '--title', 'restility documentation', "--opname", "index.html", "--line-numbers", "--main", "README", "--inline-source"] class Hoe def extra_deps @extra_deps.reject! { |x| Array(x).first == 'hoe' } @extra_deps end end # Generate all the Rake tasks # Run 'rake -T' to see list of generated tasks (from gem root directory) hoe = Hoe.new(GEM_NAME, VERS) do |p| p.author = AUTHOR p.description = DESCRIPTION p.email = EMAIL p.summary = DESCRIPTION p.url = HOMEPATH p.rubyforge_name = RUBYFORGE_PROJECT if RUBYFORGE_PROJECT p.test_globs = ["test/**/test_*.rb"] p.clean_globs |= ['**/.*.sw?', '*.gem', '.config', '**/.DS_Store'] #An array of file patterns to delete on clean. # == Optional p.changes = p.paragraphs_of("History.txt", 0..1).join("\\n\\n") #p.extra_deps = [] # An array of rubygem dependencies [name, version], e.g. [ ['active_support', '>= 1.3.1'] ] #p.spec_extras = {} # A hash of extra values to set in the gemspec. end CHANGES = hoe.paragraphs_of('History.txt', 0..1).join("\\n\\n") PATH = (RUBYFORGE_PROJECT == GEM_NAME) ? RUBYFORGE_PROJECT : "#{RUBYFORGE_PROJECT}/#{GEM_NAME}" hoe.remote_rdoc_dir = File.join(PATH.gsub(/^#{RUBYFORGE_PROJECT}\/?/,''), 'rdoc') open-build-service-2.9.4/docs/api/restility/config/requirements.rb000066400000000000000000000005521332555733200253020ustar00rootroot00000000000000require 'fileutils' include FileUtils require 'rubygems' %w[rake hoe newgem rubigen].each do |req_gem| begin require req_gem rescue LoadError puts "This Rakefile requires the '#{req_gem}' RubyGem." puts "Installation: gem install #{req_gem} -y" exit end end $:.unshift(File.join(File.dirname(__FILE__), %w[.. lib])) #require 'restility' open-build-service-2.9.4/docs/api/restility/lib/000077500000000000000000000000001332555733200215315ustar00rootroot00000000000000open-build-service-2.9.4/docs/api/restility/lib/rest.rb000077500000000000000000000170311332555733200230400ustar00rootroot00000000000000#!/usr/bin/ruby require 'fileutils' class XmlFile @@include_dir = "" def XmlFile.include_dir= dir if !dir dir = "" end @@include_dir = dir end def XmlFile.exist? file_name exists? file_name end def XmlFile.exists? file_name find_file file_name end def XmlFile.copy file_name, output_dir dir_name = File.dirname( file_name ) if ( dir_name =~ /^\// ) STDERR.puts "Absolute file names aren't allowed as XML file names." + " (#{dir_name})"; return end if ( dir_name ) output_dir += "/" + dir_name end if ( dir_name && !dir_name.empty? && !File.exist?( dir_name ) ) `mkdir -p #{output_dir}` if ( $? 
!= 0 ) STDERR.puts "Unable to create directory '#{dir_name}'" end end FileUtils.copy( find_file( file_name ), output_dir ) end def XmlFile.find_file file_name if ( File.exists? file_name ) return file_name end if ( !@@include_dir.empty? ) file_name = @@include_dir + "/" + file_name if ( File.exists? file_name ) return file_name end end return nil end end class Node attr_accessor :parent, :level, :name attr_reader :children def initialize n = nil @name = n @children = Array.new @level = 0 end def add_child c @children.push c c.parent = self c.level = @level + 1 end def print printer printer.do_print self end def print_children printer if ( @children ) @children.each do |child| child.print printer end end end def root? return !parent end def root if parent return parent.root end return self end def to_s @name end def all_children type @result = Array.new @children.each do |child| if ( child.class == type ) @result.push child end @result.concat( child.all_children( type ) ) end @result end end class Section < Node end class Request < Node attr_accessor :verb, :path, :id @@id = 0 def initialize @id = @@id; @@id += 1 super() end def to_s p = @path.gsub(/<([^>]*?)\??>=/, "\\1=") @verb + " " + p end def parameters result = Array.new @path.scan( /[^=]<(.*?)(\??)>/ ) do |p| node = self found = false optional = $2.empty? ? false : true while( node && !found ) node.children.each do |c| if ( c.is_a?( Parameter ) && c.name == $1 ) c.optional = optional result.push c found = true break end end node = node.parent end if ( !found ) n = Parameter.new( $1 ) n.optional = optional result.push n end end result end def host node = self while( node ) node.children.each do |c| if c.is_a? Host return c end end node = node.parent end nil end end class Text < Node attr_accessor :text def initialize @text = Array.new super() end def to_s @text.join("\n") end def append t @text.push t end end class Parameter < Node attr_accessor :description, :optional def initialize n = nil @optional = false super end def to_s s = @name.to_s s += " (optional)" if @optional if ( !@description || @description.empty? 
) s else s + " - " + @description end end end class Xml < Node attr_accessor :schema end class Body < Node end class Result < Node end class XmlBody < Xml end class XmlResult < Xml end class Host < Node end class Contents < Node end class Version < Node end class Document < Section def initialize super self.name = "DOCUMENT" end def parse_args sections = Hash.new sections[ 0 ] = self @section = nil while line = gets if ( line =~ /^\s+(\S.*)$/ ) if ( !@text ) @text = Text.new end @text.append $1 else if ( @text && @current ) @current.add_child @text end @text = nil end if ( line =~ /^(=+) (.*)/ ) level = $1.size title = $2 @section = Section.new title @current = @section parent = sections[ level - 1 ] parent.add_child @section sections[ level ] = @section elsif ( line =~ /^(GET|PUT|POST|DELETE) (.*)/ ) @request = Request.new @current = @request @request.verb = $1 @request.path = $2 @section.add_child( @request ) elsif ( line =~ /^<(.*)>: (.*)/ ) parameter = Parameter.new parameter.name = $1 parameter.description = $2 @current.add_child( parameter ) elsif ( line =~ /^Host: (.*)/ ) host = Host.new $1 @current.add_child( host ) elsif ( line =~ /^Body: (.*)/ ) body = Body.new $1 @current.add_child( body ) elsif ( line =~ /^Result: (.*)/ ) result = Result.new $1 @current.add_child( result ) elsif ( line =~ /^XmlBody: (.*)/ ) body = XmlBody.new $1 @current.add_child( body ) elsif ( line =~ /^XmlResult: (.*) +(.*)/ ) result = XmlResult.new $1 result.schema = $2 @current.add_child( result ) elsif ( line =~ /^XmlResult: (.*)/ ) result = XmlResult.new $1 @current.add_child( result ) elsif ( line =~ /^Contents/ ) @current.add_child( Contents.new ) elsif ( line =~ /^Version: (.*)/ ) version = Version.new $1 @current.add_child( version ) end end end end class Printer def initialize @missing = Hash.new end def print node do_prepare do_print node do_finish end def print_document printer print_section printer end def do_print node method = "print_" + node.class.to_s.downcase send method, node end def do_prepare end def do_finish end def method_missing symbol, *args if ( !@missing[ symbol ] ) @missing[ symbol ] = true STDERR.puts "Warning: #{self.class} doesn't support '#{symbol}'." 
end end end class TextPrinter < Printer def indent node node.level.times do printf " " end end def print_section section indent section puts "SECTION " + section.to_s section.print_children self end def print_request request indent request puts "Request: " + request.to_s host = request.host if ( host ) indent host puts " HOST: " + host.name end request.parameters.each do |p| indent request puts " PARAMETER: #{p.to_s}" end request.print_children self end def print_text text text.text.each do |t| indent text puts t end end def print_parameter parameter indent parameter puts "PARAMETER_DEF: " + parameter.name + " - " + parameter.description end def print_host host indent host puts "HOST_DEF: " + host.name end def print_result result indent result puts "Result: " + result.name end def print_xmlresult result indent result printf "XmlResult: " + result.name if ( result.schema ) printf " (Schema: #{result.schema})" end printf "\n" end def print_body body indent body puts "Body: " + body.name end end class OutlinePrinter < Printer def print node node.level.times do printf " " end puts "#{node.level} #{node.class}" node.print_children self end def print_section node print node end end open-build-service-2.9.4/docs/api/restility/lib/rest_htmlprinter.rb000077500000000000000000000073001332555733200254660ustar00rootroot00000000000000#!/usr/bin/ruby require "rest" class HtmlPrinter < Printer attr_accessor :output_dir def initialize super() @output_dir = "html" @xml_examples = Hash.new @xml_schemas = Hash.new end def do_prepare unless File.exists? @output_dir Dir.mkdir @output_dir end @index = File.new( @output_dir + "/index.html", "w" ) @html = Builder::XmlMarkup.new( :target => @index, :indent => 2 ) @html.comment! "This file was generated by restility at #{Time.now}" end def do_finish puts "Written #{@index.path}." @xml_examples.each do |f,b| if !XmlFile.exist?( f ) STDERR.puts "XML Example '#{f}' is missing." else XmlFile.copy f, @output_dir end end @xml_schemas.each do |f,b| if !XmlFile.exist?( f ) STDERR.puts "XML Schema '#{f}' is missing." else XmlFile.copy f, @output_dir end end @index.close end def print_section section if ( !section.root? ) tag = "h#{section.level}" @html.tag!( tag, section ) end section.print_children self end def print_request request @html.div( "class" => "request" ) do @html.p do @html.a( "name" => request.id ) do @html.b request.to_s end end if false host = request.host if ( host ) @html.p "Host: " + host.name end end if request.parameters.size > 0 @html.p "Arguments:" @html.ul do request.parameters.each do |p| @html.li p.to_s end end end request.print_children self end end def print_text text @html.p do |p| text.text.each do |t| p.span(t) p.br end end end def print_parameter parameter end def print_host host @html.p "Host: " + host.name end def print_result result @html.p "Result: " + result.name end def print_body body @html.p "Body: " + body.name end def print_xmlresult result print_xml_links "Result", result.name, result.schema end def print_xmlbody body print_xml_links "Body", body.name, body.schema end def print_xml_links title, xmlname, schema example = xmlname + ".xml" if ( !schema || schema.empty? ) schema = xmlname + ".rng" schema = xmlname + ".xsd" unless XmlFile.exist? schema end @xml_examples[ example ] = true @xml_schemas[ schema ] = true @html.p do |p| p << title p << ": " has_example = XmlFile.exist? example has_schema = XmlFile.exist? 
schema if has_example @html.a( "Example", "href" => example ) end if has_schema p << " "; @html.a( "Schema", "href" => schema ) end if( !has_example && !has_schema ) p << xmlname end end end def print_contents contents @html.div do |p| p << create_contents_list( contents.root, 1 ) end end def create_contents_list section, min_level result = "" section.children.each do |s| if ( s.is_a? Section ) result += create_contents_list s, min_level end if ( s.is_a? Request ) result += "<li>" + h( s.to_s ) + "</li>\n" end end endresult = "" if ( !result.empty? ) if ( section.level > min_level ) endresult = "<li>" + h( section.to_s ) end if ( section.level >= min_level ) endresult += "<ul>\n" + result + "</ul>" else endresult = result end if ( section.level > min_level ) endresult += "</li>" end end endresult end def print_version version @html.p "Version: " + version.to_s end end open-build-service-2.9.4/docs/api/restility/lib/rest_test.rb000066400000000000000000000161371332555733200241020ustar00rootroot00000000000000require "net/https" require "tempfile" class ParameterError < Exception end class TestContext attr_writer :show_xmlbody, :request_filter, :show_passed, :output_html def initialize requests @host_aliases = Hash.new @output = "" @requests = requests start end def start @tested = 0 @unsupported = 0 @failed = 0 @passed = 0 @error = 0 @skipped = 0 end def bold str if @output_html str.gsub! /</, "&lt;" str.gsub! />/, "&gt;" "<b>#{str}</b>" else "\033[1m#{str}\033[0m" end end def red str bold str # "\E[31m#{str}\E[30m" end def green str bold str end def magenta str bold str end def get_binding return binding() end def unsupported out magenta( " UNSUPPORTED" ) @unsupported += 1 out_flush end def failed out red( " FAILED" ) @failed += 1 out_flush end def passed out green( " PASSED" ) @passed += 1 if ( @show_passed ) out_flush else out_clear end end def skipped # out magenta( " SKIPPED" ) @skipped += 1 out_flush end def error str = nil error_str = " ERROR" if ( str ) error_str += ": " + str end out red( error_str ) @error += 1 out_flush end def alias_host old, new @host_aliases[ old ] = new end def out str @output += str + "\n"; end def out_clear @output = "" end def out_flush print @output out_clear end def request arg, return_code = nil, xml_check_wanted = true @tested += 1 if ( @request_filter && arg !~ /#{@request_filter}/ ) skipped return nil end out bold( "REQUEST: " + arg ) request = @requests.find { |r| r.to_s == arg } if ( !request ) STDERR.puts " Request not defined" return nil end xml_bodies = request.all_children XmlBody if ( !xml_bodies.empty? ) xml_body = xml_bodies[0] out " XMLBODY: " + xml_body.name end xml_results = request.all_children XmlResult if ( !xml_results.empty? ) xml_result = xml_results[0] out " XMLRESULT: " + xml_result.name end out " host: '#{request.host}'" host = request.host.to_s if ( !host || host.empty? ) error "No host defined" return nil end if @host_aliases[ host ] host = @host_aliases[ host ] end out " aliased host: #{host}" begin path = substitute_parameters request rescue ParameterError error return nil end out " Path: " + path splitted_host = host.split( ":" ) host_name = splitted_host[0] host_port = splitted_host[1] out " Host name: #{host_name} port: #{host_port}" if ( request.verb == "GET" ) req = Net::HTTP::Get.new( path ) if ( true||@user ) req.basic_auth( @user, @password ) end response = Net::HTTP.start( host_name, host_port ) do |http| http.request( req ) end if ( response.is_a? 
Net::HTTPRedirection ) location = URI.parse response["location"] out " Redirected to #{location}, scheme is #{location.scheme}" http = Net::HTTP.new( location.host, location.port ) if location.scheme == "https" http.use_ssl = true end http.start do |http| req = Net::HTTP::Get.new( location.path ) if ( @user ) out " setting user #{@user}" req.basic_auth( @user, @password ) end out " calling #{location.host}, #{location.port}" response = http.request( req ) end end elsif( request.verb == "POST" ) req = Net::HTTP::Post.new( path ) if ( @user ) req.basic_auth( @user, @password ) end response = Net::HTTP.start( host_name, host_port ) do |http| http.request( req, "" ) end elsif( request.verb == "PUT" ) if ( !@data_body ) error "No body data defined for PUT" return nil end if ( xml_body && @show_xmlbody ) out "Request body:" out @data_body end req = Net::HTTP::Put.new( path ) if ( @user ) req.basic_auth( @user, @password ) end response = Net::HTTP.start( host_name, host_port ) do |http| http.request( req, @data_body ) end else STDERR.puts " Test of method '#{request.verb}' not supported yet." unsupported return nil end if ( response ) out " return code: #{response.code}" if ( xml_result && @show_xmlbody ) out "Response body:" out response.body end if ( ( return_code && response.code == return_code.to_s ) || ( response.is_a? Net::HTTPSuccess ) ) if ( xml_check_wanted && xml_result ) if ( xml_result.schema ) schema_file = xml_result.schema else schema_file = xml_result.name + ".xsd" end if ( validate_xml response.body, schema_file ) out " Response validates against schema '#{schema_file}'" passed else failed end else passed end else failed end end response end def substitute_parameters request path = request.path.clone request.parameters.each do |parameter| p = parameter.name arg = eval( "@arg_#{parameter.name}" ) if ( !arg ) out " Can't substitute parameter '#{p}'. " + "No variable @arg_#{p} defined." raise ParameterError end path.gsub! 
/<#{p}>/, arg end path end def validate_xml xml, schema_file tmp = Tempfile.new('rest_test_validator') tmp.print xml tmp_path = tmp.path tmp.close found_schema_file = XmlFile.find_file schema_file if ( !found_schema_file ) out " Unable to find schema file '#{schema_file}'" return false end cmd = "/usr/bin/xmllint --noout --schema #{found_schema_file} #{tmp_path} 2>&1" # puts "CMD: " + cmd output = `#{cmd}` if $?.exitstatus > 0 out "xmllint return value: #{$?.exitstatus}" out output return false end return true end def print_summary undefined = @tested - @unsupported - @failed - @passed - @error - @skipped puts "#tester passed #{@passed}" puts "#tester failed #{@failed}" puts "#tester error #{@error}" puts "#tester skipped #{@unsupported + @skipped + undefined}" puts puts "Total #{@tested} tests" puts " #{@passed} passed" puts " #{@failed} failed" if ( @unsupported > 0 ) puts " #{@unsupported} unsupported" end if ( @error > 0 ) puts " #{@error} errors" end if ( @skipped > 0 ) puts " #{@skipped} skipped" end if ( undefined > 0 ) puts " #{undefined} undefined" end end end class TestRunner attr_reader :context def initialize requests @context = TestContext.new requests end def run testfile File.open testfile do |file| eval( file.read, @context.get_binding ) end @context.print_summary end end open-build-service-2.9.4/docs/api/restility/script/000077500000000000000000000000001332555733200222675ustar00rootroot00000000000000open-build-service-2.9.4/docs/api/restility/script/destroy000077500000000000000000000005451332555733200237120ustar00rootroot00000000000000#!/usr/bin/env ruby APP_ROOT = File.join(File.dirname(__FILE__), '..') begin require 'rubigen' rescue LoadError require 'rubygems' require 'rubigen' end require 'rubigen/scripts/destroy' ARGV.shift if ['--help', '-h'].include?(ARGV[0]) RubiGen::Base.use_component_sources! [:rubygems, :newgem, :newgem_theme] RubiGen::Scripts::Destroy.new.run(ARGV) open-build-service-2.9.4/docs/api/restility/script/generate000077500000000000000000000005471332555733200240150ustar00rootroot00000000000000#!/usr/bin/env ruby APP_ROOT = File.join(File.dirname(__FILE__), '..') begin require 'rubigen' rescue LoadError require 'rubygems' require 'rubigen' end require 'rubigen/scripts/generate' ARGV.shift if ['--help', '-h'].include?(ARGV[0]) RubiGen::Base.use_component_sources! [:rubygems, :newgem, :newgem_theme] RubiGen::Scripts::Generate.new.run(ARGV) open-build-service-2.9.4/docs/api/restility/setup.rb000066400000000000000000001065021332555733200224540ustar00rootroot00000000000000# # setup.rb # # Copyright (c) 2000-2005 Minero Aoki # # This program is free software. # You can distribute/modify this program under the terms of # the GNU LGPL, Lesser General Public License version 2.1. # unless Enumerable.method_defined?(:map) # Ruby 1.4.6 module Enumerable alias map collect end end unless File.respond_to?(:read) # Ruby 1.6 def File.read(fname) open(fname) {|f| return f.read } end end unless Errno.const_defined?(:ENOTEMPTY) # Windows? module Errno class ENOTEMPTY # We do not raise this exception, implementation is not needed. end end end def File.binread(fname) open(fname, 'rb') {|f| return f.read } end # for corrupted Windows' stat(2) def File.dir?(path) File.directory?((path[-1,1] == '/') ? 
path : path + '/') end class ConfigTable include Enumerable def initialize(rbconfig) @rbconfig = rbconfig @items = [] @table = {} # options @install_prefix = nil @config_opt = nil @verbose = true @no_harm = false end attr_accessor :install_prefix attr_accessor :config_opt attr_writer :verbose def verbose? @verbose end attr_writer :no_harm def no_harm? @no_harm end def [](key) lookup(key).resolve(self) end def []=(key, val) lookup(key).set val end def names @items.map {|i| i.name } end def each(&block) @items.each(&block) end def key?(name) @table.key?(name) end def lookup(name) @table[name] or setup_rb_error "no such config item: #{name}" end def add(item) @items.push item @table[item.name] = item end def remove(name) item = lookup(name) @items.delete_if {|i| i.name == name } @table.delete_if {|name, i| i.name == name } item end def load_script(path, inst = nil) if File.file?(path) MetaConfigEnvironment.new(self, inst).instance_eval File.read(path), path end end def savefile '.config' end def load_savefile begin File.foreach(savefile()) do |line| k, v = *line.split(/=/, 2) self[k] = v.strip end rescue Errno::ENOENT setup_rb_error $!.message + "\n#{File.basename($0)} config first" end end def save @items.each {|i| i.value } File.open(savefile(), 'w') {|f| @items.each do |i| f.printf "%s=%s\n", i.name, i.value if i.value? and i.value end } end def load_standard_entries standard_entries(@rbconfig).each do |ent| add ent end end def standard_entries(rbconfig) c = rbconfig rubypath = File.join(c['bindir'], c['ruby_install_name'] + c['EXEEXT']) major = c['MAJOR'].to_i minor = c['MINOR'].to_i teeny = c['TEENY'].to_i version = "#{major}.#{minor}" # ruby ver. >= 1.4.4? newpath_p = ((major >= 2) or ((major == 1) and ((minor >= 5) or ((minor == 4) and (teeny >= 4))))) if c['rubylibdir'] # V > 1.6.3 libruby = "#{c['prefix']}/lib/ruby" librubyver = c['rubylibdir'] librubyverarch = c['archdir'] siteruby = c['sitedir'] siterubyver = c['sitelibdir'] siterubyverarch = c['sitearchdir'] elsif newpath_p # 1.4.4 <= V <= 1.6.3 libruby = "#{c['prefix']}/lib/ruby" librubyver = "#{c['prefix']}/lib/ruby/#{version}" librubyverarch = "#{c['prefix']}/lib/ruby/#{version}/#{c['arch']}" siteruby = c['sitedir'] siterubyver = "$siteruby/#{version}" siterubyverarch = "$siterubyver/#{c['arch']}" else # V < 1.4.4 libruby = "#{c['prefix']}/lib/ruby" librubyver = "#{c['prefix']}/lib/ruby/#{version}" librubyverarch = "#{c['prefix']}/lib/ruby/#{version}/#{c['arch']}" siteruby = "#{c['prefix']}/lib/ruby/#{version}/site_ruby" siterubyver = siteruby siterubyverarch = "$siterubyver/#{c['arch']}" end parameterize = lambda {|path| path.sub(/\A#{Regexp.quote(c['prefix'])}/, '$prefix') } if arg = c['configure_args'].split.detect {|arg| /--with-make-prog=/ =~ arg } makeprog = arg.sub(/'/, '').split(/=/, 2)[1] else makeprog = 'make' end [ ExecItem.new('installdirs', 'std/site/home', 'std: install under libruby; site: install under site_ruby; home: install under $HOME')\ {|val, table| case val when 'std' table['rbdir'] = '$librubyver' table['sodir'] = '$librubyverarch' when 'site' table['rbdir'] = '$siterubyver' table['sodir'] = '$siterubyverarch' when 'home' setup_rb_error '$HOME was not set' unless ENV['HOME'] table['prefix'] = ENV['HOME'] table['rbdir'] = '$libdir/ruby' table['sodir'] = '$libdir/ruby' end }, PathItem.new('prefix', 'path', c['prefix'], 'path prefix of target environment'), PathItem.new('bindir', 'path', parameterize.call(c['bindir']), 'the directory for commands'), PathItem.new('libdir', 'path', 
parameterize.call(c['libdir']), 'the directory for libraries'), PathItem.new('datadir', 'path', parameterize.call(c['datadir']), 'the directory for shared data'), PathItem.new('mandir', 'path', parameterize.call(c['mandir']), 'the directory for man pages'), PathItem.new('sysconfdir', 'path', parameterize.call(c['sysconfdir']), 'the directory for system configuration files'), PathItem.new('localstatedir', 'path', parameterize.call(c['localstatedir']), 'the directory for local state data'), PathItem.new('libruby', 'path', libruby, 'the directory for ruby libraries'), PathItem.new('librubyver', 'path', librubyver, 'the directory for standard ruby libraries'), PathItem.new('librubyverarch', 'path', librubyverarch, 'the directory for standard ruby extensions'), PathItem.new('siteruby', 'path', siteruby, 'the directory for version-independent aux ruby libraries'), PathItem.new('siterubyver', 'path', siterubyver, 'the directory for aux ruby libraries'), PathItem.new('siterubyverarch', 'path', siterubyverarch, 'the directory for aux ruby binaries'), PathItem.new('rbdir', 'path', '$siterubyver', 'the directory for ruby scripts'), PathItem.new('sodir', 'path', '$siterubyverarch', 'the directory for ruby extentions'), PathItem.new('rubypath', 'path', rubypath, 'the path to set to #! line'), ProgramItem.new('rubyprog', 'name', rubypath, 'the ruby program using for installation'), ProgramItem.new('makeprog', 'name', makeprog, 'the make program to compile ruby extentions'), SelectItem.new('shebang', 'all/ruby/never', 'ruby', 'shebang line (#!) editing mode'), BoolItem.new('without-ext', 'yes/no', 'no', 'does not compile/install ruby extentions') ] end private :standard_entries def load_multipackage_entries multipackage_entries().each do |ent| add ent end end def multipackage_entries [ PackageSelectionItem.new('with', 'name,name...', '', 'ALL', 'package names that you want to install'), PackageSelectionItem.new('without', 'name,name...', '', 'NONE', 'package names that you do not want to install') ] end private :multipackage_entries ALIASES = { 'std-ruby' => 'librubyver', 'stdruby' => 'librubyver', 'rubylibdir' => 'librubyver', 'archdir' => 'librubyverarch', 'site-ruby-common' => 'siteruby', # For backward compatibility 'site-ruby' => 'siterubyver', # For backward compatibility 'bin-dir' => 'bindir', 'bin-dir' => 'bindir', 'rb-dir' => 'rbdir', 'so-dir' => 'sodir', 'data-dir' => 'datadir', 'ruby-path' => 'rubypath', 'ruby-prog' => 'rubyprog', 'ruby' => 'rubyprog', 'make-prog' => 'makeprog', 'make' => 'makeprog' } def fixup ALIASES.each do |ali, name| @table[ali] = @table[name] end @items.freeze @table.freeze @options_re = /\A--(#{@table.keys.join('|')})(?:=(.*))?\z/ end def parse_opt(opt) m = @options_re.match(opt) or setup_rb_error "config: unknown option #{opt}" m.to_a[1,2] end def dllext @rbconfig['DLEXT'] end def value_config?(name) lookup(name).value? end class Item def initialize(name, template, default, desc) @name = name.freeze @template = template @value = default @default = default @description = desc end attr_reader :name attr_reader :description attr_accessor :default alias help_default default def help_opt "--#{@name}=#{@template}" end def value? 
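# Most configuration items carry a user-settable value; ExecItem below overrides this to return false.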
true end def value @value end def resolve(table) @value.gsub(%r<\$([^/]+)>) { table[$1] } end def set(val) @value = check(val) end private def check(val) setup_rb_error "config: --#{name} requires argument" unless val val end end class BoolItem < Item def config_type 'bool' end def help_opt "--#{@name}" end private def check(val) return 'yes' unless val case val when /\Ay(es)?\z/i, /\At(rue)?\z/i then 'yes' when /\An(o)?\z/i, /\Af(alse)\z/i then 'no' else setup_rb_error "config: --#{@name} accepts only yes/no for argument" end end end class PathItem < Item def config_type 'path' end private def check(path) setup_rb_error "config: --#{@name} requires argument" unless path path[0,1] == '$' ? path : File.expand_path(path) end end class ProgramItem < Item def config_type 'program' end end class SelectItem < Item def initialize(name, selection, default, desc) super @ok = selection.split('/') end def config_type 'select' end private def check(val) unless @ok.include?(val.strip) setup_rb_error "config: use --#{@name}=#{@template} (#{val})" end val.strip end end class ExecItem < Item def initialize(name, selection, desc, &block) super name, selection, nil, desc @ok = selection.split('/') @action = block end def config_type 'exec' end def value? false end def resolve(table) setup_rb_error "$#{name()} wrongly used as option value" end undef set def evaluate(val, table) v = val.strip.downcase unless @ok.include?(v) setup_rb_error "invalid option --#{@name}=#{val} (use #{@template})" end @action.call v, table end end class PackageSelectionItem < Item def initialize(name, template, default, help_default, desc) super name, template, default, desc @help_default = help_default end attr_reader :help_default def config_type 'package' end private def check(val) unless File.dir?("packages/#{val}") setup_rb_error "config: no such package: #{val}" end val end end class MetaConfigEnvironment def initialize(config, installer) @config = config @installer = installer end def config_names @config.names end def config?(name) @config.key?(name) end def bool_config?(name) @config.lookup(name).config_type == 'bool' end def path_config?(name) @config.lookup(name).config_type == 'path' end def value_config?(name) @config.lookup(name).config_type != 'exec' end def add_config(item) @config.add item end def add_bool_config(name, default, desc) @config.add BoolItem.new(name, 'yes/no', default ? 'yes' : 'no', desc) end def add_path_config(name, default, desc) @config.add PathItem.new(name, 'path', default, desc) end def set_config_default(name, default) @config.lookup(name).default = default end def remove_config(name) @config.remove(name) end # For only multipackage def packages raise '[setup.rb fatal] multi-package metaconfig API packages() called for single-package; contact application package vendor' unless @installer @installer.packages end # For only multipackage def declare_packages(list) raise '[setup.rb fatal] multi-package metaconfig API declare_packages() called for single-package; contact application package vendor' unless @installer @installer.packages = list end end end # class ConfigTable # This module requires: #verbose?, #no_harm? module FileOperations def mkdir_p(dirname, prefix = nil) dirname = prefix + File.expand_path(dirname) if prefix $stderr.puts "mkdir -p #{dirname}" if verbose? return if no_harm? # Does not check '/', it's too abnormal. 
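# Split the absolute path before every '/' (the zero-width lookahead keeps the separator), so each parent directory can be created in turn.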
dirs = File.expand_path(dirname).split(%r<(?=/)>) if /\A[a-z]:\z/i =~ dirs[0] disk = dirs.shift dirs[0] = disk + dirs[0] end dirs.each_index do |idx| path = dirs[0..idx].join('') Dir.mkdir path unless File.dir?(path) end end def rm_f(path) $stderr.puts "rm -f #{path}" if verbose? return if no_harm? force_remove_file path end def rm_rf(path) $stderr.puts "rm -rf #{path}" if verbose? return if no_harm? remove_tree path end def remove_tree(path) if File.symlink?(path) remove_file path elsif File.dir?(path) remove_tree0 path else force_remove_file path end end def remove_tree0(path) Dir.foreach(path) do |ent| next if ent == '.' next if ent == '..' entpath = "#{path}/#{ent}" if File.symlink?(entpath) remove_file entpath elsif File.dir?(entpath) remove_tree0 entpath else force_remove_file entpath end end begin Dir.rmdir path rescue Errno::ENOTEMPTY # directory may not be empty end end def move_file(src, dest) force_remove_file dest begin File.rename src, dest rescue File.open(dest, 'wb') {|f| f.write File.binread(src) } File.chmod File.stat(src).mode, dest File.unlink src end end def force_remove_file(path) begin remove_file path rescue end end def remove_file(path) File.chmod 0777, path File.unlink path end def install(from, dest, mode, prefix = nil) $stderr.puts "install #{from} #{dest}" if verbose? return if no_harm? realdest = prefix ? prefix + File.expand_path(dest) : dest realdest = File.join(realdest, File.basename(from)) if File.dir?(realdest) str = File.binread(from) if diff?(str, realdest) verbose_off { rm_f realdest if File.exist?(realdest) } File.open(realdest, 'wb') {|f| f.write str } File.chmod mode, realdest File.open("#{objdir_root()}/InstalledFiles", 'a') {|f| if prefix f.puts realdest.sub(prefix, '') else f.puts realdest end } end end def diff?(new_content, path) return true unless File.exist?(path) new_content != File.binread(path) end def command(*args) $stderr.puts args.join(' ') if verbose? system(*args) or raise RuntimeError, "system(#{args.map{|a| a.inspect }.join(' ')}) failed" end def ruby(*args) command config('rubyprog'), *args end def make(task = nil) command(*[config('makeprog'), task].compact) end def extdir?(dir) File.exist?("#{dir}/MANIFEST") or File.exist?("#{dir}/extconf.rb") end def files_of(dir) Dir.open(dir) {|d| return d.select {|ent| File.file?("#{dir}/#{ent}") } } end DIR_REJECT = %w( . .. CVS SCCS RCS CVS.adm .svn ) def directories_of(dir) Dir.open(dir) {|d| return d.select {|ent| File.dir?("#{dir}/#{ent}") } - DIR_REJECT } end end # This module requires: #srcdir_root, #objdir_root, #relpath module HookScriptAPI def get_config(key) @config[key] end alias config get_config # obsolete: use metaconfig to change configuration def set_config(key, val) @config[key] = val end # # srcdir/objdir (works only in the package directory) # def curr_srcdir "#{srcdir_root()}/#{relpath()}" end def curr_objdir "#{objdir_root()}/#{relpath()}" end def srcfile(path) "#{curr_srcdir()}/#{path}" end def srcexist?(path) File.exist?(srcfile(path)) end def srcdirectory?(path) File.dir?(srcfile(path)) end def srcfile?(path) File.file?(srcfile(path)) end def srcentries(path = '.') Dir.open("#{curr_srcdir()}/#{path}") {|d| return d.to_a - %w(. ..) 
} end def srcfiles(path = '.') srcentries(path).select {|fname| File.file?(File.join(curr_srcdir(), path, fname)) } end def srcdirectories(path = '.') srcentries(path).select {|fname| File.dir?(File.join(curr_srcdir(), path, fname)) } end end class ToplevelInstaller Version = '3.4.1' Copyright = 'Copyright (c) 2000-2005 Minero Aoki' TASKS = [ [ 'all', 'do config, setup, then install' ], [ 'config', 'saves your configurations' ], [ 'show', 'shows current configuration' ], [ 'setup', 'compiles ruby extentions and others' ], [ 'install', 'installs files' ], [ 'test', 'run all tests in test/' ], [ 'clean', "does `make clean' for each extention" ], [ 'distclean',"does `make distclean' for each extention" ] ] def ToplevelInstaller.invoke config = ConfigTable.new(load_rbconfig()) config.load_standard_entries config.load_multipackage_entries if multipackage? config.fixup klass = (multipackage?() ? ToplevelInstallerMulti : ToplevelInstaller) klass.new(File.dirname($0), config).invoke end def ToplevelInstaller.multipackage? File.dir?(File.dirname($0) + '/packages') end def ToplevelInstaller.load_rbconfig if arg = ARGV.detect {|arg| /\A--rbconfig=/ =~ arg } ARGV.delete(arg) load File.expand_path(arg.split(/=/, 2)[1]) $".push 'rbconfig.rb' else require 'rbconfig' end ::Config::CONFIG end def initialize(ardir_root, config) @ardir = File.expand_path(ardir_root) @config = config # cache @valid_task_re = nil end def config(key) @config[key] end def inspect "#<#{self.class} #{__id__()}>" end def invoke run_metaconfigs case task = parsearg_global() when nil, 'all' parsearg_config init_installers exec_config exec_setup exec_install else case task when 'config', 'test' ; when 'clean', 'distclean' @config.load_savefile if File.exist?(@config.savefile) else @config.load_savefile end __send__ "parsearg_#{task}" init_installers __send__ "exec_#{task}" end end def run_metaconfigs @config.load_script "#{@ardir}/metaconfig" end def init_installers @installer = Installer.new(@config, @ardir, File.expand_path('.')) end # # Hook Script API bases # def srcdir_root @ardir end def objdir_root '.' end def relpath '.' end # # Option Parsing # def parsearg_global while arg = ARGV.shift case arg when /\A\w+\z/ setup_rb_error "invalid task: #{arg}" unless valid_task?(arg) return arg when '-q', '--quiet' @config.verbose = false when '--verbose' @config.verbose = true when '--help' print_usage $stdout exit 0 when '--version' puts "#{File.basename($0)} version #{Version}" exit 0 when '--copyright' puts Copyright exit 0 else setup_rb_error "unknown global option '#{arg}'" end end nil end def valid_task?(t) valid_task_re() =~ t end def valid_task_re @valid_task_re ||= /\A(?:#{TASKS.map {|task,desc| task }.join('|')})\z/ end def parsearg_no_options unless ARGV.empty? 
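# Recover the task name from the calling method's name (parsearg_<task>) via the backtrace.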
task = caller(0).first.slice(%r<`parsearg_(\w+)'>, 1) setup_rb_error "#{task}: unknown options: #{ARGV.join(' ')}" end alias parsearg_show parsearg_no_options alias parsearg_setup parsearg_no_options alias parsearg_test parsearg_no_options alias parsearg_clean parsearg_no_options alias parsearg_distclean parsearg_no_options def parsearg_config evalopt = [] set = [] @config.config_opt = [] while i = ARGV.shift if /\A--?\z/ =~ i @config.config_opt = ARGV.dup break end name, value = *@config.parse_opt(i) if @config.value_config?(name) @config[name] = value else evalopt.push [name, value] end set.push name end evalopt.each do |name, value| @config.lookup(name).evaluate value, @config end # Check if configuration is valid set.each do |n| @config[n] if @config.value_config?(n) end end def parsearg_install @config.no_harm = false @config.install_prefix = '' while a = ARGV.shift case a when '--no-harm' @config.no_harm = true when /\A--prefix=/ path = a.split(/=/, 2)[1] path = File.expand_path(path) unless path[0,1] == '/' @config.install_prefix = path else setup_rb_error "install: unknown option #{a}" end end end def print_usage(out) out.puts 'Typical Installation Procedure:' out.puts " $ ruby #{File.basename $0} config" out.puts " $ ruby #{File.basename $0} setup" out.puts " # ruby #{File.basename $0} install (may require root privilege)" out.puts out.puts 'Detailed Usage:' out.puts " ruby #{File.basename $0} <global option>" out.puts " ruby #{File.basename $0} [<global options>] <task> [<task options>]" fmt = " %-24s %s\n" out.puts out.puts 'Global options:' out.printf fmt, '-q,--quiet', 'suppress message outputs' out.printf fmt, ' --verbose', 'output messages verbosely' out.printf fmt, ' --help', 'print this message' out.printf fmt, ' --version', 'print version and quit' out.printf fmt, ' --copyright', 'print copyright and quit' out.puts out.puts 'Tasks:' TASKS.each do |name, desc| out.printf fmt, name, desc end fmt = " %-24s %s [%s]\n" out.puts out.puts 'Options for CONFIG or ALL:' @config.each do |item| out.printf fmt, item.help_opt, item.description, item.help_default end out.printf fmt, '--rbconfig=path', 'rbconfig.rb to load',"running ruby's" out.puts out.puts 'Options for INSTALL:' out.printf fmt, '--no-harm', 'only display what to do if given', 'off' out.printf fmt, '--prefix=path', 'install path prefix', '' out.puts end # # Task Handlers # def exec_config @installer.exec_config @config.save # must be final end def exec_setup @installer.exec_setup end def exec_install @installer.exec_install end def exec_test @installer.exec_test end def exec_show @config.each do |i| printf "%-20s %s\n", i.name, i.value if i.value? end end def exec_clean @installer.exec_clean end def exec_distclean @installer.exec_distclean end end # class ToplevelInstaller class ToplevelInstallerMulti < ToplevelInstaller include FileOperations def initialize(ardir_root, config) super @packages = directories_of("#{@ardir}/packages") raise 'no package exists' if @packages.empty? @root_installer = Installer.new(@config, @ardir, File.expand_path('.')) end def run_metaconfigs @config.load_script "#{@ardir}/metaconfig", self @packages.each do |name| @config.load_script "#{@ardir}/packages/#{name}/metaconfig" end end attr_reader :packages def packages=(list) raise 'package list is empty' if list.empty? 
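# Refuse package names that have no corresponding packages/<name> directory.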
list.each do |name| raise "directory packages/#{name} does not exist"\ unless File.dir?("#{@ardir}/packages/#{name}") end @packages = list end def init_installers @installers = {} @packages.each do |pack| @installers[pack] = Installer.new(@config, "#{@ardir}/packages/#{pack}", "packages/#{pack}") end with = extract_selection(config('with')) without = extract_selection(config('without')) @selected = @installers.keys.select {|name| (with.empty? or with.include?(name)) \ and not without.include?(name) } end def extract_selection(list) a = list.split(/,/) a.each do |name| setup_rb_error "no such package: #{name}" unless @installers.key?(name) end a end def print_usage(f) super f.puts 'Included packages:' f.puts ' ' + @packages.sort.join(' ') f.puts end # # Task Handlers # def exec_config run_hook 'pre-config' each_selected_installers {|inst| inst.exec_config } run_hook 'post-config' @config.save # must be final end def exec_setup run_hook 'pre-setup' each_selected_installers {|inst| inst.exec_setup } run_hook 'post-setup' end def exec_install run_hook 'pre-install' each_selected_installers {|inst| inst.exec_install } run_hook 'post-install' end def exec_test run_hook 'pre-test' each_selected_installers {|inst| inst.exec_test } run_hook 'post-test' end def exec_clean rm_f @config.savefile run_hook 'pre-clean' each_selected_installers {|inst| inst.exec_clean } run_hook 'post-clean' end def exec_distclean rm_f @config.savefile run_hook 'pre-distclean' each_selected_installers {|inst| inst.exec_distclean } run_hook 'post-distclean' end # # lib # def each_selected_installers Dir.mkdir 'packages' unless File.dir?('packages') @selected.each do |pack| $stderr.puts "Processing the package `#{pack}' ..." if verbose? Dir.mkdir "packages/#{pack}" unless File.dir?("packages/#{pack}") Dir.chdir "packages/#{pack}" yield @installers[pack] Dir.chdir '../..' end end def run_hook(id) @root_installer.run_hook id end # module FileOperations requires this def verbose? @config.verbose? end # module FileOperations requires this def no_harm? @config.no_harm? end end # class ToplevelInstallerMulti class Installer FILETYPES = %w( bin lib ext data conf man ) include FileOperations include HookScriptAPI def initialize(config, srcroot, objroot) @config = config @srcdir = File.expand_path(srcroot) @objdir = File.expand_path(objroot) @currdir = '.' end def inspect "#<#{self.class} #{File.basename(@srcdir)}>" end def noop(rel) end # # Hook Script API base methods # def srcdir_root @srcdir end def objdir_root @objdir end def relpath @currdir end # # Config Access # # module FileOperations requires this def verbose? @config.verbose? end # module FileOperations requires this def no_harm? @config.no_harm? end def verbose_off begin save, @config.verbose = @config.verbose?, false yield ensure @config.verbose = save end end # # TASK config # def exec_config exec_task_traverse 'config' end alias config_dir_bin noop alias config_dir_lib noop def config_dir_ext(rel) extconf if extdir?(curr_srcdir()) end alias config_dir_data noop alias config_dir_conf noop alias config_dir_man noop def extconf ruby "#{curr_srcdir()}/extconf.rb", *@config.config_opt end # # TASK setup # def exec_setup exec_task_traverse 'setup' end def setup_dir_bin(rel) files_of(curr_srcdir()).each do |fname| update_shebang_line "#{curr_srcdir()}/#{fname}" end end alias setup_dir_lib noop def setup_dir_ext(rel) make if extdir?(curr_srcdir()) end alias setup_dir_data noop alias setup_dir_conf noop alias setup_dir_man noop def update_shebang_line(path) return if no_harm? 
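# Honor the configured shebang mode; 'never' disables shebang rewriting entirely.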
  def update_shebang_line(path)
    return if no_harm?
    return if config('shebang') == 'never'
    old = Shebang.load(path)
    if old
      $stderr.puts "warning: #{path}: Shebang line includes too many args.  It is not portable and your program may not work." if old.args.size > 1
      new = new_shebang(old)
      return if new.to_s == old.to_s
    else
      return unless config('shebang') == 'all'
      new = Shebang.new(config('rubypath'))
    end
    $stderr.puts "updating shebang: #{File.basename(path)}" if verbose?
    open_atomic_writer(path) {|output|
      File.open(path, 'rb') {|f|
        f.gets if old   # discard
        output.puts new.to_s
        output.print f.read
      }
    }
  end

  def new_shebang(old)
    if /\Aruby/ =~ File.basename(old.cmd)
      Shebang.new(config('rubypath'), old.args)
    elsif File.basename(old.cmd) == 'env' and old.args.first == 'ruby'
      Shebang.new(config('rubypath'), old.args[1..-1])
    else
      return old unless config('shebang') == 'all'
      Shebang.new(config('rubypath'))
    end
  end

  def open_atomic_writer(path, &block)
    tmpfile = File.basename(path) + '.tmp'
    begin
      File.open(tmpfile, 'wb', &block)
      File.rename tmpfile, File.basename(path)
    ensure
      File.unlink tmpfile if File.exist?(tmpfile)
    end
  end

  class Shebang
    def Shebang.load(path)
      line = nil
      File.open(path) {|f|
        line = f.gets
      }
      return nil unless /\A#!/ =~ line
      parse(line)
    end

    def Shebang.parse(line)
      cmd, *args = *line.strip.sub(/\A\#!/, '').split(' ')
      new(cmd, args)
    end

    def initialize(cmd, args = [])
      @cmd = cmd
      @args = args
    end

    attr_reader :cmd
    attr_reader :args

    def to_s
      "#! #{@cmd}" + (@args.empty? ? '' : " #{@args.join(' ')}")
    end
  end

  #
  # TASK install
  #

  def exec_install
    rm_f 'InstalledFiles'
    exec_task_traverse 'install'
  end

  def install_dir_bin(rel)
    install_files targetfiles(), "#{config('bindir')}/#{rel}", 0755
  end

  def install_dir_lib(rel)
    install_files libfiles(), "#{config('rbdir')}/#{rel}", 0644
  end

  def install_dir_ext(rel)
    return unless extdir?(curr_srcdir())
    install_files rubyextentions('.'),
                  "#{config('sodir')}/#{File.dirname(rel)}",
                  0555
  end

  def install_dir_data(rel)
    install_files targetfiles(), "#{config('datadir')}/#{rel}", 0644
  end

  def install_dir_conf(rel)
    # FIXME: should not remove current config files
    # (rename previous file to .old/.org)
    install_files targetfiles(), "#{config('sysconfdir')}/#{rel}", 0644
  end

  def install_dir_man(rel)
    install_files targetfiles(), "#{config('mandir')}/#{rel}", 0644
  end

  def install_files(list, dest, mode)
    mkdir_p dest, @config.install_prefix
    list.each do |fname|
      install fname, dest, mode, @config.install_prefix
    end
  end

  def libfiles
    glob_reject(%w(*.y *.output), targetfiles())
  end

  def rubyextentions(dir)
    ents = glob_select("*.#{@config.dllext}", targetfiles())
    if ents.empty?
      setup_rb_error "no ruby extention exists: 'ruby #{$0} setup' first"
    end
    ents
  end

  def targetfiles
    mapdir(existfiles() - hookfiles())
  end

  def mapdir(ents)
    ents.map {|ent|
      if File.exist?(ent)
      then ent                          # objdir
      else "#{curr_srcdir()}/#{ent}"    # srcdir
      end
    }
  end

  # picked up many entries from cvs-1.11.1/src/ignore.c
  JUNK_FILES = %w(
    core RCSLOG tags TAGS .make.state
    .nse_depinfo #* .#* cvslog.* ,* .del-* *.olb
    *~ *.old *.bak *.BAK *.orig *.rej _$* *$
    *.org *.in .*
  )

  def existfiles
    glob_reject(JUNK_FILES, (files_of(curr_srcdir()) | files_of('.')))
  end

  def hookfiles
    %w( pre-%s post-%s pre-%s.rb post-%s.rb ).map {|fmt|
      %w( config setup install clean ).map {|t| sprintf(fmt, t) }
    }.flatten
  end

  def glob_select(pat, ents)
    re = globs2re([pat])
    ents.select {|ent| re =~ ent }
  end

  def glob_reject(pats, ents)
    re = globs2re(pats)
    ents.reject {|ent| re =~ ent }
  end

  GLOB2REGEX = {
    '.' => '\.',
    '$' => '\$',
    '#' => '\#',
    '*' => '.*'
  }

  def globs2re(pats)
    /\A(?:#{
        pats.map {|pat| pat.gsub(/[\.\$\#\*]/) {|ch| GLOB2REGEX[ch] } }.join('|')
    })\z/
  end

  #
  # TASK test
  #

  TESTDIR = 'test'

  def exec_test
    unless File.directory?('test')
      $stderr.puts 'no test in this package' if verbose?
      return
    end
    $stderr.puts 'Running tests...' if verbose?
    begin
      require 'test/unit'
    rescue LoadError
      setup_rb_error 'test/unit cannot be loaded.  You need Ruby 1.8 or later to invoke this task.'
    end
    runner = Test::Unit::AutoRunner.new(true)
    runner.to_run << TESTDIR
    runner.run
  end

  #
  # TASK clean
  #

  def exec_clean
    exec_task_traverse 'clean'
    rm_f @config.savefile
    rm_f 'InstalledFiles'
  end

  alias clean_dir_bin noop
  alias clean_dir_lib noop
  alias clean_dir_data noop
  alias clean_dir_conf noop
  alias clean_dir_man noop

  def clean_dir_ext(rel)
    return unless extdir?(curr_srcdir())
    make 'clean' if File.file?('Makefile')
  end

  #
  # TASK distclean
  #

  def exec_distclean
    exec_task_traverse 'distclean'
    rm_f @config.savefile
    rm_f 'InstalledFiles'
  end

  alias distclean_dir_bin noop
  alias distclean_dir_lib noop

  def distclean_dir_ext(rel)
    return unless extdir?(curr_srcdir())
    make 'distclean' if File.file?('Makefile')
  end

  alias distclean_dir_data noop
  alias distclean_dir_conf noop
  alias distclean_dir_man noop

  #
  # Traversing
  #

  def exec_task_traverse(task)
    run_hook "pre-#{task}"
    FILETYPES.each do |type|
      if type == 'ext' and config('without-ext') == 'yes'
        $stderr.puts 'skipping ext/* by user option' if verbose?
        next
      end
      traverse task, type, "#{task}_dir_#{type}"
    end
    run_hook "post-#{task}"
  end

  def traverse(task, rel, mid)
    dive_into(rel) {
      run_hook "pre-#{task}"
      __send__ mid, rel.sub(%r[\A.*?(?:/|\z)], '')
      directories_of(curr_srcdir()).each do |d|
        traverse task, "#{rel}/#{d}", mid
      end
      run_hook "post-#{task}"
    }
  end

  def dive_into(rel)
    return unless File.dir?("#{@srcdir}/#{rel}")

    dir = File.basename(rel)
    Dir.mkdir dir unless File.dir?(dir)
    prevdir = Dir.pwd
    Dir.chdir dir
    $stderr.puts '---> ' + rel if verbose?
    @currdir = rel
    yield
    Dir.chdir prevdir
    $stderr.puts '<--- ' + rel if verbose?
    @currdir = File.dirname(rel)
  end

  def run_hook(id)
    path = [ "#{curr_srcdir()}/#{id}",
             "#{curr_srcdir()}/#{id}.rb" ].detect {|cand| File.file?(cand) }
    return unless path
    begin
      instance_eval File.read(path), path, 1
    rescue
      raise if $DEBUG
      setup_rb_error "hook #{path} failed:\n" + $!.message
    end
  end
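  # run_hook above evaluates an optional "pre-<task>"/"post-<task>" file (or
  # its .rb variant) found in the current source directory, in the context of
  # this installer, so a hook can use the Hook Script API methods such as
  # srcdir_root and objdir_root. A hypothetical packages/foo/post-install.rb
  # hook could therefore contain, for example:
  #
  #   File.open("#{objdir_root}/INSTALLED", 'w') {|f| f.puts 'done' }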
end   # class Installer


class SetupError < StandardError; end

def setup_rb_error(msg)
  raise SetupError, msg
end

if $0 == __FILE__
  begin
    ToplevelInstaller.invoke
  rescue SetupError
    raise if $DEBUG
    $stderr.puts $!.message
    $stderr.puts "Try 'ruby #{$0} --help' for detailed usage."
    exit 1
  end
end
open-build-service-2.9.4/docs/api/restility/tasks/000077500000000000000000000000001332555733200221105ustar00rootroot00000000000000open-build-service-2.9.4/docs/api/restility/tasks/deployment.rake000066400000000000000000000017401332555733200251360ustar00rootroot00000000000000desc 'Release the website and new gem version'
task :deploy => [:check_version, :website, :release] do
  puts "Remember to create SVN tag:"
  puts "svn copy svn+ssh://#{rubyforge_username}@rubyforge.org/var/svn/#{PATH}/trunk " +
       "svn+ssh://#{rubyforge_username}@rubyforge.org/var/svn/#{PATH}/tags/REL-#{VERS} "
  puts "Suggested comment:"
  puts "Tagging release #{CHANGES}"
end

desc 'Runs tasks website_generate and install_gem as a local deployment of the gem'
task :local_deploy => [:website_generate, :install_gem]

task :check_version do
  unless ENV['VERSION']
    puts 'Must pass a VERSION=x.y.z release version'
    exit
  end
  unless ENV['VERSION'] == VERS
    puts "Please update your version.rb to match the release version, currently #{VERS}"
    exit
  end
end

desc 'Install the package as a gem, without generating documentation (ri/rdoc)'
task :install_gem_no_doc => [:clean, :package] do
  sh "#{'sudo ' unless Hoe::WINDOZE }gem install pkg/*.gem --no-rdoc --no-ri"
end
open-build-service-2.9.4/docs/api/restility/tasks/environment.rake000066400000000000000000000001731332555733200253210ustar00rootroot00000000000000task :ruby_env do
  RUBY_APP = if RUBY_PLATFORM =~ /java/
               "jruby"
             else
               "ruby"
             end unless defined? RUBY_APP
end
open-build-service-2.9.4/docs/api/restility/tasks/website.rake000066400000000000000000000002611332555733200244150ustar00rootroot00000000000000# stubs for the website generation
# To install the website framework:
#   script/generate website

task :website_generate
task :website_upload
task :website => :publish_docs
open-build-service-2.9.4/docs/api/restility/test/000077500000000000000000000000001332555733200217425ustar00rootroot00000000000000open-build-service-2.9.4/docs/api/restility/test/test_helper.rb000066400000000000000000000001111332555733200245760ustar00rootroot00000000000000require 'test/unit'
require File.dirname(__FILE__) + '/../lib/restility'
open-build-service-2.9.4/docs/api/restility/test/test_restility.rb000066400000000000000000000002531332555733200253560ustar00rootroot00000000000000require File.dirname(__FILE__) + '/test_helper.rb'

class TestRestility < Test::Unit::TestCase
  def setup
  end

  def test_truth
    assert true
  end
end
open-build-service-2.9.4/docs/api/restility/validate_xml.rb000077500000000000000000000002041332555733200237600ustar00rootroot00000000000000#!/usr/bin/ruby

Dir.new( "." ).each do |e|
  if ( e =~ /(.*)\.xml$/ )
    system "xmllint -noout -schema #{$1}.xsd #{e}"
  end
end
open-build-service-2.9.4/docs/api/validate_xml.py000077500000000000000000000046541332555733200217700ustar00rootroot00000000000000#!/usr/bin/python
#
# Copyright (c) 2010, Sascha Peilicke , Novell Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program (see the file COPYING); if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA

import os, sys
from subprocess import call


def validate_schema(arg, dirname, filenames):
    """Validates XML files in a directory against their provided schema definition.

    Calls 'xmllint' to do the validation when a supported schema definition
    file is found. The supported schema definitions are RelaxNG, Schematron
    and XML Schema.
    """
    for filename in filenames:
        if filename.endswith('.xml'):                   # check for XML files
            relname = os.path.join(dirname, filename)   # relative filename from working directory
            basename = relname.rsplit('.', 1)[0]        # split off the file ending (aka '.xml')
            if os.path.exists(basename + '.xsd'):       # has an XML Schema file?
                call("xmllint --noout --schema {0} {1}".format(basename + '.xsd', relname).split(' '))
            elif os.path.exists(basename + '.rng'):     # has a RelaxNG schema file?
                call("xmllint --noout --relaxng {0} {1}".format(basename + '.rng', relname).split(' '))
            elif os.path.exists(basename + '.sch'):     # has a Schematron schema file?
                call("xmllint --noout --schematron {0} {1}".format(basename + '.sch', relname).split(' '))
            else:                                       # has none unfortunately
                print("no schema to validate {0}".format(relname))


if __name__ == "__main__":  # we're called directly
    if len(sys.argv) != 2 or not os.path.isdir(sys.argv[1]):
        print "please provide a directory with XML files to validate!"
        sys.exit(1)
    os.path.walk(sys.argv[1], validate_schema, None)    # walk all files in the provided directory recursively
open-build-service-2.9.4/docs/daii.txt000066400000000000000000000042521332555733200176340ustar00rootroot00000000000000Directory App Integration Interface
===================================

To attract projects to build in the Build Service we implement an interface
that returns paths to binary packages for one project name and a list of
package names.

The use case is to invite application directory pages such as
http://kde-apps.org to offer their users the following service: if the user
builds in the OBS, the directory app takes an OBS project and package name.
On the fly, the directory app queries the integration interface of the OBS
with these two parameters and gets a list of the available binary packages
for the package, see the example structure below. The directory app can then
render a nice download box for its users to pick rpms from the OBS for their
system.

Example communication:

  Input: project, package names

  Result-XML File, carrying the following content:

    http://download.opensuse.org/repositories/home:/kfreitag/kraft.ymp

    openSUSE 11.0
      icon: http://opensuse.org/static/os11logo16.png
      url:  http://www.opensuse.org
      description: Kraft is software for people...
      package: http://download.opensuse.org/repositories/home:/kfreitag/openSUSE_11.0/i586/kraft-0.25-6.4.i586.rpm
      package: Trmltools convert rml to pdf
               http://download.opensuse.org/repositories/home:/kfreitag/Fedora9/noarch/trmltools-1.1-5.4.noarch.rpm

    Fedora 9
      icon: http://opensuse.org/static/fed9logo16.png
      url:  http://www.fedoraproject.org
      description: Kraft is software for people...
      package: http://download.opensuse.org/repositories/home:/kfreitag/fedora9/i586/kraft-0.25-6.4.i586.rpm
      package: Trmltools convert rml to pdf
               http://download.opensuse.org/repositories/home:/kfreitag/fedora9/i586/trmltools-1.1-5.4.noarch.rpm
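A directory app could consume such an interface with only a few lines of code.
The sketch below is illustrative only: the endpoint URL and the element names
it selects are assumptions of this example, since the draft above does not fix
them.

    # Hypothetical client for the integration interface sketched above.
    # The endpoint path and the XPath expression are assumptions of this
    # example, not part of the interface definition.
    require 'net/http'
    require 'rexml/document'

    def binary_package_urls(project, packages)
      query = URI.encode_www_form(project: project, packages: packages.join(','))
      body  = Net::HTTP.get(URI("https://api.example.org/integration?#{query}"))
      doc   = REXML::Document.new(body)
      # collect one download URL per listed package
      REXML::XPath.match(doc, '//package/url').map(&:text)
    end

The interface itself only has to return the package URLs; rendering the
download box stays entirely on the directory app's side.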
open-build-service-2.9.4/docs/dev/000077500000000000000000000000001332555733200167405ustar00rootroot00000000000000open-build-service-2.9.4/docs/dev/GUIDE.md000066400000000000000000000021471332555733200201230ustar00rootroot00000000000000# Development Style Guide

This development guide tries to show you how we style the code of OBS.

## Models

All the **ActiveRecord::Base** models should use the same structure so they are
easy to follow for everyone. We have overwritten the template for models, so you
can just use Rails::Generators like this:

```
rails generate model Dog
```

For a better comprehension, [here](model_template_example.rb) is an example with
code in its place.

## Scaffold Controllers

All the controllers should use the same structure so they are easy to follow for
everyone. We have overwritten the template for controllers too, so you can just
use Rails::Generators like this:

```
rails generate scaffold_controller Webui::Dog
```

Keep in mind that namespaced controllers that use non-namespaced models should
be created as follows:

```
rails generate scaffold_controller Webui::Dog --model-name=Dog
```

Also keep in mind that the template is based on **Pundit policies**, which you
will have to create yourself.

For a better comprehension, [here](controller_template_example.rb) is an example
with code in its place.
open-build-service-2.9.4/docs/dev/attrib_domain_model.png000066400000000000000000002504571332555733200234550ustar00rootroot00000000000000[binary PNG data omitted: attribute domain model diagram]
open-build-service-2.9.4/docs/dev/cloud-diagram.png000066400000000000000000000224041332555733200221600ustar00rootroot00000000000000[binary PNG data omitted: cloud diagram]
open-build-service-2.9.4/docs/dev/controller_template_example.rb000066400000000000000000000037331332555733200250630ustar00rootroot00000000000000# Controller to manage dogs
class Webui::DogsController < ApplicationController
  #### Includes and extends
  include AnimalControl

  #### Constants
  BASIC_DOG_NAMES = ['Tobby', 'Thor', 'Rambo', 'Dog', 'Blacky'].freeze

  #### Self config

  #### Callbacks macros: before_action, after_action, etc.
  before_action :set_dog, only: [:show, :edit, :update, :destroy]

  # Pundit authorization policies control
  after_action :verify_authorized, except: [:index, :blacks]
  after_action :verify_policy_scoped, only: [:index, :blacks]

  #### CRUD actions

  # GET /dogs
  def index
    @dogs = policy_scope(Dog)
  end

  # GET /dogs/1
  def show
    if @dog.present?
      authorize @dog
    else
      skip_authorization
    end
  end

  # GET /dogs/new
  def new
    @dog = Dog.new
    authorize @dog
  end

  # GET /dogs/1/edit
  def edit
    authorize @dog
  end

  # POST /dogs
  def create
    @dog = Dog.new(dog_params)
    authorize @dog

    if @dog.save
      redirect_to @dog, notice: 'Dog was successfully created.'
    else
      render :new
    end
  end

  # PATCH/PUT /dogs/1
  def update
    authorize @dog

    if @dog.update(dog_params)
      redirect_to @dog, notice: 'Dog was successfully updated.'
    else
      render :edit
    end
  end

  # DELETE /dogs/1
  def destroy
    authorize @dog

    @dog.destroy
    redirect_to dogs_url, notice: 'Dog was successfully destroyed.'
  end

  #### Non CRUD actions

  # List all the black dogs
  # GET /dogs/blacks
  def blacks
    @dogs = policy_scope(Dog).blacks
    call_them(@dogs)

    render :index
  end

  #### Non-action methods
  # Use hide_action if they are not private
  def call_them(dogs = [])
    say('Hey!')
    dogs.each(&:bark)
  end
  hide_action :call_them

  private

  # Use callbacks to share common setup or constraints between actions.
  def set_dog
    @dog = Dog.find(params[:id])
  end

  # Only allow a trusted parameter "white list" through.
  def dog_params
    params.require(:dog).permit(:name, :color)
  end
end
open-build-service-2.9.4/docs/dev/model_template_example.rb000066400000000000000000000042341332555733200237760ustar00rootroot00000000000000# This is a model class to represent dogs and is an example of how they have to
# be structured for a better comprehension
class Dog < ApplicationRecord
  #### Includes and extends
  include AnimalSystems
  include ActiveModel::AttributeMethods

  #### Constants
  NUMBER_OF_LEGS = 4
  NUMBER_OF_QUEUES = 1
  NUMBER_OF_EYES = 2
  POSSIBLE_COLORS = ['white', 'black', 'brown', 'vanilla', 'chocolate', 'dotted'].freeze

  #### Self config
  self.table_name = 'OBS_dogs'

  #### Attributes
  attr_accessor :number_of_barks
  attribute_method_prefix 'reset_'
  alias_method :go_home, :save
  alias_method :go_home!, :save!

  #### Associations macros (Belongs to, Has one, Has many)
  belongs_to :owner, class_name: 'Person'
  belongs_to :herd
  belongs_to :house

  has_one :prefered_person, class_name: 'Person'

  has_many :places_to_pee, class_name: 'Place'
  has_many :places_to_sleep, through: :house

  #### Callbacks macros: before_save, after_save, etc.
  before_destroy :bark
  after_destroy :cry

  #### Scopes (first the default_scope macro, if used)
  default_scope { where(alive: true) }

  scope :blacks, -> { where(color: 'black') }
  scope :deads, -> { rewhere(alive: false) }

  #### Validations macros
  validates :name, :color, presence: true

  #### Class methods using self. (public and then private)
  #### To define class methods as private use private_class_method
  def self.born!(attributes)
    say("It's alive!")
    dog = new(attributes)
    dog.alive = true
    dog.save!
    dog.cry
    dog
  end

  def self.killall_by(attributes = {})
    say('Die!')
    where(attributes).each(&:kill)
  end

  def self.call_all
    say('Fiuuiuuuu!')
    all.each(&:bark)
  end

  #### private

  def self.say(string)
    puts "[Dog's Master] >> #{string}"
  end
  private_class_method :say

  #### Instance methods (public and then protected/private)
  def initialize(attributes = {})
    super
    @number_of_barks = 0
  end

  def bark
    say('Guau!')
    @number_of_barks += 1
  end

  def cry
    say('Iiiiii Iiii Iiiii!!')
  end

  protected

  def say(string)
    puts "[#{name}] >> #{string}"
  end

  private

  def reset_attribute(attribute)
    send("#{attribute}=", 0)
  end
end
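The scaffold controller template above authorizes every action through Pundit,
and as GUIDE.md notes, the matching policy class is not generated for you. A
minimal policy might look like this; it is a sketch assuming the usual
ApplicationPolicy base class that `rails generate pundit:install` creates, and
the rules themselves are only examples:

    # app/policies/dog_policy.rb (illustrative example, not a repository file)
    class DogPolicy < ApplicationPolicy
      # policy_scope(Dog) in the controller resolves through this class
      class Scope < Scope
        def resolve
          scope.all
        end
      end

      def show?
        true
      end

      def create?
        user.present?
      end
      alias_method :update?, :create?
      alias_method :destroy?, :create?
    end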
open-build-service-2.9.4/docs/ichain.txt000066400000000000000000000110411332555733200201530ustar00rootroot00000000000000Authentication on the openSUSE Build Service
============================================

The authentication used on the build system has the following basic
requirements:

* The authentication has to happen on the frontend since we need client
  independence. That means that a valid authentication check can only be done
  on the frontend. Clients only transfer credentials to the frontend.

* On the frontend, ActiveRBAC for Ruby on Rails
  ( https://activerbac.turingstudio.com/trac ) is used because we want to
  benefit especially from the permission system that ActiveRBAC comes with.
  Changes to the openSUSE Build Service authentication system must not touch
  the ActiveRBAC code itself; our changes should integrate into the
  ActiveRBAC concepts instead.

* Additionally we want to integrate the iChain support with the ActiveRBAC
  user management. iChain is a Novell identity management solution; if we
  support it, users can do everything with one login on the Novell and
  openSUSE websites.

* Since there are many registered users on the Novell websites, we do not
  want to give access to the openSUSE Build Service to anybody by default.
  Especially in the beginning we want to be able to control who has access
  to the BS. Thus there needs to be a process where people ask for their
  iChain account to be enabled and the BS admin team activates the user.

* iChain works like a transparent proxy in front of a web application. All it
  does is add a header value to the HTTP header. In our case (iChain is much
  more powerful than this) we only have the username in the header. Note that
  as a result of the administrational setup this header value can be
  considered as 100% true and secure. That means if the header contains the
  user name 'freitag', it is absolutely secure to assume that the user
  freitag is logged in. The huge benefit of that is that the app (the BS)
  does not have to bother with sensitive information like passwords.

* We want to control in the clients which pages or functions are accessible
  without authentication (i.e. the start page) and which need an
  authenticated user. That should not need configuration in the iChain
  system.

How does that work in the BS?
=============================

Every controller on the webclient calls the extract_user method in the
application controller. This method tries to extract credentials from the
request, depending on the kind of authentication that is running.

To switch on iChain authentication, the parameter ICHAIN_HOST in the config
file on both webclient and frontend needs to be set to the IP address of the
iChain host (note: the IP is not yet used, so setting it to any non-nil value
is sufficient).

If iChain is running and the user accesses a page that requires
authentication but is not yet authenticated, the webapp redirects to a
special page. The iChain system is configured in such a way that accessing
this special page requires authentication, and thus iChain displays the
Novell standard login page. The user provides the credentials, and after a
successful login the user is redirected to the initially requested page.

If iChain is active, the user is taken from the header value X-username
(HTTP_X_USERNAME) that iChain transparently adds to the HTTP header. The
username is added as a header value to all communication with the frontend
as well.

To check if a user is valid, the webclient does a lookup on the user name on
the frontend. That can result in the following states:

* The user was found and has the ActiveRBAC state 2: the user is valid and
  allowed to log in.

* The user was not found. That means that access to the BS was not yet
  granted. The user is forwarded to a page that lets him ask for BS access.

* The user was found but is in state 5: the user has already asked for BS
  access, but the BS admin team has not approved it yet. The user sees a
  message that asks him to wait.

In case the user is not yet in the frontend user database, the client sends
an XML document to the frontend controller person, action register, carrying
the following values:

    freitag
    Klaas Freitag
    freitag@suse.de
    5
    opensuse
    This is why I like to work with the BS

This controller adds the new user to the BS database and sets the state to
unconfirmed. Now a BS admin has to switch the user's state to confirmed. As
long as that has not happened, the user cannot log in.
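In code, the header-extraction step looks roughly like this. This is a sketch
only: extract_user, ICHAIN_HOST and HTTP_X_USERNAME are named in the text
above, but the surrounding helper methods are assumptions of this example,
not the actual webclient implementation.

    # Sketch of the extract_user flow described above. CONFIG['ICHAIN_HOST']
    # and HTTP_X_USERNAME come from the text; authentication_required?,
    # ichain_login_path and credentials_from_basic_auth are hypothetical.
    class ApplicationController < ActionController::Base
      before_action :extract_user

      private

      def extract_user
        if CONFIG['ICHAIN_HOST']
          # iChain mode: trust the header the proxy injected
          @login = request.env['HTTP_X_USERNAME']
          redirect_to ichain_login_path if @login.blank? && authentication_required?
        else
          # plain mode: read the credentials the client sent itself
          @login, @password = credentials_from_basic_auth
        end
      end
    end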

open-build-service-2.9.4/docs/obs-logo.svg000066400000000000000000000730021332555733200204260ustar00rootroot00000000000000[obs-logo.svg: this archive entry holds a saved GitHub HTML page for openSUSE/obs-landing rather than SVG data; no image content is recoverable]
open-build-service-2.9.4/docs/openSUSE.org.xml000066400000000000000000000015051332555733200211340ustar00rootroot00000000000000<project name="openSUSE.org">
  <title>openSUSE.org Project</title>
  <description>
    This project refers to projects hosted on the Build Service at the
    openSUSE.org project. This is important especially for the base projects
    which provide the distributions to build against by default. Your local
    Build Service instance will request, download and cache all needed sources
    or binary packages from the openSUSE.org project when you build against it.

    Use openSUSE.org:openSUSE:12.3 for example to build against the
    openSUSE:12.3 project as specified on the opensuse.org Build Service.
  </description>
  <remoteurl>https://api.opensuse.org/public</remoteurl>
</project>
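As the description says, such a remote project is addressed by prefixing its
name. A project that wants to build against openSUSE 12.3 through this remote
would typically carry a repository definition like the following in its own
project meta; this is a sketch, and the repository name and architectures are
just examples:

    <repository name="openSUSE_12.3">
      <path project="openSUSE.org:openSUSE:12.3" repository="standard"/>
      <arch>i586</arch>
      <arch>x86_64</arch>
    </repository>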
"app/views/webui/user/index.html.haml" open-build-service-2.9.4/src/api/.rspec000066400000000000000000000000361332555733200177060ustar00rootroot00000000000000--color --require spec_helper open-build-service-2.9.4/src/api/.rubocop.yml000066400000000000000000000001441332555733200210430ustar00rootroot00000000000000inherit_from: ../../.rubocop.yml AllCops: UseCache: true CacheRootDirectory: tmp/rubocop_cache open-build-service-2.9.4/src/api/Gemfile000066400000000000000000000126361332555733200200750ustar00rootroot00000000000000# Edit this Gemfile to bundle your application's dependencies. # This preamble is the current preamble for Rails 3 apps; edit as needed. source 'https://rubygems.org' gem 'rails', '~> 5.1.1' # as our database gem 'mysql2' # as requirement for activexml gem 'nokogiri' # for delayed tasks gem 'delayed_job_active_record', '>= 4.0.0' # to fill errbit gem 'airbrake', '<= 7.1.0' # Due to a bug in Errbit we need to use 2.5.0 -> https://github.com/errbit/errbit/pull/1237 gem 'airbrake-ruby', '<= 2.5.0' # as JSON library - the default json conflicts with activerecord (by means of vice-versa monkey patching) gem 'yajl-ruby', require: 'yajl/json_gem' # to search the database gem 'thinking-sphinx', '> 3.1' # to paginate search results gem 'kaminari' # as abstract HTML of the bratwurst 'theme' gem 'haml' # to avoid tilt downgrade gem 'tilt', '>= 1.4.1' # to use markdown in the comment system gem 'redcarpet' # for nested attribute forms gem 'cocoon' # for activerecord lists. Used for AttribValues gem 'acts_as_list' # to parse a XML string into a ruby hash gem 'xmlhash', '>=1.3.6' # to escape HTML (FIXME: do we still use this?) gem 'escape_utils' # to sanitize HTML/CSS gem 'sanitize' # as authorization system gem 'pundit' # for password hashing gem 'bcrypt' # gem 'responders', '~> 2.0' # for threaded comments gem 'acts_as_tree' # js plotting (OBS monitor) gem 'flot-rails' # colorize for scripts gem 'colorize', require: false # XML Serialization got moved here gem 'activemodel-serializers-xml' # Spider Identification gem 'voight_kampff' # support coffeescript gem 'coffee-rails' # bind keyboard shortcuts to actions gem 'mousetrap-rails' # for issue tracker communication gem 'xmlrpc' # Multiple feature switch gem 'feature' # for profiling gem 'peek' gem 'peek-dalli' gem 'peek-mysql2' # for kerberos authentication gem 'gssapi', require: false # for sending events to rabbitmq gem 'bunny' # for making changes to existing data gem 'data_migrate', '= 3.2.2' # for URI encoding gem 'addressable' group :development, :production do # to have the delayed job daemon gem 'daemons' # as memcache client gem 'dalli' # to document ruby code gem 'rdoc' # to not rely on cron+rake gem 'clockwork', '>= 0.7' # as interface to LDAP gem 'ruby-ldap', require: false end group :production do # if you have an account, it can be configured by # placing a config/newrelic.yml # be aware about the non-OSS license # gem 'newrelic_rpm' end # Gems used only for testing the application and not required in production environments by default. 
group :test do # as testing frameworks gem 'minitest' gem 'test-unit' # to ensure a clean state for testing gem 'database_cleaner', '>= 1.0.1' # for test coverage reports gem 'codecov', require: false gem 'simplecov', require: false # for failing fast gem 'minitest-fail-fast' # for spec like reporting gem 'minitest-reporters' # for integration testing gem 'capybara' # for rspec like matchers gem 'capybara_minitest_spec' # to freeze time gem 'timecop' # to fake backend replies gem 'webmock', '>= 2.3.0' # for code quality checks gem 'flog', '> 4.1.0' # for mocking and stubbing gem 'mocha', '> 0.13.0', require: false # for testing common Rails functionality with simple one-liners gem 'shoulda-matchers', '~> 3.1' # for having fun while waiting for Rspec to finish gem 'nyan-cat-formatter' # assigns has been extracted to a gem gem 'rails-controller-testing' # To generate random data gem 'rantly', '>= 1.1.0' # To ensure consistent Git commits gem 'git-cop', '>=1.5.0' end # Gems used only during development, not required in production environments by default. group :development do # as alternative to the standard rails server gem 'unicorn-rails' # webrick won't work # for annotating models with schema info gem 'annotate' end group :development, :test do # as testing framework gem 'rspec-rails', '~> 3.5.0' # for fixtures gem 'factory_bot_rails' # for mocking the backend gem 'vcr' # as alternative to the standard IRB shell gem 'pry', '>= 0.9.12' # for style checks gem 'rubocop', require: false # for rspec style checks gem 'rubocop-rspec', require: false # integrates with RuboCop to analyse HAML files gem 'haml_lint' # to generate random long strings gem 'faker' # as driver for capybara gem 'poltergeist' # to launch browser in test gem 'launchy' # for calling single tests gem 'single_test' # to find n+1 queries gem 'bullet' end # Gems used only for assets and not required in production environments by default. group :assets do # for minifying CSS gem 'cssmin', '>= 1.0.2' # for minifying JavaScript gem 'uglifier', '>= 1.2.2' # to use sass in the asset pipeline gem 'sass-rails', '~> 5.0.1' # assets for jQuery DataTables gem 'jquery-datatables-rails' # assets for the text editor gem 'codemirror-rails' # assets for jQuery tokeninput gem 'rails_tokeninput', '>= 1.6.1.rc1' # to create our sprite images/stylesheets gem 'sprite-factory', '>= 1.5.2' # to read and write PNG images gem 'chunky_png' # assets for jQuery and jQuery-ujs gem 'jquery-rails' # assets for jQuery-ui gem 'jquery-ui-rails', '~> 4.2.1' # version 5 needs henne's new webui # assets for the bootstrap front-end framework.
Used by the bratwurst theme # gem 'bootstrap-sass-rails' # assets for font-awesome vector icons gem 'font-awesome-rails' # assets for formatting dates gem 'momentjs-rails' end open-build-service-2.9.4/src/api/Gemfile.lock000066400000000000000000000271601332555733200210220ustar00rootroot00000000000000GEM remote: https://rubygems.org/ specs: actioncable (5.1.4) actionpack (= 5.1.4) nio4r (~> 2.0) websocket-driver (~> 0.6.1) actionmailer (5.1.4) actionpack (= 5.1.4) actionview (= 5.1.4) activejob (= 5.1.4) mail (~> 2.5, >= 2.5.4) rails-dom-testing (~> 2.0) actionpack (5.1.4) actionview (= 5.1.4) activesupport (= 5.1.4) rack (~> 2.0) rack-test (>= 0.6.3) rails-dom-testing (~> 2.0) rails-html-sanitizer (~> 1.0, >= 1.0.2) actionview (5.1.4) activesupport (= 5.1.4) builder (~> 3.1) erubi (~> 1.4) rails-dom-testing (~> 2.0) rails-html-sanitizer (~> 1.0, >= 1.0.3) activejob (5.1.4) activesupport (= 5.1.4) globalid (>= 0.3.6) activemodel (5.1.4) activesupport (= 5.1.4) activemodel-serializers-xml (1.0.2) activemodel (> 5.x) activesupport (> 5.x) builder (~> 3.1) activerecord (5.1.4) activemodel (= 5.1.4) activesupport (= 5.1.4) arel (~> 8.0) activesupport (5.1.4) concurrent-ruby (~> 1.0, >= 1.0.2) i18n (~> 0.7) minitest (~> 5.1) tzinfo (~> 1.1) acts_as_list (0.9.10) activerecord (>= 3.0) acts_as_tree (2.7.0) activerecord (>= 3.0.0) addressable (2.5.2) public_suffix (>= 2.0.2, < 4.0) airbrake (7.1.0) airbrake-ruby (~> 2.5) airbrake-ruby (2.5.0) amq-protocol (2.3.0) annotate (2.7.2) activerecord (>= 3.2, < 6.0) rake (>= 10.4, < 13.0) ansi (1.5.0) arel (8.0.0) ast (2.4.0) bcrypt (3.1.11) builder (3.2.3) bullet (5.7.0) activesupport (>= 3.0.0) uniform_notifier (~> 1.10.0) bunny (2.9.1) amq-protocol (~> 2.3.0) capybara (2.17.0) addressable mini_mime (>= 0.1.3) nokogiri (>= 1.3.3) rack (>= 1.0.0) rack-test (>= 0.5.4) xpath (>= 2.0, < 4.0) capybara_minitest_spec (1.0.6) capybara (>= 2) minitest (>= 4) chunky_png (1.3.10) cliver (0.3.2) clockwork (2.0.2) activesupport tzinfo cocoon (1.2.11) codecov (0.1.10) json simplecov url codemirror-rails (5.16.0) railties (>= 3.0, < 6.0) coderay (1.1.2) coffee-rails (4.2.2) coffee-script (>= 2.2.0) railties (>= 4.0.0) coffee-script (2.4.1) coffee-script-source execjs coffee-script-source (1.12.2) colorize (0.8.1) concurrent-ruby (1.0.5) concurrent-ruby-ext (1.0.5) concurrent-ruby (= 1.0.5) crack (0.4.3) safe_yaml (~> 1.0.0) crass (1.0.3) cssmin (1.0.3) daemons (1.2.6) dalli (2.7.6) data_migrate (3.2.2) rails (>= 4.0) database_cleaner (1.6.2) delayed_job (4.1.4) activesupport (>= 3.0, < 5.2) delayed_job_active_record (4.1.2) activerecord (>= 3.0, < 5.2) delayed_job (>= 3.0, < 5) diff-lcs (1.3) docile (1.1.5) equatable (0.5.0) erubi (1.7.0) escape_utils (1.2.1) execjs (2.7.0) factory_bot (4.8.2) activesupport (>= 3.0.0) factory_bot_rails (4.8.2) factory_bot (~> 4.8.2) railties (>= 3.0.0) faker (1.8.7) i18n (>= 0.7) feature (1.4.0) ffi (1.9.18) flog (4.6.1) path_expander (~> 1.0) ruby_parser (~> 3.1, > 3.1.0) sexp_processor (~> 4.8) flot-rails (0.0.7) jquery-rails font-awesome-rails (4.7.0.2) railties (>= 3.2, < 5.2) git-cop (1.7.1) pastel (~> 0.7) refinements (~> 4.2) runcom (~> 1.3) thor (~> 0.20) globalid (0.4.1) activesupport (>= 4.2.0) gssapi (1.2.0) ffi (>= 1.0.1) haml (5.0.4) temple (>= 0.8.0) tilt haml_lint (0.27.0) haml (>= 4.0, < 5.1) rainbow rake (>= 10, < 13) rubocop (>= 0.50.0) sysexits (~> 1.1) hashdiff (0.3.7) i18n (0.9.1) concurrent-ruby (~> 1.0) innertube (1.1.0) joiner (0.3.4) activerecord (>= 4.1.0) jquery-datatables-rails (3.4.0) actionpack (>= 3.1) 
jquery-rails railties (>= 3.1) sass-rails jquery-rails (4.3.1) rails-dom-testing (>= 1, < 3) railties (>= 4.2.0) thor (>= 0.14, < 2.0) jquery-ui-rails (4.2.1) railties (>= 3.2.16) json (2.1.0) kaminari (1.1.1) activesupport (>= 4.1.0) kaminari-actionview (= 1.1.1) kaminari-activerecord (= 1.1.1) kaminari-core (= 1.1.1) kaminari-actionview (1.1.1) actionview kaminari-core (= 1.1.1) kaminari-activerecord (1.1.1) activerecord kaminari-core (= 1.1.1) kaminari-core (1.1.1) kgio (2.11.0) launchy (2.4.3) addressable (~> 2.3) loofah (2.2.2) crass (~> 1.0.2) nokogiri (>= 1.5.9) mail (2.7.0) mini_mime (>= 0.1.1) metaclass (0.0.4) method_source (0.9.0) middleware (0.1.0) mini_mime (1.0.0) mini_portile2 (2.3.0) minitest (5.10.3) minitest-fail-fast (0.1.0) minitest (~> 5) minitest-reporters (1.1.19) ansi builder minitest (>= 5.0) ruby-progressbar mocha (1.3.0) metaclass (~> 0.0.1) momentjs-rails (2.17.1) railties (>= 3.1) mousetrap-rails (1.4.6) mysql2 (0.4.10) nio4r (2.1.0) nokogiri (1.8.2) mini_portile2 (~> 2.3.0) nokogumbo (1.5.0) nokogiri nyan-cat-formatter (0.12.0) rspec (>= 2.99, >= 2.14.2, < 4) parallel (1.12.0) parser (2.4.0.2) ast (~> 2.3) pastel (0.7.2) equatable (~> 0.5.0) tty-color (~> 0.4.0) path_expander (1.0.2) peek (1.0.1) concurrent-ruby (>= 0.9.0) concurrent-ruby-ext (>= 0.9.0) railties (>= 4.0.0) peek-dalli (1.2.0) concurrent-ruby concurrent-ruby-ext dalli peek peek-mysql2 (1.2.0) concurrent-ruby concurrent-ruby-ext mysql2 peek pkg-config (1.2.3) poltergeist (1.17.0) capybara (~> 2.1) cliver (~> 0.3.1) websocket-driver (>= 0.2.0) power_assert (1.1.1) powerpack (0.1.1) pry (0.11.3) coderay (~> 1.1.0) method_source (~> 0.9.0) public_suffix (3.0.1) pundit (1.1.0) activesupport (>= 3.0.0) rack (2.0.5) rack-test (0.8.2) rack (>= 1.0, < 3) rails (5.1.4) actioncable (= 5.1.4) actionmailer (= 5.1.4) actionpack (= 5.1.4) actionview (= 5.1.4) activejob (= 5.1.4) activemodel (= 5.1.4) activerecord (= 5.1.4) activesupport (= 5.1.4) bundler (>= 1.3.0) railties (= 5.1.4) sprockets-rails (>= 2.0.0) rails-controller-testing (1.0.2) actionpack (~> 5.x, >= 5.0.1) actionview (~> 5.x, >= 5.0.1) activesupport (~> 5.x) rails-dom-testing (2.0.3) activesupport (>= 4.2.0) nokogiri (>= 1.6) rails-html-sanitizer (1.0.4) loofah (~> 2.2, >= 2.2.2) rails_tokeninput (1.7.0) railties (>= 3.1.0) railties (5.1.4) actionpack (= 5.1.4) activesupport (= 5.1.4) method_source rake (>= 0.8.7) thor (>= 0.18.1, < 2.0) rainbow (2.2.2) rake raindrops (0.18.0) rake (12.3.0) rantly (1.1.0) rb-fsevent (0.10.2) rb-inotify (0.9.10) ffi (>= 0.5.0, < 2) rdoc (6.0.1) redcarpet (3.4.0) refinements (4.3.0) responders (2.4.0) actionpack (>= 4.2.0, < 5.3) railties (>= 4.2.0, < 5.3) riddle (2.2.0) rspec (3.5.0) rspec-core (~> 3.5.0) rspec-expectations (~> 3.5.0) rspec-mocks (~> 3.5.0) rspec-core (3.5.4) rspec-support (~> 3.5.0) rspec-expectations (3.5.0) diff-lcs (>= 1.2.0, < 2.0) rspec-support (~> 3.5.0) rspec-mocks (3.5.0) diff-lcs (>= 1.2.0, < 2.0) rspec-support (~> 3.5.0) rspec-rails (3.5.2) actionpack (>= 3.0) activesupport (>= 3.0) railties (>= 3.0) rspec-core (~> 3.5.0) rspec-expectations (~> 3.5.0) rspec-mocks (~> 3.5.0) rspec-support (~> 3.5.0) rspec-support (3.5.0) rubocop (0.51.0) parallel (~> 1.10) parser (>= 2.3.3.1, < 3.0) powerpack (~> 0.1) rainbow (>= 2.2.2, < 3.0) ruby-progressbar (~> 1.7) unicode-display_width (~> 1.0, >= 1.0.1) rubocop-rspec (1.20.1) rubocop (>= 0.51.0) ruby-ldap (0.9.19) ruby-progressbar (1.9.0) ruby_parser (3.9.0) sexp_processor (~> 4.1) runcom (1.4.0) refinements (~> 4.2) safe_yaml (1.0.4) sanitize 
(4.6.4) crass (~> 1.0.2) nokogiri (>= 1.4.4) nokogumbo (~> 1.4) sass (3.5.3) sass-listen (~> 4.0.0) sass-listen (4.0.0) rb-fsevent (~> 0.9, >= 0.9.4) rb-inotify (~> 0.9, >= 0.9.7) sass-rails (5.0.7) railties (>= 4.0.0, < 6) sass (~> 3.1) sprockets (>= 2.8, < 4.0) sprockets-rails (>= 2.0, < 4.0) tilt (>= 1.1, < 3) sexp_processor (4.9.0) shoulda-matchers (3.1.2) activesupport (>= 4.0.0) simplecov (0.14.1) docile (~> 1.1.0) json (>= 1.8, < 3) simplecov-html (~> 0.10.0) simplecov-html (0.10.1) single_test (0.6.0) rake sprite-factory (1.7.1) sprockets (3.7.2) concurrent-ruby (~> 1.0) rack (> 1, < 3) sprockets-rails (3.2.1) actionpack (>= 4.0) activesupport (>= 4.0) sprockets (>= 3.0.0) sysexits (1.2.0) temple (0.8.0) test-unit (3.2.7) power_assert thinking-sphinx (3.4.2) activerecord (>= 3.1.0) builder (>= 2.1.2) innertube (>= 1.0.2) joiner (>= 0.2.0) middleware (>= 0.1.0) riddle (>= 2.0.0) thor (0.20.0) thread_safe (0.3.6) tilt (2.0.8) timecop (0.9.1) tty-color (0.4.2) tzinfo (1.2.4) thread_safe (~> 0.1) uglifier (4.1.5) execjs (>= 0.3.0, < 3) unicode-display_width (1.3.0) unicorn (5.3.0) kgio (~> 2.6) raindrops (~> 0.7) unicorn-rails (2.2.1) rack unicorn uniform_notifier (1.10.0) url (0.3.2) vcr (4.0.0) voight_kampff (1.1.1) rack (>= 1.4, < 3.0) webmock (3.3.0) addressable (>= 2.3.6) crack (>= 0.3.2) hashdiff websocket-driver (0.6.5) websocket-extensions (>= 0.1.0) websocket-extensions (0.1.3) xmlhash (1.3.7) pkg-config xmlrpc (0.3.0) xpath (3.0.0) nokogiri (~> 1.8) yajl-ruby (1.3.1) PLATFORMS ruby DEPENDENCIES activemodel-serializers-xml acts_as_list acts_as_tree addressable airbrake (<= 7.1.0) airbrake-ruby (<= 2.5.0) annotate bcrypt bullet bunny capybara capybara_minitest_spec chunky_png clockwork (>= 0.7) cocoon codecov codemirror-rails coffee-rails colorize cssmin (>= 1.0.2) daemons dalli data_migrate (= 3.2.2) database_cleaner (>= 1.0.1) delayed_job_active_record (>= 4.0.0) escape_utils factory_bot_rails faker feature flog (> 4.1.0) flot-rails font-awesome-rails git-cop (>= 1.5.0) gssapi haml haml_lint jquery-datatables-rails jquery-rails jquery-ui-rails (~> 4.2.1) kaminari launchy minitest minitest-fail-fast minitest-reporters mocha (> 0.13.0) momentjs-rails mousetrap-rails mysql2 nokogiri nyan-cat-formatter peek peek-dalli peek-mysql2 poltergeist pry (>= 0.9.12) pundit rails (~> 5.1.1) rails-controller-testing rails_tokeninput (>= 1.6.1.rc1) rantly (>= 1.1.0) rdoc redcarpet responders (~> 2.0) rspec-rails (~> 3.5.0) rubocop rubocop-rspec ruby-ldap sanitize sass-rails (~> 5.0.1) shoulda-matchers (~> 3.1) simplecov single_test sprite-factory (>= 1.5.2) test-unit thinking-sphinx (> 3.1) tilt (>= 1.4.1) timecop uglifier (>= 1.2.2) unicorn-rails vcr voight_kampff webmock (>= 2.3.0) xmlhash (>= 1.3.6) xmlrpc yajl-ruby BUNDLED WITH 1.13.6 open-build-service-2.9.4/src/api/Makefile000066400000000000000000000066121332555733200202370ustar00rootroot00000000000000include ../../Makefile.include OBS_API_LOGS := access.log backend_access.log delayed_job.log error.log lastevents.access.log all: install: prepare_dirs prepare_rake docs config log_files build prepare_dirs: $(INSTALL) -d -m 755 $(DESTDIR)$(OBS_API_PREFIX) $(INSTALL) -d -m 755 $(DESTDIR)$(OBS_API_PREFIX)/log $(INSTALL) -d -m 755 $(DESTDIR)$(OBS_API_PREFIX)/tmp $(INSTALL) -d -m 755 $(DESTDIR)$(OBS_API_PREFIX)/config # prepare for running sphinx daemon $(INSTALL) -m 0755 -d $(DESTDIR)$(OBS_API_PREFIX)/db/sphinx{,/production} prepare_rake: prepare_dirs cp -a * $(DESTDIR)$(OBS_API_PREFIX) rm $(DESTDIR)$(OBS_API_PREFIX)/Makefile rm 
$(DESTDIR)$(OBS_API_PREFIX)/Procfile touch $(DESTDIR)$(OBS_API_PREFIX)/log/production.log touch $(DESTDIR)$(OBS_API_PREFIX)/config/production.sphinx.conf docs: $(INSTALL) -d -m 755 $(DESTDIR)$(OBS_APIDOCS_PREFIX)/api $(INSTALL) -d -m 755 $(DESTDIR)$(OBS_APIDOCS_PREFIX)/api/html cp -av ../../docs/api/api $(DESTDIR)$(OBS_APIDOCS_PREFIX) cp -av ../../docs/api/html/* $(DESTDIR)$(OBS_APIDOCS_PREFIX)/api/html/ ln -sf $(OBS_APIDOCS_PREFIX)/api/ $(DESTDIR)$(OBS_API_PREFIX)/public/schema echo 'CONFIG["apidocs_location"] ||= File.expand_path("../docs/api/html/")' >> $(DESTDIR)$(OBS_API_PREFIX)/config/environment.rb echo 'CONFIG["schema_location"] ||= File.expand_path("../docs/api/")' >> $(DESTDIR)$(OBS_API_PREFIX)/config/environment.rb config: prepare_dirs # we also need the config in the build environment, otherwise rake will break $(INSTALL) -m 644 config/database.yml.example config/database.yml $(INSTALL) -m 644 config/options.yml.example config/options.yml $(INSTALL) -m 644 config/thinking_sphinx.yml.example config/thinking_sphinx.yml # TODO: see if these configurations work in real life $(INSTALL) -m 644 config/database.yml.example $(DESTDIR)$(OBS_API_PREFIX)/config/database.yml $(INSTALL) -m 644 config/options.yml.example $(DESTDIR)$(OBS_API_PREFIX)/config/options.yml $(INSTALL) -m 644 config/thinking_sphinx.yml.example $(DESTDIR)$(OBS_API_PREFIX)/config/thinking_sphinx.yml echo "# This is to prevent fdupes from hardlinking" >> $(DESTDIR)$(OBS_API_PREFIX)/config/database.yml echo "# This is to prevent fdupes from hardlinking" >> $(DESTDIR)$(OBS_API_PREFIX)/config/options.yml echo "# This is to prevent fdupes from hardlinking" >> $(DESTDIR)$(OBS_API_PREFIX)/config/thinking_sphinx.yml log_files: $(foreach logfile,$(OBS_API_LOGS),$(shell touch $(DESTDIR)$(OBS_API_PREFIX)/log/$(logfile) )) build: config $(shell [ -d $(DESTDIR)/srv/www/obs/api/.bundle ] && rm -rf $(DESTDIR)/srv/www/obs/api/.bundle) # we need to have *something* as secret key echo "" | sha256sum| cut -d\ -f 1 > $(DESTDIR)/srv/www/obs/api/config/secret.key cd $(DESTDIR)/srv/www/obs/api ;\ bundle.ruby2.5 exec rake.ruby2.5 assets:precompile RAILS_ENV=production RAILS_GROUPS=assets || exit 1 ;\ rm -rf tmp/cache/sass tmp/cache/assets config/secret.key ;\ bundle.ruby2.5 config --local frozen 1 || exit 1 ;\ bundle.ruby2.5 config --local without development:test:assets || exit 1 ;\ # reinstall $(INSTALL) config/database.yml.example $(DESTDIR)$(OBS_API_PREFIX)/config/database.yml # patch our version inside sed -i -e 's,^api_version.*,api_version = "$(OBS_VERSION)",' $(DESTDIR)$(OBS_API_PREFIX)/config/initializers/02_apiversion.rb test_unit: [ -d log ] || mkdir log echo > log/test.log ./script/api_test_in_spec.sh clean: rm -rf ../../docs/api/html .PHONY: test open-build-service-2.9.4/src/api/Procfile000066400000000000000000000003121332555733200202540ustar00rootroot00000000000000web: bundle exec rails server delayed: bundle exec script/delayed_job.api.rb run clock: bundle exec clockworkd --log-dir=log -l -c config/clock.rb run search: bundle exec rake ts:rebuild NODETACH=true open-build-service-2.9.4/src/api/Rakefile000077500000000000000000000011101332555733200202330ustar00rootroot00000000000000#!/usr/bin/env rake.ruby2.5 # Add your own tasks in files placed in lib/tasks ending in .rake, # for example lib/tasks/capistrano.rake, and they will automatically be available to Rake. require_relative 'config/application' OBSApi::Application.load_tasks require(File.join(File.dirname(__FILE__), 'config', 'boot')) unless Rails.env.production?
require 'single_test/tasks' require 'rubocop/rake_task' require 'haml_lint/rake_task' RuboCop::RakeTask.new(:rubocop) do |task| task.options = ['-D', '-F', '--fail-level', 'convention'] end HamlLint::RakeTask.new end open-build-service-2.9.4/src/api/app/000077500000000000000000000000001332555733200173525ustar00rootroot00000000000000open-build-service-2.9.4/src/api/app/assets/000077500000000000000000000000001332555733200206545ustar00rootroot00000000000000open-build-service-2.9.4/src/api/app/assets/icons/000077500000000000000000000000001332555733200217675ustar00rootroot00000000000000open-build-service-2.9.4/src/api/app/assets/icons/LICENSE.md000066400000000000000000000004551332555733200233770ustar00rootroot00000000000000The icons used are a mixture of the [Silk Icons set](http://www.famfamfam.com/lab/icons/silk/) by Mark James, licensed [CC BY 2.5](https://creativecommons.org/licenses/by/2.5/), the [Tango Icon Library](http://tango.freedesktop.org), which is in the public domain, and images/logos that are all rights reserved.
Pָ#i)8:S.άj[/Uv loL3 k&~j~MLGJf2Bʔ̘Zf=EfQd֟wLۅȌ#fd&ʝulՓ;xLH֌ ÉȠȠ~+:#hh#5=&N.ehiL(B:* ƑC0a`7O9f܈⣦e׀oNϼ[\{gն ^#gHt[f6s s$&cϮV]pacl.B[3SrX^^9|FW7sX_KmI-4  7!+oq0r; ƓgNAEHL ܊blߐ-F+ڎD%:QwF5d? ގ 8c1%E? ^_utN Fp(-d1HY8ʾR&H$8@2484h\ 2D))j\<qw.PXU,oū}>zt]/,*,.-̧')bE@Ý3N`|AUDbO6 \O_ٿ눊>IENDB`open-build-service-2.9.4/src/api/app/assets/icons/arrow_up.png000066400000000000000000000005641332555733200243400ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8˥?/Qϲhg5j%'F,[ LH 3(vw=fcoN{O~9VM4Q7ܿ)v/WQ=&bpSO ^'&^:\˨6eND!& 9꒣_|?\ srx,g*,(F#d[OaAA*P p1O+C$`)*w`A#0$ *?b&NRIENDB`open-build-service-2.9.4/src/api/app/assets/icons/back_disabled.png000066400000000000000000000025211332555733200252240ustar00rootroot00000000000000PNG  IHDRrP6tEXtSoftwareAdobe ImageReadyqe<"iTXtXML:com.adobe.xmp IDATxڬ;KPsЄC[qC`)uL?P m t/8Z: 8 ZT,jsDbb C\eXfk09GZfr !<~ q |a[ǝB.@eYp ٠ɿR&1X>!dYPɤ 9@`Cd j6++$큙htf3۶%Y @a%9 Kq~3}VT:vSH;?ijg0oE1s\^(Q^}hAf4>$0 ro=hXVAUU%Nkx|jJ~hd0π>znt]?=C7A,anMzňr&AFi!QH1A-x0wAEՒ9} 0r͓IENDB`open-build-service-2.9.4/src/api/app/assets/icons/back_enabled.png000066400000000000000000000025431332555733200250530ustar00rootroot00000000000000PNG  IHDRrP6tEXtSoftwareAdobe ImageReadyqe<"iTXtXML:com.adobe.xmp lIDATxb?U cy2vMeYF(Al@̌S23F122222JzPO Kаηl :kIDATxb?U cy2E߱u# e@CT%P  X1% ,K##s.0? 7  v۟1h%L 2;,"b&N@Wd q da>xLጌL@Ze~R0j#0`ĜeA|tvt_»'7ON?_IPDQeG _C*D hZr @(]sΐ.Mݕ3E04AG__UE#JFەWA_J\?tB6^1,w #˗-نHleA/PØYTQyH~԰$et$’ "a0pQIENDB`open-build-service-2.9.4/src/api/app/assets/icons/brick.png000066400000000000000000000007041332555733200235700ustar00rootroot00000000000000PNG  IHDR7gAMA7tEXtSoftwareAdobe ImageReadyqe<VIDAT?HTݝ^ٟ ju ɥpOhi/8ԩ!p*0Ў;}E,tnM2|13 "byK$i^NFDЙ;4eZ|Uj|jf^$ig)kYbIuKWPcJ'U>4MKB݆eSJtO9iԄ ʆn#2u` *_ R4+*BuϘMYU׷oAaLG۽kE|"2w9m[yGwSB3{er~㫹<{ @% @IENDB`open-build-service-2.9.4/src/api/app/assets/icons/brick_add.png000066400000000000000000000013311332555733200243750ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<kIDAT8uSKkQ?7K.\*(wՈ4&4%<1I*Ye&) ̓&NJIyϥ-:>ΙsR7nn2Y#1E1rLc2S2r!c8b0 }=q {oJÁucG=Nt: r\xA{~L&Y\\Ld2T*I]N% B!tn;;;FT;N>P(qihZQl`{{{Vn<?R0 r[F#R9(EB^th4 29#'D@jZlhrbGITu(\u=8L/T*{l{V 9 ,^ˆx=mܙ lLZZZ*Rw&6o&}\p?̯NAן?wF0_)X Cl?g(UuOIENDB`open-build-service-2.9.4/src/api/app/assets/icons/brick_delete.png000066400000000000000000000013511332555733200251110ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<{IDAT8uSMha=1#:tjF۠NZuE_ 2StsH~0S'JNufY?Q^>+>RhMA{O~j=7Pcup8L&vtkޣD\vbf㵾Dm.`M H$ܑ t:1??AkQIA`NDɽ9Jz$&* ,rsHRphZV@@^GPqCqȕHDAi5H (bjR'RdJFIH j(lZ8<L&GI94|.bz -X,7E~ ^PΌ7%W+x?5E`c:.ǤlPwPx3IwFƻq` 0;ګp_I4l4_lnnrݔ3փ#8C^ka#x#;q )sSl.AIENDB`open-build-service-2.9.4/src/api/app/assets/icons/brick_edit.png000066400000000000000000000014071332555733200245760ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8˅SkRQO=KDDc4Fa,V+CX[!T:lզӡsE?v%N&SS66wjWua0G>8||?9pz86-j)rCLLL4_3 v$FՊP(Zj@ 8k4bĽwZ"lcg\]23X,AHr`ۡr$I I0zY"M ~_D*<oV9R/W,X,e!6sG76g2A>G6E2du=\G% XPMLçr9lll`}}tM]4bױ9ۃE]'wJ^VDQDP@&axstDVWWq!D)މeHc<&z _R4j?- 6vlƢjHr"<[0  _\׌RYY'">tTq)Qq?[v ƫ7Ćm'v&Z/[{)~ IENDB`open-build-service-2.9.4/src/api/app/assets/icons/brick_go.png000066400000000000000000000014261332555733200242570ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8uS]HQ>D$xxceWˆ * Ho$Srm~oY2lmssdT `:>s)2y}!H:\.3,mٜa0 m^?#0UKQcccHRH&1v8e_ߪ633nAci9AOOP+2x^XVt:g1&H`dd,GV UJLLL#pE6 tnCR%'aaaSSS~``lhooh+++X^^,6{TP{_p8۶65H%b1D"Js\3O-qKy >.T*%Bg;cuuh'aQ 9x&TCtAZ[[׉(6֐9^#a(S@OkFso#JD?ߏWoIVhly<QG3! 
6ByAIAExY$vl t EC~S.<{ڋ;kP!DW3;~  ].ieYE"4䮿q7PU{qdm$EIENDB`open-build-service-2.9.4/src/api/app/assets/icons/bug_add.png000066400000000000000000000014461332555733200240670ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8}SOQ(,QTJLjb Y$(&4mjC֦Vse}vyyν/ n/ֱyY1譃gx^HۏE.NKO ZJgT~_yד,- {'JI "c' nvbC0AJ5V>9Q+DuHR8f""+b? |Y;PΦ]8 AV$6H{I;q>_umNFW.j]r0aPJAs Yd!wd 7f v#$2[&/a\҃,%?Y%LlCl_y:4Đr;R[F{Xmp Gf͂n6 cNŝVTd`2IPuNH% ~:XNn*sɸ6e t⿝g5ъO6z(-3u@7IyjpskFa^ ^155i5'1-rDm*QЙwݘ|t JJ(CXf50pT *R#xmHtSHEϠK@Q[[F( Ene*R |yv⸇Lؤ;ߗ5y ~.9IENDB`open-build-service-2.9.4/src/api/app/assets/icons/bug_delete.png000066400000000000000000000015041332555733200245740ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8˅SkLQA_yGŠPT4B%S)ɦ-o,%P HiGtU49>2-;9>sα`S  {8~zߕlI.scʪtwy>wu4s~%$T][5y|Վ>bS6:p5>(abֈBu2Dޗ uuBI,DC`xՌ7m~VM$DkǪ r:ٵX$unh'uZt\t! L(0@POMh UR'xZ?myBnBR ]1n@X5'HLf/kQܟ>I #qw1VQL]I[,pnhdY:c"K!'qg6*4x].81S罜iIq%Bdi#!i Cke8aXH`en(RxuWîV3pݬ@\>%4"4B[9s!t6cqt5*;i-sp*{ޘAF(N|#Cf}&rm6yu֙lJbDHn4yH>RS*@Y 𕌏>m|aRvSinkNWŻ=x"f^/-W/-d6'O\|ڥ0 0Z?OndԿ%MXHO-SH~ Kd$׹ƹ$G*Z[ 9$|[|gczoĽjfVkOew\{*/l}X/-C'#~,@m']UAZ)QB<Lk K+8lߟ;ZkQdqfV=q!o\=zV@$ULQ%m]:b<),^0v^ʙy 4 >v/srn2 HӠ2UTUac]Ưd|피&[ye$rۿ`ؔ[Ю\su* )N p2t1~t`f)/oT%G]p Dܙ(tcN\IENDB`open-build-service-2.9.4/src/api/app/assets/icons/build_disable_blue.png000066400000000000000000000025061332555733200262710ustar00rootroot00000000000000PNG  IHDRw=sBIT|d pHYspMBtEXtSoftwarewww.inkscape.org<IDATHmlSUƟsz{{{ۍu[׭{7LD`5 $(:%bbD=yi$dFeuQet+{]`d|<<_'Cc:^E_?Llee.}W6sTsU\+ʧr.GgLb kQBݻaO7گ8s\SÖ́)pAEwG~r]RX6:@'% ˦TLf"b `FoɴID%&YD6oÎU_;dOB?ڽ[-HMMZHԐ|vnl sM̅dw9Oou;h&D%f#@w_eX 4i,YӴK2qje@MI P00) 낢P^^.gdBuEPVV‚7nt=>p+<#G7FB!Ch`LX$bjjM޹Q2>f9caccUS0P({A| KY,i=c\<@ܵdsPN 2((H$HQZt7lpɕUC)Qy񤦦Jao6[T*e =]Ơ3h4Cͽ7nivbe㢦ix87oo566cY! !CuȲB}hنEQv(IRd2a3Na۶Kc###j$]ٯ"R"\ ۽GMWeYj7\šW\.' 4 : ]a6 ]넴]ȾPŗ;:6V3ȫpCmWv;}GJ7ݾ@GcrcUb5v{s> l(L&Mӡk:)ԃ2QU`nA䧪RE\u#REQ I2G͎!p]RkTUis '6=7ͳJ%fȤF5bQ0Ơ(6#2a{e_Zc"5M6֮MS5dZͦ"gm]_y]T2G91p~hkk+ZޚΟoȪ sΙ){o|r-%r6TΉ9(&b8 PDXrBɥr/8/fvwwH:r?[4rΟj Q}h^? 
4GCk  RIENDB`open-build-service-2.9.4/src/api/app/assets/icons/build_enable.png000066400000000000000000000016141332555733200251040ustar00rootroot00000000000000PNG  IHDR_bKGD pHYs  tIME  'IDAT8ˍOU?շJZcz (H1Doxva11 ̖ilu Y˲ Pkiy9^tdՓo>y|#_WI]%CV1xشR^wz mMlP4-`ⱁ]i \g?P̀a>Ub7Ktk5X s8H Cƣ&㗳`x\1R{\|-50ż_63-I-] JF;ϫ*atyJ ;<`QD69U 5sr+0b·62mqM{nl[!vTɩ*wC!U]v||˦1rɱJ‰^h ŖVj5FW/46Lo BH?ߛԗ +j5L/VLȡy-̤۵(ԪHnQWoqA䀨 a4*TK] $Q3_X@.Xs#v>;Oh3K 핢3i2$0@OUZ1+;Bdt(Mù}Kz"`L/}oٱa ?{п/ەTxAogWL >=lqm]WYDcIENDB`open-build-service-2.9.4/src/api/app/assets/icons/build_enable_blue.png000066400000000000000000000025121332555733200261110ustar00rootroot00000000000000PNG  IHDRw=sBIT|d pHYspMBtEXtSoftwarewww.inkscape.org<IDATHkLW}K[z--BN.sl:4s:6ME˲̉a_tMcpe٘Q.Q5asNPB)dK9+b()׋QF4:!]DFl?ORZ*)H^7d2$==4[l*4Eh B15[cDl;1;@7[vnE )D9C#^adifCIÒ Jɴw_n=RDh OMt BP#pӋ[4;n_>P¿~R!WH('T1L%.78VK,a~ܷH/-گ`和Tf%p K-۷M&,?xU xحhmjUlZ!6+cLd@B|휖}z( sQ0iK5>9=(Q7\AG8H$ DmUfj{Aim&*J-NJ `M1gd=3 b6Å$kX$ՙݶxqX&?˥qUE8E]B&B$haM[ s?3&E_5VwVSZt,(BE .AXdʜdeK㷺`1@䪤7^NH+Gx1l(P̃E`WӄI{E4@4&0$ۄT<0V/Уr~}$/ = ^D3}/Vdy 8QrYnG\%xT dQO@*(X E@U=Й?oyT̡#[bɍ%ܩ ˼g~Z#FC2HT{vϐ܎C=͓IW1Ei~qv&@ժ[Z*6Y5[]+.Er`䂥`7"Vn|xlE{\,|Tެw<_ߧboIENDB`open-build-service-2.9.4/src/api/app/assets/icons/build_enable_grey.png000066400000000000000000000023231332555733200261300ustar00rootroot00000000000000PNG  IHDRw=sBIT|d pHYspMBtEXtSoftwarewww.inkscape.org<PIDATHkLUBߖBۗ^Xtøs-LfqIudNƘhq* &I9!8`^^ޞealpyΓc+v3$/̷ChT#̖2^""5XL)ݝXuzဘJA!PBɔdʕ˓JdZإ]ep8VTTTRc^6,Rȫ0E8P"'''ȶmF"XjcfR @P82Jx@{nPWPPhظq#9Ѿ;CKì\|q u$QMLM5Ov)!HMٵV+M$g)h !$6ǰ4Lb!O&%yd2)=^/oAL˳'˧jnZilyy|Qq睾i |^8Dѵ\mF\IvJ2>5u #Hر,RJ9e>JixfP(Μ9m_0 +E];ԛ{;AZ {=y(H"]Df}}9t:'OXn([\Y8okrm `OsŐL)NOOmkkL  I@CLd\Lo;d5,P Dž('o3H0_!Y UM X9vGOy2??Wvh$`||%n^>~nDe}[JU|w~L&;KekÝ5/. _kP \@g^%Dd󛛛z-;Z?mQeS?\0UExHD n *A-w&B;6Y$ ZŽ=DUemӁfZ/Rm Cz/ tB ̛NJ a;rd N,XX'>Kry88 x__Y7ͫʹZȋ ==tajD_RA#(Uxf84ݹ9n2$b'C{M9Hj9,=ݫ R,2Z˾u0(fԘycRݵt#]畸ݿIENDB`open-build-service-2.9.4/src/api/app/assets/icons/clear.png000066400000000000000000000006041332555733200235630ustar00rootroot00000000000000PNG  IHDRagAMA abKGD oFFs1 pHYs  d_tIME- TCsIDATx1n0E  !ؽri(L"7{I"1Q/YbF|ȿĚ^<4mdY9u]p1mqα+9ڶ0 ZjuzTޯ i&>e*`&Ze1ޣ_{TU#ס7 !|Boi9g)ӃQTUb$0&9S4^""o/8k]IENDB`open-build-service-2.9.4/src/api/app/assets/icons/clear_active.png000066400000000000000000000005441332555733200251210ustar00rootroot00000000000000PNG  IHDRabKGD pHYs  d_tIME . IDAT8œ1@'bc[{Ij@H r#,XHm̎Fea`xͼ?JV⇝RН._EQ0 MA1󜺮9}?rl82ڶigu u4M'g麎x|iFe Q-^?N'\c@UUd6m68"" X3Y嗸u%mc' ED>w}%c~IENDB`open-build-service-2.9.4/src/api/app/assets/icons/clock.png000066400000000000000000000015621332555733200235740ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8]kHSq}}( EAAЇnAXVdYդtvl6׌t7 M}پ 7EF܏&MszJf-KS0\u I!}ًf>Ʌ]zd&."^õJ,.4 2I3(PdEى zG˅מ=M'QH;ePQxt5/ހG0/ vT\}`gDhc8 /jRTp ex+JP/)T;ƥ0o8ާx^+e/<6iFہ ~G [app@NyY&^tRE]"taC T%}N+BŨyY&^xrk᪾6W݇$+Ϊ U0L`#ZyÙZ}c!i9BL%^0n9ֈs>i ֓i붻k>Gj ]IENDB`open-build-service-2.9.4/src/api/app/assets/icons/cog_add.png000066400000000000000000000014561332555733200240630ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8uK[Aƃ,.jRpQh5J`ARP4($bHjJM `L(F3>4G/ -zP0 w7sut7͵5Rjaa!EsER''';O$%xt||F{H*tZ?77$Tb@Ȉ:00Sd2}\\\`oo$Sguw(B$. 
R+=$ rAdI>!F-u0S?/+A;'/@t\.ơqwȅg GO]]jX=AvypK5B\Szj677|jjTX4ndy}!fmm:UYY2?ey1B0_IENDB`open-build-service-2.9.4/src/api/app/assets/icons/cog_delete.png000066400000000000000000000015171332555733200245730ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8uSKHQofL$QT0f%Һ1ht(nBtg JC (.E,n`%U`bCIBlbb43 6pg{syoXZ~J_e\Pȁ8M2 \3"Y.}Ʀ=l2crZLk{X~w`kkkLAIDJXRbE!}ܷqpzzz=K$䅑l9#慠u"*sW2&iT)Fhn *aP ### qD"yfD=:Ж*,--e,C۹#U$R|@}NSBM"oHN*FNx1L3ٍt:݌cF>W!0w+kNӶi:µ'V()Q`t XLUlIn)tP -E9Z%{*8;>>V'_L7 -`saz0bz.6]UlЋ.f1T&Abpp---{[Q҉; EXdt0dԧ/Ewc UⳘ כ2*JiX\!Lɡ+9;RDEӕA˳HJ$^ѯbEpzD"y<0X ]=K\ ^&qPUU+//?Ɓ!`vv2,[WWȳr4"011Ntwwӊ@lpꍍ%qݞ^__bJrKҲu=8>iVohh ˥$HAVQdeTȀU32Z W"pcnssHImmml{Ja=9[WU F`a8m;g>!¹nIA4Y=lӆcy$cW_T9Ra NVIotS_IENDB`open-build-service-2.9.4/src/api/app/assets/icons/cog_go.png000066400000000000000000000015331332555733200237340ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8mSKSq>6'zG5|icŠ%d>,oB~!G}~?d5_IJ1t95v9%/1~,ښ {Xl_ZZ566Z̀d' ؏ulJ`j5DQL`A9x<anF&%,j* (BIl6MNNd3 yhbx'EmooC8V?A4ojj\Pt: uuup8@irNh 8 hb VX*z@[cn. Cpd%dH$œN'X,j766 J|yhy[ 5  SSS Oii 5ROU-/ep@`ue FJSL_d"$3nuz`;,Õ]$Ξ87776JJ(gS(B Fa5AFUzmP=onn#MI&v>  $&Zn_RYT5@ p Rfj 2rن`)d—O+O-`8Yԓt~0,Qjok!BxAPk>aZC+Ǹigggsnpn}s Xk= /QVISL`[w!4}\⦤ȠD$pb gAF&*⯂eM~yE'$]I8.H* <o\QIn |깕pL᥄}.Ȃ-.P'm"|_ MLOu#\> k@Pt]=?^$x./EB8DvԘ PґeTM6'^_z8"WcloシTp.KuewlΉoYܳpZ6(4C/4ʰ~QЇjc>LFޙ|؃$%ˎ t\;IENDB`open-build-service-2.9.4/src/api/app/assets/icons/comment_delete.png000066400000000000000000000010441332555733200254600ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8˭I(aGƔ(E]&˅pqHl%98(L,%d cƒdyg̘}~yO@MQp^9p 'ґ`ovo ґsvO8wO%ׄ}&\Kw3oΥ-Mп"  >`#ao2s.oAUF o_H`xM'{Qb|3*qήye-H*DZN@ͯ% ]ՙX-Xh[ so1 Q? nj<%P%e@m8qS*9䊹hVmU0&yݭ>;r^Je0qN=1"Y!s yQЗP\L%Q;_ל:ӯTHX 9TIENDB`open-build-service-2.9.4/src/api/app/assets/icons/comment_edit.png000066400000000000000000000012041332555733200251410ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8ˍOHQڵStSBD,jH hQFtI۲CdBE ,A"HelDݵBumgye}9 X=6=P7NKꥱVd">`m+!tQċZ9~MӒ!wA>>?3ᔿ.Ǯ2 eS7#׸4uf C 'h@boU:}pun P4 ,n^w B=VcW xC`/C*6c=ш &.ǎ]2nB-KzamZ,0AǺK7OM| yL?1vq` ,pB&qv7=hp1tn!ެ)(^/.ކ[u1pJ73AeLqnBIDlXר[Wt?J)˟D IYAa+F=?\J./E.p?!IENDB`open-build-service-2.9.4/src/api/app/assets/icons/configure.png000066400000000000000000000020531332555733200244560ustar00rootroot00000000000000PNG  IHDR szzsBIT|dIDATX_U?ffgXf(1$D YT z,Y7 %*Tԥ e383߽=uVs9f1;Z/?x3 k<;KF-rYyƎD`ߖ%~:skRmDQ ?LL P: u/qъ}sWu jMe+:[-C ug&NpQӂтL'-d{]8ׅFsE˶T+]$]ݶzBX +(%DP 5/.OOrSNj˯zA3bUxa^2,濻'ݳz/,Yp.\dEv"_}E\x& lUDod͕b~EИyjDcb^) uq"^EJ/8;93  ݿ=7tǍ*b`_wCbQuQDy4F5v3ΈĶ.o(x~OVp v7mtc nF|:|mgw fL%8v^ݒ&JLB H6d%(m u!7܌6~zX #G!Lml꽯6Ֆ[ d8ׂhLT#BЃ=aJSg5aOv݂N7mW66X:ԁm<6//+ |-KkU`e .]SK9M ' \*/yNx0A dʠv?*)S1aQ?@iR Dܥ gzvE`b1zZE1B,N|P$0E`SpEc 9ؔpp 8"*^@DFlW쫚!wž"›'P9wA ګ@;7Pȹ|GIENDB`open-build-service-2.9.4/src/api/app/assets/icons/debuginfo_disable_grey.png000066400000000000000000000021271332555733200271520ustar00rootroot00000000000000PNG  IHDRw=sBIT|d pHYspMBtEXtSoftwarewww.inkscape.org<IDATHՕkhU?򟺑ȦejRE )DL7.&M2ElaEEDF"r!Z낔K366~b[y Ásy>ysD[)}KTA׆.Hsssce^_c}}G+4M=Y.Lnm۶:k@!7^rTx|,=tto[YUsaZyQ=Of5k'ZW.%MkJCc8&Khqh r.[uϻRJiZTx^hXk o0AcIqHT􆇆9سwϬk8J6iz# 1S95P9\q8u/ V=kO46Ǧj-=a1T-0j];~<U}> ?VhljjoęԊM2q,@;~7w[r|=Y,Ӄԁ7(2O}{{Ms.)ts|/XL֞^^+֎go{{^q&" xy\k"vJ z:.ş (T^nnndO=+تu{ Je5#r,Sx+@)GW/ll]y ^rZdn:ʔ~y}s*+ GC^…f#bu.llkKBx| B;ހRԊf4g[:Ym'#б"צ?Oݾ|mj*ؕgájh@._gDdڭo'" 
;TM͟WQX85vU!)UH銱D57X+C4IENDB`open-build-service-2.9.4/src/api/app/assets/icons/debuginfo_enable_blue.png000066400000000000000000000021761332555733200267620ustar00rootroot00000000000000PNG  IHDRw=sBIT|d pHYspMBtEXtSoftwarewww.inkscape.org<IDATHՕ}hUǿ뽿{vwo/Z-٢%"B6##02 " *V,Iۚ\[-[wJmᦳ ~8ysz_W#_ktqpoi>xtV{4UeYgLy׫y |4ql~Sqs4rzv*y`0KλNS{l_ԼW5Ѡ/>46 F6s G"偬,4C0@fl)WMI6 mS˦̦P@"\;` w(p7ae{O{el,)887c 00`9 Pd 7!s*Oi-@䝻=,8 2 D|tU*tAb t:%+v5miv],x~v(!qLC!ddS%%k~4ؿ7җ"c"k# O؊JHZe h`929 ȫhnw0~&h*d~FDj=x\B&KkEVϦ:K@_! @ KhZ7c|/ [{IJ(j97: խGvl.($rKeY ;?L뻘;#@diܳ!`&%mɚU l 8q(T9{*1nmaB>kk228KGLL\ҦWJ;ˆq(B: +S?T_lh':RUQDSgo/q:$eCOMo+wI&Eo9^CC O69Yж]dmmmByTw+ynLt8`4r 0jk̿k?14IENDB`open-build-service-2.9.4/src/api/app/assets/icons/debuginfo_enable_grey.png000066400000000000000000000021651332555733200267770ustar00rootroot00000000000000PNG  IHDRw=sBIT|d pHYspMBtEXtSoftwarewww.inkscape.org<IDATHՕmHeƯ~sq?D)k)zMs{j쏠~.O@4AV.)>0q={+ا--- @F[:+{sL# ߝʼ5=Nz,Ҿ}bDۢ!ݟW:>2)%R\@tLOC)hG@ɤa:UXUճ݀SvsTb6?M%c\Qx=`4nHu~y؋t)D\'@PJ!Bp4  kJĎ|z1rmO!J)=:  Ài4 sé_}cB)3_o\)%ֽs#T&߇``rs}B@ ַG+:+LY5-gyg+\WڄiBP@Hmυ0w(ƙӃR MgVGn}dj|zojfN0:*ݠd2Y\K(MpI8 `ff=>> %kGG.#s5mkk3/ZL@VeSP D[;:wܦM*}{ҜPCGBzg"@˲F"ppaHr޻pC3I>z ( -w+I?MѦ~,<0l&ϡ#R:IÅe,V+He=?D0_@!n\ߟ e/ ZSq..;nn hi[vomme[JM /.XO4oD["˲~]lnhzFȉ/N ~9%5 { waW lyт4IENDB`open-build-service-2.9.4/src/api/app/assets/icons/delete.png000066400000000000000000000015741332555733200237460ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDATxb3? Ǐ fx#ӟŃ [73j[SM ?uO egg? FFFE L$$EuEUwC8 Xgb663zl>~X߿"t?H-@auL@WMb勏 w>f``cCL@auv&7àk00?dx9++éGjb`y? _c`*b`ݻ-Vfff6B-;~`Wo2Nb`߻Som3&1i` #\ @$c`9Q;wXd->YDԓ@,0& .O3دnf`xᷖû 5|a&&p4@0YX8b`xo;P3#,! B/œ;+^dkd[ aWm@qrAmG rdo~E&3|^k,| ̯^120z ^0sb`tc/+~2|Xkz?^abknA>`)V ï^ +J OuN>dj6?4gs531|LP`^nJxIENDB`open-build-service-2.9.4/src/api/app/assets/icons/dialog-warning.png000066400000000000000000000016721332555733200254050ustar00rootroot00000000000000PNG  IHDRĴl;bKGD pHYs  tIME-GGIDAT8˥kTW?fȨ6C&ImR2B@lB P 4 u1С Ҵ(hbk3:b&djw>{/Zk:`tEዌiøwRcvu߀ j͛7UlT8&ʶn_DkȃRhH? nT*݂U7_IdtcQi ^I/T,Zv733{,o^QvcO#hmZnؚb قgSJPZYd5O2R$5kRMZ761 =cӲi{͘M8R;Yg BȣoK/+y=ޮEV}H "F=b )+pzU(Ï߬kjӶkI Ƙ Jq(S;JJQ֚Vqlk(9UTUeީ|ɣ8Lҋ\RPg,͐!(u3 zcmp_=s!V¹j M"I $ؓ}Xآ ]=vV"hZ^mbrGV U,"ўWBNXFKuU׏"55:^x0itQZ5.lX8 G cZ1^Ք_*Yd(Z#bQGkZSR݀n@iGP(N.7Yj&+Ĝar}L4ozt4u/d M$Y)+~te'm žP12 X;0<ہFF7(9a|x{ /bPfWkt^lsQ؆Čs bHv9|ݾԙkT;3E)im'޺m9Ҥ܌ûF }bQ])>ʈp|aj%7ydo4/ %4|2=8Xvgxrn#DI)P`ǎ&+~fݿ;^,Њ^.LǾdm0H7aRc.s/%դlz}mn5BZm|J_B82^Jny(u,0f.]hKSIɴ/ADuas~ Ѡ>p(7M<#>d_n48[m7}h*%p-ݐnh}w0h.:$@}#dJU_}$,'*iά? 3V.ѺZ׷+xP 0,u|Zu3.&fЊV`xu>B`T}0`s,i`8 7C']S(T^D=0^i8G'GyeRJk"ߛ)NkbMMRKOL;0ZgչPt_;7 lp󡿽I/ܗSܳg GI;,dzk]WtJCf70Tb""I߽&N.rǓ &l-k[1 疄g/ (O\ZcBq0=ʍ'O_Z [BF1ڗ] 2F7e3Wy!NMZ >r{5ޠ!'/[1ED0~axЀ @UDEUS-H;(pIma'ӇY΋1Wˑjzu7FTzݾBc+wSw+Q؛UZCyui-i^\(wOַz>ܗ$zRs+h<%[/NU>z^o#0icpOSU?G8x#A0m՝ Ieg18q51pصh ׯEՐވc=Yǰ!I0ʥfqXH?\^>|wV8b;XbB.W?}f*7A1˳P57S6͡!` h1S ""d#i! 
_G&T2#x)(ʂg}L3yo.v- ݎy|Z{_~( QUQUL!@TUK{;WqEDhNoFp^XqO p!Zch;Z߆tJW]#|>[A.@buz nc5fC0 /I.Jsul1B0G(j\*]("W@$1@ P \:7wԧ8qΨ,,ZeI4ylj U_*G\ߜ[ad?9ם5seZP"ZqZ&?^KNFr//6B&v(M%'kjhԾ/ wԷyjm ,яDBwoMb""?NշƃEUwWWZEDD( "{^|PA=vUm=ׯ۝s\#7{?!*#G '|PxBƒxJ-b} jɣ9-Vo1ad4G1k#8 X(``DUjWڐ; w|.WuGOSkq\ۓ}uPTk6_J!߹$0o˼FͻZ VzQ UtenyFNx+|G:K53y^ OHIn8ۦHwF -f/)mMgOYꍪHW =z#7HJpإ#sxxWw8v+u(I}Dr睈[B%qaB1\,cID~#"u@LP''D$kʫ+V5[F}mk˦y,ҥ?`Р^_ u)oĆ?i-'\^cHhQ&i#7NA"]7ݻW`Dp'I=@-;h!>cHn)"Ą@9S1h5@hRA҆TU}I՗-oD"ʓofos<93!0"8x|)OGJݏ%&~^(< 䳎|Ui@/A'+ sxy/LnLt:G4*"m3)[Ѵ:c6Q6r No=Dܘ6#F-HZkY gؾ~gC0mSN˭y c3Xܑ7hz=e,#kͯL&w9CC@VU_U]d}by*.pKhmbty8/^^;ҼF6]SPݎ Jάظ!(-HЉӢ4Ә*ŚVQӨVU|bbr=Vh4۶ڮWPUNy_ʾ$KEA֯zUu~͸9^E*T?ʴiX s+Hx4Sv61Z $z r0E SyӛGv|kGaL=Mjaxlq)q6"bZWXU^l`^~#1@$\:؆mHP7Ȓ=M\q&ٌ)sRgU2ěQSv eEHE$4 ~N["\"R =e&CpI}V8lGJjG_ļvڑb-40]kmC(QJU֞?]y/%=U]qkm^'B%u}gTJZ+!"T"U}x5Z{qkmNޙbRnX*"A]4KUW.ցn5U5 ]F}iNC? TeRIENDB`open-build-service-2.9.4/src/api/app/assets/icons/distributions-debian.png000066400000000000000000000073411332555733200266240ustar00rootroot00000000000000PNG  IHDR("-e sRGBbKGD pHYs  tIME "*aIDATX]X{xf23Ifr!d@(,D@\Rbݮ(TwW>k]ql-jD $$!2{wly<32yCe@ї̜#47`0s\ɧyWÅg7L!Yn2D4P IDY,k#aD`fHA{_9mĦNCGg}ANz/pJ)ئ~gÂ>ce4o\c]9o:3`"@w0>"jR,g5ljv5y0XsAR3j2hD:qʧ6>KLnhX3_k1sN`SL4ԙy|U5)fJjf|wkSU!R(Ϫ1,>{S.^5W yV(/\ǵUʹB.=mݺ&"*D=EqpF;zL"@RD"0PvĎezZlV3yD̪Q)(ʈȭMD$Y_CBmo}D|^M %?ڊQENM !& }<7+x;'LfDJW-oy}zodwbX]VWsQDԙLfv`!q_= @KzCwl 3ݷ7L\`qTӴʵu%K=<969]@+}MVdR贔 H5JD&hDucҲXJiWa2k4uSILȞTbt-1M51@(DT'$J Ps?""ZUZpzV-o@h,s=>t9+,X#F^ Itŵ!"r+d (t)PNFiЫg`v)D64F;>ߔ>τ(^Mc?[z[3ڶp"98 UHf U!+lDb9yE3n[أ@Z93UqdR(svVnZQ_f02sL1P)>Dt@Ud"աm)ut"'GŶ^[jfLicfI;acf8]ν ~V@?K)b F '2s%ue D06sX jRWAY]l:U=ym萻`ѳ{]?.e.z_eɒf6_*YEc~-@S^`U8 s/2Ɵd9+scxu=w#?q~5)IRUS~]EKgu1sv58$٥"T9/0$\ VMZ~sg6lڲ67EQuoe!QZ4XT!03Dž-D<>@!3lJ.Ytg2 3.Ẻ:4 fd 3W(,JTȿ(U9 kX|G| _v9pqFxgn1l+==e==*ݞZvپp8|ſ/_~Szw޽DZVK˯[,RMt>e˧l)0hB %"jr< qֈZ.6]vG}u: r=7GvqşlCV|Hggg.[3# ]]?vZ `@q`DQ^Cs^4slP2ot:4s~3Kdٔ7Je)M1%UE {iR䊍cMyC^mmݜ?pPyOOO%KN<_aXhZ놽`Ν{zzcG.ض}o5]wg3={0&V\y{{={J$]9NojVeFR5j*ISDYC+oNdߙǹt!.(Дdsw9R7չ?[{H)mJr$\`fxJW>Ddo=xǟztÆ2o:忶] [VNX̬d_J:i\jUSt!JfY! ztmǴiwc{S*4tZ+7H:|[lȲtDDfQVc@~R)p(H%F=~IT+Y@y?˯g{$*&w]eifIENDB`open-build-service-2.9.4/src/api/app/assets/icons/distributions-fedora.png000066400000000000000000000064531332555733200266450ustar00rootroot00000000000000PNG  IHDR((msRGBbKGD pHYs+tIME6+f@ IDATXÍy]}?{ߛy{ƞ WK06`ZpmB+TmUEZ(-)mT MEiڨTi ZT85$0e&7;8ekxhݱC `J~޾Kl]Iedl1&rbq˲ n16!1:5^'87Ke? 8ߢ6-] q|// 1;3C!.kQpbqň_{~u:.LsӇy# 4쓕7F1q=떷r9zN033q(g<|䠔bfږ"]0d/twzJNמey[~i'gzxizNOQq)B7m(M$>m(M9DidLrsDJXY<:#w։s>ɻgf8r1b .b:Zyⱇi|s{~2MmK5΢BUע9"OˋlYIs}ƛJI{߼wS yimMűkl~KV'qJAQ@nznHzPt6o⋟8(2bW,溫fNezzJp""DE}#N)2<W@!(„N=oA}34?8sF&S30>ř#޶)Ogxt O؃chؾ}[ُ#o#F"w-pu(dkFk\AV]XٴO7H1;7p (&%zD((gm @+Fi8\od6_ZzYތtODm;RbbI8Qŋ=qf Np㵫0dK6rMg+v)B(ʯuq<+A! F+_`x|si F'Q!9[`/3<`[ 1"`HޟYdsD{.a:XbFY,ssY-It1H8/;ڡ6}Y)WMfFkN azJ! 4@ ?,]FŒw+„5N~ӻ.xe: V6P|{}/s\o݁bsz8>ѷ['QmHX!Vg6T & V`Ё!*MK˻6&WWĉ&( K~Oe:@L L C6Xe w-pB<9 @vEP,XWRcUƧA~QxF>w6nZZ2HfV,N{3~f|g#\E%SΕ:>)RFS[]Kl#0 ?#ɪDIENDB`open-build-service-2.9.4/src/api/app/assets/icons/distributions-kiwi.png000066400000000000000000000071651332555733200263510ustar00rootroot00000000000000PNG  IHDR(&sRGBbKGD pHYs  tIME ! IDATXÍeWU?k}ι羺owOL3$(""(%U,,?RBZZ`B! "! 
)c2!@f&dz9{{[Ts޵*?d*"r#tL/=w͗'Tc#[Ipk8z,ihu.񬛟}p}e?PE·o=A: I\s1ՊĪWUHv۸бH͗~ /WFx'xчYkFNF{ if ~N9٥*gH00,PgLc&9*׸'Q47|ŵ.?ȥ'u#ktWh-/1)0Wc$F0)k7'# )a2s/>wtc /xlS}/Çn]fe8'OXn1.7DD Ap,k+01e-K0bgg?;1ƾ 7OF|_?kc;iWg("sx@*D*k;˔ XhRrek7 :9.鄯~yai|k~1%"1ZlX#DBP$:uNr4?nѤ ;} 3|-Vc\W)5 g`%B X 6$\]TaYlL"G{\>E{#̦4d7OrG!<ޓq 0*B0сw N2#ѡa?ٝ2mwɼ"^,Cjh}'?+̓Mvg[1*&$6(Y^ho|NkIQ1UK"V If Òkl;PLP)EGIp&{<%)Y8A]dec 6iāb"B9WiBY* LFH,ǸR)Q s%̙ Υ9hv#/ nogceЂ^DŽ,HE"i!]_EFrh$4 9ҬAgdj TTZU܉[n,弪4hFcI@bpYh;Y*hl\իL&S& s#=JيXFY%GQ$.sr ޏ38%fW\%`Ѽ?"̦OfJ\,BgYFB *e%XfCVuj g۹obkԕ#ueEb+Zma{Gzc֖]XζRUB0ˉ[IC{ْwlj@b/R*fs}o̫^[4o_z/i0{?*OY\4;*,XX:Kv K9rrA^I$R@Ԓ-,ZK8g˃mƂMzP1KYTGji^aqd\ .w^0%bͳDh3R/Ya_iw=3OO<&:A<$aP[ !/KrC&M<`!napWyַ#BPKF (~ uuRY!qM"\/'U~eǃۛ1߼—'G;l1d M"H ; t+eキ18ǏV$kIv͹_V~$e9λ8z>pyECޅp92M()q$IJ&d٠iMZyn+gę[ncixŃ=xhoWNlwSU%1B(`p%I4!q^nKdww_̵*~IENDB`open-build-service-2.9.4/src/api/app/assets/icons/distributions-mandriva.png000066400000000000000000000103211332555733200271730ustar00rootroot00000000000000PNG  IHDR((m OiCCPPhotoshop ICC profilexڝSgTS=BKKoR RB&*! J!QEEȠQ, !{kּ> H3Q5 B.@ $pd!s#~<<+"x M0B\t8K@zB@F&S`cbP-`'{[! eDh;VEX0fK9-0IWfH  0Q){`##xFW<+*x<$9E[-qWW.(I+6aa@.y24x6_-"bbϫp@t~,/;m%h^ uf@Wp~<5j>{-]cK'Xto(hw?G%fIq^D$.Tʳ?D*A, `6B$BB dr`)B(Ͱ*`/@4Qhp.U=pa( Aa!ڈbX#!H$ ɈQ"K5H1RT UH=r9\F;2G1Q= C7F dt1r=6Ыhڏ>C03l0.B8, c˱" VcϱwE 6wB aAHXLXNH $4 7 Q'"K&b21XH,#/{C7$C2'ITFnR#,4H#dk9, +ȅ3![ b@qS(RjJ4e2AURݨT5ZBRQ4u9̓IKhhitݕNWGw Ljg(gwLӋT071oUX**| J&*/Tު UUT^S}FU3S ԖUPSSg;goT?~YYLOCQ_ cx,!k u5&|v*=9C3J3WRf?qtN (~))4L1e\kXHQG6EYAJ'\'GgSSݧ M=:.kDwn^Loy}/TmG X $ <5qo</QC]@Caaᄑ.ȽJtq]zۯ6iܟ4)Y3sCQ? 0k߬~OCOg#/c/Wװwa>>r><72Y_7ȷOo_C#dz%gA[z|!?:eAAA!h쐭!ΑiP~aa~ 'W?pX15wCsDDDޛg1O9-J5*>.j<74?.fYXXIlK9.*6nl {/]py.,:@LN8A*%w% yg"/6шC\*NH*Mz쑼5y$3,幄'L Lݛ:v m2=:1qB!Mggfvˬen/kY- BTZ(*geWf͉9+̳ې7ᒶKW-X潬j9(xoʿܔĹdff-[n ڴ VE/(ۻCɾUUMfeI?m]Nmq#׹=TR+Gw- 6 U#pDy  :v{vg/jBFS[b[O>zG499?rCd&ˮ/~јѡ򗓿m|x31^VwwO| (hSЧc3-bKGD pHYs  tIME !)IDATX}lU?Ͻ[Ju٤ Dn#? Ѥ"1F&^̖"˖MsKRGܺ% 0nF7/i9q -.qKs9s~ρiMkZߕ?ߗ=RM@3G6_ߝ/WeBRetl 7˂ o5㤮0{k " !Yf6J-^kxbvR3/>jј_~w|{5-6h""n `/mUh$ : @sN]z⿜$ 4C& S |Q`}^GXu䎘I=  GSg;6SɸLR)F(wW !OԼWx :nހ4@#h13$|@DR!!;{3[Sj3p9KF؈_㴸x9`@ ayF ܰ+N̶&? lZ|.\"({%K 9P(FX Dg1ysחאc`\?C;diҩ}yKc"~g}hBi,!V`|<[HF3 mgvZo44a FPQ@@(+J`i+ ~)jϩv=hn]b8DW{-Wi֢r?gLkZ׿ݝ䄤FIENDB`open-build-service-2.9.4/src/api/app/assets/icons/distributions-meego.png000066400000000000000000000031241332555733200264710ustar00rootroot00000000000000PNG  IHDR( :?gAMA asRGB cHRMz%u0`:o_FPLTEWX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[#wKWX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[R[ZWX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[TZZ{WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[TZZ)tWX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[S[ZzWX[WX[WX[WX[WX[WX[WX[WX[J`WWX[WX[WX[WX[WX[WX[WX[]T]WX[WX[WX[WX[WX[WX[WX[WX[WX[WX[!xJWX[!tRNS\5a^;pCqMqg.' ޚ };&$Ei >N+s:sϨʐᯙK> 9Wjte(1_ lmo])ɔպEh#<猇SLjy`J)%o  YOœ+4Tpi|F]? hJ[&:! 
pHYs.#.#x?vIDATc`fae`dd`cp/b3 3<d``DR$%-#+'VTTSsJ20ii221I320[HpZrXY0H;8:9012*{xz2x3GFFE'0%&)%'gd211xgegk1gF1WTV 2U74255wttwEO8irӔtR]Ϙ R8kvɜ/Xh%KY/^rȍ4ebJY'R~Ć֍47m^ϰ+umwhŤ{}<ڒyGý;7?!r2gΞ;xEK@\k oyVP;wc=n|։%tEXtdate:create2012-09-20T14:42:33+02:00f%tEXtdate:modify2012-07-08T15:33:50+02:00 EsIENDB`open-build-service-2.9.4/src/api/app/assets/icons/distributions-opensuse.png000066400000000000000000000036041332555733200272410ustar00rootroot00000000000000PNG  IHDR(LsRGBbKGD pHYs  tIME OȐIDATXíkUϙ鶥KEP Jc# `/*J51 )D+bm`%( Kֲ]ݙaӽ@j+/9>\_d鋝N}ĹF]ի $/oߵ}: nK3!6`QqeIǂ'@>Ueh8e̽~ +5G0l*C^1~t4?!)k{WjI?l5Ӱdֻ_U s`NTIO`GOewB ??8\h pe1;|awf=g0)};ryފ޼/) k.IENDB`open-build-service-2.9.4/src/api/app/assets/icons/distributions-redhat.png000066400000000000000000000044031332555733200266450ustar00rootroot00000000000000PNG  IHDR((/:sRGBbKGD pHYs  tIME !),IDATXÝX{LT?cfqf@2 ]uЬ IvCVkc$ĘlJ!%j1a!*n#A\k(BSQ:ay|\'ǝs~|}DE !C<Ï=ӧO~H)ϯڰal6L Z_Ϝ9SSSo"պgϞ+W꺌̦^occcII %m޼MQ%a!.ٙߔ!6mzbdacMMMFX$ɈT,#[RQ@≛D2iTvݻ3f)`}! _xѨA< '@ =D/ +Q O2}~&oݺ5P%y!guwwDWZbgP Crq\~d2usa;!’l|V_taܪ쿇#2~!Ymnhh[WgG<@&()L ╕VhWm}V4*,s{@)GKUsUUUq%=ϟׅxyO?7^> WQ" ~^|! M U5.mݺlܸ3լW^>qDCCQEΡt jAp r 1忆A6iC8޷o6=nwee%,Bzuuueee83,Ro۶mK3 l7^k-IӴh4JK 555EEEh4Hx۷o [흗oKfl6[wwwDFGGUUmoow0oNH$ ;X,`0vu~?Sux<Y 1jSSSVV׍~ge<ק'Kj\~#kϟ?-̳f>sddd֬YK|Ә(L$mSSS^p:Z1~ MOO\.ɴX"X,~?7C0ѫIENDB`open-build-service-2.9.4/src/api/app/assets/icons/distributions-scientificlinux.png000066400000000000000000000101241332555733200305730ustar00rootroot00000000000000PNG  IHDR()GksBIT|d pHYsvvGtEXtSoftwarewww.inkscape.org<IDATXXyTՙ}mUU]]Uu#h@6C:ΜM38cs drYLb4G1d&Jsb&Z4؈,twUtUwmݘ ss?~~{g;@ Tyuzy^t}ֹ~ əY:[D4Hy!(딖ܒM޲&y$;d2 txV Q JKM4vXOwtY["^_,}d瓗ww/9ϜH-ZLx@_t[M^ҿTmlkΎ'߯Ȟ5ݐ_;eG8g/t /Τg66{_뒮G O|fx#153c摿/oRux?|mox5yjTXlDqK) jŽۚOW;i43kxUJ+ '_cY|)C7KH:f̝fx2#hVB|./JU?륟EoeUu)"%}%|d-Nv?scn B6{óhC|fahrt0Bm4z7Jng.&r`Ơ~R̓zD>~v/@ yh>ߟϱmRbi~=PGkk9i\WW@`@L&mز>1vU'qCg\N?p"zD¶M0s*M¡]Nhi:w|6kשk#8Ҳd74g fC$B@P&ͦŻvmԨ8 T.5p|N$7 p94|mD;;o)FqaPRjii>γ+QIwk%k&[WvAr*FرBw- E=OLevlisl?7q ّz`|_z4ͪ?FGUJf"Q]V" O|3*w^cΝ8o`rk"Ac{uY]"SgkPĢt&M!h4tljLնEEJƷr޺:JI/hog0K&ºGӬgႁhv.'&kVf.gYo {z>sE$ָ`0xNTjA2} +:;zO͹wǯ:Ec u]r!g*hj#*ߡR|aR̷2Z0+}ۓIÇHk6 c/؟|5ƀ&a; |gf2OOʕ1+(eu^`*?tzB!mgr[J3JbV [{zJ.ynܲq>y6oތ-[H*:J5 CӏY< $D'G2~Mq ;D,u,Ӈ__|!l}<9|=3c10jR%w}-[[;xP޽i*|࣏PضX"Xqݹ?8&[w3">t(]0 d3A/qr?1V075<}4V@2M뿉M%B9K䶵_477`ͽ_yeۻ7|Ǿ{6jZ1JۣfCmrGRz9珁 wMMզ83so3DZ<^R= |ENEg:b "s泗gk,%,#J1V6Y~{m'd2]'N FDx^}X &MZ&"D`_J\PU뻶LLfL0T3ٶitwww$_@ Tjx5MeA \S7rC^(0ghS | ~HkiZ %'*)STwjӥt:ݧTL g>Ƕ]l=DJ t]ɓ#KZgL!y=S,n#fQ XV4Z?0RJ2F]*bM̆7,,he鲷l0 'tz`/"5 7i۲ٶdueKF?vU0n !87ybr>?}8OI[< k6uBla% ]dn2*8=jn TɌew+%W/"xx@1|>3 8#e*rOLf[]`Ɉ3ѻ2/)Rif [J9FR:;M4^0A!jEӪU) rZ+ UFSXLLZ댕&< `j,ٮ}(Cb([]vSFHDPK.n3Kb;;;aغl/ts{?WL#wxTQ`  =@H rA@?!#U۠XiE'FZk'oK-˸[e:5hrpivG"fKW5K٣L_Bfq!hzi~ E@?rpqJ{"rDU/%Isd6W(v`Wh]QJіKNPv8MΡu1ĵ|0WUہs@ʧTdӇkSDv铔{ jwP1UULpXrW)8%;|M"usMAz6ձ@eZr V-.Vy!`!_X>`Zc psit Te/JD6^[9JD55( y0o>NNm -ڌ]3֬XeXo[\kćfH7 VoX.?_Y/M_Y)~;)P.f`<"z͑ kZ{|6B},ƽ 8!SL\("(ˇ¡]/MRI,F 9=roE Ac0G;4,*mAQٻƄC뀨!߻SKHΫ"ddiGb~g,gQ,WV`x۷iOcA*Irķ vt s2` >taT=_y-~I(b`*oQ~* ͿOnKK` &ڮRcDxu gӼT+APeR|9 `YT<(^='ϼB߽@vWC ?>_K %a<@+Bl~7^lLH&Vr/ע~_,Q*vK[`㈹l C9]RVxM 3і<+@`E9 y?|mUNd„OlUc]>JoY9bpc/qAB 60HMK^`4.]{btQrߡ{/^ @21 *0!Ak}T  Lh_ aa6"a B>Hd oдS.((p48ЁmVb݋H$38);a!l@.*K5kϓ;롶] a(@r!&qRu cÐ DYk# f|vgĊ5\ Wq\<{mNuT_ @(C9Cߛ(ł S[ b-H62 _/> 1TJd-t 9 QWY-?"gq [?~яCt+7 /kQI@'֒"Bk5?DA6rJDdG-)Hd!_{RAa_.'qb͡n P@RA2 RFA' A# t\?, ڍ,UM˲X_Sճۨ!rryCq\Z "1ZDxp,Lƒ cBиn3MŸ.}vuUb<qeu RN q r'YIRM.]}N:WS`MyBS RH.ǹ|Lr{dB3T NrKc .n1 cf.wlzv/Cl] (x١ԆBr&r>_]PaO=pE EI?X6G{`1e4lG, K&<<8khF s=ap)oif>ϯzZ~0d+wr՚9e.4( hRÛ:{b,\@v䛘pи(4 
!tCDł*;kӾp?HFG`Ta__܄"s:ӵ׮[NGEs{u 26(퀎8ͩמ@)O=?H`0.^I_BaEUJ(㠌!tiSxկ岔8N37r$$cpC0SqݏPH%Bqho@;(qA%x1x Ì6 yyB]9rto]ARqt9.Wї^<(vQGC`M4,Tݴr2ӿs;}l(CQ4&>FupH9Ū?y$%qE=LZټ6nZ5Ө0AP[Q4>SK0 $Oogxs`Hzr^%19adnH߼jlLiqz%",_ 6>u%:pλh,= ]TA&E`qۚYhzcyᯩ[y/]=ygw O ֭C!1H(<$gnu&t ~ nʹzx,-= t* À?c*g4os < YB|lK#bώX/w;9Ahm.Ul?PuL|nA/ĸ6޺+*21VjD0fTm<еJD[< =$>֜#{/U@K8|1ཏUX#u[^csg zb&F 3yƂ zUFdoKqas/@RhBbj9<1jKԣJpݝ_/n 7AFSWrY?]W8i1iqs^E/Ӷ9&P? 2ŋHarfqL9B$7 œ=,7K/@Z ¬LUi&@ו۞ ig"@Jeh):FH<6660f0q4da^jee%bJR8I A՜3![RC0Ҭ,*H*Qz䁻[Y2F譌,UM(cNU\.Ag

    m^H$nGse89yÝ$-rk>|wb/w": kN}a 7?gp͟|(_yb(C=4 ʥ:gFD^RVyumM}(ǿ''C #V+)Æ":LgPxe}5I+xxPjq}UUezddwWkX('_lx6-p*־^ =W7 ?p86M -ޮ (קBY f|3WQ^=rIIENDB`open-build-service-2.9.4/src/api/app/assets/icons/drive-optical-32.png000066400000000000000000000024721332555733200254660ustar00rootroot00000000000000PNG  IHDR szzsBIT|dIDATXMkW;3Fm81J1Bdg1I: 6n ?q@ EC8El#![3sdl5]B_t99GwY"877w$0FX,~}޽/'Oׯ_hR `q\)8ѥ?P(tqrSSSJ%,BcXY R'&&ѨSpT';m$IFGG_|x7 jGz|>pm_x<>dVA~,b||H$2`vh4͙f)ehOviLNNloorI*ʁyPz'N  pҥGFF&mkKD J144tP(i/^V8ήZrVhp)MJ.EMӴϳ,j)~ձm\.a ѣs)!f{H)㌏z>؂+gϞ:"NհmӧO t:M^*n[VVűcPJ{H$ѣGmN/-۶V+8{ۂvT*nܸC_u&''w<BB)^SÇ>iJ)z{{㻪qMP866H)}(۶FA&appZCHӻi8B,"Hw0@.ٳgH)9|0ia`Y4LbY_FA" Rׯ^b #1FIܹsai0 v:RbH\FJix˲hZeVVVCut]$5ԟ<d2 "\p0,Hgl6B`rwL&b~?8O^PR黥ou]O{$I޼yC>gttԷ7wkMӐRcVWWy J]׉D"!Rmll|;G~v^ v=f6pff\6SӃiAU)EVX,/nիW ; TI ؑORԔiК*Wէkkkݿ׍-7T:@(d4 ;+HwW%-TBx-2\}wxq濡Q-˲ IENDB`open-build-service-2.9.4/src/api/app/assets/icons/drive-optical-48.png000066400000000000000000000042361332555733200254750ustar00rootroot00000000000000PNG  IHDR00WbKGDSIDAThYo4KBNå$%hHI"؅myiVOR_P!TrJD͍,,k{Y{/MHVF7w,Y>g҈ҥK~93!d_?=|q͛۷owuww&s\o߾ŭ[l6s~f:>388Յep=$@o@,C* ޽{w'#@v9dY,ճ}Ur8tAz̙/D"WUlgg'q똦 IpΗ N&a,fԓFܒ188}1盁r tiؽ{7d^Md'{9P(&\9Or{9t]Аj-!ג$r\viz* HR(WO#V0 b}0SN;)\ۑi\ա!dY_mliػw/D"Z@*j۱cH{{;o@-P9,LU]o.\ WjjD躎~B~[UN)@ Zva 'L&T*uKEQ/zU/8q(KfصӧO{.]sֺE۷`ŋ*R)% ۳xUQdn$'j c ѣ( 8223Me)T:9o*ÇQ(vZ+t=b&vލP(t*".wG~}+HN@ J) ?w3 ZZZܼeyר4M,Xŀob00==T!۸Ve{tmikk+/ugƯ"5>~^z Nr;^+i$N8L! @$Wƚ!AQByg i]˲ىp8b茥*DQRъ_)UUCI!1X!D<^t]03V0D4HaiiNEAkk+!hnn,o/oIӃuBF,C(i΄B!b1،1PJDŜmn]eF*<"cp`8$!```+++ΊPmmmx(NP/{|J)A:kgDa 2$Iax%KwPȲ B":::$g Aa@)‚n@᜛WJ,2 L,R BDQDPp3ƐN=WvEQ(np1! z}WVVLA jO_|;wc̉p8Ǐ#H@Eg0OebL&yȲP(@ 90E$LLL1Ϣ 4M{9??"={kllkBov lmmɓ'sgGeM@)utuDm/" <RZasvv[7b# @ RJ7oC__W###(R[ k `rrݻ%EF^+ΕZJZs^[ssMMM4{1SlJ)DQD&'OܹPj`wqO_"/wMMM';&ڵ @|َ+Z(8X\\4{ 0M3 ŐdHt/7XD]j1*ԣ(":nQ#uNReYR=섢p]\v{]#xp(H&aRsFsM`۶pI T&[E9@J"/J*J  /瞃#.EI$R6 ,ֻPc?QZcEiq#>.@7 ەՃ-ܺ Op| {^`%Wkj/ ;tf]QAq /[X,ݾyݚZ)6vRM<ۮzL:l0 vr896ze1 IENDB`open-build-service-2.9.4/src/api/app/assets/icons/drive_burn.png000066400000000000000000000011401332555733200246300ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8˥SAkA} IREr H魗RK?_ kC%CSALnRFK6u]3Y^G~ٌVZP# U2lgY4 v~RZBg|kY!t]ⅴ d2( DZpu:n6\]KV1D"!MD*Bل8I:y$0DH0t:ETBۭy8 DIK"N ԠLiC ܒ(ʊ(ϋ\.G5eX+}!1? 
OXz_RYD b1LO|61{Ǣ\yxHy=`=8'Bk4aE3š}}6tw8K1~6^rpǘw7?&ww-?KW=OnIW`p䆷a`R=IENDB`open-build-service-2.9.4/src/api/app/assets/icons/drive_delete.png000066400000000000000000000011641332555733200251320ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8˥SMkQ=ofL`Kd기/BH@?@\ŕPOЕU Ci\0LԤDR6$}S7zr߻s=yf3\$\hZ[<8mdxGPi8Et^h( 8WI ԝ6T%KtNhif)(Jrj@?HLcH\.K&0{<|~ _kYb\>|P `%S-c(މV+ZQC:p+b M(?W\|ĉ;d|GBiշOίQрcG]9kφG˖]yPo֜Wx/~ѾIENDB`open-build-service-2.9.4/src/api/app/assets/icons/drive_edit.png000066400000000000000000000013121332555733200246100ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<\IDAT8˥SkQnvm~i&buKŔR UH V5xP x</ ^ UhH ҴF&m֝X|7fvcvi"NnooMV 4Jb,Q &(.+i5)82T̪Tx( $Ijfp\eD1J,u׋bG cymO;QxML&g@ TWm܏Z Kh[>WHl*JQrV]&E;k z48GGL wIddq_׺-􎣲6 !vUCF+9ԝƆx΢&|pWsfɀ *9 Y78/wZQl?b As*|)waEŁKV,>62l$> xvhk<TFŦ 9Lo!=}9H=Cv?5xSzIѵ n?ϲ1 vDzoa9u=q—M/jwY^VIENDB`open-build-service-2.9.4/src/api/app/assets/icons/drive_web.png000066400000000000000000000012561332555733200244470ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<@IDAT8˅kpǿY}/u[ hvpP/ē첣<`/ `Z/\uj4K/Ń|?KB ؟33IBXYo5?ʬ0_>so={ݣ~(Ս cvy76u g"^[ŒZ3ٳSO@vb2NdMK7wʽZg_ժPT?E:H8j8|@?C껿B0)PP^E]Fxz}y(}Ay7c:Ȇ`LQh˲ vQe% b$ 3⭱zH$j "IDQ,ˣ$M o;rfnҤ2iL g].:MF0H̪v1!Q̝U&#u"^*̖#Yc똖^3y,9MIENDB`open-build-service-2.9.4/src/api/app/assets/icons/edit.png000066400000000000000000000011771332555733200234300ustar00rootroot00000000000000PNG  IHDRabKGD pHYs  tIME + C IDAT8ˍNQO H F\` I$0 x^7` CX҂#Dj{y]xyPԛ ;fOT~>TMR*]t]#ȳʤ>- J]wn8qjG5V> `1KSeW }7 _.cV٤f\ܴSlhc]<>z7acm 1 #_@:fe٤Y͐]H[5GX@ ̯͇J?K |>Ok ~Gba(@t=чp3 ?>#Ư"|4zˍ^T!Ea{;p7`\* Kz9Mq:G>=G$.zގi<盻p;]%2h; T6 k aEI[ h]O @ *w+jtIENDB`open-build-service-2.9.4/src/api/app/assets/icons/email.png000066400000000000000000000012011332555733200235560ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDATNa@9T)Xk\{nݺ3q!;M.0ݳlRJ;әb<0'\! 8:ũ✒:%uJTt:?|oUe9I{ȱ[IENDB`open-build-service-2.9.4/src/api/app/assets/icons/error.png000066400000000000000000000012321332555733200236240ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<,IDAT8˥SKHkTC[RL5 o= ck)c.$BDP,,NY(FFfkaf>o1Hb=fU~Ϣ=USl.ZkP(9X(>H3kR x_Oqg覆8t]tiXas_ j'{Љ_袺I~}^OTj5՟羇}vM悥]b7(Vl9o XoCn%M+ciѐ+Ci@IzW^5 @Ee6dVK>@dW2U/zW ѳ'BOlYxoT3# Yd(,aPG+_Rr\:m;gS o*0>N @aΣ5;50?kkz65ß} /qoI0-  R`ZUކ1̌ U rj1B CeqƊ@޳H \):xu4E3'ǃxǃ8>!@}X;Lc٧S/IENDB`open-build-service-2.9.4/src/api/app/assets/icons/exclamation.png000066400000000000000000000012751332555733200250060ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<OIDAT8˥Ka SAoc6.PDC Dբ@Zxh1N"CN:vf}󶰾6uvyy1=!4ИvCv$ HS8d*P. 
M`+Sذnv~cHVr}ފOKW55Bb `l`yj`yk1AjE @EoqS2FE64(l=$6#1![?@Dw$-WfW&D$Fn>SRJuZNWc#di@% b_s Rۆ^t&:?!DmSQeJWeJÈqMT 'DB:RE_as3ȯC2Vz9W[9ŢwU*B4!B|zPJ fVcdEmZVw!Opřzp!MuS>x9f0Uއx8GHv=}uʡGBy=-Ka J8K+${?`vLЉ37ӿѯLjIENDB`open-build-service-2.9.4/src/api/app/assets/icons/eye.png000066400000000000000000000013561332555733200232640ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8SKOQt:}L!-,@pcTFcT6,L4#\ 66ntF+DDn}Qܹ3z[!q“|{/߹"QBC=_t}R2|ۀ#S<~/~RWeB`B>۲qu:Fashl]?䷮Ҁ&LDmDX[tģ+ڈfl h*a`9ee;iՐ{DOx' 06m`yV&8nC >LɶU[nER*Ѡt 8 )Z,znq-r9EQY|PK V욃~p)ZmhE3&Q(6lc al0%l2$'ZM6}b h1\SUPa!Fp&۔)A>Zcbfrܟ;S D#фzT/_lsX0i#`>{nm%FȮ{O=ۏsu#R^j$Są69|Lw< tH.8iNK<#|fUoml`eAIENDB`open-build-service-2.9.4/src/api/app/assets/icons/feeds.png000066400000000000000000000014411332555733200235630ustar00rootroot00000000000000PNG  IHDRabKGDtIME -Mq4IDAT8mMh\U~LCԼB5PTхPݸ0]("Bq.WTP"K~-Z6Lt&o޻4^]?#FL1A bշvBFOL.UMfM7X/2fuZY:'Gѥ:a"q%Vj,~ @*[Hs%F0"m3 K'_M *{F:ǂ+gN"Io&fV܃/Қ3ԃMU 6y`kW#dOԆ]M,QdΕ_p!BBAE;ﶩ.|,!4z6N0OYm=u0>860cik͒UȕY[+ҡRBq=`~䜭4ͨv fFQIENDB`open-build-service-2.9.4/src/api/app/assets/icons/filter.png000066400000000000000000000014641332555733200237670ustar00rootroot00000000000000PNG  IHDRa pHYs  gAMA|Q cHRMz%u0`:o_FIDATxb? @4---3 999$ V @`BFFGdջ^z:gΜ֬YWZw  0' ;?))鿦AqWWAzlܹs׽|DDyyyB---r֭@olWRR°_YYÇ q7^/^|dȑ#G£iii x57g ƍ?؝0 b۷߿>}?Nn"+  sÆ K. F7  EҥKO8w RÀ@z CAѱ XH/@H0g ej`;WW(Pt32c AzL1q@@7Uы/mS a^aaa(dKǏ?TB @`h.V4(߿Ȩ. \ PАB}}@&@0()~IENDB`open-build-service-2.9.4/src/api/app/assets/icons/find.png000066400000000000000000000012231332555733200234130ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<%IDAT8c?%4ydÞ'/XfƦ"ʀGviQTo@5 )%k;o47Y%u ZbVTyE,Xݣo6ةm=x)k?"eKZ帹yLe? '?e}KjmѹÓ .dfay 6\Vo]Z9մ6\K/bĬ` *YBc~Z: ola_[S1qIbR/*.?##;L@ : ]_@$e;@1/x%|W4 |F=c 9WA12 5zaN?(8SSQEQBZ(`‚?֎nx% P`_IU P,aj(X@K*п9}ce&R12hZTֹU|&8cP罈`r'L_Ck_Lz 8T!•x$ m-`ruCI /n>e躾. r e@r.zJ G|e c ~GWVh),t' ު]h߻cF$mR#xt`j7^GaiO*j.7>\[JX:'L(6 pĆݯ -UP#2 ߒLjk¬a,#2NyV޸7F+Qp4f;,pD%dC3qmO sQ( ִs<3"tv3QȻ[./\'K.Rwn82AoHNZ@ o s@@2Ի/BR*IENDB`open-build-service-2.9.4/src/api/app/assets/icons/flag_red.png000066400000000000000000000012311332555733200242350ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<+IDAT8˥_HSaqQ.,-a[ jaHAQFe[ATaj)2-3cԅ&E,XcnobXz|7?ZSNg4] 2#^99Y{]G=<[`deXg`*`1Pyp,Lhv܇{ ʾ;%Wʭgst .7~?ïOrd|_lx`-Z1vV: k³}{}~nU;b͜ @Wy|:H- (K ?0d 8=J{B%˄IENDB`open-build-service-2.9.4/src/api/app/assets/icons/flag_yellow.png000066400000000000000000000012371332555733200250040ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<1IDAT8˥]HSa@2(0.(/n*H RfHr>F3Ӝ]hnlX6L6%cM*=wd;`cy./Ϗyy^M{E-ֽ92mK T.kv h_?.O0b"uDhZ˃r rI'"wٰȺеbw@`s_DZ=\G;dQ#7*c#/&3~X ${9 0E+rJ  tQלqRQ CUU EO!k cme0GyTYxar;. Z|*m#_ [IHӀN܏Sa:"XuXCTO{js|uʫ;x$ {MzX0$BŻDNp/+w dο"zjZ'b}Ñ }W<_!79>JaIENDB`open-build-service-2.9.4/src/api/app/assets/icons/folder_go.png000066400000000000000000000012661332555733200244420ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<HIDAT8˥KasfK)y)hQenpS._PhZ2 $2 ,C9ܾ}^[ᜇ߹)a; ئbώ'+ە״. JP8MJ)b†Zoe- Pqf!?:$ˣq JJ?K}"񀠂ک|iS'0B§88|_x +Q#Mɛ" WatgzR'9)&lz\v|JerРLZ\5kB_h;'_LhKD@xLG99;(zү/)y_Zf7IENDB`open-build-service-2.9.4/src/api/app/assets/icons/forward_disabled.png000066400000000000000000000025231332555733200257720ustar00rootroot00000000000000PNG  IHDRrP6tEXtSoftwareAdobe ImageReadyqe<"iTXtXML:com.adobe.xmp CIDATxڬMKQ竒."m\տZ#. epVBw$!!RΕ+\'3V^/X6CMPH;X_p0<Gѻ|>Jb+ӺLmp8|Ƿ\eu4MƋ,˫Z [[U7I3@ ƭ΅^դJj/PKt5=8xEz\.`K'EatXL|,J(6Ao o ͦ vL}gȖ2f1Oxb;"Hg}T{X O\bJμn("? 
MNҟaIENDB`open-build-service-2.9.4/src/api/app/assets/icons/forward_enabled.png000066400000000000000000000025441332555733200256200ustar00rootroot00000000000000PNG  IHDRrP6tEXtSoftwareAdobe ImageReadyqe<"iTXtXML:com.adobe.xmp 0IDATxb?U c1koϟ]i( Pp!LL,:+kyřArz/`M̬|흊Tr B2k߿fbg VX/4X?0Hpr fKJ鮭o}@^F֘+Q?`aaWЪNC:fl~ݣ>sf6;CqϋWo;БCO } 5ݫx {K֜ 42IDATxb?7z007ۼC1A2 32 :Wgc a|kbbbq:3PXØ 7;Gfo>'MZ@a4=8 c73F2Hg h>gJ^PH+P#No01qpT+F!yX~o/ydPg!|pݽЧPCXнװ_?zh;7\k Ŀ\0` D=؝s.{{P#EŸ'@o\е.xdЯPd/> zy o\ 9o4 ğ5 20fhtsAoK~@ O(L ?!)9ƀv 0KIENDB`open-build-service-2.9.4/src/api/app/assets/icons/group_add.png000066400000000000000000000014471332555733200244470ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8ˍ[HqW=CO=R4Qjs[McM}h:ee0%胆өK=[obB:O×?{8$$>ΞV{-η}qf^Iݱ憝KyX􇭗Õ邱=裕vs/TX3?{'z_gÝiXc+= Jv" CŢ%}aO]r1X8{>G0P0Gu<;(A^'A"R>y4godKOofh@0(M CY U 24% g~´d$@h:rFS 9U (bB|}PP%`{ 8xF*̇n] (a!{ MTiyPRF-#(_r}k]3\#@֗TU{x-^1: 8#@ е1DRcR MI:࿲ 0nq R D44em'ք:J27 `q& pǕ|(ܖzҘyٵ]o'@JܜSDGAΛrjd$BM*J,Hz%W߻/d/NVIENDB`open-build-service-2.9.4/src/api/app/assets/icons/group_delete.png000066400000000000000000000014731332555733200251600ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8ˍ[HQgA|(ʊK{H%KABp:t7̵fR:Z ^fޚbk*=-"G[$jj۷Af/9T?K޴ze h[Vrg254RiMy Py R#tj>6 kqŒfdC ?+&6@`q\llo$d3 sXQB+AV{? k^)cE܊ڍJD1?-,݃= }e_R9 m%q,y؛x _GBEnO/V]r=EkffSQ'@DY'*'R!&y$4#A('bW қ(Hap5Bd$H4C'|_X~b(-4Eb{zbT0[N230͗0Ϊސ{|/ĵ!-Ь~7 B φ7!.iA/Iм89rf0 w\f0jZw-g"M^BF(TT7N< )J-*:m977^(v pgyT}1ķNDg>5t[EMtQeS9JDŽ0ҧL:  מkK3Gb;cO>{1IENDB`open-build-service-2.9.4/src/api/app/assets/icons/header-logo.png000066400000000000000000000024161332555733200246660ustar00rootroot00000000000000PNG  IHDR.\IDATHUoLeR٘m0 #Q싙nFsbL i3fl`6!kZXK+k{]W }0k}[(Ƞv䖂dڪ "(B,z=ldcgRI&B4A_! 4DV =%$S!bF˄_ ҲMq/z3=m'a񌂶V #jѶk/$ȘW8/A2 I[3Y&=k)FP>eC]$Umc:Q-2R h: !E*S>B?@fB"KXd:2(&I˳,  u *_e?wYdLrXcͱfSanm"-on'2TUrӘ4Cݝ?|hЍ^!FM1ďxIS}GVs"Tbq~aWTq\Ɛ[6W,Sx}΋a[A^!.iQ"3 t*pHF2 GQB#\5byl歛g\A-؟g{*ϹHc #Lڶ.GP_BrY"Uo{ש[It#WR0hI<Ɉ1"#Jm]= s&rO9so0ն 1ÜKpK Ҝ8% Cgm5> ~2wC4kJ[y_%Y<b2"Aۉj0qNm;fF-b:tQxUN9OD=<'yEE3%#JJb\`(2-o. 
|4V9lg})[ʝnafsx.rR~s%Vs8 ?po 'm/5jڹj;Fas9Jg+ωZGNсW>\beɔyMhm#wG.-}6l"4zͺM ,hnշDf>ֿ)t@;mT hL/IYlIENDB`open-build-service-2.9.4/src/api/app/assets/icons/help.png000066400000000000000000000014221332555733200234240ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8˥Oa[Ohjj8'r t,)53 j꜊o"&ʗ b"_DxU5Z{ۮg c#Z@'U4Ş6$S=:!O;4A*g; G hhh!9U@8rk2ه.s~ƜYG# W'?ElqNdkr5XpP˄: 1XcHP`%:I]`t)*\C8h3G ?~sA jMG]Njf}s!t&6QcBU 4P۶A3-nSXxm$vJ,hHVb v(y; 6*P(GtFOw>B1#C6K#G#O4P$ꃄsfV36 h7eo %cGY')O4˹@tmA,Yi~0 -0>3A3 Aٻ|F2f+UvɁ 0a4@`#'  I Lb7do"^Y$`@$[ne@BՀ3Рsp'`%= L΄LJmޣIENDB`open-build-service-2.9.4/src/api/app/assets/icons/icon-search-black.png000066400000000000000000000023351332555733200257450ustar00rootroot00000000000000PNG  IHDRatEXtSoftwareAdobe ImageReadyqe<"iTXtXML:com.adobe.xmp QIDATxڔ+DQc +,,BDJl$k0CX KKD(=41Ӽms{f{*F;pcʤz;8D?D#p `DQN'[_ܣ;.0_D0spj \17 ,n@hФwN46.ů;$oZ0Z5:\ @E|t:ڴ3.zCfa%eO2 jvNk߰؏B{]IENDB`open-build-service-2.9.4/src/api/app/assets/icons/info.png000066400000000000000000000016501332555733200234320ustar00rootroot00000000000000PNG  IHDRabKGD pHYs  d_tIME %9tEXtCommentCreated with The GIMPd%n IDATxeMh\eΝ;3ɤI̤FMZSVP'J PӅ.EnE•l "]KR`6`(EZCM:!3M&sHZ=r8G|12~ܖ@-J+կݱ7}{^l7n}׏va.j$8e& _>nZ?;28"a4[[SXa=F+xgLng>4(GG_z᩽M)rJHY)p*1G=];_:IW > Cp"{i(UPJst\=.^~'=݃\ODbpQk-RJ(DJHP~\S`yR"{z[ٿ+GkKuԣɃ=]o*V dbGl\V"r FKtR !@RHၵ8kY)d-sn4TRaA͢O=0a4Rk\VqA6{R z/otjcWE!1GSI`,^:#(So1+Yw6̐IHx3XoFTٔKD|3W[~ajriU>ͻ+(ZIϦ _/}F1!cRMfU~N&FApؕJc"!Rle'2X].Ӭ'//=V&]S'PV Y˓S_577uJ/ZvY;ٷ_)KӜ`IENDB`open-build-service-2.9.4/src/api/app/assets/icons/information.png000066400000000000000000000014121332555733200250200ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8˥kW?wLdL&1 6Ѩ(hR,M!R;TA.\tm\ >VtZ΢043$BH|=/_>8U}T!su4-WNV8 (wOo^uŕr#ɞF֮`!r pzyeHnVZԜგ[C*³2??\S +K;EСzrc%5*cb]3_槻i4|vQ @hԎdÅ"@IzSlՒ,Ѿ1AֆFޟXq AǏd'bβE.r`o+)ȶ6P)G!wGCqnfG SJy8ux8q8+g~jnBs14({^&xqXxXƘ0 `~Mqrd;;?ln]-G "8:Z &V#_M_G_8T-y/LZOr_wnYf .m[/-q_1rdߪr^LJ&KӼ~-<]0(Œ1n+iU ' 4`)7 r珁s?w ?{Y!IENDB`open-build-service-2.9.4/src/api/app/assets/icons/key.png000066400000000000000000000011441332555733200232650ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8ˍKOQMYpKBр  W bRhH"4 %RR.Zi;s m'y73yrΔ(ɗéNEw$aՇT 9:$Ih^U8?A80?OZ~ԁlN2#71ɭ2Y$#@{ve+﯄,Mx 4ߡ;/ @buP=|FҲb3{!˭8\OǤ3}W9Mp*-LvԆ=h=8GFyGڞb¼P޴TxJT2A렇9sOnDZk)W9JZB"c@kw6m~RA\ve v[lWएп; bb#YX3Ԑ~~!yC,A uBvNq,fX}ۦxQ3/׼*ݨD̆kIENDB`open-build-service-2.9.4/src/api/app/assets/icons/list-add.png000066400000000000000000000005031332555733200241740ustar00rootroot00000000000000PNG  IHDRabKGDC pHYs B(xtIME DxIDAT8͑``rҺ`6[;tqDdZ &g7Z,Vi1v`9=0+n/\ϵUR>.009xrbD["[-$mx\9XwV2@@PiAkeRjt&Rȿ&ROXNOӅ43kgO=TrK wIENDB`open-build-service-2.9.4/src/api/app/assets/icons/list-remove.png000066400000000000000000000003671332555733200247510ustar00rootroot00000000000000PNG  IHDRabKGDC pHYs B(xtIME ;"\IDAT8푱 Pi:+#mF stԶ"DO!ʳ3w&H6暺`?S-FIENDB`open-build-service-2.9.4/src/api/app/assets/icons/list.png000066400000000000000000000010051332555733200234440ustar00rootroot00000000000000PNG  IHDR szzsBIT|dIDATXŗJAj !Dࣄ &SD%3k^vPDNm`gw\wvgow_uW4Y|rvjuW4 JxZ. !(P+mB>sZl tFV`:dUln] [, x  `#\FAUw@'m h 2Pt];Whg)@x-OEU1ʀhDw? 
⋆׽fDTwm &g"rj| @l6#;nubvC;p.d:0?/k6EXJ%-Ls IŔ`CkI[C.=`: @y DQnh > OK(J3vT'Q?l.Ϲ^8G `s?Zqr͇oEIENDB`open-build-service-2.9.4/src/api/app/assets/icons/lock_open.png000066400000000000000000000013271332555733200244510ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<iIDAT8ukQ;i )R5pB 7+) a܈@@Ņ ʕ4jh>3q\!{8{jzZ{+cc c̽ ^Pi@R!y+++Iպ199Зz|(OJ%h4h6OTRaNzq)bl6/A Pfgg) ; T3ưAOsjj:ohXk{ibbb BWWQSXk9s ш5IRkuO 9w*Q c(RU#fO+++JFEvV8UU0F:Nɮ.Růfxh)2$g~d2IMMMeo,--EQ8 Ba_R\n Eggggټj%^]ZY3jH$>ooo7^%A"g tLnłJ\oiIcggr庹zJ qFFF,{]§mS/>g/3 e@T*a` KE?*{7lߏi?lCIENDB`open-build-service-2.9.4/src/api/app/assets/icons/lorry_error.png000066400000000000000000000013431332555733200250560ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<uIDAT8˥S]HTA-ׇU7, Clȟ(C $`( ^ ,( B$e%Z$\e3WRw{t\zp339ߙQXPa=:apAaeu]H55 A0 H2Adȸi @F5/rRt,7Z7qj]L U7 :f)5Vߋ+G+@$6o9rS_N 8 "$ʾbKE.~2,Aד8zquj)eRԬבp7":ۍi/J׹kzLKPE+2.CX?1'f-&/Rμƒ\U37 o>WnDL@iNP l㒥y΍G wFkC5nًPLIl함 + ߡ}yS>|^ZY0=SX%Z8<;hiJh=9x/c&E4t~&[6Y[f=i lm($:?[f)1u=ݫ3#LQ-[ܟ3%IENDB`open-build-service-2.9.4/src/api/app/assets/icons/lorry_flatbed.png000066400000000000000000000007021332555733200253240ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<TIDAT8c?%a po>ߵa+v.ޒF.u{/>Y$P_|wO@'?u[7q)Ā 7\ܴ'&?Wה_^"x_y_#zgW ?ͭc\yw]|_%lĀx_V=Gs1?^?k>kZװn_RRR"LL8 f 'e&<IENDB`open-build-service-2.9.4/src/api/app/assets/icons/magnifier_zoom_in.png000066400000000000000000000012501332555733200261660ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<:IDAT8˥[hqƇ]D]YhXtUXZjHc!$D LkXk[+ۚ}n$sjtI}/oɯ3kݦ%4(0~ _ <=qfz!xWAԮl4seUjbHk]Y͝˗:te%k~IIENDB`open-build-service-2.9.4/src/api/app/assets/icons/magnifier_zoom_out.png000066400000000000000000000012211332555733200263650ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<#IDAT8˥oHae^'", ZD(z^XًQFT(ˎgednn?Ά9w^6fv݊`E֋{xT?Nj^fZ&la σBIFx"Ȥ@%d0) LRԗ<i&g.;pDddfD<lY ֱ/hbRT7%`8*|1ZD؊uߘ2*\n: ym|nێ Z6U:!#ʭC;ێdwx}3֋HԬDIN?ɂTd 2OzsQ^.C1H"勻-)>El4:Dokn H(_L@qw4W4M.LXzn zG,iˏBlSopN㋷荔x 0[;Z[M[Ol6wYá_TXmXcBzXP@V}np t[c⚲|mEIENDB`open-build-service-2.9.4/src/api/app/assets/icons/mail_generic.png000066400000000000000000000010501332555733200251070ustar00rootroot00000000000000PNG  IHDRabKGD pHYs  tIME )]q|IDAT8ŒMkQdf6RĂS"ɪ+Fqpnu-X"5u{\Lf]8{{#zkլqQޗ* I_$:k,T-GGa$Iyn $IL(*uvvrc*55?Y[kbm" qzO8YF0v#Xqp}7?"9;H?|fc=u`8[6[M^<O lȻ=u 1ʿ}kKxFLA)&֖P T*jKܻaHgZ ƀk=ff?s0֠t_ŠRx)x2%,2UHCsϞcusւ1)0F0c X!ʪ#gIENDB`open-build-service-2.9.4/src/api/app/assets/icons/monitor.png000066400000000000000000000011431332555733200241630ustar00rootroot00000000000000PNG  IHDRabKGD pHYs  tIME :4$gIDAT8˥=hAݙݽ"Ka BH!`as`$jRXF.1VHXB*AD=I 4 w;11!'J|eg&(sl#iJoj93T*zF;ORG p Er%cg.C2 <6r>dDi١:J& i.E%)"$ ֘k*F$1={s #B vA(Fk˥]SH$1Qq՟ N,A0ǟPߍ,Som7?5;Xcz x(UX !?pY# . `uG9.=hmVcqZg po&k4b<5MZkNAT@ z~7E8@!1Hz\\0)~#wӈӈض.,m@`} NP5X#b`felF9]!yeo==3 @ Vo)+;XռZ ^t:8\0Gq vՌ9=J֛;UL >}NB9} Odljv2ƓGU_stJ`# IeQuvfWZX(,Uqͳ8 ʁ&>b4zPB!`:a"1C5hiL4 tw3d&|Wmv!c{CԪv5IENDB`open-build-service-2.9.4/src/api/app/assets/icons/note_delete.png000066400000000000000000000011671332555733200247710ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe< IDAT8˥=HQ}zӬ#J>hh\KBmEK9A*jA b^x=Oý~ÁQ"^*bY?Gq%DRY[ĥK5xg^Tn(.J_j}"HЕ "P5 Vh9B+~ VB0H0H"r Obb2C ܿJmΚL!芉+o0?Fp4ٶ`AB7zg@ (A|Lax$D@`24ʁwQJnKS4tD012dR])͝? sCcxgn%%7 *h:w=O{d[/`ȶv!]kl8X "ROstvݢ@(LΐWGZ\&Q\tboш_|E g> 0D]D 0ub^62ĺ]D, !8bwx1X=ѳ`bb]b b?'[[M."{xa3`hCrT[@Ơ.2C:UZ nKP8`zHP;4Mt]0=iN 8"'(|NJ؂"RVNIa軟hu.ݎM}z+>8n! 
F(sI>~Z>| (]ؕ{okAOvR\5gDR}l>׿ WKf(klHTģZ2O{_bg2hç}^chQs݅o=oRSZ/JqÚ5HukrXIENDB`open-build-service-2.9.4/src/api/app/assets/icons/ok.png000066400000000000000000000012251332555733200231060ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<'IDATxb?% (8Qc@7 @2@ĬHl(A!A X2lj okȧdP}گ? ?YB/oV qj $dIH6]'P"@10" H3]"rTW__WہÀꞀOS ; B׀&03$0Z11|88> F1  F " @aC/Gpr000r10 0`^1a8ph:P"A %^`` S @@14%$@ xjPM3 g 0f6Or n5]ex4ؿU |i X?=L ^5S(\ `N~` /, {b,HIflb47Ź QU!6IENDB`open-build-service-2.9.4/src/api/app/assets/icons/package.png000066400000000000000000000015251332555733200240730ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8}Kh\u{g̝4L)B1>n\F)PAD QTWf!"؍` HC0ijC1ͳLfw(v!g}69)wyc/T4"Գ X8ѫ̛vכ*WGU}[E>7&tJiG+*rQE>{ `=9o/rorgaw=MЪUQVcƏ;}ź;qD>r 7jT1aU&m86l7KVio{Ǥ9sVԨ,O"aWūmptk.h-V}֯K*R\M- Юޡ'O'CU0)79Ms&3P#V 'w'ů N,op#LRi4Qj h]GS\;QƤBH`1@<Ѐd^{9] !"dlǁD @|0mp  Q a+kK7 "$ h$?$BrN#&Cif N7SV[6laL/HbF6tX=ֽɕد}GO|뻗zUC5rh8D>)4Vwͦ|"L\>2FΫ[^yjW?'q{gBio.ȧTctܲ^&IENDB`open-build-service-2.9.4/src/api/app/assets/icons/package_add.png000066400000000000000000000016031332555733200247000ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8uKh\uGϝ;s'͐IR4(icҤJ*mJ".Ŋnč&V!M[I yiM&$ 9KUlS!FT#W7YLS7Uڦ#QSX`VE?S#>;)`fVTȎZ'l| ;g56ƭy9"_t)FPum-GqAK3:ŽWq\۱q2j̩~oMc[wZNM;nfT YEw_وωOs;6N>9M@<>ˬ!4||:Nzqc@nŰ6tPUW~}6"9Ū :!"5ש& KPX$2JvGQ#V;]C~O)L&A9`ʨ5]-8u]awYTbB$_"2j 5P5 HHr].,&!"ama(A\0Ep(+dʮG➚f~,]`rí($ 0%P~\lكl cy<Ҿ91OgG^8k A; Ȭc[j}O~7o;_;m>1'εcHXVD|5:Dvٳh/)smrOLu*\c/[9>8h}hbf*&Fu39_>֭"' Ǘs׀>o#)S-IENDB`open-build-service-2.9.4/src/api/app/assets/icons/package_delete.png000066400000000000000000000015731332555733200254200ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe< IDAT8uKh\uw;ILIcjӖ$5ZӊD`j!B E""b`ADD\t!5 !&c:ӤL2y>,4ǁsl^AsGg9ܪ- SYf5}߫|KU="{I"N(wx HCcmmxyBu;r,(nN11u{O ?5dOԊ Uc/xk:q$)kw=Z~abfq9 }K$V4ZJa哴隣=jHICJuM M1bo*WpwAl(n"Noi). UR^_'% ~``@` p${ @6`8RAH-@Vc)Vo$~ee"ʵ9uBEi}B` ⁩N:B-qvP~8SA}!s%IKGqgF~>Fh6b̫[S͝O8)JY6'l!O)OlFjj G~*ӟ}3 R?wj%K(ܚa?gv@HEuxҪ,v-eQ|LO|HTEzzͰv.(|t#XIENDB`open-build-service-2.9.4/src/api/app/assets/icons/package_edit.png000066400000000000000000000016271332555733200251030ustar00rootroot00000000000000PNG  IHDRasRGBbKGD pHYs  tIME #FIDAT8}Kh\{gq2vRZ<cfQi\ mZ""JucJ f#ERJmChҦ-tLI2dwfrE ۜvD.hPE>n:sw_Dɨ~";UWX_ϨeusL^kTM9{5Lq$.zYU/TPǍĵ^V>*ph${;z3PC!u='@cg}İ7WH3 FՍ`pWc<"٭B&ыxsIrhn_8=Ӹԍ{5 bڗ($ fHg tTy<: %hw^Vvu`e"T2 +ULרՠNIM.? RYJ d՟^*QLc{wTP}E>5= YI+Q 7E?SȀ1cGE:ԑc~bˈ#rpԃQ1Y~Hד+߹BTE>.ƻ?b1~ oyeuVFESK,l+gso 2/*y|smH>wf*,OŖW_4<IENDB`open-build-service-2.9.4/src/api/app/assets/icons/package_go.png000066400000000000000000000016021332555733200245540ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8mKh\u;g4$M$MKդԦT!E¢"݈@ .D('I LLcgIژy޹?]hms,UeL7"hTEފ}_@bh"'7eF*xhfMH@U_R}ZC1;I TC.9?OU䬊|3xCP`ˮcA9naj7S--43Ɯwb'kvW 6m { ˬ.`"n)KȪڹj&XLNQ^IW#/8tr c'T^ϒ_žO DA)XmSӿ?Wb! 
T6VO},B27ʷ)r5R` m'b&b@ XSޙj@0*Opσ^ qZyV?b{.MqNͦ8j ~5P5 .Hʛ߃8xc;`xy~oGW/"xT F#Aɬ.E‘C|$a0Cm&̀ss0*ኇ'.qW l:H-sO9\X_#o0!۳GNZofc]L/\~Ã)OYENGlϡq7v1y &ϔ= ig\o9"o}K'Cc)Gx,3ΚogT]D*vK#5M1ܐz{<kIENDB`open-build-service-2.9.4/src/api/app/assets/icons/package_link.png000066400000000000000000000016531332555733200251120ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<=IDAT8uKSaDA@/zY/мlKwADm9)+jv,.s4.v!+FtyL=sΚsy6۳R=_Pp=c.^V%?_&2ץ2CĒ iJ]OebU4 ,<>?$f)Uh8bqp,¾"`/FȭǢMgIX컿d|J`?a{soCzuGE\Ѝ-*LY{1X~0\&+UK+LbW!,O`z zFiھ.666:99Lz*-\͖eJIRψ'XیFIENDB`open-build-service-2.9.4/src/api/app/assets/icons/page_go.png000066400000000000000000000014131332555733200240750ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDATKVeΙGm$d7fe"EIѢUPmMv"ڹ+6.v]2»68fޞ'2Ӷw>>1J 1d+y M}xώ@d>8z77-%' 䍡'tfȞsBiѶYۿA>K3n:9?SϾ?|~@D,f)F m "P㩕lX $%(JD(Mߌ{=4|vH2 !d'% YSc-I"DJ6QJ 6R! (D(J0҄8ώn6@ڑIE"3JDh溡UJ_jMIv+Π\\}A }OR]kګ֭҅`O{hvfz灐j:}/а6w1?Gf[^߳rbĆEM\;&^3.\٣y:gZZsЋc{5ҭnc l7fZOGd|rS??p`Yκ,/`x ^{(&Orv<-Doz)"H9@HsS_]}rm aS/IENDB`open-build-service-2.9.4/src/api/app/assets/icons/page_refresh.png000066400000000000000000000015321332555733200251300ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDATmheu?9|b*VI+R$he!A!AeSB?D-!}r$sJZf6u;<}]~ǽoJu۝X)Au|;3SyrѝuN  cL^UYR>^.>i|ĉ)DSB0Yx3S<{Zp"̒|9D@(Jy+gmsK#@N'%¿U,̘N&%x3 Ѹ1\0 ia5Y~ /0`6︯~ /mGMQT*n/\ifo" `Y &%KMWCdJg0 03\bN-gN{)nMW GX𪆙aXw3Pポl[+9'9ȹOW|TAS_q:v\O `/ #!M |4Yxi-۷ȟ!DbU; {K6-X:E"W+_!T-{! ,BW׵݋7dپִhfmwܙ+"h!E}ȓ``&19ky&w%ńevqIENDB`open-build-service-2.9.4/src/api/app/assets/icons/page_save.png000066400000000000000000000014061332555733200244300ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDATMVe78bDfai*QT@h妍P;]B@hQTP6Rd%XAT(T0sxM(a6/AV]-_cgNʞ| #[/?_8s|qmibGm)%deUץOl]z^té̦m "PmYܻٶ'׵s3@I JD(J.][s} ÷h&L8)me4ZkR6DE'.X?갳a+k>+4;uʚ/~{=ov:_ >rIqԎL*2V#nt+"֜LI)IFءjMFn VnaG5^n(#e?y?Wu\%2dzjnl-[6әF:;=2?9oՑUXU/F/ˮIENDB`open-build-service-2.9.4/src/api/app/assets/icons/page_white_add.png000066400000000000000000000010001332555733200254100ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8˕KP[8V"Apj[JS,889U`(ZRǜKSӷpr/wk`PkRYeUѣ+J^7 >,4E(R.C&A<X,I ,#P.QVJ dY$ (JKthD |dkf&6:Fk96J*Ȳ)=0t:q A#Z&r!.Bo8 ذwٛ4_q`ST»ʼn-_Z?^D(z }=~H@ a\a0m4e5eE0<#̕"&`AGya{4)l}O/!&RIENDB`open-build-service-2.9.4/src/api/app/assets/icons/page_white_delete.png000066400000000000000000000010301332555733200261250ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8˕;KBaգnBCMMBh("Zڂ38PC.(8TK 4u∷=^#~y Qa$CCUM`$)Y**r`rZ%Hl6 QR)$ aY |!ϣP(O" L"Ȃ\.:v˒6H@t:h4Rm&&kީoX,&W"BwA+/hP;4 $ Ţ< ['^f2~^PH!9Ǜ݄)^׻lV|fN_: iCϾ4fmQP6qd/*2[|VG sl"2}ݜ 9 GdO 56mkIz]˥a T _#>U)ےIENDB`open-build-service-2.9.4/src/api/app/assets/icons/page_white_get.png000066400000000000000000000010041332555733200254430ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8˕MKQM!Z M [.PladdFqѢ@ ms5?0ssaka`0%0Xl4f# %Hj ۔J% iYJ :$$ Z $ORX,"A@D"Y# (P.f{ZjD!`m7;JdA7j UB jfB z.υ$ĿT C] (@Np>]P8c6 #֍+ޥ/>?>/'nqgջ7lQ+LGՂAsXzߋkf껰Ȇij}/bg<7G!Z#ɠlrs`%Co4tQЛ1IENDB`open-build-service-2.9.4/src/api/app/assets/icons/page_white_put.png000066400000000000000000000010131332555733200254740ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8˕KKaE"!hQAIe1- J \ jBA Rh-\HhA(::ۜ309g822P h0,0@ YU8.U**rv%@C\.eki$ID"Qx$tI@ Ţx',RѨ(0 #JZ 2 bXKKՙBL}KqP(YL*y$jfB\HB-*vױ}o9 ӭBT%h (L<ѧǨvdx:ٿ {Њ7'\;TS;s`ۄ#\0?vaV2+:.ŷw - 
ZeR:ؕߓֱ+(\ҴphIENDB`open-build-service-2.9.4/src/api/app/assets/icons/plugin_add.png000066400000000000000000000012631332555733200246050ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<EIDAT8˥_h??{v&]IVQP{P^RdQV&Z+t'̟{=< )ԩ9|9?V̉&TU&4a$ZZ5QM%S P,yB pwΌt3Eвƶ@L$!K4PS075n_ oeDtKZ')(擈`ފXL\*SMHU8D)AIД#Yi*z.T~Zq 1kq6.@זatUgZyP]u}>$8FEgcq)  !+lc&0D7#7 sQ%21}.XGiBiwN'R-8!U| ݁[)8Tbfvwk{;RMvKdD[lds_5739>-`j#~t1+#7@&0 9j~TvGi {K=QWG=0 ` aWhw6pL[D.3 4^;2e?|.t O;eț^@h BPgi긶O9'_{Y0C`~w{ CgED\: Q]񜜜\0J)PKG7O19/Qߪwh/kGYo=_fgoɢD N /b tm*hO&P,;s #lq'DZ=N?t3Kˣ܂9s- VchF0%CG.}Y,] @SƇ[oGFڑ o he`K~Ԡc˔0$G4͸_9W 8m;t/c3Ϭ bISc0u"rn gqzue,Ӥɐ *'vٹ́.`H;"9MZ1lSr{z/z>MÒCt[6pfw+S~Y~8{^ti"t({`y[GFW&H;YGt}Nℱ]_.0$ OfXԵ)0'29aaJgnA)*/L.;`DvH31fV `ۻduoSRJJC ii7_Xpbh||>hq*Ӣh27~c-c4G\jt=؊{C×3dMZ35'yM,ۀ}QSn[Uw%rg„}#MM?r!V7u> 0VDrKe(u^Jn[5ԡ5#DI)IYB}GY*h;sqܜqC0uCVk۔.`I\6?oq%*nY+TH^;WAP sΜx.cg[[~زpҲޢ}sSD`&L6 7fz@mbU4-MҐZ #VP`&,p<'0jReSvul+1!i؄o-"qYa42:^2 dj_]7MO}xBGu[;7OGoGڶ2 uL`2MBO^21WFS!;؍?6t0a .^s2 H49W4MC|"r])D '\8 M3P 0@T"BSc Ftp(B#/%-;v|إ2L b<|T*u@z=ڵERA}(>ίx[p~vbMGX7ݐԞ.N,A``&,.: hl SSQ0 |M:]!HOcgJ%IK0W5MZ(+X,! ,J\ZC'9y2Mo;\LE LVs9k7TXuuKq(u7R.\b 6+} ffu2k_'0R 2##Z*O֮W빦rލz"ͷo!H Jv;Ю+eiR %yqdGGG ,!>VzPYB@AocaIENDB`open-build-service-2.9.4/src/api/app/assets/icons/publish_enable_blue.png000066400000000000000000000023521332555733200264620ustar00rootroot00000000000000PNG  IHDRw=sBIT|d pHYspMBtEXtSoftwarewww.inkscape.org<gIDATHU[lTU]sϹ03wCB-,#JH#jb 1&Fc|`baD$$ &1b &2N綝G~L 4r''v{q  `}hJk,Htw~6ylʈljQk. 9Ej2ް*T^(='QacNp@hSO}MhYj*Rd+% ])3)- v2 e |⽘{XKLҋ3sId8(nK0y] D޻k]`#i\ =ˣVe;QP t(h3 F~ hDn 'ܜvl0 l1OI#y6kE\hKcnp\ֽp .4"ߵ`,Y%e+$R"u37_9y`-2i \\iE֣ H$SB߈5B 63hAɴo9P;Io_n".牵~`ιₜ.25`p0=S<;lpi*8S`xܣmM+=5RM>Ʒh;v222XV݈!qr#F%(NLUHԥB9mHTWW҉_d}K˟Gg,YIENDB`open-build-service-2.9.4/src/api/app/assets/icons/publish_enable_grey.png000066400000000000000000000023271332555733200265030ustar00rootroot00000000000000PNG  IHDRw=sBIT|d pHYspMBtEXtSoftwarewww.inkscape.org<TIDATHU]le~}۷v?D~3#NL hb Qo11Dv$^HJ7S 7$ $[Y?a2okgY`7xyoyssyE\4je=fKN>RSu!qb!PٞC|sÆ555]P٩gO94!B}܅Ba;ضlsm5?$HA a%6JEޯƯ]GrgpvpW۶d2$O<梳N{)1 *W8Rvhg] RǺ ׻s+6 4:DH@DH&' "^6 S .=vK4\1j4M3 ݕ-15#VP`&d*۶RITEMȼiz %&y=ܺy LSv׉ D˙;x5%SƍD[o֖a]p8 |gƺ:vp`iWHUJR:F.]^FLЙAL(":opBqu4fK@]/|m?88~tb*%Xg4560tX )T=ǖmc eig+}>ghtO}'$1d&R7fzL2̟&m/\l }\l.mZmKECXBݚiqekoX*ⵗ+لrIB76pߋf V3az&/<>~F-@/_~fg6h؇5c[||S]C & q{y+D:3Rp/r~ M޷ɟ;v׫_o4=w~L&6$v\StK)] MtFDӴ~3MtGG׷dOpDN3?{ n]e7IENDB`open-build-service-2.9.4/src/api/app/assets/icons/rebuild-light.png000066400000000000000000000012551332555733200252330ustar00rootroot00000000000000PNG  IHDRabKGD pHYs  d_tIME%鎉:IDAT8˕MHTQ331)#1ciܠUGBJEETvA1ZEB"lJ̤&g=i1:&Y }{t"1DWxn@Ih< J, M>'^@ Kh7h"z Be``PS ¯L$bvb8H \~y{e2 ^ Y؍aa Diř9 b{&sdUu/VZ4;cr@Cn4՜i ?>|-vMu|OW[` a熸ɠ1G$R3qS7{QzmJ01:Ak玌QcH%B)R[;2gO:& "\~/'nےFkfBHBH6ue\wzXPԑ= Rߚ <};_.|SpgDF`!gZ_c%AyhC9gK.޿m]?),:H>}^q_kݴNqnvҡj0qMgIENDB`open-build-service-2.9.4/src/api/app/assets/icons/rebuild.png000066400000000000000000000012421332555733200241220ustar00rootroot00000000000000PNG  IHDRabKGD pHYs  d_tIME  #3/IDAT8˕KTQmƙ2`dPÊnj[ضh&iu .E"Wb"@l-D~d&5T433s-&D }syssS_Vb) qиG8X\F ]_N2eEB#u(!`a@EW'FQĬڝǢ %xOtJk]![aZ۱Vn8w-!QZq#k`M=W`.YiUK6%ͺpPj` 7`wN)OSOlk7 ,=# ξns3&Yl/OwǠss4?7*LjC)RͱU{<|;{iϒ\g4W`*O,Dxu^T6Z`h_ѽx⏙S(W{c 'O|'o:~af o|#g"ݴ2HÌat=rm51B O+}p61oV 50AIENDB`open-build-service-2.9.4/src/api/app/assets/icons/reload.png000066400000000000000000000007451332555733200237510ustar00rootroot00000000000000PNG  IHDR&N:bKGD pHYs  tIME &7ĕrIDAT(}=P/aAPApKqh`aeF `%ZYY `'MJ WvlnF\sdL*f%Ͷ.A`XVdlٍ؂9/)>b 
Q;qb?m]~=|J@i-*d8fy嬨y\><R"B8Sbo:pfdD:n%=DZ':¿İ`Wl*+m]v*A>jW pm?FQ5~e\l2)4kis+m]~Nuw9>D{qm] -xmco$xj_6f͂ߨIENDB`open-build-service-2.9.4/src/api/app/assets/icons/remove_filter.png000066400000000000000000000013661332555733200253450ustar00rootroot00000000000000PNG  IHDRabKGD pHYs  tIME:IDAT8uOHaǿm42EcQA[y x.DdtAxb U;v /<}?ItuuAuR)BUh8&˲dYV54`YHBiuݧr7 \|$ ###XXX'˲HiNMM"ST `yyCT81ppph-H&a(zLOO$`0xG!D=LB-l:gff800@@ @UUL&Cb[X]]EJ%&$!$IUMDZERAP@oo/4Mχ/8ض 7~~im/! 4;? ÀeY0Mx<;N!:::z1<<ll.I+aaãwGip:j \.Ȳ|Dq3qz5x|6,_T*Pm+Z g=śn[Xc8NEQ6CCCm z!_.QQ,j qn6g󨿿9e!LIIENDB`open-build-service-2.9.4/src/api/app/assets/icons/repository.png000066400000000000000000000014321332555733200247140ustar00rootroot00000000000000PNG  IHDRagAMA7IDATx}OhS?O$vMԦSF;"<(Q"xcTe`yA=̓ )JT1$%/y<I~/,Ɗ'5ظyAn)2;v]=,"n}]惵3Zujqg}|h n^*e<`}HHxHiH$:,ʟfֹ~@L-8X H`ija,pXSs`kXdU@ͽƺ0b_6nh]sBPP+o>Iں1@ی=vO[OxC3ӯQtoZNs`MCX=<{;{.7'^LO2|Ζ[:hM-`pC0YHO[hݫBo~{Ǎ|UWI匕&BfDaJa'J|b5 ,?f4Y/e0T\MSVsU+kG<%(M|V_m*Y'[X1ugW? S fӵPQ-2qGm^P"^q;7imKVn1P.;HEAq޵wAD`hkvѣGz$:yX:s73.hIENDB`open-build-service-2.9.4/src/api/app/assets/icons/req-accept.png000066400000000000000000000016171332555733200245260ustar00rootroot00000000000000PNG  IHDRabKGDC pHYs  tIME 7*ؿtEXtCommentMenu-sized icon ========== (c) 2003 Jakub 'jimmac' Steiner, http://jimmac.musichall.cz created with the GIMP, http://www.gimp.orggGIDAT8˝MHTaL1͑QlQQ4&E.B(,V-*ETR( "((j["DETjܛN al6<=_'?[ظ3 T[&Ÿ?ͻϝ 4wt퍭Uk:#km'-Q7˺; <}۝;ƨf(ce#+s >b$y,%_FDDdsZzD/Hϫ/.wY%<r$qDxXBAA(W'n۱wp=/x|vl_PPP#wҘX̌M4єXi^"2"0e1.XͻǪ Q_ͭ{h\Î,4M9H*|!mkص)i8(m41MCRO-hm aPYYޚ <۶qy!<]s?D30DQ,@.x]:R,Fs4_;3L"bmIENDB`open-build-service-2.9.4/src/api/app/assets/icons/req-decline.png000066400000000000000000000014641332555733200246720ustar00rootroot00000000000000PNG  IHDRabKGD pHYs  tIME - kIDAT8mOSasm%@+xij 6X\:8iL q1P$(Ji;&_M{Ua==1DUv \Be3?k.]vͭhRJiY|x23^(X5*FN><}j&1}QPȑ|8v728emZ:M W,JgZg&xccw6ǪG Df8޻w$_r<֍TD"̻w Ծ>Ԗu~ ڷEJJ.hϟ( %BaՑppq]Qpz<8;wu@ y%%}]pzXb1mc6 ;Rww(8vԅB*,e_)D V[Z޺e=gutǃ=<혹Ʒoȕt)冚a ,6\Uxqx4рԝ:iբ)+@[VJ$(~MOnޤP^\$k$oF4|~?%r^S'[AW*gX-kSH c:`C6"Orbizi~inU|^L= 1mv?x.ElBZVO$֧lvƆ(,nƭ"zvWiEay3MR׆IENDB`open-build-service-2.9.4/src/api/app/assets/icons/req-revoke.png000066400000000000000000000012121332555733200245510ustar00rootroot00000000000000PNG  IHDRabKGD pHYs B(xtIME #!IDAT8˕Ohaǿ҈ABCxbDP];<Aui%+$IENDB`open-build-service-2.9.4/src/api/app/assets/icons/req-showdiff.png000066400000000000000000000011511332555733200250710ustar00rootroot00000000000000PNG  IHDRabKGD4g pHYs  tIME %/IDAT8˝MkQ;$MfJR ]hE Uw&o$ADtYwUM;AT(VP5G'M23EHIBRgw=GvM;N!#Lv B]wf2f_Kдkw߾J9zjmŋi+dk&nߌĢ5(4]#Ns ^R>q>sƕ]$ ORpsӶQ %#6Kot Kz Xd4,U~_e% EJvK0n \$[5ܪ-]pi-Nlc5ߣ-F}=kr+Bt4M¹|'+7*|\YT[z,?",+| 0p8TNVdT앬/dߧ>xtXtk뗧d3 ;e+D, Xm'+5]~ f-OfG̎>促 >Х +?58]];> ,ƠFU+*AF SZ+΢)b,HAURk|q=\ίOfGE* S)<Ⱥ]v"@N3ӑy*B ADDB)tM#&C WUQYTPdHDtz Q I7?R BNrWG@avlnR~s6lȥ/1Yf{oKi)w~{a)\4m^{})֦S}템@wxa\hs:ayBnm:Ofu걱ɒՕ0{׍jvw}yI Ldelu?{;,EIENDB`open-build-service-2.9.4/src/api/app/assets/icons/script_lightning.png000066400000000000000000000015571332555733200260540ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8MMhu}p[w`sma0IcM Z㛁`zY8Q2xK,ۓ\ߩߪnc XnhC VK s pW :̯~7-2RaB.$+OҭW*+Ujc$sgƴqsX[|+䟡B[0LTS qį]Zm^xas49Oya񭋱y|7ZʼwaU2H6~B2sɣ/`*MRi*EH>Q0Oiu4Wi!nTDjMQ@5J޾v+169TF-#:50s/[)*g?x'.8gzY\ޣ4 EZs~ࣱ!f~~0c[6a6OX!t%x 8ZgyңPj Z7mcA ?  
^,Pq7ڂ _ꠥonV83PR,QE$|.D>,.R\819 ׻$A&Pe)bݘ rDլLIENDB`open-build-service-2.9.4/src/api/app/assets/icons/script_link.png000066400000000000000000000015541332555733200250230ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDATMhe6[ɒj0lf֯ "2BqWRAxNs\TE훦MmzOXk8Gs(a%DI(N\; #Tͽ:_ 0L}T_= \I'=CJ!ܗ()I)qO>7O;DDNCA(p# ag[Wq1U&&%^oR@*Pf}~i0aXΏ)Q0HiQ( RUt޽>p^aX+Op}K `AE[Ib qw% Z% -B H@‚& xcZt&% Ab H 1 a h8K\)|Z'NwOIn_ߋ[_= ``\X|Ss 3?eO_0]Pt:z=:T^yqtii& _Ngf˧ o@P`0?`wos|>M=t陱7rG9kҲH.5Hb>5 >1\߮<ƘSXn- |(++P4k'Kr.c,w^Ⱦj۫}y0楐ܱN(c,] s$} >,C_f;/r'4MuaSH$"Rp-˚f"t]r ,g(IJ)#83%2$)&aԙӐ3' % !>* 7s]Uuwf W#jQ>IENDB`open-build-service-2.9.4/src/api/app/assets/icons/server_chart.png000066400000000000000000000012411332555733200251620ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<3IDAT8˥SkSQݛۛiL-A,KgbA 1ɮ@6\,ZQ4>BcXJ)9{t:o8q;ADE:T*xW bXIRx#E촪t:OvhpTr88.Ta8*X}"qk)c,u]( F]FӁfҗ@u0KOvJ˚`qiը\ =G̉mHIv_DBz )Xm&_ր,pqG Ŧ!Fۨ-$eq`͸űWٖ=a)R." N!}g*$) .h6<24mebJ;&5 LL{fZ7.ҮWP.C Ii r Q4@[۔z{I]K%.Qo'#D¥X> Iyge)p:s]8dޯҩIENDB`open-build-service-2.9.4/src/api/app/assets/icons/sort_asc.png000066400000000000000000000021361332555733200243140ustar00rootroot00000000000000PNG  IHDRrP6tEXtSoftwareAdobe ImageReadyqe<fiTXtXML:com.adobe.xmp ϐLIDATxb? !ՍT+e@ĩ2 3N ;@t]]4HC  ΃M DKbk  jXM $^g'e!P8D;jبa0 O&4]IENDB`open-build-service-2.9.4/src/api/app/assets/icons/sort_asc_disabled.png000066400000000000000000000020321332555733200261360ustar00rootroot00000000000000PNG  IHDRrP6tEXtSoftwareAdobe ImageReadyqe<"iTXtXML:com.adobe.xmp IDATxb? !ՍT+e@ĩ2 3N ;@t]]4HC  ΃M DKbk  jXM $^g'e!P8D;jبa0 O&4]IENDB`open-build-service-2.9.4/src/api/app/assets/icons/sort_both.png000066400000000000000000000021601332555733200244770ustar00rootroot00000000000000PNG  IHDRrP6tEXtSoftwareAdobe ImageReadyqe<"iTXtXML:com.adobe.xmp o IDATxb? 
6w"s ˀLPYY,eqRM- bV .9\e|āhb@lMa@\ ĂXkB.;qpfŕ" 7t6Xo@́5O nh8rWr(f&( BN?C Frl@ S )d#@z4DOIENDB`open-build-service-2.9.4/src/api/app/assets/icons/sort_desc.png000066400000000000000000000021471332555733200244660ustar00rootroot00000000000000PNG  IHDRrP6tEXtSoftwareAdobe ImageReadyqe<fiTXtXML:com.adobe.xmp IDATxb?a0lՍ" Ju: XQ ^C7R-@M=7?f {e=_ ;e U ]uj &o f*O%q )!nIENDB`open-build-service-2.9.4/src/api/app/assets/icons/sort_desc_disabled.png000066400000000000000000000020251332555733200263100ustar00rootroot00000000000000PNG  IHDRrP6tEXtSoftwareAdobe ImageReadyqe<"iTXtXML:com.adobe.xmp {vYIDATxb?a0lwލR d(++'ex?s`Gx ) -@M=7c'f {e= '@$ ?^8Yy'oHM40Ux*!p03 ӔIENDB`open-build-service-2.9.4/src/api/app/assets/icons/sponsor_amd.png000066400000000000000000000062321332555733200250240ustar00rootroot00000000000000PNG  IHDRZ25sRGBbKGD pHYs  tIME 7f IDATxytT?^hZ;`Q@A숔JA\*FJjMQEbdM$3sox/,!=d~33o{!IIJR*S.稯ں_ZŪVašdQރmm h`4l,tBy`5hR>=^ml!NnZK*S{XBcf8vEQ9ih80j{BzPdLE5X* L8I˻D*Np75xV`&jTPzɭMDnDd؄u[Ԥc_}5 \x24кKDOG}?ێ}9f3sYk @ WGZ8C3+kW Y%0aTxmZzjo^Ԇ;ŧm6rmXF*XҮJW7M} iX"ׁ55 ".jt2Vn2/^ӑX(:ߐKbBF|BWUJ߭^Й@o VoZb?f xJ.e˶>/AMSX <[SXY:]57q,ރfE~wE-3cJ8)8:Q6Ql-`8pOFji<}TF98wjp+m/z7M7S*g\5LA꾈 nPcn޷}_'=1 _ Tl7>r1Á{=hpP¾-]|;` Sd|OH,8dH xK+S:={mk<=쀫>VUyki>1"llk>B{jS`] 4=cvNU\-]HXm\fpʀp(DopC .|-'L: 촖@N^g$R*!FW^` Ȑ8sP D2|Hre:ĴQzܬ_;s7eJ Fmλ뼱M8NSVPA ,vd*@[O}ofKki@_ёZWamBcE)CX)Dv4e=deޜ?/cP:M08=ưx8@"_FK*=fw"kqKo9@vf3V \5QB:b*7塚W ZTN*_znH WCqޘdP%b) y}%k-ƴlk@_/hyˀ)o=gW-TRRUu*G&{C<93`\BfPʢŞ,hcυDhWo, r uoϸɃD6W?2Dg]h!y0))t,EL:n8{{Ol:7ٝeltb!R:Ztn)tO,yJqVdxW Sʔ4 ڵ|ϻr$%#;={ &3%,z v@I쐺:QQ6DyqcoɒRB( EMZ,gMt^s[4`)VE/kYѦX12vePˆjC^֪)ٯ)]^` yjר-mlWhKvK< 3EϔHq=%EIJRͭT!I|%f"I?:2V7a0C-|6ewx*Ug4d.1!m|_HB&'LfII3DXTZҭO%{*&k*= JpJ˺gIe{)ڒ*[3I#HR$Y|) 46[z|峃k Rn_0Z|Q_ǫ}ćy .9( H)lD!,ӟ^#ٛi=|XGLNwsҍge7+~&.ǽlWh/cEb%)IIJR$%)IIJ HK_e9IENDB`open-build-service-2.9.4/src/api/app/assets/icons/sponsor_apm.png000066400000000000000000000154461332555733200250470ustar00rootroot00000000000000PNG  IHDRRribKGD pHYs  tIME #u^IDATx\y|U~9瞻e"(&(WZmUUkEOJ]A@Y@ +ܛ33',Оws̜y}f 4hDk 94hРCF 94hРCF 94hРA#;D ?0v1l.|8(F Q1Fb_ S֎Z _=S/ &mL{zKEDxhꁄ9?YZ.^P @' b4IZ`&x=](ϨDy8`3bxVCsie|x&/]#sDv׉ͫ93Ӓb=Gk6r`"bFNIy9s޺WT$'czg-jYg1 QyŔG̽mjf)bLI! DO)ϕ#A𢬵UYHo[Jm*{nSoo+dȉ_A]M@ϭ~a؏C/_n[@9LNl/|ua HU׹P㻳Ҟ5mDt)uct }îyhSݱ}m+e6=5WE͟ztח,Z\ H@aG #Dt]]j!&|tjl##( Zّ@/֭ #cո]'@~$+\,s` :o_j20ڎ܏,t6*ёu<[Al@趪g˭O@u[A:N9_2iW(՚GO7J~+L `ie9?e\ 5ѥؾXFܿ_]U/ „H]mk1Z*%5Ս771 Ԟ"%vx:ۆdŎ~fR=S 78K&mNn^M8Nz`A.b ۺAkB0{%?Qo:4y 瑄m}GΦPC3Žun9k|ԝ߸rXE0zF|95.":( 8@cUPX'=MG> wُJ<-|✍:b#^[_?Aoq ۜ&{kg]<~ͭ_^~> F d y9'{L ~>* J=e׏^j3%IL PYz5tR !gb!Zʰ.39睊vIS9`ZQGju`j/F_ce/Y2y_x=-W;u^5VwIGǏؠ츠]TQe6a:>fIlww|!3$G[Z.oxϫ#^Ro=JA SD9ϊ#ަQL Yirvn⬩h<>wk;vm^mv2(. 
NQ!S_O yL@@fgIT"fO>2Av`TOּNe9G1>ȔbHTx/o,z ] "WVH(x:I r}c!1ܐ>,tƲNJ-QJd~]<5/ynR<ptE2I8ʕPpQ0C;5``⣮~pk;Tf]vo-B@c͜(sZն":^gZ#I 2RO̓BE^Fo?6 8WoտkԾhFu{mgEpCIh@N,OmmuCفR=B^|+)t|Uo [?xdrv埕oCz!DJ`>mr+֒ DؠQ׎z)Aks*T7 (+vO9A+$Ə=J`#3c q(&G\}V&U߸B|0 bPj7[k#Pc9Qp]908A5$]7Qԃr@d B0>x£MMl2(y'9lf`}ZxKuz@]1g|މD-KteQF [*_ꑯf,VlV-c뫲Eb^ [Nj_k|>~7'Wa:APPi>#֜ۂxuryDbv[cco:x>>%lVJSK?96W¼`4 H̋sO$zL 7ݒF}c3c=i{@mn2ĐS/ ы!PѾٙosW":-#fH>e(+nZS1/rQƌPѾyk>řzg >9ܰ۔YyY17OHXN_eYiڭC9 7{U~iGvt?ߐΤbux)wd ͋=<[Wq:gl98bm]} nzm|^D;aޘa6wZ>5)H}Lh.2<ξct aU^m_W|ʧhBYvWwDD|Y3l3K1C/v s6+OF5Z,8ۏhXHv!9!xjyv|#Vm:&fQ!y){3W^18 rY(j8\~U+sUibksY2*6$lPazp`@B&&6pOqT/Pji2J8L^TBE44J#))w7S(Y #1$7s=q9KZ:e]9sv}x\AwNzeOj{[LLX֦ЩÍ0Q@`E$ARR`$0QĐ͙SU/woU#RE[W₻zc7UԵ>+REu96WX8=Fo0mU񢢱ctP~iiمH{k_;6)db9s,hrf݈{1wmL'ej^pWI5G]^).N;Ѿ%FlhjBι^oHKM|G +#""Ge>_\RRX\RWWO)H?qB p,EE-26&&< ^QYYRZfFGE%$ħ |լcib+,,8V( ֛o?_S[[U]SSSfiaذII#Ңz"mGQ: 4@@Cݫ~g _H;M& XE@wcOe+Ovom$Hz?^6hgL|GX5vڴ.W uvQMh%Dwt_6"/jޕ%ܽ> %v<,::*;k=w޽VxVx^]*A륹sp:>FI!R))s.=kA0 fٹpyy$I`ɡ' {E?L~qhgLe@[->G$ιY^oQ'N `PB{7| A{"˲XU :NIXw].yE%%_|Ar ˊ,˄(}3͜s^Rf=SvBBB8H$hniÏ**+GDQ;gm;vإӉg]rA޻:FEIL ~~[mmii)nAؿ?.#=ʮ={\A7jHQ[Z, huͷ~B}͒$cTQ!sJ /jE1q11H"RB(w..=BWHO`qӾCBwyjbxص{{ nA{Y-[DQI/9zOS+u:.񅸸X5Wq( p⒒cLǣژ+ S%,4[**TfpΧM?L5Z_|RE{ULI9No|uM'~) ,?Ó&}0KU˯?@PY]vEs[r z`ɽqFt>|ℜ'7vQ/Հ1RR~AhH%w ^?TPv,xΤ!n2'#pΣş܆#b\llKkkAFȞ}yݮcIzIAOVU]sX>eqƘz✇ xftYc3O58a&}^6=i_[N'缿b@ [JtrC8ӵF I:Q?}ap{<&1д|u[11w>8c z2okH;u>?'9f߽(Zm/oسoߞ}rwv8C6%y߁;w=[PQYLc33&F 󯻖RҿO|_55B ^=oH_!|"UEq7luZmiys9w;iepDFDrO~N#TTV=eeKRiىz^//,)1q+nڼb!B_ܑvYus_69{u[2!IdC z ]pg~/aPL{7t\:U5UժR|Ó %Z9ǏF2JJ)e Aiw;y*LB)vr^9JjA,dg?QQЙQJ*j#ԋ&?#|>s")1&5W{g!xm_WLfŽ##Gs$i֌ y""G03O{>C)ݹKU9qMz q=jؚ9fLjJÃ. _;p8Q =n9-);rs=^?@+,zsy4>g 懅O<r\HjvNVs4hРCF 94hРCF 4rhȡA#4rhȡA#4rh"&јIENDB`open-build-service-2.9.4/src/api/app/assets/icons/sponsor_b1-systems.png000066400000000000000000000073761332555733200263040ustar00rootroot00000000000000PNG  IHDRVVUVasRGBbKGD pHYs  IDATx{pǝ?=;V+-zHBo# ǎ8~)\8uIr\Ug|`;11` +z!V3}LXoVʷߪC(^a7#h>Ec5~h0 0q(-K8{73H^P*G?woؔz87j̚䀸R{ H~8KbD7aIռnLSq|@9}h0/08]04@;AL8`0pHuۿ~;wm<+J4eŊXŁz\7] @@kčC, :yT.^DGGn|?6p?rބd! YY Hf끛hOHOO':::MFzF_3%6˓A0á sRBN"-J@y j(*jJ_þ'464R}ӧO۾(++cH/K *$Vq>Z~e'`%RX Ȱ lݲYfw"Kә&nJcCc/>O:eٙS Iw9y/c">`  3.ǃkJq$v.m}Ca5:֩;ULyiO% M\Q! M܂\StMpqs &x?1Jw9)%:+;#ӗahL :)SR3!߁KZĴoDB;b0 .|7ɽ35敦1kReN_#p:n pJx1@]`r?n!(dnyid CˎFc%NﱥqPfTRQmsk>:.Ms89)3t&:\9:>HvINj9K"yY35ɢžc<|QPAmS(?:Oފʅ{9@0vU;N&L0XnU,KxI)\˺[ײ8ff!g#Fr+]v7#'%/qӢ#:椷|F9?p@rGUucX]Yz5ܲ3'riZ46oPЯ p$qw?l%'P2\G$2*`U nn_qRwֳsN¡pb=v4U5-J,9qxx冿TmJr >ocE|iZ4`0K&`餕ƤKvaW JVm)bIRu E2,0XwLX8l}+Tpo4 Hn9u}yw\_'W X̤:0!9HCK.$yEE)b<*WhAAPx1q Ƭ3:uj؄4 srM$N'qRs͡|Fy0b+W^ݧw"vXvղ>A!"7/eW-C*E:ϛ"6seSʘ6}Zr+%Cj(((+Waԉ8IM$6## {;j{d=O*|F9S]iClK[_}h%6;;EGD^iq On;{c,C4l).XU^xkK{C3N'$ց.N$%Yd1[l8GOF8v>^iV_!X}j^z%ul"qhOf0|eST iJF~XcUdRomt\TT,ԧMvbwL1!D.ȧHl"BNn ǝNJb- x7U_{rLlw8,]BtH i@#vbׁfPCC'/I%w&gj8>ҍRv}wr3g(YQ_'%@VmȬ32]3*;Y/RH!ak@0 {+@2";-H肩}#`AU*ÑPPy0 s"j uԱS:߈\0  bgW?9q~OPS2TN|6֎=p$t3 ]hծ߄#o.p+pP8gCO>7@P>nzP\*R}?c|ك9R{']t# ; /+?Nزl\ Vv^&gJ8.mBf7 ܴi}.@!x `jgFW[-1 4N4 |0..1 1&fN_s1BBm^O|KZo gZ”=%]I@䢲I;-ur K\RuRCn#a@{ٺp6kA<#%GzɸV&H Յ.;?S3TWߜBX_z̠ۡT 5GiܞŤ ~is2`W?. Y[A܏}'ŦP[G^l )YnĖ@=Ib?{8Cwa$pP!-)l.N!V &nmoQr[ZL׬ SБzZT:t;uP/2;bwk{GXlG~k{ks')5 JUWx !ض5_i^J GB^>2dRȏS}؇W><&ܭ Cw= ,2 N(C}u=7Ձ,ETFGq*jk-~<< ܨB&2F-&\!9t\Oؾ˂N"ˑو N S۪ȭ}9g 'MNu! 
Q XS ⵶ѐ@"*a񅺐eYu^YQɩUPPR?}N@?+U6FBQ#lSjszt²H!~:B)2JI߬,기AjC+UKD!\g݀w u 4ii 2 qQLuvzO@"BP U􌮔M@p$Ti5{)]fZaYjV^έ?UdBH@<c{]HUׂagYS=x6֬IENDB`open-build-service-2.9.4/src/api/app/assets/icons/sponsor_heinlein.png000066400000000000000000000125271332555733200260620ustar00rootroot00000000000000PNG  IHDR07ǫsBITO pHYsaa?iIDATx\wtTי* $!M3&q`֍؛c''śr6M6NƱC\q  !DG" PAH3F{ͼ 4I`9Йy{w>s89 p" 8vz7(IgH6VWD(}U9apĀՕ;^"0&,W8= Š'#0,_ X8 HDI%\s PSGfw_$q=MANB9sP3q2r$gEx&9Z`!R` R=L2Q~7@P/9HNc`fE?i;ε$(*=`q00@ks]̛(>(=@0>u 5Ood ,B%(øQX(A鉭 G9͇f|; GD qe=5(`m`8T ,hDԏlYѐ44 hr!u=Cߖp "w[_+R# n_TCa>G֞[!P߿7գDQxX8'2m(Bou&.Yݘ.k Qsgs7=Z{a~%L5WG6QCWt;C{Q}hq] 8.g]nvEUU줔 z:&F#Nu2ngrB! 0PKEӄ• a^M fF! B` Y0( AP@52;竦,= ϵm싯\G.nY׼tm'iV&Pd}XT _$C+X _șJ\t*yJ;pT:D]uj(p݇圧,c^bbBe)MSi"(v]%M, jsz]Yq/_u:r;PoY(L K Wћ^ϼ*,IW Y #{$DpyH$b.mdEyMbι\=65RԌ׎ߍU㢪pXs ׀>Ak"sBkν=|#p m?, "PFdd|uxrdhpy<ޏ6m6y>erq\fz!&9 qz}";&#r>$r{IDEbs:.lwc4mWNOw$&1WG)1VuDC$hjn5=r Q2Gԝ;tXSs 1oN|EͩbMѳF{;Ϳ'xI*"Ƶb";_}}͟Y,b@ >|جr]n6|EQsFO6Ϟ9%ʟ\fFX=p^nog~Yon[ʨ2FlL297Xx'zg]EUbD0 q'-أdfGo29QUu+7+3645}Qi+?8](cх)EB Eۨ!<߅Ql6+c %I""W:ӭ˿m3wm(qqQHKkhlt=s"H$KŢ&IAS((v͘jߴ3C$)h<˯zgoOs@`݇685MX,Eq{cE|>B.7ً &knm4 !@@S&wt1χ[J 2v7׬|(B &߹`ތiS<E4Οs<P-EӴ.B)>j҄x;6KE8MƘ76k_ xcCKK YU; Gz mgU9 ,{|;n7ּ_?Z, 746]l6XtmQp ns˭ ̚67ݿnNTT]hh?a|8ǁE9OHO_ikwʄlWf=tȴT4@ tb{[ow{<,)~pvz^Oăkd1g+ol2s&#qggk+짟|`b>=~ͷמD>7GTںs\9;^/A#KLHgBC28޵:xeiY)cw! =Htxz_j_ei[ &VZR4!/:p@hc՟C ӣ#"xpɵ)+*%>>δq@vSu7lTU-Խ>E9YY]8i6Ņ !ׁRZX0eV Z,SUd1DEFCEDb<`pN B0pNk~JDDeYAď6~j(qD.z| <1!#11FPj2JڳCMo@Q[&ͩO !pEwBӲLR&Ȓf2=j~ ""g]8Ěa{MWuGWI H8ZWUi/>#8ĹL v!%4}lVXd}>EUu6BifLŘk A&jf$=H ctz$ͮ={>? +?"rPG%D8-OHB;NG!Sy1 yT!! @L!/g$Ifrz~^ϖw(?HRgWWZGxlZ҃ňA ?KH9jJiow:|liE1o^l=t\vU8A&'<|朑Łc+W ?'N/;W]f+V׶ sG,W(`d,_v|}_B!11a+O3DQҚ<"st&C_ln=wnl$G7Vٻ_ 0sԻ\ѵcSg? G_:&c_S{ tupGL>'+ڝ~Ȍ;QQwtu0:},+eRSwٚ y -ZSuꌦ1؋뇏T9Z nM2Foپ艊-B[ K֞;mUL%ԙICJ!P?as$$-8үO\jk[ism9Q S'+xlYVUo2Ȳ\~]'椏В=3w8!DezNpCgg!`RVv &͞9}v;XȲ(֝pـN..<^Qf_D ^}cuiqYӷ{M9QDE',B޴Ғ y9 MIOVV=|qGxiI!躾`[E1->ܰ1tæ M"vw{^{/S&ϟsۙWV'*,~c@U^u9-=| ȩ&dT^smRzcj& 9Ox~*D٩a23F?֙, !%x`5fN=/P^Z=? IKMyş1/ (Ғ ]VpVF" FɞsmjLFKw SđYp=^ }D. RÑˠ"jEĴwtgXd91&3G+ټmǺ>"0}>95'iT2@Q$YzEQ#RSR΀kFn&'Qt]w$&X-$#-%^ڬ)%ś|jQd%gOMSpzJibB9i)6צȯuoۃ=j}~QH^|k%Gb$ ʀ+;rtR[O"F- A?ZkB #IJg2zv/pۖQɡgRie˷VEV;~QWw9g溊䠯H~#rVӣoq#3.F3 ʵޮM/c}4CdDu^0*|bT&· 6n9ä1q!p󇸫p Q!w&>"rA FfKh/p C}iv.i>K}aDՅ#ߊr8kҬe4?ZCWř ޸a܌!"k>O+Täj;0G͏.q(({洩™52<#](l"IiJrg#0"_Zld1e_:>DD;gzx7N1!NjԌIENDB`open-build-service-2.9.4/src/api/app/assets/icons/sponsor_ip-exchange.png000066400000000000000000000105471332555733200264570ustar00rootroot00000000000000PNG  IHDRU >'sRGBPLTEVbz >Zf}%F2@^^j۲fpamᄍLg,;YO\uHUo/>['H4(7V#5S&GEE'8UFUUU%3B^55DDDfff"""ݢhS`x333̏W5DQkVVWd{#3Rwww|,Lوfq)J蜤ggyՐ/O۳$$)J9½ pHYs  tIME  9$+y IDATh՛ \SsE"!aZ(b iFQkj |&jUZmP$[Z΢Z\Uܜ[͎=w9 Ooν瞓=w; $xU/#̡|Qlc0_v֦vͤypTb1zgv<ޏL%vX~f[oR1y =l#4?, ;y&`y#NyXUv,QyYRIǭG74׽`Xkw0Цu6,JNlPQ~yW`IuO* MyT455jǺsC;NHuNJqK7!dIs(i I\!%kNV" W (-E z  bJ"4]h8b[RdnH5%@(XS-D2x"59:Id/32!u0Hqn=QڭA+eځ]H+*Xu`̳p3C'm/ dtIPCp]ryᅆNعVg?Lm҈]b糖uk4Fq^h,4z*1 t_ 8"h1cYSKZhSxedӅ@{& ]-`6\VXHXN ;$Ua߾քZ"X0#"Z/ic 1ɑR#kAP%t2L+1/h!eH0 <j]`?p{袨Ҵ YM[ /bKv >x\Iw ZU"F~ * bKLI\b1I5]_M ā6h%n ~HcYX wNsz4F . A])zs@uYZX wۯ~SCLD#zhnS:6X@`TKW <+'R h-r'bs5\/Z -ة4.1}桐2UKkޑDs;v#Y2<2 %{&b|Wt)udfQMPK'JNTqMCsCyZ Ђv k[.~R_v~dZ;-._f`jhAq*o ϛ\C8wʠ%mg#ؿ5r&D܁ ʻ@ŗe2ked"v5˚ҙ[}kh< 2!TC&gu4;b U xRlii,vKg5)ݱ ͫ:CIJ^i: {4N*P*3r"/BBO]jzky.oW+XZ9bʭ'xE' R-~#X$sdy!kSG3Z۟Y"dKr= IH-m$4mT67i=6q ,y2x;TCA#R EsW˗mh=X/^ MwryKj\tS<2:vJYDkO]Bڒ ϫٸxҘQ23L͔MZk+Uc$jj"͕N@D0CnF^P x38,6 k4KҌBnm||=wó6QSXh3,[.}Cea ݕ =,LɵnMsؾKDdDQіR7ެ:U23Bh{ #TZi{xի ̣\`ml+B|ۉ_#UU䳲O*+UVA_"z+*NCzvZl0E͚W/7. 
B{n }G@jP8K| 1~pƷ9?YТׁ "Pm|c0W3zGA>SQ{@Ob"ffBxUk*Q^W(neA*4 Q|`29p"jsov#$W=^'c@"Սߩ8T}[.C dGv%*rBOUoxes2 0MS q{.GJmҒ!!R߽-˒B? ?Jq5n޼IP*()%ea?;]v{w\JjB`aUU7CMMMk(j( ?~Ajkk'P, /4T x+=wB夫YB"t.oF0IJ,t45ͮgY^uؼy3.\307?g3Ĵ,eaZ&Ҳ0 B_3AWJi477J__k׮NvM,׿l}X7X5WuyFFFعs'oc䊰WəRB.XHia)HQ6Q0 L\&JT3Öea^1B,|i&&&BPWWڵk~:7n@4> ~${7oԩS$ ?i4c%._M!,KL’i"-[nPLߺu" '''yW ÄajjjdҤR)I #1Br?9<\"JSLLM#( H[L˴&ibZ #W??e?h0tTxE40 g`%%˵ZJd0J˟r$5}vBI)eii% \zzAJPcqqo}[̰ 24F0L&GKK#0$g&8s S3)t@J[H[Xi;`%]I%2&^叫eYhF$?S!D 8Dyt]˗/}vϤ}7xilldddu̥d2YqlXۉ4<ʛLNLQ,,t4glMdie2 Z.vrE v?G@Yå˗yꩧWד( ֒J-099I[[zZ0 ^xE!J!E2u&Ph$D7ps2E6Q5N|*+_JQ"@j&Xʲu5J/u.^;y^GwEqN^~B.}YvALKpmt>2g P(@X .y߷S%\w !geǎ+mSӳA B]-`v=y2\i LGkHZ-=5+2dq;^ʊXO{ҞhR"ť Ǐ```kա( b`0twݮ\rL?{K֠FYXX"W((Bߚ6]dsy5M,SGZ#X.ٲCsNWcEqx(9Ir*#N.2\gRiH$DQ%dRn 0::ʛoI"ԩSܹLDQ dú.,S煗"НY&4,4ak0fɻ6M0-o g\A(B*XYL+3X rGM$'OK/yt]GJI(B.BuהW_}zz-_eﻋ`@E4ruq;8~a#!Gxy>l9Y%~fY;U%eP*]/Hyd}Ve:acS3&'LVA@ ,--.?aۅ f||);7a]*4Fb^ylj̱LpjaG,J'CA.7+&eMR,([6LӤKwB!Ldvj^KӼdY._̮]8|0RJxTA@Shō4MH[ W[zZP rBj j8%GI8CL[PH(s$lrKy9{,1Au[fr#p:qq\/} Eaޱu͌OL[`Qu9v=ei>XB̤7O(%*%p|4ǎK%%NgYHLN /fQ#%B!b\`0H(d||iOd !i\zÇeHM.^QRiTE8j:v In_FTGv\,cb -$N%+&xzbjKyX+!ɒL-c$4YQa:LOOS[[c=FCJOr'\& zV9sF-i )O~pc*Ū؋+vϼe*웦"?XXX(s5M#P(4X,}sT4=44oiLNNra6lPv  1|mq ݰ3KӗDpeUg7Vˍ tyfJHH2x+cmr} E_$+;>D[$AUU2,^dX?SNߐ嘚GyK155+W8s~7B)c;lI6K,ӦI GsdHU5X**H̯`ЪK-4wˎ;&a2 a@2ϟD*\״Xř3g8 ˝pB**hSe LY1iN>SXaҊe6HZWzrKij9}f7xO~B 4ifff*d탚gWdwy]׹p{jӧy&.pmdh4hi&K{[-@.-ׁܸzΎvQb0}=̈́QlƎ@ ؠ41 Er'18EU1ĴLP2f(,L.tT~s\H4R#H~M3b1OE! yE555$I|h'!ϝ;ӧ1M#OVv|#G uP q$XZ9A;r/T466Α/d9tcl%ҋⲐ`[uuju'ͳeYXRR1M !@UUijjbI?uCf*F&__&y 0 % MS b)^}UB<O~Mut:MCCd烘j탚f49~8CCCdYZ[[>!4xU&&&XHyգ455#-ίJy G4~wbLQ&''άm(tvv޽{=8{:&xr&Ų*"077[mŗio?~ T`(7nwo_UKAo!zKm`?CRQ&&&~@ `aa'xm۶ Q5|ϿEm]+ Ac\>Ýwl`0i C/~40 'ǪxZ/gBH$~o2r{ *P(Dgg'ͤR)fff|W~ɩi_@qb@0$m,䩧_2KKKޤqM Dޯڃks177gc |_fO8|1\ KC?$4({-fZ[[~l>'ˑdqpB@,#< :\b]Yf CCڵ?BJK B;y$o6]]]e06vk7ycqVZO |gcN7G_2|2 eB,V[{]SWWG?555q4M#H ϰwCjF  mS['vZps"I6oW_eqqћ,B:ׯի2d+%"[%\v_~+pa/,roapnMp !KhteYJ,'KdV)-Ce_n8g~H `( AK0EuYӷ{uEUgEQL ^ŇSK +0P+޻h( ,xɲkr ÐH$>}P(ooW^ƍQ ^ *Zur l X0 K0ȅ+"2tKɝw֨s@w$6cK,|X_~]v؈aZ\(s R!GOb14tF1QMȚ#GfqqD"//znի\zC)wdIrA%V0ϙvSHH8XojY`ZA^T2Z DNu :S:}ڹ^yћkγpϮ;D"1;;D2*H"7گk֬)l .019ͱu;">.GJmYb,TU3n$*hzg9%Q`Mw;=t?7'c&|vH #(9Xey!NXĒv>&MezzuqGzk̥0tl\s'?0xt:|Ɔz˜J%9kJgݺu<^cc sCWCvŁgsKUVwWo9ZB4UCeJroMtwu܄"p y%g"9;xgv횗O&gW8ދ_S QA򢂹pcr9&o }L">:IN/Q( @\Ϟ=ӧ) ,,,/}޲f=;INb^dq[kaUBme3Y.^H8f~>Ғ'+-FDQt]MOp nu&!975 $z oE1@M4x=wﺓ9E4ӳ}4=]mԐfYXXѷ4`S(zmrU._¥QfR(jTiQHxoZ{n^LuDWrB~iCy%J&\.G$4M RJ$7oaΝd\m9zWNǶEys{fK( YΞm߰&&'D#Q\m&'Odxxt:"O>$=wa.ȩ!^lf-)t=VaRYQV{^:~8x5(/<,X,FKKgL4s%u\؅ݤ|>g`'OKLN'KD<#'-)@פ:tF&iOp \\."GoM`x,"PCoѣ ^ʡC>l |wffȪ!X)+jՠG+bUjyZ5Mcjj^z MH&躎iUn]o풓֒NywxQFf8qcDY_!2T`,] K{`/6nvs]ۨarr\.G,$qY\ 555-HTd>}BtaS IDAT,'q϶[2K~L&K*5Lt"4FjvfLzns,-aÙX@(LrnE&SdT7G*DY.8%%!M#JʴΓO{'ϣi \ >t?555LOP( CN\r7nFL{!KKK0rs+ã7 Mx3 eY|Wބy˴PM&h@B+')1]rkb󧝟"PÀ%1D0;Mr\.%eNP.hp a#i$)Qm! "0=\!/KW{H !_`r*شq ܵζ&S,,,P,QUՎnOss3 iܲ];劶s!OʲYKUl]9PDp>);cB/%xs)@)!THktq5mM,.exģQ\#I(["a||iip<gϞ=twwSWW//ܺ,5tO+U8>Ok]uh2MX+MeLk ^g_ND(z~j(qB-3R䑬`,eOvxQ٪4EV?>J5~Y;IJ4I*ZBxZbig4q$$1Sز/1::R&ܲcL ]‘8t}}}444[o /zI8_O~ޞnt*oQR,e2|(U4տe/+B. 
|iEF[er`L& 9.hmib\~\f:4778شi\p{L&Sf <ذ~-y=X> ({HJv]F}܎kr+aKUUݽJoq%`ŵ©*( 3F4GǙ2-߿t&AMԌq=O4i&8@{{;sq2 lZgtf Y",7_~*333ܼyUUijjj~DQ55h}%%###LLL D"ůсi˵m=,pWBoٕGx hIeܵc#Irnk7ƙ_duleeEQiiiP )eyߝA<Ofvvk׮Oswx.F9|~~1=N(8 kql@8իu] j}f-ŗϵ===\~,[[[ݻ#3>MeGJxF *Z}GB- Dqg&v5M1==GM">}.ˤ QSC4{Y1tuuq!'СCūitvs}eJEE6]ERǎc``e ֧C!:0 \gΜ!N/G._NOO/]{tR!7*̔4y^^[g(Dbq< q۽h$(cdsvҢX(22rM EMMB]F$w} 099˗׿}W4$ b{3ϾH(ϫ>M.²3 27dr|ё-V:0Z<`K.RgeQ.]BQM;T:Z, 'غmXB͖!lr/yv ;tu{YVQ# `;P5W<_<|*E(15ࡽ jjj)+rEp{O^t}]z)~LTU~aN:Ǎ }4e[tѰiAdlϖ Kpsb3 rL>gar[7ڂu7ˋilr)͢ ErE2?I,{ #:l߲Zl2RεkLҷ~=Pk Jw۷s]wqM?>O9޳s3sXXxMeCb ΁.,,.VIRYT˪ ρRUVJY'T"nT,+D.Kă8WUywT۠;y05$9୩q̔iLM'䊾 ۝%(M`rJzQ4hec_D"a٬eR)yLSۋ }}nU-{tsr yyxG@0Ȇ^>FnfpcpkY^iĴ$H;7|!3|1>9CksO~a޽P8\2ɫ?>4E?&J*& Ţ9Xd*Kn+7#Q׳ekiZV@-~h!U#Tرe-m-ZR{qF0]U>t^o|u]466i8k2ؼl0,]>֮c ! ?+e||>J0f;1t-&kjFы7!YJl-`B`3IT!줹["mZ(1A$G0._D"q,u ΢NCqӷf vy Q;F, )%wܱ};[RJ8|0ôp yկC jS8\cai!ٿ?_~H$ut*_.]~~{wC5\v=bHIQ ͛pf?1sK|K̓ϑ#oS_Yۥ^05=mfE,W_{VزWfHBu{y_xl.ٳg|UP?K-($aHa[7J#\#LrwȾ}eF+imʂo}wÁشi7Km?z_>[E4K)6_4KKi=`7o/K[OKK ! 9/^ӟ,MMM|ؽ/M"ܷ6ÇaaaV:::Fڹ)kLXw[9{vN^} ~⏱,oR(I';jĢ16e͚^ 1770iV д0e];XJsKׅ5 сĦMWơP/| ŋw{.<)/gΓfCK{)v i:%1Dӂ5(0M ? ovcH@/Y߷GG!_msUU6 B,,f86p2lw1=slo ׯ^aFˢP(،`0HS(/5z:O}}=^EQXXX0 u-묳ZnF*dӦM|& <47(4ӌMLerjήv˪A!/h@Y|nb* e;77lj'a:tk2|_=,#G}!B_xfVP`ii{&XtjyC://voOX{?Y/7|DB^mNw=abIIP4-a(aJtDU"BA7BTUAuQ놉[7MeX(B@U))jӒ$QaZط?|l9‘#GXn-uu J|y/,{7:*!V|6⩧ FQg~!!_Daxt?!젱KnڕE.k*E5Žn ŞF 3-鯎*%acv'UPTtv~>|>}h4J$e-t i,..k /^ ʍ7_<:@6FNH$̛]GE9_EֈC2ao.Mr  ToR0ۡ,цP}tGnL~Wuqʲ/~&Qk>:lSN1;;˝wICC[l&WM&Rf|3sO388n.~^[] A--hxE()B cf\|BqzRɍHYʀ>O6TSٞLnp}6f͚2b2ٳFٰa#]]]UY]&dž hnnfppg}֫w c1{h[؈!y B@دn*=/x^Y-#^NE@.kRJ[bMnDBE_7m vY`qqt]&lX'mpe 6~ZZZYN L+jHTՎoIٜVzZ!O\Eձs.Hۊ??ܾ};{")k+W8{wN'O<#{ͲqJ׭@&CWJĹ4?XR ټ5MMĉ455QWWǦM}?uAiiiaxxo~3dr:7'9)yp/[7DB:kJMYiXIR巒WW i/FggS=?o`444mV4Mc vZKoo/X aL$p0tO -$jvl[qbeU!'5[wXJH_IWotk l[5  e}'tww{|˲8qanu[nݩK+wfA\٩Ϛ5k%oN)P4 2:{wldi& :M4Pj-RUEuneY W(U`Uի쌦* @q]'ݝ-_m455ѣG [:l>BÏXإ<^{MEwo4-SEnM2ZZj YeUo^J;aI8[7h4ZfQ8vBkע:G+K8]~.)%wy'g[%:?M8q(cccq ]lc#ost&d 7IDATOCU~ݘXбa) T :UkYFRZ09OJujҫe5Ye M EQijj=;Y!wdnYLLLF $Vv kYܽ{7OW^ar#WYa3]m\#9"/Qe۝l#{ku|>OQ/b.VwX'-EX3 L{g&ˍTۮ8|>۷RVrPkzhmmi7eU`NLLp)BcR[{?W^`8СC\tW9?x6az&I._|-M %t^ E_x H&½W k"jf<8ӓ?wKC׼*ĩ$drYmGcM7,;io>_)mXrbIiz'1gϞ%Fٳg~_ i!+P}tu(vݯ)AS(kr._`rrYj{Rs+mAwV«o%|;=N8AGG555ڵ榲 P(Ľ˙3g8r;P &gIe \, EzѠX흼Uvnf hC!Y<+ij=3e}Ϝ9o1I("֭[oM5B=MMM|dvvqGJ꘦I&w A&hZ^=I{j|c ^_bTEe{ARM]/! oؙ7;{4 p]-|5G8rˤҞ<h40 #z=ضvӲĵMab<u]LSz 6F_bSxD㚾LPS@6jtl6a&AtJId]DM&xlj0<a-Kw#P0[RETUu0`Y'tɕFpQF|>bfjAڌ:ViIWx|#ZQ=1y*וN. Q={c,DqRѤTr)S h0VLC6lci.:շn_p$DIENDB`open-build-service-2.9.4/src/api/app/assets/icons/sponsor_suse.png000066400000000000000000000164061332555733200252460ustar00rootroot00000000000000PNG  IHDRPl2sRGBIDATx] |E#I8ANB@**%iVv*,^@WX#*G $CcfIzL@BtիW~U Sg8]J$ۙ: \h|C CYƐ縕g3#<ڟdtRʌiRx/$Tu9o%4zWs =w0OP$l:|O2{L#e&$pE/0!ʛeM"-I,f/wkTϥ/|ݟX<Ѱ".pVd\o>)sR"WuǸBȤ.zҕX#JDE@i~#sL_Rq8Ō`9W)]j~U"SfB78U{w˜8$l9jZ΍߆% Llc᥯pJL8i $g,X_K3LOF~|q[%Vi2d[?UVN+mQ &̳褸X.2ZmU$_UIh%f$L+Pc CyuH&vVD..G-뱒?`5[h),fL:[Z0ZJz _Bb-y^Y~*=ogE{VZmqyAPqP5g /dPzJ4N%EuDRfd, #y&Ly 9z#rL-ހz'iOp[)V7q2{4Er? 
~y"01)|~cef<|n S&al^ZCMd&\c [_΀(&JBtj'uJкbuc# $&\Aw/ähVŽ1hm oPjj|`JgoY`~< u_sYN%Cp$kw7gi(X:[$ErGH:n5:a stAN$eN-6pv18İDb!clYl´yM9'>̐ysM-$po)sJ,XQ픥+.JJs+:] tS"1} ׺Ͷwt.&LX#mbG捀=-z7-R!v δ~>ߤG NJd[iCG7 YRZ>x,O4,MȩEK .ÝHJofE!8x3͘M ; "cb "±kX7tS.xEAεH4Uwն HN 8(QPÇZ|%t3;ʨxa69JdW(b,ūeowL;#a;);GrEچ=Ճg&cw1crEP^;`P;J@̳*/Q{H7%KмsK2ɪ!-S'K(,ՓQ;k8 O1 ct~ᕗE3 vnznl ;Ԋm1 +1I(8Ô3lZ=j)N0>-KHOiq䛧{ ܤGU9xGM^ߡ:[-!AA#{F .͵d2k7u3]6_ :=zX$ ~R":+ΐSA.1IS v;0p/-Lі}͇La'sy3v} ӣ۞4TJ[ Ü򃚧;>I N3'B~T7&x)&ݧ{PjY{GpS|^z8*=x=)|Ǐn8OPaa@Ne8 1Mqbh[)2vSYx#=/&b{JL"5f;^ _h&c&}?W%&Qg E{Cp-;LpxQJf.G- nC7y`WpZ_uiÐ5ol7Ca??ߦ-vy--,Zn햅4uBUv+ͲrЫoSÖ%Z&jYs0I%9p~k'r1?5>K;- Yb>9JDf~yƚJz."c ]ZHx V]K` LӒ׽'Q+\v(1ߢ"=IQ\~lP3E&ҟuKZ=FR'Zũlʱ;/=%Vmo,jqg[/Beb3Ru BD̠\'Ҧ]ZY0`gC'w mVh-o0utΈ}ÍIKOo .[; ΞBGhxpv 3OxBk*74H/n3.8/,E%k2.gΜ)^yF{eB(^*bVR*_E&B̬'k~阨aN'zm@{h;i^`W)P СR@q+&%≮"\,QX뮬<T%8<}R?59gڦs׷_҂xuL<k{2*- 9':;}DŽ݈Vs!=X"M4*6Gz3®+!B»|gBM瞏I9{Vxë-WoA.~Hp}!4D>>INOYcQt w0/J@20  ӟw~{*k㼨yـ}edxyw I\J<[&6n᫜)2_j+ucH/O+QgKi^ˀ (8VϷ*mfzij0'cӗU3o z|&[9(2t1ڧP$57cW"S`~?BiˑkգҺkLS_V[mWǸx@ ):Y D4}̦hS{l]=nܸjYc80秧/k"!HV҆18+;uFdžr*x%yLV"篠TpWocPg=Tafmw5w.xLtk`-7T W95Jd7yHx)iȟC18@KgKƴGﰻ|E@ /~ZN,~NgMf͓}CI J ƯI`a/L[xU?p.+qEy'" "^Ћkc@+|מ`k]CK"%fyhʳvYN䊂τG8b6%R(R"rܡIWqo)R,o`m%AM5\@0rȿ>xyh k%X- j]_LO`|jmbbZVa1E,Opfj~f(Gd^TEtDxcRA{BMV-ZyȂ޷+˰βk{oe|?1`"+xkWz(Hcѹ6x0] rjF`In+=pIze"D?#Ȏ8XO \dSU"z{7} Z:Dˆ(Cn1`J )S1FKS>`1Aiǹj|;®ֹڟݷiAnrȦS 5ﲜ" ſjn }0%}gLs{"3 ۿZWR"ݰ[FAQ|X3<ߦYzcqPhLAxN1V5 DӀ*O}>O6$${o n*CJEM<TtBS(%Û4- d JCm!WٸbZ8Xc;v ^"c·)Ъ"+LTiNr%b B'p();Z-7e[Wz~<@%P/z K^@%P/(#GKmi?އ *V=ҞƣfĈ#q ցQFujK|?Rv1EQ^RGy˾c}P.Gwl~~r#֫|'o)cw ]Q˗/?2=h`ǎ!>Uo1Vÿv6xН? z2--8 ?Y unthŊ' a`(ǡ^Oo ?Dmж.?'LU",0E<o*0DV`#]? ;:o<<{oopW`[ PTzCV& 1NBE2ŋө6qQDa.ƔD1׭pYq!PtA፥ :jC]i#b < Hx 6nA08{6N> Tsn')}p.7jhBNN8YxE_`//@oʽkԥ`/E#YF)w;^?L9Y5ɪy x&-z4Ȣ@h\A-qhi&´zy zNu-Hh4uT ̉GӓBK>_ :hZZDYdIENDB`open-build-service-2.9.4/src/api/app/assets/icons/start-here.png000066400000000000000000000025621332555733200245600ustar00rootroot00000000000000PNG  IHDR szzsBIT|d)IDATXݗYlUE3su+t!JhC)[ B"$ 'B]xA,mH/H)EBz+]o{瞙%miKwofswkuYMYh×3/gTs rz_ol-2(ɦOkVwtcl \L5xh{qT\B6.pT߾~/88P''Ӎx';h 3%Kx5* ^t1J&Ko BBspUɎEu%5M\5GHRBvUx$!^IU}HL~tKW; OϘ!=`SNBI3-$(y:gM8vtOU^|-q!`t Kyu:i(؎+2'Ou\RMx5^k ɅDgqPæR\3Du'}&"#C̞[tLXkT6ZRȚ{)Hw:5CV3oO2rX@qhl 3@Gl ZV 5Tzj-1J(K1rhBRq .<98+e74!ÊrvnǢxK n#c07A@U{dT_͖J8 涎0/"h[[n㶼<!^bΑ]Vi碋p)9QIENDB`open-build-service-2.9.4/src/api/app/assets/icons/start.png000066400000000000000000000005751332555733200236410ustar00rootroot00000000000000PNG  IHDRabKGD pHYs  tIME 8{tEXtCommentCreated with The GIMPd%nIDAT8˵= : gAضmB{1CGumBP<σR 0d066QrRy#5#GB+ۡ*4MimAE(ۍR! !/b?8|pP 2 .'JvM+죫ܿ>e| Μ9˲ּKeuj5z g;/&J&oOڥ84P5]R]mk~s1<<ݍYĥJ]u_ZoEY8^9^vҪޣH%HWBT6C~EQ011+l``8%X'O~cOL&-JPUb|tx###f5M[0zf!":;;d`0b/ZD", ؊~3R~__O?-}GGSpT*I 2N:d2~ٳ'k׮=o>0P(`6<>S\p}]ןo[\oO*to7ND"ѳqF^EB@A}}=z{{qڵulVFXcǎڴiS]t:!mۆٹhq URttt@$W^|A---n{rxUe(ku]:ugϞ޷~J6E( B,Bjw/bsNStAg]]dI3~.{FJ-ﮔבmDgΜD1 \ukf@E40 вn6O3[_NMջNaŕW3ݽ)L랞W`n3[>A·V)j cǂ|&@TN>|`  & 2HIENDB`open-build-service-2.9.4/src/api/app/assets/icons/tick.png000066400000000000000000000010311332555733200234220ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8˽.Ca{8bnSBT')E)VCJ ǥj ZՆg/h…ݿk n^k[ꝿ2P6c=XH*G`?xԅ{77VԨپ%VHyqNtn[J2^53X,S-OƜoDXx2Oܵ r]L`}Z࿳TU(SiP/a:6͖,A` %S=[ b[a='LaW{xD[ u9J—BGqzfGN0os6"ffhZR".2H-[{(7h @`%E[IWu3e+ lGQ&' k|Ϧ'l]Q=ECpD!? 
zhdM)EGze%,KHnau'2b-"'-YXQ_=nᶧ2pcPt{plpvV˚d]\ژ4ͽ7MyS0'||9?ȱ=^Y?9df̗uZѡL'ڗGNåZPWmA_ɐ_L/WGWG>>늼xe#{E 5[|<5&~3dj xp'pJg%[+BX|fz~Wbd)G:xTN`K9eTyxeHHuj*Co%ꐓzٰ mk Db&<pSKT,R=ne28Xha_@#3F}7,\2%Γ CK['2 @Rq*k[X*yC}r2)c.hʺ[1@K@"i}r0vڶA19BX5;LOLifV _zL9G0lRJ]#xlЉt2eFSl>DZ.iN% `72xx OY1wdgNn߉c֚ڃ[@"WM:ݞ΃gc) &W\M2TU j;7eWD$wX;|o'zخ}TqG!fg_^|qIENDB`open-build-service-2.9.4/src/api/app/assets/icons/tools-report-bug.png000066400000000000000000000013351332555733200257230ustar00rootroot00000000000000PNG  IHDRasBIT|d pHYs:tEXtSoftwarewww.inkscape.org<ZIDATxڅkQƿ{3W2SgkmUZ]h RT*nD/tNܕR7J! nZjb$3\8Yg1g9 c( sB  I@\j."DDP.?:x.y :(ZaTcVBJ7:Hk<4M1xafaءwFȾZkG>Mxpw &"(!j= {D6{<2_`;zA1 ⟡ġh4zMZ6D$a >L&cx|{-;$co]Dpkb_7,{o? ;e>|( KϬUK_ |;lW`l@hkɞxa(===g,67`I?08i^IENDB`open-build-service-2.9.4/src/api/app/assets/icons/useforbuild_disable_blue.png000066400000000000000000000015651332555733200275210ustar00rootroot00000000000000PNG  IHDRw=sBIT|d pHYs a aJ%tEXtSoftwarewww.inkscape.org<IDATHݕKh\e{AA5@π&H\mc"n7dwcbj/ҍpU F7érbP+R$OQhqpoU6(|cs/q74Hrf*]$'ԚNow$;4]ߩDA3#OrM3MP h*5G9ѵɸLI?'3ڍ!dtN XA` w驼͛,xdෲrȀZ1*$nF٣̕ĝFxY-U(9eZ{0?q9F46i"W!S Vep^xwgs"[pG*-=S7RwT=/uDT5D^ȩm~L2/dLwm=IENDB`open-build-service-2.9.4/src/api/app/assets/icons/useforbuild_disable_grey.png000066400000000000000000000020121332555733200275240ustar00rootroot00000000000000PNG  IHDRw=sBIT|d pHYs a aJ%tEXtSoftwarewww.inkscape.org<IDATHUKhUνONfbJ I Q`WeָpᮤtU(BE(1u1jEPuj1gLE!".\y|{q;E ?1i1bF'n a _ v"yrD͠ 17Mmg ̆ZVH(X.+orP\Ɠu&X VR|>H!jHމl&d"+lݴ*WJd2T⑄yO .< ZKA42!ssfgï{]G_|0qK fhH%[R ZkXg^⧃ 0cCK}9nd Zwt6bq-z+w=7ڳA`$m]w){7.^[]0`j)˶.\fZ+8֍r`ԉb꺐erT၁?ؘ;VS$(d"%zInuܽ8ckp*xڭeYcD"MύFnUEOhڣC<;5L0A(먮V,˂RlWʗěoN,(L x2SyVw'###*dZV8 IR R Bk {%ywϷ `0l)s̎ǀ)5?߸= ɶ!Jִ5g.ziWژ%SہY3@AOߥjJ^.˵-D}-_!귘6O~\QNIENDB`open-build-service-2.9.4/src/api/app/assets/icons/useforbuild_enable_blue.png000066400000000000000000000016301332555733200273350ustar00rootroot00000000000000PNG  IHDRw=sBIT|d pHYs a aJ%tEXtSoftwarewww.inkscape.org<IDATHݕoe?컿m mm*5=HQxы1rD1Ƌ15QcHHxi7$PYZ5]vnw3LQU̚/?,(i`7-;,DnߥǛ({M}E}zF .i$S7KIsH5@Tb(Q|X/$A5Wn-<3uqt66B$vR.raM+$ H4UuZij1]H1}IFd_ TAd–JUg6iD&gJ=tdBcu$nYؑ 8~}79Y9(<|#I+iHV=uu 8}([-akaiLˮʐVPPx]nCR59W/Yz\<}d_7"QBꢼ5:I9:7+40]*G?jiwZM]3AxxDŽZժ\Ъ%l6OoZ72'yXo=uIENDB`open-build-service-2.9.4/src/api/app/assets/icons/useforbuild_enable_grey.png000066400000000000000000000020031332555733200273470ustar00rootroot00000000000000PNG  IHDRw=sBIT|d pHYs a aJ%tEXtSoftwarewww.inkscape.org<IDATHݕ]heyۼI~`]QЊCstv*R'SoE؍^)Ώ;Iە9/[gY4M~<^vdU^zx9?|쇆"\b۝7EG/᩼5u)c ̩wti?Fb.8̎Huo}/>Љb# `;EƊ&INThGs{CW#߱eS[Et|_fib nH=hDŽRM]I<>00? >A&LKziiU__,jDRR$'e1DdE[p#еPx`n"@3444k X3q?TUΝ5=ϲm\˲l`p+iJ8z/D#N_SQq{=]'|{8,\6,e:.UU&"bSsotyϱڗ6rpO:6%9ɃGP^{F ! KU. ȚOZ %;@"DV @Sg&~sЯ ZF h&yaW{\Ȅ/|9~R?ѹ g.jQf:ӑ/F.Pk?FFPIENDB`open-build-service-2.9.4/src/api/app/assets/icons/user_delete.png000066400000000000000000000013771332555733200250050ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8˥kHQm J¾*,D22/SP,IbJ C,6VݲY}Q䦨_g Bq?,[L ZP&M˶@-@̵E $8˺%5Q<ܦ܈4DvyUATPWBp3~UUK8$@dqr5Ÿ3?`E^ǨM)wOӤ[QP:tF(zjNW&kb{NTX&d7Py_Ԭ_V ѹ)>}!/ <ox?O\w/l`bGR^O@S^g1>h`عᒓjP= ^( b\+H̴wPcn*`$d[6nַ!vb N5NèY\ ?A薰>9>S Dj "j ԂWvZw#N{*ǔ=PCAGOXB !+峒neY|>Rn1Wx[0H3!bDs"q.34ne^|d\XŰrm"wfӎftpvvQ{IENDB`open-build-service-2.9.4/src/api/app/assets/icons/user_edit.png000066400000000000000000000015011332555733200244550ustar00rootroot00000000000000PNG  IHDRagAMA7tEXtSoftwareAdobe ImageReadyqe<IDAT8˥},`GOjmi^bHsaY:qEC^#6/KKr\5vɽ$t1޷lF[|gg|MC[YD5zL&I03p Vf"VЖ͘z^l}P/on7v YaڠKAFjxGHW.܋E]|0'O߮ݼ MIoQ1肴4N1,uS-.܎| ABNkcwzmhx'D1 Ox3dz2w!D؜X%H+,Nj:a)xsPg(_@~wCQ1I? 
r.+]St{ϡPwuE"tUG 7?\WW}V-C* ƐU W\FI$O8_y6}.hSJ1'.ݤҪK. S~"xj'D', t)C.,%) p_k<Ҫ2IENDB`open-build-service-2.9.4/src/api/app/assets/icons/utilities-system-monitor.png000066400000000000000000000035361332555733200275260ustar00rootroot00000000000000PNG  IHDR szzsBIT|dIDATXo\G?g_]qI_Si_i#  @ZUUF$ @V8qR8;N;3{0螙3 E qt.'.zW3O߼ Ν;bϟ*!c*U{\o}%·N8QR`m_ .@TXgqKi#޽83o'Cgݳc#cT~?{q6 T#|$8sO gB}. 3XkyENfdR#֚DPPESݮɾWT{P;_1,,;5{G_Y `y١$I&W+^1xruv" ;2,.9\uZ; c>Ͷ幃C|n2dt(_319f3;^=K<"B!З $9\[m-xṀ}OjyPyѡ>v>S3-+OXZi>1 `ߓPoZ>7o:A>RI{;;Ý;=FVO&3$0sT#O9gi4ssʢʟ/gYdQ1*B!12 3OZhN[sIA9:7L:ܧܙ_򠂳۳Wܜ(ꆭӎmG h q豎 >|IFA;z("0&#.,3m;a&Iߠ`!dVVVt/eP*bbrZߧX*d;X\Zi`0Yk'?낱_%N*Rax(ՑUw @=gF/1vqga\n~x2/޳"f.N_ #^ [ja$*Plbp?ѭkĹ$*9IENDB`open-build-service-2.9.4/src/api/app/assets/icons/watch.png000066400000000000000000000016471332555733200236130ustar00rootroot00000000000000PNG  IHDRabKGD pHYs B(xtIMElx4IDAT8˥[hTwgn646>|P4B5M5TZ4DThk&/}/Hh@[d޷=9%Z 3Ïo>W~7ߊ*qBfyxE |ͲQE&NgRg2{v[c ;֦[aHR,(ĥ\6J>iTw=6DrQcJK6 hDčآŃCro::毫<MqIʥ"3d"rIP\fU[,X6n0y}V34ݑApw־D$I v(x/7br]Hė4ʀ+m$yZ$QO&ژ LhbN80@D<ޙw2>s79(" q0ȭXt}rL&zee` DQJx+V5`rGArPpylxo#Eԉ} 6.@o{`rO^Z&`lZ[[[TCz ;fhOIENDB`open-build-service-2.9.4/src/api/app/assets/images/000077500000000000000000000000001332555733200221215ustar00rootroot00000000000000open-build-service-2.9.4/src/api/app/assets/images/03_ascending.gif000066400000000000000000000000641332555733200250450ustar00rootroot00000000000000GIF89a !,  i颊;open-build-service-2.9.4/src/api/app/assets/images/03_descending.gif000066400000000000000000000000641332555733200252150ustar00rootroot00000000000000GIF89a !,  ǝr;open-build-service-2.9.4/src/api/app/assets/images/accent.png000066400000000000000000000067161332555733200240760ustar00rootroot00000000000000PNG  IHDRhgJtbKGD pHYs  tIMEۓ< [IDATx]oWzg^8$D[tXi 8@"{daHcgjb T~^F{]'ċ{`Md2%p^N/$;#ۜ3(ؾ43b>gh.-h(:>L}9Ђ3ז[:fmg*)@Ͷ( $mϬ'1Ցt <" DI02%1.>衞o5BC*y҉4(/0r: c -Ǐ1}1iX\1|bM}TmMSQ6h|A#ؚF7a]< y2-&/2 KJIYj!6q 2k~wMX}.=^՞ܤc̢{'wPSDya:R`rM {)Tr"|Cd0_Af?y0CzdU o|zB@ܚS5퓹&QU-d'pʕrۮiO::Ӫk&!Oaqh+uq6:Ο~3a)D覇It7YFiȠ@$6 GmOWdŻsc1$ ˟ %G\YG' .(`܄ulhE# nO*@/.7$bQ8mrE]Gzx.+ Æm<eW>+C諲F Z8WlB`a`N:"oSA?!@K7Ep8%ݢ6U oum:=tv6+ӆf򰵥!Emq\jCnE^Fa3YPNw>+D*mh`h_ƇFsx{EsR <[xz^|:=PI􎩓,U!VقC XXH$y56 P{{?L]VwJ]^>H;s9udy-u9Qn lO<zR;{];oPRo: *j!WJmQp`t]jcG 5Fcwm-ju ,=BX}%HSRr\DS|ua0WQ5ٝYN6pڐa}S}/"JV:+~8Gx& 7n$)@ypԁidYj-hqx́ڽ/ɝ${'ڐF O"eX`)ABhi9ՆexB?);n)skɨՆtm5-Pڎ W!U.9>ɈU@1:.[KN1dsOa} I j'Gx6M J*_qVOnw2zzTdXbʊ4/Rd.NYM#VjVX$Hy8EdȖ˯Igw|2+vR=f4҅^Z+.i3;7$YDE-@ni}@ߑG9[=. 
`u :l[zwP_!J?>@ngҴcВu%eTI4Epqf[$baƝ2v?R)@X(RtW޺.F=VIy1>*N[f*^tIA:7D]G:ָSJ) Ѿj8toϋGmi{↫CQƷ+g64@ ,qwM -轓RqcQsR֜WҘyxz?@bB 5wxwMcCOLPZٵ ecWb#Gp] ˸asE/@bH߰Բ ac 2l~~ptN!ϸڷS,\!0jC= % ݪoxq[ X(ޙ kp7T4>LǠfJvM]t3^Wq QOߏyk77.ݓviI[ʦh SKҴtB;@P%ЉRm[OE`{IQK' ="H m,FH6dGA'֠!3-(M!8!G_O?[YPi!`vYw}(8ŮǟӞ^YnU'4Bg^<3s,, j٤K5+Yf<ƃtȖeY:)Ky%߷t zS̫b10?(-ygAeLv3;t0Z7mQCNOU6Mql zP<(Ug+f۶8]$pk `dx:qi!(;Zt 94+6@YqVF!^eׄ#^G( !ĉO`:d{+RaDYʶ$UO"iH bd%6L-Os7-Z"3ls 4%~ɦku]bs~MUeB(SlT2hT)iUIExSy0<;Xtפe1Ai9kO ONJ7c|OBuk Q^1Sp?;]3^;OXNT]9)~N=K(S86aUНT;dbRB`sN@s>v-~eK3uz ٭QtyJ&t YܹCe{ߦaPyX>bhGE>4@5  RRȊtb'R /}^SF ѝ- )A=q-Ͷ  t㌲moz:x-թ#~[CaI$?\hhq"IENDB`open-build-service-2.9.4/src/api/app/assets/images/add.png000066400000000000000000000014051332555733200233570ustar00rootroot00000000000000PNG  IHDR00WbKGDIDATh혽AG\Gc/h-z uXɁ`e#6X(A]},vv3ɝq`0<ϼzѣG=zsk߮_a{ww~VYxiT[9~ QdIvi[Z;#@ [-nGKtL*R)T{Jo E2*,y>4ح ٽ[$1e&i][GE$ bL_4Ժ+lT,y,Q=ԊϿ  o01,oE?7:37%&20>{WKXx7=FLEʕDjqG8\pg3'2m "˭`\9?JKI`&[e40ڃ~z|xu+rMX@Ӽ\".ʀjOS$i%[r>Q{15s̐D 9 pLp{8$F̵G[x?7ٛ,ƃ\-\mv3 |}˸lϯ` duHdy"BDۯ.G=z_Ri/'YIENDB`open-build-service-2.9.4/src/api/app/assets/images/ajax-loader.gif000066400000000000000000000012411332555733200247750ustar00rootroot00000000000000GIF89aBBBbbb! NETSCAPE2.0!Created with ajaxload.info! ,30Ikc:Nf E1º.`q-[9ݦ9 JkH! ,4N!  DqBQT`1 `LE[|ua C%$*! ,62#+AȐ̔V/cNIBap ̳ƨ+Y2d! ,3b%+2V_ ! 1DaFbR]=08,Ȥr9L! ,2r'+JdL &v`\bThYB)@<&,ȤR! ,3 9tڞ0!.BW1  sa50 m)J! ,2 ٜU]qp`a4AF0` @1Α! ,20IeBԜ) q10ʰPaVڥ ub[;open-build-service-2.9.4/src/api/app/assets/images/ajax-loader.png000066400000000000000000000007671332555733200250300ustar00rootroot00000000000000PNG  IHDR##jsBITO0PLTEHbtRNS"3DUfwv pHYs  ZtEXtSoftwareAdobe Fireworks CS5q6IDAT(c` 0*s̙&$ջBwNB XznSUw820W߽}ժ`j{*` D_iP#l߽+͜Ys̻wF͜ ww`;%:`Qp!AGG#\P\S ЅˋЅKЅlF/ `B`@"AA--U*$Sc 5()„TCC\B0{B.JJJ0Evqq )"9W,, dll*BSX IENDB`open-build-service-2.9.4/src/api/app/assets/images/arrow.png000066400000000000000000000002441332555733200237610ustar00rootroot00000000000000PNG  IHDR ctPLTE,%tRNS@fbKGDH pHYstMJtIME zIDATc``h`8.1"_IENDB`open-build-service-2.9.4/src/api/app/assets/images/asc.gif000066400000000000000000000000661332555733200233600ustar00rootroot00000000000000GIF89a#-0!,  ڛgk$-;open-build-service-2.9.4/src/api/app/assets/images/beta.png000066400000000000000000000017731332555733200235520ustar00rootroot00000000000000PNG  IHDR+ DNbKGD pHYs  tIME4 >@'IDATHŕ_h[U?$7MbAǶv(VY"c}@ E7a8!A+6e[u4ma7!M~s~wWG7+أb&ۖP.9eGQ^E"8y6BHeiZP:Ll!KQ@(*TҥOJֈ"[ MmI$>I6OU$aEq١$@0W,~~s6M;TUn;N%a āY8]ҞRrY-)x#ɎJuW(FC 6 /_=(lb.ہ\ΏW PRگZu9 w+jZrlvOtv>Hٶ-**Ӏ L3.M_I)W]I)dICknZ~#{╏|>iZ-_lZY9w[[@=+ukk#鑹mmrW趘8R="+SL)WI)kkpY)xS}(@a?WZ]]Ng2'~m,ǟgHcTG+kXS )7 sRś1t>mԋ@eUɖKN֮D<<}c(JWΏ堦k/G5=+Z=u8kUni?{a )LSk3QKE[Ea8O%G9=mFL+6eosڛ}\*f󣸝r+6 &Ūְ" /B U€^hIENDB`open-build-service-2.9.4/src/api/app/assets/images/bg-content.png000066400000000000000000000005321332555733200246670ustar00rootroot00000000000000PNG  IHDR<::obKGD pHYstMJtIME 8!tEXtCommentCreated with The GIMPd%nIDAThֽJQE5x46X!S$4IkiZ䐟ۥ=pfh{r_fk̷=}&JMsmW~<]r৻=2K׬t1Yo-txNd.J@ơr$I$I$I$I$I?Mdu,cN IENDB`open-build-service-2.9.4/src/api/app/assets/images/bg-dark-gradient.png000066400000000000000000000011211332555733200257240ustar00rootroot00000000000000PNG  IHDR dtEXtSoftwareAdobe ImageReadyqe<IDATxbTSSc@,CAa211@x"]~@3> p@18%W'Sl~71+^@$ ^@ f[| J80 SrM i]wD!!A63'`0 p*4 xŐT-PlSjMr x/*X1cS"+Es/J A)2X&}_)uסo '28a uynpB.^d'ۘzJP(']',xj- Rŵ6(N \N)X&ϠC< Bі3Ja_U!!HvߏڌOgY%ngA@`J&0ԋ M lclꩭn< # JU 
Ң;y4nƋIENDB`open-build-service-2.9.4/src/api/app/assets/images/bg-gradient-crumbs.png000066400000000000000000000002661332555733200263070ustar00rootroot00000000000000PNG  IHDRݥEbKGDh pHYs  tIME  {+CIDAT(ϵ 0 p_:/̖ {}]*܋Xbnң۪Ĥ[ԟII-%y)ZIENDB`open-build-service-2.9.4/src/api/app/assets/images/bg-gradient.png000066400000000000000000000006361332555733200250170ustar00rootroot00000000000000PNG  IHDRzsAbKGD pHYs  tIME9]q+IDATx;@EAƘXXXٺ7<ӟyCq`0 4sp)7 gM23 a8zVL p: WL 0\7Ndl0`pdd0a)u1\Ia{[CWuI20 p5t3X&I 0 31d`aS0 0ei0# ƵI"IENDB`open-build-service-2.9.4/src/api/app/assets/images/bg-topline.png000066400000000000000000000005261332555733200246720ustar00rootroot00000000000000PNG  IHDRځebKGD pHYstMJtIME 83ɘtEXtCommentCreated with The GIMPd%nIDATmn@ [$D-jKG~BDMhk]|Y`E 8-To"0R"/).GDK3a4Ik#MB0 '.*D6MMEg_g6Rˢ@6 %;P EW="g!ZrINUIENDB`open-build-service-2.9.4/src/api/app/assets/images/bg.gif000066400000000000000000000001001332555733200231670ustar00rootroot00000000000000GIF89a #-0!,  bxT2W>e`U;open-build-service-2.9.4/src/api/app/assets/images/default_face.png000066400000000000000000000014311332555733200252300ustar00rootroot00000000000000PNG  IHDR sRGBIDAT8˭KTQǿw!"QH"E\- ]DnڈmMVZ% dX`Be&j6388-A.\s.GG_[[[+H}'`oA{w<] H$FϷ=iijNjm.--"03a)eǕ2v0f>}ޣ213@HoVov 3j0䶵̌pAu+ߗ: e C[QQQ ..*J)cX,6;2DfV9AggP_r*~_{zzUUd2d٨NAJ(./tSS* ZCCѣ^ӹ;qc Q - KH$rxhhh_II鹙K.1k'mKK?VtcNO-,,T'I7,)%AQZZ9úDQ uB@JK`""c <{mQQhoo?F" " Ak-ƙ-fzV\qߌE[,yff03<σb4bmmUxy:9v\@"DRc_}f8 45TIENDB`open-build-service-2.9.4/src/api/app/assets/images/desc.gif000066400000000000000000000000661332555733200235300ustar00rootroot00000000000000GIF89a#-0!, ɭT2Y;open-build-service-2.9.4/src/api/app/assets/images/distributions/000077500000000000000000000000001332555733200250235ustar00rootroot00000000000000open-build-service-2.9.4/src/api/app/assets/images/distributions/centos.png000066400000000000000000000065671332555733200270420ustar00rootroot00000000000000PNG  IHDR((msRGBbKGD pHYs B(xtIME ygtEXtCommentCreated with GIMPW IDATXÝ{|UՕǿksIMx0g"Rp"؂8AiQ)hSTGCVQ':(/HHHnn㜽{n A?s?gV""!jP "M ")U1Xk11g.U TBD""2_DVծ@WU~#sg-TD uE$QjxHMpʼnUuXl;)K-@UQU:S| j?'m2OCX,"Hap s0L0KGS"6:""E*%x6fQU lKĦ-ֈr-ȈqE$vԥMY= ,fPSH , T $p߸ozӬM~H8)?nK56~_LNUYz*s-@FDl"(SR." UB[>!'/[1ED0~axЀ @UDEUS-H;(pIma'ӇY΋1Wˑjzu7FTzݾBc+wSw+Q؛UZCyui-i^\(wOַz>ܗ$zRs+h<%[/NU>z^o#0icpOSU?G8x#A0m՝ Ieg18q51pصh ׯEՐވc=Yǰ!I0ʥfqXH?\^>|wV8b;XbB.W?}f*7A1˳P57S6͡!` h1S ""d#i! _G&T2#x)(ʂg}L3yo.v- ݎy|Z{_~( QUQUL!@TUK{;WqEDhNoFp^XqO p!Zch;Z߆tJW]#|>[A.@buz nc5fC0 /I.Jsul1B0G(j\*]("W@$1@ P \:7wԧ8qΨ,,ZeI4ylj U_*G\ߜ[ad?9ם5seZP"ZqZ&?^KNFr//6B&v(M%'kjhԾ/ wԷyjm ,яDBwoMb""?NշƃEUwWWZEDD( "{^|PA=vUm=ׯ۝s\#7{?!*#G '|PxBƒxJ-b} jɣ9-Vo1ad4G1k#8 X(``DUjWڐ; w|.WuGOSkq\ۓ}uPTk6_J!߹$0o˼FͻZ VzQ UtenyFNx+|G:K53y^ OHIn8ۦHwF -f/)mMgOYꍪHW =z#7HJpإ#sxxWw8v+u(I}Dr睈[B%qaB1\,cID~#"u@LP''D$kʫ+V5[F}mk˦y,ҥ?`Р^_ u)oĆ?i-'\^cHhQ&i#7NA"]7ݻW`Dp'I=@-;h!>cHn)"Ą@9S1h5@hRA҆TU}I՗-oD"ʓofos<93!0"8x|)OGJݏ%&~^(< 䳎|Ui@/A'+ sxy/LnLt:G4*"m3)[Ѵ:c6Q6r No=Dܘ6#F-HZkY gؾ~gC0mSN˭y c3Xܑ7hz=e,#kͯL&w9CC@VU_U]d}by*.pKhmbty8/^^;ҼF6]SPݎ Jάظ!(-HЉӢ4Ә*ŚVQӨVU|bbr=Vh4۶ڮWPUNy_ʾ$KEA֯zUu~͸9^E*T?ʴiX s+Hx4Sv61Z $z r0E SyӛGv|kGaL=Mjaxlq)q6"bZWXU^l`^~#1@$\:؆mHP7Ȓ=M\q&ٌ)sRgU2ěQSv eEHE$4 ~N["\"R =e&CpI}V8lGJjG_ļvڑb-40]kmC(QJU֞?]y/%=U]qkm^'B%u}gTJZ+!"T"U}x5Z{qkmNޙbRnX*"A]4KUW.ցn5U5 ]F}iNC? 
TeRIENDB`open-build-service-2.9.4/src/api/app/assets/images/distributions/debian.png000066400000000000000000000073411332555733200267600ustar00rootroot00000000000000PNG  IHDR("-e sRGBbKGD pHYs  tIME "*aIDATX]X{xf23Ifr!d@(,D@\Rbݮ(TwW>k]ql-jD $$!2{wly<32yCe@ї̜#47`0s\ɧyWÅg7L!Yn2D4P IDY,k#aD`fHA{_9mĦNCGg}ANz/pJ)ئ~gÂ>ce4o\c]9o:3`"@w0>"jR,g5ljv5y0XsAR3j2hD:qʧ6>KLnhX3_k1sN`SL4ԙy|U5)fJjf|wkSU!R(Ϫ1,>{S.^5W yV(/\ǵUʹB.=mݺ&"*D=EqpF;zL"@RD"0PvĎezZlV3yD̪Q)(ʈȭMD$Y_CBmo}D|^M %?ڊQENM !& }<7+x;'LfDJW-oy}zodwbX]VWsQDԙLfv`!q_= @KzCwl 3ݷ7L\`qTӴʵu%K=<969]@+}MVdR贔 H5JD&hDucҲXJiWa2k4uSILȞTbt-1M51@(DT'$J Ps?""ZUZpzV-o@h,s=>t9+,X#F^ Itŵ!"r+d (t)PNFiЫg`v)D64F;>ߔ>τ(^Mc?[z[3ڶp"98 UHf U!+lDb9yE3n[أ@Z93UqdR(svVnZQ_f02sL1P)>Dt@Ud"աm)ut"'GŶ^[jfLicfI;acf8]ν ~V@?K)b F '2s%ue D06sX jRWAY]l:U=ym萻`ѳ{]?.e.z_eɒf6_*YEc~-@S^`U8 s/2Ɵd9+scxu=w#?q~5)IRUS~]EKgu1sv58$٥"T9/0$\ VMZ~sg6lڲ67EQuoe!QZ4XT!03Dž-D<>@!3lJ.Ytg2 3.Ẻ:4 fd 3W(,JTȿ(U9 kX|G| _v9pqFxgn1l+==e==*ݞZvپp8|ſ/_~Szw޽DZVK˯[,RMt>e˧l)0hB %"jr< qֈZ.6]vG}u: r=7GvqşlCV|Hggg.[3# ]]?vZ `@q`DQ^Cs^4slP2ot:4s~3Kdٔ7Je)M1%UE {iR䊍cMyC^mmݜ?pPyOOO%KN<_aXhZ놽`Ν{zzcG.ض}o5]wg3={0&V\y{{={J$]9NojVeFR5j*ISDYC+oNdߙǹt!.(Дdsw9R7չ?[{H)mJr$\`fxJW>Ddo=xǟztÆ2o:忶] [VNX̬d_J:i\jUSt!JfY! ztmǴiwc{S*4tZ+7H:|[lȲtDDfQVc@~R)p(H%F=~IT+Y@y?˯g{$*&w]eifIENDB`open-build-service-2.9.4/src/api/app/assets/images/distributions/fedora.png000066400000000000000000000064531332555733200270010ustar00rootroot00000000000000PNG  IHDR((msRGBbKGD pHYs+tIME6+f@ IDATXÍy]}?{ߛy{ƞ WK06`ZpmB+TmUEZ(-)mT MEiڨTi ZT85$0e&7;8ekxhݱC `J~޾Kl]Iedl1&rbq˲ n16!1:5^'87Ke? 8ߢ6-] q|// 1;3C!.kQpbqň_{~u:.LsӇy# 4쓕7F1q=떷r9zN033q(g<|䠔bfږ"]0d/twzJNמey[~i'gzxizNOQq)B7m(M$>m(M9DidLrsDJXY<:#w։s>ɻgf8r1b .b:Zyⱇi|s{~2MmK5΢BUע9"OˋlYIs}ƛJI{߼wS yimMűkl~KV'qJAQ@nznHzPt6o⋟8(2bW,溫fNezzJp""DE}#N)2<W@!(„N=oA}34?8sF&S30>ř#޶)Ogxt O؃chؾ}[ُ#o#F"w-pu(dkFk\AV]XٴO7H1;7p (&%zD((gm @+Fi8\od6_ZzYތtODm;RbbI8Qŋ=qf Np㵫0dK6rMg+v)B(ʯuq<+A! F+_`x|si F'Q!9[`/3<`[ 1"`HޟYdsD{.a:XbFY,ssY-It1H8/;ڡ6}Y)WMfFkN azJ! 4@ ?,]FŒw+„5N~ӻ.xe: V6P|{}/s\o݁bsz8>ѷ['QmHX!Vg6T & V`Ё!*MK˻6&WWĉ&( K~Oe:@L L C6Xe w-pB<9 @vEP,XWRcUƧA~QxF>w6nZZ2HfV,N{3~f|g#\E%SΕ:>)RFS[]Kl#0 ?#ɪDIENDB`open-build-service-2.9.4/src/api/app/assets/images/distributions/kiwi.png000066400000000000000000000071651332555733200265050ustar00rootroot00000000000000PNG  IHDR(&sRGBbKGD pHYs  tIME ! IDATXÍeWU?k}ι羺owOL3$(""(%U,,?RBZZ`B! "! )c2!@f&dz9{{[Ts޵*?d*"r#tL/=w͗'Tc#[Ipk8z,ihu.񬛟}p}e?PE·o=A: I\s1ՊĪWUHv۸бH͗~ /WFx'xчYkFNF{ if ~N9٥*gH00,PgLc&9*׸'Q47|ŵ.?ȥ'u#ktWh-/1)0Wc$F0)k7'# )a2s/>wtc /xlS}/Çn]fe8'OXn1.7DD Ap,k+01e-K0bgg?;1ƾ 7OF|_?kc;iWg("sx@*D*k;˔ XhRrek7 :9.鄯~yai|k~1%"1ZlX#DBP$:uNr4?nѤ ;} 3|-Vc\W)5 g`%B X 6$\]TaYlL"G{\>E{#̦4d7OrG!<ޓq 0*B0сw N2#ѡa?ٝ2mwɼ"^,Cjh}'?+̓Mvg[1*&$6(Y^ho|NkIQ1UK"V If Òkl;PLP)EGIp&{<%)Y8A]dec 6iāb"B9WiBY* LFH,ǸR)Q s%̙ Υ9hv#/ nogceЂ^DŽ,HE"i!]_EFrh$4 9ҬAgdj TTZU܉[n,弪4hFcI@bpYh;Y*hl\իL&S& s#=JيXFY%GQ$.sr ޏ38%fW\%`Ѽ?"̦OfJ\,BgYFB *e%XfCVuj g۹obkԕ#ueEb+Zma{Gzc֖]XζRUB0ˉ[IC{ْwlj@b/R*fs}o̫^[4o_z/i0{?*OY\4;*,XX:Kv K9rrA^I$R@Ԓ-,ZK8g˃mƂMzP1KYTGji^aqd\ .w^0%bͳDh3R/Ya_iw=3OO<&:A<$aP[ !/KrC&M<`!napWyַ#BPKF (~ uuRY!qM"\/'U~eǃۛ1߼—'G;l1d M"H ; t+eキ18ǏV$kIv͹_V~$e9λ8z>pyECޅp92M()q$IJ&d٠iMZyn+gę[ncixŃ=xhoWNlwSU%1B(`p%I4!q^nKdww_̵*~IENDB`open-build-service-2.9.4/src/api/app/assets/images/distributions/mandriva.png000066400000000000000000000103211332555733200273270ustar00rootroot00000000000000PNG  IHDR((m OiCCPPhotoshop ICC profilexڝSgTS=BKKoR RB&*! J!QEEȠQ, !{kּ> H3Q5 B.@ $pd!s#~<<+"x M0B\t8K@zB@F&S`cbP-`'{[! 
[binary content omitted: tail of a preceding PNG entry, followed by PNG image data for the following archive entries]
open-build-service-2.9.4/src/api/app/assets/images/distributions/meego.png
open-build-service-2.9.4/src/api/app/assets/images/distributions/opensuse.png
open-build-service-2.9.4/src/api/app/assets/images/distributions/redhat.png
open-build-service-2.9.4/src/api/app/assets/images/distributions/scientificlinux.png
open-build-service-2.9.4/src/api/app/assets/images/distributions/suse.png
open-build-service-2.9.4/src/api/app/assets/images/globalnav-im.png
open-build-service-2.9.4/src/api/app/assets/images/group.png (symlink to icons/publish_enable_blue.png)
open-build-service-2.9.4/src/api/app/assets/images/ichain_login.png
open-build-service-2.9.4/src/api/app/assets/images/icons (symlink to ../icons/)
open-build-service-2.9.4/src/api/app/assets/images/icons_sprite.png (data continues past this section)
bmR~v}m :1.?~(^,}1k6%2{NϩzgwC[xó#9Nv agI֡>P!'gQTB*: c~knKΝCj`F*Ճq9gGS:ߙ5}Ç:q׀jW9yc`~E&|ֹ}~+|1#as'nP@E23,XƍX 3@mƚ5r%/]6MgXz&䫻"IQRZ;g|0zB' b˰~]n'w] 9O,Y҇Tv~a\7 SEϛ7wmUDEE ܺu c\=}4{j_-:|s`.8'ޒs ߳$ou+?ZγoƵ#k:70uԷ =?u+kn[yvnaԑosySͯ3P(l)0Z' X[HYW>)~ut;v@[vބV(y%>ꛭVkvkB@n\sS\Z ,ttu'0%0 0#Yw`lo`TW>¦~(ꗗ9obb"h0a|7-,k׮t>{CU㜐 q9ɾ]N']G[zm!hi#{viq,:5F ̱ל$cڌ)[dHhѸ>_}% BK9x)#et1Bޛڈv|.;x6Yc^`iiI+X *FSGAްA#>; Cm0g{r/i|gvR(zʶqVR҅ p"6NM =ycyx껡aCwxxzi]j0&a:XnHљd'uᢘaN+Wbk[X8[p %C8.+_]QPZvXYҧ y3 -ZC`ag8YɿZzy#"{*TקRk4Ƚ}=>@آA?"'VL*k "YZp ,C:dqzF'~鎏7joFcmo«fP!ѫ$UYa OZRƼiiVjŤu~(zxICtTq@5eǠ$N|o1}[ӽ@+wːђDlgj=S֪‹@"8EnNs YY((,2QT bcT uFn4PdٲITi8<7+^ !rr:kڠ̙g9s&;uTvҤIٱcDzGfS68p V>7t4{E6%ȊMjn@.j}A9/ֹ7kae]NJof/5ߞk{,G$rh6,Fp]=&m 3ʥ/ѶnX0e: sqy_[눗Bɏ?tJӬdOؽ(FAb0ͯuB1VjUwggg#55p> V11ȼy@}љBB %V^CSRy8E<6N^ӡakgKG%@%VTdZȓ͚}oXQNh(4NNӶs۷r#"a#{")ZFB : OG1 Tm'64u+lhAW ~z,YD1վ[ 8\LO+[% a^]&#*~\h;W6>e Rֱ|o3}Dzrwof &IYE-Zz*^9"qH$Ry7cc"E 6:}ÒSꚖ"_";>G֍;xp'(IzHz!WV%^۟OoR,W^ͬ]!N4qFf˖-Ν;{2b?Μ>}x"_xl ]yE]2vnЁ=,-'ٺ >KѶ=>{KS; _/]sv}̷cjbd6Зiv%RGf:[ G^1zOxoȖPAγ%%GP?KTP[wMˀpYR⡕IVCgg4[*UAZMx|v|ؤ$ܼyڮ];4h`t *?Ɗ+n#HPM^u$ɍ]!ytWz3I.:־c<{{)Ťaݱ0x.`qD9!!P sCe tt_?Rd>kW#'y)K B)Fz d4\حG>}|[Ky5Q^|Nohu<;DNj?̨US]l\*XbD4D,$ J3ͷ2'k4:[ I|czc[@O1eIr-GjPՒmDE+Gk>İ@ωء˘Sm*G{[y~[&aגQۧ-[Ovd6|䃔rMRsrXy4rE]VFR){ 0\Sm 1c#F2dH|~{߭[:Y3 |ǐN<ܡ되)ituM\}||5rV,HY@#7k|Ч rgN^\oU)q᭓*|p )ZxE0ndL9K{-#^*])D K2܊. n ^C7hIs;f뾲xU^]QyU߄6gΜINN 9ڵk6m_rOx7̸7S:E4&KeψHf"j]d堧K~0/OkՊ]:⋑cyέnėAņϺݻ#%_,…^KKȱu DU$Sm+kx>_*e 1޾ zJXF66x LW'4qs`X:zoZ݅G)S@j:6}\J-cXP=OL\ x+\MW@U85(N|o;}D(deye^<1]13!3bA$͐KK y5u䷯EXPrpDGÃxy7(IR$`*/3Dz]#.͵WDRF.D+txhz}#SSjt/#-_'qnvʕ/'^M#T$qO,ERŷ7:7oȧEhr-k|JK!NKPUx7sĝ%׬Gui,6֚qN{+-a cg\cn$+?95vǼ "^ vNhڼ%,\E (WKfM+iӰ= ENкͽ(4H@#/\f-k]TI.TdRh3Fc\|M>?WW1lذW^f}:u x)6oޜ8"6廞ñ*DZHeKI,$wKn 淝-;yhP^ֽ#$&l D'3biQc=)މhґJ(%9㐙o7D~^mÌU(?>ݑT&ĈnhNh= έjMZ^FQ eCObO-htմՓ^ jJT\Wꁑ₧HMy @pY5c)J3ibblYŚ;">GqWSQЈ>OT0PVЪunP/5l fzJ B)"c<zڕ%a]l>|o{GNSb[9rDtIE0ѝ;wD\iJ=~{/O|<t ":R>~!WbY[ yCc e7#- aXr5,`,A6mHSm`M|ʴ`52lmm=Ҳwc+vÞu -y jz!S{'_+Q '"&%WkSx)\""Kx;n:5p5(~ 8py"--@ 84"UՎ~}KYlq1O6;`Z+?-G҅XWll,HGâ]tKI,Yҭ/4{tKI@ NRئab~XJOpZ +RѾQwצ/Ųk&h?ev1SW6ڷ3xAL>Y:]ÒT,Ϯע2,4`vƂ.$"3i)r-kJ()X<\l K)` y ~Mu^Cѣ4B㼸3.mGCJ<'R\K"2c\VQQQ`^EN>R)0Bv.zޅKzӊ!VE2]aav|=||*Ku L9jEf`s&T{0&K'Uk|S`UjVcUe&9k»ẓLu\D<>q[8𑿿={>ԩӣm>~ԴiGvAci5nO >bנN A^74P&^EqPBcrXXZaL40:v)h1o[ C;X, RjIyyERgK1$U~~i: q\stKMM=uR8>, c|g_gp;{b#t:ηxcxeRt/BMvsٞݤ/ڤ(yy2xfq3z@"# \Ύؤ،&gΣa;rBaQ 4k#|~&jG PHK B$t2"gv V&gAD26p8pX#|2fPtBp*1 '.QS4#o% ޿𓛑|n݀4(eRހ#'$zzj$ lۯCNdyEj0|(gʀvBR$+̍ᢢ?~J\pCC SKb&fJkmi_U+yUinbxym רMF4Omm JҫHl07/HHO\G;֫CÆ egDD-.q ET]@ORr&t f/oHϑSCL]A]fsr$Nj8ҧk3&,$b ! H$ O30 ?kqQF匲yh:g .t?qx>r>-8šMR(9z+%ވ JxCY ㊜ Kt,< wF΃E VN l0_ջ]~ VqQ&DQƫRl 8-TvvvZ;θq&l6ThtdlM£!T/D`r,]!)nʬ,Hk}U,+K0n;Y ; n_J֊NQ4J3^^( YJ< * H$6iR5T&O=S18_W;:ݻ&Nbf_4 Σ<"m7e-WA+r5߫cIYt aYnpbai޼kkkH t.vϳn/Z]</++booo߾skKڤZtI$Q!,\\a  20)3<'Ә8_G ΄4Ft|#˒5jU++깑N]yy=~W?_zUt-E111xQbb(--M]m*"#7 I)ypO,%"iqoy"IQ@X0tj.Ɲ ,Hңg78Y,KaywW/! V[po$_%ȯ&!KEmqrPB3iU>)56ՓȆuNݳnA\/^: y> M R“yb_ώtBdSOw&?낗B),rjεk(QC*K^ էJxYBQk%򘂀%i/ ~mP#|~ϏSU'߆׵[d4e6 BJGQB;zh70 KJKsUAWJbĩ!ڹBnF.Ѩ#::ʫv,!!`U,u3ׇV'pt}GpȞKۈh]=:SX^\}i)AѦ;2Ggi ~b,8&,7˒~j\،w)>/%6|1䰈JISZN<&Pj0rޢ,[P#4(UʔTMbyenJ rp hE_OXN0mѹܚJ$&^{玪{ocÇ c=޻\x,)хtg "_C[8yF!5Vb|?N$f *Fރh[und8uj?)w+ߙ9l>sS)(*B=yq¶F?OEܱkCrC" _ I I K_) _;COJ^Su /xi1Y)4\K$G=j|IPϺҌ ^*x|( 8[^-`{sGZ[dse"rIeSDnvHߛ/o!h9p}[%EA{_sX;.]Jg2Dc[233Tڊ;'&&pB͛!0poF@'jT $Q%yJ%RhjF6y9}aԼ;&D,aO[!:&lM|={d;uĶmۖf6mʺgX[[:JH}iZZ+*~o^U,-y*HZ+4 >)7/:9-SKg:&oOdZ2\DZ3 pjv۪>d5j$ ((I!Rִ2iR׊5! 
p!΢PT^X\1YڪKw߸|T~Ƃ36*muy\t;D .2/* z MR)n\֭1i$ >հ?edp{J3S< #bYj3f={vgϞ?ý; N!$3)ʉ3++Z\ =Wxbʹςn&K V~CYܣH6\$fÙh w&55!yN|f:6K:4K-':/#{N(^~=X}փZ,osP<;y#|3pk:}=ϰXo-Ԛj0q&z'{i% &2鯇ee# #Zm[ٰҾM{7W}Е2;._kkOhsiކ<}hgkCQn[:Է|Xa(Ñ5vj~zӸMkHlx8צ-op§#| _'|ф{M﫴;wH88f̘Y|ׯޫ }i@B'K/M >sG6`C5_ p5mKfX~ذ鷗mGʪ:;phW)Onlp._Ǝ [=D<""ٙko`,"]䨋' > "\*yyƻZ,uoЁO_H =6sG߄ [gX}6 [}߿oӉK]]9w"|%I-J]0.!l`:mZIYY(cD 5$ւXGb>$6PbhtҥfA+Vh]9[RN1 x3s3c$&Dë#;7V*Z>\p릧.6;wťū./zI8(Bzmpoy|oJLY'*Q;>To߶Xqo" w6~ኧ'z^u<ęIHORwM U(֯5*L99dcݒ>H )x ~~FJ꣧ϗ/߶ tK?ݔC]&zSbvowY5袡_K3'֎z/_Z_ f`}Xg|#|gñZTXC_6nD-Gehb@S~߼79@_e\2=Gl1*kK/"*Ȓڂ _AQ@l1L1'b;{0W|˜^7ٻCkN,whcK|/K{vΖMLMM3M-7qv)pgXXk̋>zFl!.ԃ3\#F'jH b9K`Y5UD<̓݃u;`-JX4l&i;TjcQ=EKz#y燒^qދ%-}"[ _L:IgMk=Ws{os\bĘ۹ߺƈ 30 3/,fafaf4 30 x)罩Uvq#g|'1s~ڌ)铥}FfafdD9ϿtbǮh.}yٛ|D Y 7Vvč-אM9ѢN3̙;ݩӿS~4򣏦L!w4L4⃡'|7mfaf߉jrb9\uma!@lD8[iXw&W%.LIrl0+a`nZ :*UkS.l[\7\9e1*ͬߋ˖/fafZl[OJ,aⲊ92c<.kgK&W%l2OZRӕ ] tY+QXXtmʤi̩yD5{ʹS5+:_G2Kީl)3899$fafgZ,!k2FO 2 VVpswøI1u ,~ +ڶp5quC3+ؘV_[3]cǂӦBy Ζe/,,XA.*VJtz=W90ُr!Ա(//JU;t٩s"}yruoa!-'^ 30Ì*M(\s">Y1vaxB xD}Q50{GA|agV%6q݆`sv+`*sGvX=#0i%N9c]{"OsqLPD}?F^~V6jqigWר[,=1ɿvo#pWqy읯{:@}&M.۾yaf񟄿 %a8<7'RND+K'0 eɞg<WK `ظ!>,rjbbeqWI=0&NB"_E<1ľ7 `Z֖#݆B.Em8XnP,-W収okBdwuXyyy30ÌC[j̆mػHܗqmq_\[{BsK[{&bq(.bX *qUgyPǝgh]qJ>ݝ5?Jl%g7wFΝ};7o'(k5Ƨ6 M"l޼yBf?-!0 3kё'勗TrrWǺ%}dtCARHڳRQiclb*'CdҮ0F*ף_n޼zI[.b8I\Sm}@^k`gsΉ#0'29)o/'%FߕT0U]vn!;Z^30 3R(8γOCnu+zݱy{^>&jfaf&0afa ?IJ\-sIENDB`open-build-service-2.9.4/src/api/app/assets/images/images/000077500000000000000000000000001332555733200233665ustar00rootroot00000000000000open-build-service-2.9.4/src/api/app/assets/images/images/ajax-loader.png000077700000000000000000000000001332555733200313662../ajax-loader.pngustar00rootroot00000000000000open-build-service-2.9.4/src/api/app/assets/images/images/bg_display-sidebar.png000066400000000000000000000140741332555733200276260ustar00rootroot00000000000000PNG  IHDR$tEXtSoftwareAdobe ImageReadyqe<IDATx0 m6;"!fK$?&FHįTZ%<Puu]z XcBŸG‹L6Bq1bHi=&,<6;'ב{yͼ۷}F^"zdצ'EW6HIմ@iHxτoV;+4ȅ;g}‹~4˅>kbsZn OW,<= :"3m9<&GOvbUK<*%m1;>K2nZF`zو`rp+^]'8Qg-,|mge3^w-ٿ7Q]kTӚY=9 Q*kW'xD=zn8zK~SZvZ ^G:,aQ^on^ս!7۸C'ڟ7r]ⅾ.Yqeckz_L zԀ{j=L zTC/?Sp~758 =fj G9WSY3ơ.t3@^,.綜M l#G| ΂;q@za]AӠqMAuG6҇9t7r@r ҇*@C*7r;[n0VH/4L#˿o^W~d, t/j:uЃ[3]V>_LtQ9.C(t7M}~}QW?>ރ5f,=o{t#twY˰L頗ϿV{kƿl1#t>ۙwK͜zAW]*|>zteIziM:5LM9f:Kw3]Vk3ɲL;vݲ.s:LbV~լF:ݙ~,O^2}RmY@աwK'/҃ K{c#z0Oˢ%5LtN9+зmh%4Ӈfzsfzޙ^ڑMwMYazޙL/ޛYL;ӝeiӝӣmЙeiht7QZKzBg.zXzt۽ tY:zϏKn@PSULl 2y $ Y P!.۫zT࠿^{!=˲ht0tHv.`6~TFwct с=Cnt; atW+&hVKntР׭ ~ =:]CbЫ5:LtA'jt`tZ?gt5:$_=0mtjt^v;AOkt;^w!xtpqcaFt zۣC^4:h ѷOAF6GF` 0u!|to{!FwH zeY҂CRAhAp(a݁yFwi}Y;FA?Aa.6q)A׸ލntFW?,B>bkݣ :$IC^K!;~Tt0!-S!0_BG z]C`GxA[F7ChWGw{t ~Ttq?StH znt !1`=z]=:qInt9ntĠ~5ƒntѽ!чktHƒM-0A;ƒnt ޏڣktt`kсNh𠻏svS $KZt S(!HlE70;%n|L>&/>&5:ht dGtH0ݏt@Ž. 耠vt@{=OЯ>&Tс}#q$4:t6NAInGhtAt`tFNAr;:3AxktltW`!4} !|tAwA8vtH z0!5=Nݡ.1 A_G :4o >\CxѡA5hktэРK;u&;F_j&;5:HѝFRݎ^[]!э!ttqz;5w90>t;:7z C.uOt=^/,z9:45:} X}LFwэht/W=G;:iЗV7C^]!yG7C^htHl5:$}ir;8 ;F+1~GGw;:42 Gw;:4 BGoi; ARF4: 耠{ƒ.o vt@ЁssO^gFw(E5:;p} Fѽ]!)wAF_:N!xG G>&5:с!)ntF;F]whvtװ 5ath7;}5:A.^ :$]CpЍ`G ѝC^/У>&ltנQݡA;{8{MUСÎntࠗ5H!|tw@B!qGwaSwmB}ތth0;kthtCx>~ϧF;`t4:Awa ,ht`נ$*4htנA ,}>~4:7{Af4:78hBkt!nt;e;>~Cx5:}thNݡA;u&;ѡG!9@|FFF;:0ktActt[Y&;耠[tAߌ{^Stqdtp-@{Dt(}f޺W5H`vS#e0oۏ!<J?8?@?tAC|:zO@rC?5tH W#wCCm裝@xA@j;7 t#wA:XC& ݱ5t 8 ;ܠ:z@nS(зm3r@!n;ܠ@5Av~;$ s1F.w bn }h\CԆ^@rCo#w zܷms;$zmn }܏@ )W-]@`Cvގ@ZCikЧ:5 ,秬U[CwWkFk m;5y쮡@XC[C@>ck轝k}3bW@PCzj砻掭M@>k6@XW0A^w@hzwzD0nA /;],!j}n;5y|Lo#w ~tͱ5 [z0r>tw@`p!lۦ@B;! ')NCz_'?k:4~j:vn;$4a~ʚa 8k=Oݦ8 ڼ]C{~&.w t }W >^_wnp-Bu l\4. skp@_e O}~Uck} ը}j^C@? }2XCʁ  WM l\C^k1{LU}k߻V@H6@PWwl nWkJ:HksA~v. 
/@/z||~.:Э@HC_7pނ^Ct@%=P 4t+}4Gc 9wG ϟim 3 v |ux@B:\=Ы?==]}ܿ]5{C}|p@?>7w#wHh#Mq UWj:⣭ s:\5WWv z s (.i5tjg^:\t5tpݛ9Wo  (Wh !{K?ڹ;5}^Co]|5t }:$zj:| ύ:zk}굷*Ϝ W]sl T;a>~Rzu (}צ8#wi轥ut BW)};]C@P BG> Wut ; }o;]C.wt }XCXC@@`GלCІϡWni ݦ8Q2B- ~a>Ԇ8k+] Mq:;6~. ,srzE)}0r4 u#wov@xCwl nFSrІ^vkCkǦ@@ }? /b4tHmh#t .w1r}9m7r ҆:zCw;7zH] 7# n:h5tHnFp@[@w ^7 to6@x;76r}[@nm@?` ;|?w-IENDB`open-build-service-2.9.4/src/api/app/assets/images/images/forward_disabled.png000077700000000000000000000000001332555733200351242../../icons/forward_disabled.pngustar00rootroot00000000000000open-build-service-2.9.4/src/api/app/assets/images/images/gradient-dark-nav.png000066400000000000000000000005571332555733200274010ustar00rootroot00000000000000PNG  IHDR 2JctEXtSoftwareAdobe ImageReadyqe<IDATxbTSSc@,CAaX @(A vR N x@a) P@q2 Pj%Qj;P9)5k <a\ C?;s "@*VWLpLjw@;k=!0!;3C`$h@: 1{IENDB`open-build-service-2.9.4/src/api/app/assets/images/images/gradient-light.png000066400000000000000000000012611332555733200267760ustar00rootroot00000000000000PNG  IHDR dtEXtSoftwareAdobe ImageReadyqe<SIDATxb|##9 @,. . NP@t 3Ds*9v<C6BR NH$S@A| #a3AH' )&&/,Hx)/Á,h`8(J8, 8NJ ntJ `b ॏ c)pjF\Hf8h_ROإ `kfEv@2̃[m)0MbEc}Q8BvmAC9 2⃎y;GЍ=nxIh $6=UFiAzeH)%bMlO|Hbt(Gj2ɉr b Ю°4D5% uXnOb)y{_KА}:@L`8\]Ȁkhda#„W e@6j LP5"mH/=1'C:NphZd3OF<LY6Z {Kc -QS,S D}ԖuIENDB`open-build-service-2.9.4/src/api/app/assets/images/images/gradient-medium-short.png000066400000000000000000000005651332555733200303120ustar00rootroot00000000000000PNG  IHDR 2JctEXtSoftwareAdobe ImageReadyqe<IDATxb6m`b9t-tCt#tmtUdbkd-?=B )0 @ vpAg[IDATmбJQ̖@TBE] ڒ,J!ȾӐA_2-PVV 2PY>^t2tngu8e 4tW@]fȮ\]ԷF1Z(H sqfT"DIJ?Ǧ"Ū\.]]GDіj?P t!i8^=2.(|Z*><?m3Q^i%tEXtdate:create2009-12-01T17:10:46+01:00?%tEXtdate:modify2009-12-01T17:10:46+01:00S IENDB`open-build-service-2.9.4/src/api/app/assets/images/images/unfold-indikator.png000066400000000000000000000005511332555733200273460ustar00rootroot00000000000000PNG  IHDR tEXtSoftwareAdobe ImageReadyqe< IDATxb,,, a``X$V1bP E@@ 0X$r&@;0y`w@oUx"Tb @p!3@ *lc+9DAPp3"i Aa2`"A .Z`NP ?q+B@ 8 ddj#dA3,P](>~Io9ҒFHB @i58DcIENDB`open-build-service-2.9.4/src/api/app/assets/images/large-loader.gif000066400000000000000000000062101332555733200251450ustar00rootroot00000000000000GIF89a B;ӣӠƳڱid}XRD>! NETSCAPE2.0!Created with ajaxload.info! , IiabK$F RAT,2S*05//mp!z0;$0C.I*!HC(A@o!39T5\8) `dwxG=Y gwHbvA=0 V\\; ;H0t%HsrY'e$"\#E1CnĎ~J,,AaUw^4I%Pu Q33{0i1TGgwy}%%'R  = 3G%p0 JRo5Ȇ0IĦmykxT_}(^yKs>i_%n=q4e-M¤D! , I)*')Ed]PR A:!zrbw %6"G(d$["JFhaQP`p%†/BFP\cU ?TtW/pG&OtDa_sylD'M q tc b2DM : d% 4%s) uE3 YUtږD$JiM^%o/rvl9'L;99% i9 C "B BDs ^Xf}$P {L?P O4 E咛V$dJ#)pV$! , IiRͧ"Jd] RZN*P*;$P{*N\EА!1UO2D _r6Ib H8 B; "'ZtbK#C'Kw}?Kiz6:xKAC&}9tz\ \D5;x Qd( KW  MBIڈM=ˤs⸽8DaJ`@LG! , IiRͧ"Jd] RZN*P*;$P{*N\EА!1UO2D _r6Ib H8 B; "'ZtbK#C'KGziz68}z~%XK9:0}% tz\Blc LbQ   lj ųKň x(țPX ,ւ|/"! , IiRͧ"Jd] RZN*P*;$P{*N\EА!1UO2D _r6Ib H8 B; "'ZtbK#C'KGziz68}z~%:A/ C} u\ h}b D]=  V)  ڊ9CDK Ku *00StD! , IiRͧ"Jd] RZN*P*;$P{*N\EА!1UO2D _r6Ib H8 B; "'ZtbK#C'KGz z5 C: A/ C}u\ Eh}b6[=Wx&)I9Ԭ@oCT?Kd]B76ЫD! , IiRͧ"Jd] RZN*P*;$P{*N\EА!1UO2D _r6I ƀH03hոaj U {CIkmbK#cK8 {a8nV:/q:M Cu~Ehk6 [_6P.]6!)V! 
, IiRͧ"Jd]U RZN JjN2sK6 dI)  LHWG 6 KX젱.6d~zhuur/6 X5I;_t O#E {O9V94;VC/ 6Ø~*'MonbX:~]+V*mK_OrKN@.d~qЦDB֋ 5D;open-build-service-2.9.4/src/api/app/assets/images/link_ext.png000066400000000000000000000002551332555733200244460ustar00rootroot00000000000000PNG  IHDR6!PLTE3fPtRNS@f pHYs  tIME Ub$IDATc``pqabX PDuIENDB`open-build-service-2.9.4/src/api/app/assets/images/nav_gradient_dark2.png000066400000000000000000000005571332555733200263620ustar00rootroot00000000000000PNG  IHDR 2JctEXtSoftwareAdobe ImageReadyqe<IDATxbTSSc@,CAaX @(A vR N x@a) P@q2 aǎXtq#bd2%`>! s<wK@&A:(zk;%8]ܫL`2޵Z UWirnT>US `^!Ī7@/g$iN4I{,JBW!2C8շN̂)$y 햂@kY|s*')9kZH|ֻ$2 oR)(&iSh$g!2YLS[Y[9+0L555KJJVBD(կ~i___  5UѱtB$Ȳ\_.^i2̢(NEуG$ !T<4(Ty H79K(+^{xc9PL%IڵkӑH۷oӼIpɔd6mZ~oXs+W?Od2"ZG}۶m `@L/c=?y ӡ"Qـ4_$, }}W?Ts}#!*۶m{?O2g&N)U5of`; 톷g۷Ӫi~G6ly=0K`{Q*k׶my i7lh+l6ACAA  }׮LFXXl&RQy~@ r7囃j skWZb3M@un2PU]I/_(No|FOMRBסlM{{4lٲM{qh4̅-!e,[;<̷0W2?uxn3`-3g_cTUW9`KrxyܬV]p~kkku:E H,ZhnP-<_ݮf9}d06:3Ti!0@^z@h ا6`0/CCCH$s7Pvn2<[f8.0!"[=Ou6WTsдW`0C)Czu6?NBPJPPu׭Wdkq!~=]]'`0EQv8zۭoq׬WR.4lܸVK\A)ʏ/ -OqtZtK949?ҍ7~B1 /J)Μ>}{HG':r|'uV缞Vlš5kd2(2љTNB_rq__5bV %ՃÖ=tAP",\pfbřͯG%H !r<1 X?'僬677f DW'\$۷oAqqJj J!L"JAVdpt:M@à7jjsسg/:4<88x@? iE$U;vd2dgYڊp:Q!kl(++AH e lͤJ)UbX|||<; :4Xi\nY =? Ot$9Ɉ|mUȶ$qyfL +2TThSݧ)/@OG ---kXb8(hw]cyޢ)\nCs1x4E‘{ӧP37Y{.i'q\r"?8  0סwM 5`0,gxƒ:ˡ1SSS=@X@L&K"P(du8f[d!Jף& - § )DR϶mk׿+H 6^(XLb0AcdE0p H4Ӽ㹙!p _Xaׯ^޹2+Vhp8&W/0ws&UMXP`\ŌBM#&N<ٻvUDS g$pQKo _P\Εb^c$ܹsW@pdd$_qZE# 5**裏>~-))Yp :1&P  8Xi!;[ nR{morȳNYfœ  GZ]SPpG69Nvttp800`<\_mM\BN{6ut{@H:mN+s(JD"9.2` CG)ӏ g/G(r%vvvvQR/Шwԩ$C3EI@#GU w>ȑ%%u[QK,Oq᱑zjI(eSYأ8x|b=y򤖊Ο?_g=ZIh^45XF^7 1Fq@qWWKO>81F)>ܽG9vO8E;][ ktw#'N| iɓWkZr^ߐ$.ĉ=`nVDޟ]̥,pҥKPxttt#>~ `nPvOApZ-ע%nIQo~k.d)Mf1T5EXp…/./++3TWWn 0@s$UсeJX `likkw: mٌ` a \Ą<44꺨(eOXi4 Vpu?իW~mHp (Hĉ'Ǐp LF$o <-`yyTful6lB bFիWѨ_ezYYGu~`P@:xwy^ᰚfNǓ###!7 E\Ӫfi4 +l6[Ssssb1 fXrRL8^x񪺁![yqvK vر3GAbR1 @e TUY tۮi|`,#*Ԃj)*_ z0Ep(Ryj\*g91@U93=[mO{;H6u'TfQrhsѬfAEeVI#~9| l`nʀF gW#`I0u֜mfuZgF 5ՠXTٛ4i[IYS(9|o:(MZ y{ͽU5~ӼoKjMf/(ϗ\n`#υIENDB`open-build-service-2.9.4/src/api/app/assets/images/play.png000066400000000000000000000032241332555733200235750ustar00rootroot00000000000000PNG  IHDR00WbKGDIIDATh[lkgg7Y;7),Ѥv(N且n|4HHHH Q7a4 X%ԪEE  |]vvw_/fֱUz5T꾫_;9}YgI_ `TގwyHn0 hZD74հ6nf,T <\ӯ-B@m} mObB&,K+{X!@Yi@74\~t݁~_|dij\SErGGG{k[ , 8^!<hsEʲن ݽ׼qcK_xdmdS4M ̊ HDXU+1DzeE n;LD"i+O---PtJjD oZLM[n٣dFGoG]Z[[>|`Z 3Z6 nhe;v[n}G>TJnll4\>k+g?{3OOw/`0G0m=SGA]ghvppp3"bmBr4oH`f&v6ԩJOOtC}avS:Nv|sζmò, ]נj%Z 5O0sut_u>bBa)DYV,4 8 'Jnʁ* 3 P Xf'&khht]/ ,eO:pG q0f|~ֆ0/>\lon:thf%dq !Fk47CYpl  ,ٳCDS.\_}$)rbޚ@ B{7nqL bԄ$I3G^7o~n\Qd29ID+z!El6"3CSmIo<20Yd"Og۶1>|sϟd"(Ѳbs]Ȳ-l$PgK۳&V A@ ǟ-}ŅK7ݸ偾~bqYԏ k"%Y3oÏaNIENDB`open-build-service-2.9.4/src/api/app/assets/images/progressbar.png000066400000000000000000000004721332555733200251630ustar00rootroot00000000000000PNG  IHDRBtEXtSoftwareAdobe ImageReadyqe<IDATx10 EMT'E`b7`fli UۑlX=ljaY3kr7EȜ.+8o竴E -֞u#í7j>j[$b{gUanpf —Z2vw!Ip]Dp]Dp]Dp]TK@>5<}Aׅ@ׅ@1c]@ '"hBT_@ے$>]]IENDB`open-build-service-2.9.4/src/api/app/assets/images/progressbar_bar.png000066400000000000000000000005761332555733200260140ustar00rootroot00000000000000PNG  IHDRBj̗sRGBbKGDxj$ pHYs  tIME &k IDATH-@]ZʏM@PSZo2#@R'o֤< ʲd<vtYiC6@wZ P XjI14^:3A, g*DDp~[,X8@ἓ;@%O@ _[,X8o p+p`%yK (w?vUˌ<IENDB`open-build-service-2.9.4/src/api/app/assets/images/radiobutton-checked.png000066400000000000000000000015371332555733200265530ustar00rootroot00000000000000PNG  IHDR(-SsBITOPLTE4q>xD{EsEuHqHuH~IqIrJtJLxNvN{OuRSSTxTUxUUUVzVY|Z}Z]]`aabcdgijkllmprssttvy{~‮⁩ۂބ߇抢Ë⋸鎺ꓣ𝮺еҷռپ»üžƿzN#tRNS,xd(IDAT K.AUE&:+3 ]Zbǂ#,E$WȘiZ|-! 
}&ƨ8"pԏ=a_.TJoXmd8B@D 2VJ]Y(bKD=a!E.+5kp~ӌ(a3S:l6{cR]=D,ܙ#FHOTK8j>lO1f IENDB`open-build-service-2.9.4/src/api/app/assets/images/radiobutton.png000066400000000000000000000013441332555733200251630ustar00rootroot00000000000000PNG  IHDR(-SsBITOPLTE̼»üžƿ&#tRNS,xd(IDAT;N@zzlo6qon@-  #=ԼǴ@!4VӒRAHZmHm*6or錂O=EG#Yjނ)rVao`1"ZEl: ؐoݝ]i9ņff ~\Zc!CO x?@t]*4~׵pPiVuIENDB`open-build-service-2.9.4/src/api/app/assets/images/recommended.png000066400000000000000000000050371332555733200251160ustar00rootroot00000000000000PNG  IHDR00WsBIT|d pHYs B(xtEXtSoftwarewww.inkscape.org< IDAThXilT}ۼllcCㅘ4!M@j%&4IQJZuS(ԪUQ[RH dqTM#hR"ʂ3̛a24E=ћs{9¾hWN=J}IxۿV.e#$h{fFt)򭀎խjzj˫epSO>E)mG4]w?^ڶܾB :n}[S#$#_i$=inj,el^VEe;y3W%}Խ8*H$=K N~G0di|+Nz{ٲ({!""h>i,i*9رzAhn FV 0 Y iPWaKc HN򡢪.O%T&3Xv |mt,u'X%tɒbk0b #C$H'1 23!P߶z dLUupfᓦ4Ҫs;}YZsH^6*#p+rHR>rHbΆLHBS8;ЏT "e%ض5 bp8]9mNe1M3Ls}}r]ph-lMY7PUTx!7>?905J8Uאt5J<1Kx41, B?.]y@XUtU`LЗh%Ο@M&)GX)B 5=z׶ou2Ii I Ձ}QVN95>U[Mb`vKN]  r6p1:: _PBVED ȃ?_({2i-$B'0si1%l! ,G HC)`$C $j@Ģ(`ٓD2 H3!% XB@QxGMtBQEa D`QPb"KU1)eD`",,QHQ8$jip(' gPΌ/le _,6J80 ! ,G HT4E3E HbqB8Ƒ` B3 P$MH,:@Q],%Ǣ+4%3`p802ňiċ/<|=h)E@Q (43X@4ʈErяKpXxT`@! ,O H TCRx*Ѓ3 !f\F V` AB09hb@SfˢAfow&5T5_HУ;Q*h1:&m=u$&>!-^śoR!VQ[i$@p-j3]#zP!љwik@dRSeͣb2X F:c2ؽJzl~>mBfg5;RᐤZ Yy`*)#9:\ cF,N/I뇫/](S;*Xr]Az;>9 .%хb&-M9qvX "fqpr@ 'o>FFm{xBy}Fcmft5_̪E2{p@Dxf@M]䢧 ߌ+1\țlwΤ@ el$@f~U 'x&=yZP"6NOȳɺq"YxoG*Efe}ZYQTR櫍Sq}( i }fZZ" >Q?f2Hr)2Sdn'"h ~"pl~zHCn(C7oquY. gS y^D$Xf* J!o"$ v~ .q?L8.tCDxLND68>Y׋Z. >qJ˷jȲS{BPקx\!2[T}44^_y|3)Euvrx0qt!}aLp___T#,Y`G.{-ZC*d _ 1 ^d:c/ 集_2`fQē&S 7V(/bsj@ zVGHlDٴ6j@odt$%lvZ:™ƻ W{,֏@a[1qjԘO[߳!׿WʽD?!I?kFY'" h=|0]+p" ]$ mff/´LbZw6?7{Pݣu@;7  1=zXNo4'HA ;q.ͭ68U_amPV wQ,_'ꀰrm8tCt 7  \.uZ` (ӳSA/Ģ(wި?3XdXd~ @\[-\  z>X k cO45[U5N]xpa蕟f7Y)Z,Xނg-S6th P03P .D*4 ΤKB@($&/ߟ(І4wnb8v@ҳPy~Q+.\)W%8'P*]T0lRYpŪ!d *k8]vt 5Hm|YTc4f4{p%*V"cñ"k^ <0?^0_m`J`hʬOYs.Ղxc0D&HNvv2ۙstFeU9UKU%ި'Ȥ5_p +>LJ\Q{5uZ kvmU&v+dѸi={OTRX5fWeU  H쏳<kl_@ a6D祫HUOg^Wd>.}j: |/"5^iLhIENDB`open-build-service-2.9.4/src/api/app/assets/images/star-widget.png000066400000000000000000000032721332555733200250650ustar00rootroot00000000000000PNG  IHDR0_WbKGD pHYs  tIME 2mGIDATHՖ[h쬴ZٺXbPT"j(`0&RbWib?7=M4@i M*dqXuE$J%U$f/sr6[\HB^|]o.F2lD"I( OMUUB RJ !̌gv?圧DZc9BQPJ9c 1&}߷9i:00\=NK4 RJH)!4Mr9977]ZZ`bܺu+Jc}[TJ۶e2, TN8nb(cp]nBxBũ{{ 1J)R0@J 0{y0 p|uuU4ź94T HH)d2|0|"8s!(Ҽml6{}||7n<8ۙLfhssS3kO F45 wgN;DaXuO4T . Xky .P1]A b!5R3ؘ:{;:@;,"=j^z(o8p^)㲇Ό_"\=LG=Ai 68*Oh>Pq`s8ҥb#p%­(.4R TBrCs|6Ch@dj:9owaDG9 * j&Co[{ '7!] _ j|gjTJDm f*&p߱<({XPsT! ϟo Xn|wW<"fΠi΃'Lf+Ws ?3췩wܙ>ƒ+wZ$@s^~sZ0X#51|xj5U_Ldp1v1/z|xwB(;m,Fk̃ TtP|x{}N8'O~z&oZhBE+CH.9Q (p+`.&y''uEfe,`h&bZ4E]Yq_Do& ͣkݴo.]"gru7^y_<8ojR+By܇2P+X].ޚ?bICL[ݕV@+.~-r2US1PcS B< ةnCB { Ajh3;[)_:K@5}<𹏿me, h97},@4A$|a_MÍW?o#W/LvB6IENDB`open-build-service-2.9.4/src/api/app/assets/images/transparency_grey_20.png000066400000000000000000000003121332555733200266630ustar00rootroot00000000000000PNG  IHDR szzbKGD pHYs  tIME  : DZtEXtCommentCreated with The GIMPd%n.IDATXA0j%X'z4L@@@@@@@@@@@@@@| IENDB`open-build-service-2.9.4/src/api/app/assets/images/voting-neutral.png000066400000000000000000000036601332555733200256120ustar00rootroot00000000000000PNG  IHDRH`4sRGBbKGDC pHYs7tIME +; 0IDATxoi?؉'6BbŦMą+Z*VZܚ?(fpG*e!i4q8̼68v44?}y'"Ok Fu`' + 3G#o_~ E7N:]^*/)_6} uAL2I&mLӤT* h4\.n6sL&ӹz@68/(iX433Ra$H3 2̩w4`u>:r Vm]R !)WTҲr**0: 1a%N5x6ttum)Hx^` Q)oi;~¨>4M\ۤ!HH3x$sy~K=@^Z{$/\+}ygrj!f$9Æ:аw:l/"/za/\AN 0>7༂4\ oDH"r@F<1H*D"@8 u_H)*_)h3t]4erE_1h2h._5 ˲pgY)JV=::ӌ|:^ s_>,Ykg9d:G?wZ޶K>Fsi&* ahŖ[Tb(ϥRXbH #Ҷ4mMOOwvBӧO >rGd|w~Dz^L&Ғ,}AaoO>ED >|gqr[ M3O_eL.2ɗYt{s<޹&']vJӴ. 
open-build-service-2.9.4/src/api/app/assets/javascripts/webui/application.js.erb

// This is a manifest file that'll be compiled into application.js, which will include all the files
// listed below.
//
// Any JavaScript/Coffee file within this directory, lib/assets/javascripts, vendor/assets/javascripts,
// or vendor/assets/javascripts of plugins, if any, can be referenced here using a relative path.
//
// It's not advisable to add code directly here, but if you do, it'll appear at the bottom of
// the compiled file.
//
// WARNING: THE FIRST BLANK LINE MARKS THE END OF WHAT'S TO BE PROCESSED, ANY BLANK LINE SHOULD
// GO AFTER THE REQUIRES BELOW.
//
//= require jquery
//= require jquery.ui.menu
//= require jquery.ui.autocomplete
//= require jquery.ui.tabs
//= require jquery.ui.tooltip
//= require jquery.tokeninput
//= require jquery.flot
//= require jquery.flot.resize
//= require jquery.flot.time
//= require jquery.flot.stack.js
//= require jquery_ujs
//= require dataTables/jquery.dataTables
//= require cocoon
//= require moment
//= require mousetrap
//= require peek
//
//= require webui/keybindings.js.coffee
//= require webui/application/bento/script.js
//= require webui/application/bento/global-navigation.js
//= require webui/application/bento/l10n/global-navigation-data-en_US.js
//= require webui/application/package
//= require webui/application/project
//= require webui/application/request
//= require webui/application/patchinfo
//= require webui/application/comment
//= require webui/application/attribute
//= require webui/application/main
//= require webui/application/repository_tab
//= require webui/application/user
//= require webui/application/requests_table
//= require webui/application/image_templates
//= require webui/application/kiwi_editor
//= require webui/application/live_build_log
//= require webui/application/groups
//= require webui/application/upload_jobs

function remove_dialog() {
    $(this).parents('.dialog:visible').remove();
    $('.overlay').hide();
}
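// setup_buildresult_tooltip() below wires a lazily-filled tooltip onto an
// element: it first renders a "loading" placeholder and fetches the real
// build result from the given URL on first mouseover. A usage sketch (the
// element id and URL are illustrative, not taken from this file):
//
//   setup_buildresult_tooltip('build-status', '/package/buildresult?project=home:foo&package=bar');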
function setup_buildresult_tooltip(element_id, url) {
    $('#' + element_id).tooltip({
        content: function () {
            return "<div id='" + element_id + "_tooltip'>loading buildresult...</div>";
        }
    });
    $('#' + element_id).mouseover(function () {
        if ($('#' + element_id + '_tooltip').html() == 'loading buildresult...') {
            $('#' + element_id + '_tooltip').load(url);
        }
    });
}
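// Fills blank login fields with a "_" placeholder right before submit, so the
// form never posts empty username/password values.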
    "; } }); $('#' + element_id).mouseover(function () { if ($('#' + element_id + '_tooltip').html() == 'loading buildresult...') { $('#' + element_id + '_tooltip').load(url); } }); } function fillEmptyFields() { if (document.getElementById('username').value === '') { document.getElementById('username').value = "_"; } if (document.getElementById('password').value === '') { document.getElementById('password').value = "_"; } } function toggleBox(link, box) { //calculating offset for displaying popup message var leftVal = link.position().left + "px"; var topVal = link.position().bottom + "px"; $(box).css({ left: leftVal, top: topVal }).toggle(); } function project_monitor_ready() { /* $(document).click(function() { $(".filterbox").hide(); }); $(".filteritem input").click(function() { toggleCheck($(this)); toggleCheck($(this)); return true; }); $(".filteritem").click(function() { toggleCheck($(this).find("input:first")); return false; }); */ $("#statuslink").click(function () { toggleBox($(this), "#statusbox"); $("#archbox").hide(); $("#repobox").hide(); return false; }); $("#archlink").click(function () { toggleBox($(this), "#archbox"); $("#statusbox").hide(); $("#repobox").hide(); return false; }); $("#repolink").click(function () { toggleBox($(this), "#repobox"); $("#archbox").hide(); $("#statusbox").hide(); return false; }); $("#statusbox_close").click(function () { $("#statusbox").hide(); }); $("#statusbox_all").click(function () { $(".statusitem").attr("checked", "checked"); return false; }); $("#statusbox_none").click(function () { $(".statusitem").attr("checked", false); return false; }); $("#archbox_close").click(function () { $("#archbox").hide(); }); $("#archbox_all").click(function () { $(".architem").attr("checked", "checked"); return false; }); $("#archbox_none").click(function () { $(".architem").attr("checked", false); return false; }); $("#repobox_close").click(function () { $("#repobox").hide(); }); $("#repobox_all").click(function () { $(".repoitem").attr("checked", "checked"); return false; }); $("#repobox_none").click(function () { $(".repoitem").attr("checked", false); return false; }); } function monitor_ready() { $(".scheduler_status").hover( function () { $(this).find(".statustext").fadeIn(); }, function () { $(this).find(".statustext").hide(); } ); } function resizeMonitorBoxes() { /* needs work */ } function callPiwik() { var u = (("https:" == document.location.protocol) ? "https://beans.opensuse.org/piwik/" : "http://beans.opensuse.org/piwik/"); _paq.push(['setSiteId', 8]); _paq.push(['setTrackerUrl', u + 'piwik.php']); _paq.push(['trackPageView']); _paq.push(['setDomains', ["*.opensuse.org"]]); var d = document, g = d.createElement('script'), s = d.getElementsByTagName('script')[0]; g.type = 'text/javascript'; g.defer = true; g.async = true; g.src = u + 'piwik.js'; s.parentNode.insertBefore(g, s); } $(document).ajaxSend(function (event, request, settings) { if (typeof(CSRF_PROTECT_AUTH_TOKEN) == "undefined") return; // settings.data is a serialized string like "foo=bar&baz=boink" (or null) settings.data = settings.data || ""; settings.data += (settings.data ? 
"&" : "") + "authenticity_token=" + encodeURIComponent(CSRF_PROTECT_AUTH_TOKEN); }); // Could be handy elsewhere ;-) var URL_REGEX = /\b((?:[a-z][\w-]+:(?:\/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}\/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’]))/gi; // jquery.dataTables setup: $(function () { $.extend($.fn.dataTable.defaults, { 'iDisplayLength': 25, }); }); function change_role(obj) { var td = obj.parent("td"); var type = td.data("type"); var role = td.data("role"); var url; var data = {project: $('#involved-users').data("project"), package: $('#involved-users').data("package"), role: role}; data[type + 'id'] = td.data(type); if (obj.is(':checked')) { url = $('#involved-users').data("save-" + type); } else { url = $('#involved-users').data("remove"); } $('#' + type + '_spinner').show(); $('#' + type + '_table input').animate({opacity: 0.2}, 500); $('#' + type + '_table input').attr("disabled", "true"); $.ajax({ url: url, type: 'POST', data: data, complete: function () { $('#' + type + '_spinner').hide(); $('#' + type + '_table input').animate({opacity: 1}, 200); $('#' + type + '_table input').removeAttr('disabled'); } }); } function collapse_expand(file_id) { var placeholder = $('#diff_view_' + file_id + '_placeholder'); if (placeholder.attr('id')) { $.ajax({ url: placeholder.parents('.table_wrapper').data("url"), type: 'POST', data: { text: placeholder.text(), uid: placeholder.data('uid') }, success: function (data) { $('#diff_view_' + file_id).show(); $('#diff_view_' + file_id + '_placeholder').html(data); $('#diff_view_' + file_id + '_placeholder').attr('id', ''); use_codemirror(placeholder.data('uid'), true, placeholder.data("mode")); $('#collapse_' + file_id).show(); $('#expand_' + file_id).hide(); }, error: function (data) { $('#diff_view_' + file_id).hide(); $('#collapse_' + file_id).hide(); $('#expand_' + file_id).show(); }, }); } else { $('#diff_view_' + file_id).toggle(); $('#collapse_' + file_id).toggle(); $('#expand_' + file_id).toggle(); } } // used in testing function select_from_autocomplete(toselect) { $('ul.ui-autocomplete li.ui-menu-item a').each(function (index) { if ($(this).text() == toselect) { $(this).trigger('mouseenter').click(); } }); } $(function() { $('.show_dialog').on('click', function() { $($(this).data('target')).removeClass('hidden'); $('.overlay').show(); }); }); $(document).on('click','.close-dialog', function() { var target = $(this).data('target'); if (target) { $(target).addClass('hidden'); $('.overlay').hide(); } }); // show/hide functionality for text $(function() { $('.show-hide').on('click', function() { var target = $(this).data('target'); $(target).toggle(); if ($(target).is(':hidden')) { $(this).text($(this).data('showtext')); } else { $(this).text($(this).data('hidetext')); } }); }); open-build-service-2.9.4/src/api/app/assets/javascripts/webui/application/000077500000000000000000000000001332555733200266235ustar00rootroot00000000000000open-build-service-2.9.4/src/api/app/assets/javascripts/webui/application/attribute.js000066400000000000000000000003271332555733200311660ustar00rootroot00000000000000$(function() { $('#attrib_attrib_type_id').on( { "change": function() { $("#first-help").hide(); $(".attrib-type").hide(); $('#' + $(this).val() + '-help').show(); } }); 
function collapse_expand(file_id) {
    var placeholder = $('#diff_view_' + file_id + '_placeholder');
    if (placeholder.attr('id')) {
        $.ajax({
            url: placeholder.parents('.table_wrapper').data("url"),
            type: 'POST',
            data: {
                text: placeholder.text(),
                uid: placeholder.data('uid')
            },
            success: function (data) {
                $('#diff_view_' + file_id).show();
                $('#diff_view_' + file_id + '_placeholder').html(data);
                $('#diff_view_' + file_id + '_placeholder').attr('id', '');
                use_codemirror(placeholder.data('uid'), true, placeholder.data("mode"));
                $('#collapse_' + file_id).show();
                $('#expand_' + file_id).hide();
            },
            error: function (data) {
                $('#diff_view_' + file_id).hide();
                $('#collapse_' + file_id).hide();
                $('#expand_' + file_id).show();
            },
        });
    } else {
        $('#diff_view_' + file_id).toggle();
        $('#collapse_' + file_id).toggle();
        $('#expand_' + file_id).toggle();
    }
}

// used in testing
function select_from_autocomplete(toselect) {
    $('ul.ui-autocomplete li.ui-menu-item a').each(function (index) {
        if ($(this).text() == toselect) {
            $(this).trigger('mouseenter').click();
        }
    });
}

$(function() {
    $('.show_dialog').on('click', function() {
        $($(this).data('target')).removeClass('hidden');
        $('.overlay').show();
    });
});

$(document).on('click','.close-dialog', function() {
    var target = $(this).data('target');
    if (target) {
        $(target).addClass('hidden');
        $('.overlay').hide();
    }
});

// show/hide functionality for text
$(function() {
    $('.show-hide').on('click', function() {
        var target = $(this).data('target');
        $(target).toggle();
        if ($(target).is(':hidden')) {
            $(this).text($(this).data('showtext'));
        } else {
            $(this).text($(this).data('hidetext'));
        }
    });
});

open-build-service-2.9.4/src/api/app/assets/javascripts/webui/application/attribute.js

$(function() {
    $('#attrib_attrib_type_id').on(
        {
            "change": function() {
                $("#first-help").hide();
                $(".attrib-type").hide();
                $('#' + $(this).val() + '-help').show();
            }
        });
});

open-build-service-2.9.4/src/api/app/assets/javascripts/webui/application/bento/global-navigation.js

var position_menu = function(button_id, menu_id) {
    var top = $('#global-navigation').height()+1;
    var left = $('#' + button_id).offset().left;
    $('#' + menu_id).css({left:'',top:''});
    $('#' + menu_id).offset({left:left,top:top});
};

$(function() {
    if (!global_navigation_data) return;
    var html = '';
    $.each(global_navigation_data, function(i,menu){
        html += '';
    });
    $('#global-navigation').after(html);

    $('#global-navigation li[id^=item-]').click(function(){
        var name = $(this).attr('id').substring(5);
        $("ul[id^=menu-]:visible").each(function() {
            $(this).fadeOut('fast');
        } );
        if( $(this).hasClass('selected') ) {
            $('#global-navigation li.selected').removeClass('selected');
        } else {
            $('#global-navigation li.selected').removeClass('selected');
            position_menu('item-' + name, 'menu-' + name);
            $('#menu-' + name).fadeIn();
            $(this).addClass('selected');
        }
        return false;
    });

    $('.global-navigation-menu').mouseleave(function(){
        $('#global-navigation li.selected').removeClass('selected');
        $(this).fadeOut();
    });
});
"image": "users", "title": "Connect", "desc": "Connect with your openSUSE friends" }, { "link": "http://planet.opensuse.org/", "image": "planet", "title": "Planet", "desc": "See blogposts aggregated into one stream" }, { "link": "http://news.opensuse.org/", "image": "news", "title": "News", "desc": "Latest announcements from the team" }, { "link": "http://lizards.opensuse.org/", "image": "lizards", "title": "Lizards", "desc": "Users blog platform" }, { "link": "http://shop.opensuse.org/", "image": "shop", "title": "Shop", "desc": "openSUSE gear here!" } ] }, { "id": "development", "items": [ { "link": "http://en.opensuse.org/Portal:Development", "image": "developer", "title": "Developer documentation", "desc": "Centralized developer's documentation" }, { "link": "http://features.opensuse.org/", "image": "features", "title": "Features", "desc": "Vote and work on upcoming openSUSE features" }, { "link": "http://build.opensuse.org/", "image": "buildservice", "title": "Build Service", "desc": "Create, build and enhance packages" }, { "link": "http://bugs.opensuse.org/", "image": "bugs", "title": "Bugs", "desc": "Report bugs using Bugzilla" }, { "link": "http://susestudio.com", "image": "studio", "title": "SUSE Studio", "desc": "Create your own images using Studio" } ] } ]; open-build-service-2.9.4/src/api/app/assets/javascripts/webui/application/bento/script.js000066400000000000000000000141601332555733200315760ustar00rootroot00000000000000$(function() { // == Login Form UI Actions ================================================ var formStatus = false; // true == form is open; false == forme is closed // variables to specify form dimensions var x1 = 0; var x2 = 0; var y1 = 0; var y2 = 0; $('#login-form').insertAfter('#footer'); // move login form to end of document var positionIndicator = ''; // Snipplet to get bottom-right position $('#login-form > :last-child').after(positionIndicator); // place snipplet after last element in form-container $('#login-trigger').click(function() { // display login form var offsetSubheader = $('#subheader').offset(); // get position of #subheader var posX = parseInt(offsetSubheader.left) + $('#subheader').width() - $('#login-form').width(); // calculate position for login-form $('#login-form').css('left', posX).slideDown('fast', function() { // slide down and set position in callback var rbPos = $('.position-br').offset(); x1 = posX; // left x2 = rbPos.left; // right y1 = offsetSubheader.top; // top y2 = rbPos.top; // bottom $('#username').focus(); }); formStatus = true; return false; }); $('#login-form input.inline-text').each(function() { // hide overlaying
    \n\n"; BSWatcher::reply($opresultxml, "Status: $code $tag", 'Content-Type: text/xml', @hdrs); } sub authorize { my ($conf, $req, $auth) = @_; return () unless $BSConfig::ipaccess; my %auths; my $peer = $req->{'peer'}; for my $ipre (sort keys %$BSConfig::ipaccess) { next unless $peer =~ /^$ipre$/s; $auths{$_} = 1 for split(',', $BSConfig::ipaccess->{$ipre}); } return () if grep {$auths{$_}} split(',', $auth); warn("500 access denied for $peer by \$ipaccess rules in BSConfig\n"); die("500 access denied by \$ipaccess rules\n"); } sub dispatch { my ($conf, $req) = @_; my $peer = $isajax ? 'AJAX' : $req->{'peer'}; my $msg = sprintf("%-22s %s%s", "$req->{'action'} ($peer)", $req->{'path'}, defined($req->{'query'}) ? "?$req->{'query'}" : '', ); BSServer::setstatus(2, $msg) if $conf->{'serverstatus'}; BSUtil::printlog($msg); BSServerEvents::cloneconnect("OK\n", "Content-Type: text/plain") if $isajax; BSDispatch::dispatch($conf, $req); } my $configurationcheck = 0; sub periodic { my ($conf) = @_; if (-e "$rundir/$conf->{'name'}.exit") { BSServer::msg("$conf->{'name'} exiting..."); unlink("$conf->{'ajaxsocketpath'}.lock") if $conf->{'ajaxsocketpath'}; unlink("$rundir/$conf->{'name'}.exit"); exit(0); } if (-e "$rundir/$conf->{'name'}.restart") { BSServer::msg("$conf->{'name'} restarting..."); if (system($0, "--test")) { BSServer::msg("$0 failed, aborting restart"); return; } unlink("$rundir/$conf->{'name'}.restart"); my $arg; my $sock = BSServer::getserversocket(); # clear close-on-exec bit fcntl($sock, F_SETFD, 0); $arg = fileno($sock); my $sock2 = BSServer::getserversocket2(); if ($sock2) { fcntl($sock2, F_SETFD, 0); $arg .= ','.fileno($sock2); } exec($0, '--restart', $arg); die("$0: $!\n"); } if ($configurationcheck++ > 10) { BSConfiguration::check_configuration(); $configurationcheck = 0; } } sub periodic_ajax { my ($conf) = @_; if (!$conf->{'exiting'}) { my @s = stat(BSServer::getserverlock()); return if $s[3]; my $sev = $conf->{'server_ev'}; close($sev->{'fd'}); BSEvents::rem($sev); BSServer::msg("AJAX: $conf->{'name'} exiting."); $conf->{'exiting'} = 10 + 1; } my @events = BSEvents::allevents(); if (@events <= 1 || --$conf->{'exiting'} == 0) { BSServer::msg("AJAX: $conf->{'name'} goodbye."); exit(0); } } sub serverstatus { my ($cgi) = @_; my @res; for my $s (BSServer::serverstatus()) { next unless $s->{'state'}; push @res, { 'id' => $s->{'slot'}, 'starttime' => $s->{'starttime'}, 'pid' => $s->{'pid'}, 'request' => $s->{'data'}, }; $res[-1]->{'group'} = $s->{'group'} if $s->{'group'}; } my $serverstatus = { 'job' => \@res, 'starttime' => $BSServer::request->{'server'}->{'starttime'}, }; return ($serverstatus, $BSXML::serverstatus); } sub isrunning { my ($name, $conf) = @_; return 1 unless $conf; # can't check # hmm, might want to use a lock instead... eval { BSServer::serveropen($conf->{'port'}); BSServer::serverclose(); }; return $@ && "$@" =~ /bind:/ ? 
my $configurationcheck = 0;

sub periodic {
  my ($conf) = @_;
  if (-e "$rundir/$conf->{'name'}.exit") {
    BSServer::msg("$conf->{'name'} exiting...");
    unlink("$conf->{'ajaxsocketpath'}.lock") if $conf->{'ajaxsocketpath'};
    unlink("$rundir/$conf->{'name'}.exit");
    exit(0);
  }
  if (-e "$rundir/$conf->{'name'}.restart") {
    BSServer::msg("$conf->{'name'} restarting...");
    if (system($0, "--test")) {
      BSServer::msg("$0 failed, aborting restart");
      return;
    }
    unlink("$rundir/$conf->{'name'}.restart");
    my $arg;
    my $sock = BSServer::getserversocket();
    # clear close-on-exec bit
    fcntl($sock, F_SETFD, 0);
    $arg = fileno($sock);
    my $sock2 = BSServer::getserversocket2();
    if ($sock2) {
      fcntl($sock2, F_SETFD, 0);
      $arg .= ','.fileno($sock2);
    }
    exec($0, '--restart', $arg);
    die("$0: $!\n");
  }
  if ($configurationcheck++ > 10) {
    BSConfiguration::check_configuration();
    $configurationcheck = 0;
  }
}

sub periodic_ajax {
  my ($conf) = @_;
  if (!$conf->{'exiting'}) {
    my @s = stat(BSServer::getserverlock());
    return if $s[3];
    my $sev = $conf->{'server_ev'};
    close($sev->{'fd'});
    BSEvents::rem($sev);
    BSServer::msg("AJAX: $conf->{'name'} exiting.");
    $conf->{'exiting'} = 10 + 1;
  }
  my @events = BSEvents::allevents();
  if (@events <= 1 || --$conf->{'exiting'} == 0) {
    BSServer::msg("AJAX: $conf->{'name'} goodbye.");
    exit(0);
  }
}

sub serverstatus {
  my ($cgi) = @_;
  my @res;
  for my $s (BSServer::serverstatus()) {
    next unless $s->{'state'};
    push @res, {
      'id' => $s->{'slot'},
      'starttime' => $s->{'starttime'},
      'pid' => $s->{'pid'},
      'request' => $s->{'data'},
    };
    $res[-1]->{'group'} = $s->{'group'} if $s->{'group'};
  }
  my $serverstatus = {
    'job' => \@res,
    'starttime' => $BSServer::request->{'server'}->{'starttime'},
  };
  return ($serverstatus, $BSXML::serverstatus);
}

sub isrunning {
  my ($name, $conf) = @_;
  return 1 unless $conf;	# can't check
  # hmm, might want to use a lock instead...
  eval {
    BSServer::serveropen($conf->{'port'});
    BSServer::serverclose();
  };
  return $@ && "$@" =~ /bind:/ ? 1 : 0;
}
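# Note on the restart path in server() below: the listening sockets survive
# the exec because their close-on-exec bit is cleared and their fd numbers are
# passed as "--restart <fd>[,<fd2>]" (e.g. "--restart 7,8"; fd numbers are
# illustrative), so the re-exec'ed process can re-adopt them via the "&=<fd>"
# port syntax.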
"$port,$port2" : $port, $BSConfig::bsuser, $BSConfig::bsgroup); } if ($conf && $aconf) { $conf->{'ajaxsocketpath'} = $aconf->{'socketpath'}; $conf->{'handoffpath'} = $aconf->{'socketpath'}; unlink("$aconf->{'socketpath'}.lock"); } if ($aconf) { if (!$conf || xfork() == 0) { $isajax = 1; BSServer::serverclose() if $conf; BSServer::serveropen_unix($aconf->{'socketpath'}, $BSConfig::bsuser, $BSConfig::bsgroup); my $sev = BSServerEvents::addserver(BSServer::getserversocket(), $aconf); $aconf->{'server_ev'} = $sev; # for periodic_ajax BSServer::msg("AJAX: $name started"); eval { $aconf->{'run'}->($aconf); }; writestr("$rundir/$name.AJAX.died", undef, $@); die("AJAX: died $@\n"); } } mkdir_p($rundir); die("cannot write to rundir '$rundir'\n") unless POSIX::access($rundir, POSIX::W_OK); # intialize xml converter to speed things up XMLin(['startup' => '_content'], 'x'); if ($conf->{'port2'}) { BSServer::msg("$name started on ports $conf->{port} and $conf->{port2}"); } else { BSServer::msg("$name started on port $conf->{port}"); } $conf->{'run'}->($conf); die("server returned\n"); } =head2 openlog - open STDOUT/STDERR to log file checks if $logfile is set and reopens STDOUT/STDERR to logfile BSUtil::openlog($logfile, $user, $group); =cut sub openlog { my ($logfile, $user, $group) = @_; return unless defined $logfile; $logfile = "$BSConfig::logdir/$logfile" unless $logfile =~ /\//; my ($ld) = $logfile =~ m-(.*)/- ; BSUtil::mkdir_p_chown($ld, $user, $group) if $ld && defined($user) || defined($group); open(STDOUT, '>>', $logfile) || die("Could not open $logfile: $!\n"); open(STDERR, ">&STDOUT"); } 1; open-build-service-2.9.4/src/backend/BSUrlmapper.pm000066400000000000000000000101641332555733200221440ustar00rootroot00000000000000# # Copyright (c) 2017 SUSE LLC # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
#
# Copyright (c) 2017 SUSE LLC
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program (see the file COPYING); if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#
################################################################
#
# url <-> project/repo mapper
#

package BSUrlmapper;

use BSConfiguration;
use BSRPC;

my $extrepodir = "$BSConfig::bsdir/repos";
my $urlmapcache = {};

sub urlmapper {
  my ($url, $cache) = @_;
  $url =~ s/\/+$//;
  return undef if $url eq '';
  $cache ||= $urlmapcache;
  if (!exists $cache->{''}) {
    $cache->{''} = undef;
    for my $prp (sort keys %{$BSConfig::prp_ext_map || {}}) {
      my $u = $BSConfig::prp_ext_map->{$prp};
      $u =~ s/\/+$//;
      $cache->{$u} = $prp;
    }
  }
  my $prp = $cache->{$url};
  return $prp if $prp;
  if ($BSConfig::repodownload && $url =~ /^\Q$BSConfig::repodownload\E\/(.+\/.+)/) {
    my $path = $1;
    $path =~ s/%([a-fA-F0-9]{2})/chr(hex($1))/ge;
    my @p = split('/', $path);
    while (@p > 1 && $p[0] =~ /:$/) {
      splice(@p, 0, 2, "$p[0]$p[1]");
    }
    my $project = shift(@p);
    while (@p > 1 && $p[0] =~ /:$/) {
      splice(@p, 0, 2, "$p[0]$p[1]");
    }
    my $repository = shift(@p);
    return "$project/$repository" if $project && $repository;
  }
  return undef;
}

sub map_to_extrep {
  my ($prp) = @_;
  my $prp_ext = $prp;
  $prp_ext =~ s/:/:\//g;
  my $extrep = "$extrepodir/$prp_ext";
  return $extrep unless $BSConfig::publishredirect;
  if ($BSConfig::publishredirect_use_regex || $BSConfig::publishedredirect_use_regex) {
    for my $key (sort {$b cmp $a} keys %{$BSConfig::publishredirect}) {
      if ($prp =~ /^$key/) {
        $extrep = $BSConfig::publishredirect->{$key};
        last;
      }
    }
  } elsif (exists($BSConfig::publishredirect->{$prp})) {
    $extrep = $BSConfig::publishredirect->{$prp};
  }
  $extrep = $extrep->($prp, $prp_ext) if $extrep && ref($extrep) eq 'CODE';
  return $extrep;
}
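# Example of the mapping above (project/repository values illustrative): for
# the prp "openSUSE:Factory/standard", every ":" becomes ":/", so the default
# on-disk location is "$BSConfig::bsdir/repos/openSUSE:/Factory/standard"
# unless $BSConfig::publishredirect overrides it.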
sub get_extrep {
  my ($prp) = @_;
  my $extrep = map_to_extrep($prp);
  return defined($extrep) && ref($extrep) ? $extrep->[0] : $extrep;
}

sub get_downloadurl {
  my ($prp) = @_;
  # check ext_map
  if ($BSConfig::prp_ext_map && exists $BSConfig::prp_ext_map->{$prp}) {
    return $BSConfig::prp_ext_map->{$prp};
  }
  # check :publishredirect
  my $extrep = map_to_extrep($prp);
  $extrep = [ $extrep ] unless ref $extrep;
  return $extrep->[2] if $extrep->[2];
  # default to repodownload url
  return undef unless $BSConfig::repodownload;
  if ($extrep->[0] =~ /^\Q$BSConfig::bsdir\E\/repos\/(.*)$/) {
    my $url = "$BSConfig::repodownload/".BSRPC::urlencode($1).'/';
    $url =~ s!//$!/!;
    return $url;
  }
  my $prp_ext = $prp;
  $prp_ext =~ s/:/:\//g;
  return "$BSConfig::repodownload/".BSRPC::urlencode($prp_ext)."/";
}

sub get_path_downloadurl {
  my ($prp) = @_;
  my ($path, $url);
  # check ext_map
  if ($BSConfig::prp_ext_map && exists $BSConfig::prp_ext_map->{$prp}) {
    $url = $BSConfig::prp_ext_map->{$prp};
    return (undef, undef) unless defined $url;	# not published
  }
  my $extrep = map_to_extrep($prp);
  $extrep = [ $extrep ] unless ref $extrep;
  $path = $extrep->[1];
  $url = $extrep->[2] if !defined($url);
  if ((!defined($path) || !defined($url)) && $extrep->[0] =~ /^\Q$BSConfig::bsdir\E\/repos\/(.*)$/) {
    $path = $1 if !defined $path;
    $url = "$BSConfig::repodownload/".BSRPC::urlencode($1) if $BSConfig::repodownload && !defined($url);
  }
  if (!defined($url) && $BSConfig::repodownload) {
    my $prp_ext = $prp;
    $prp_ext =~ s/:/:\//g;
    $url = "$BSConfig::repodownload/".BSRPC::urlencode($prp_ext);
  }
  $url =~ s/\/?$/\// if defined $url;
  return ($path, $url);
}

1;

open-build-service-2.9.4/src/backend/BSUtil.pm

#
# Copyright (c) 2006, 2007 Michael Schroeder, Novell Inc.
# Copyright (c) 2016 Frank Schreiner, SUSE LLC
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program (see the file COPYING); if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#
################################################################

package BSUtil;

=head1 NAME

BSUtil - collection of useful functions

=cut

require Exporter;
@ISA = qw(Exporter);
@EXPORT = qw{writexml writestr readxml readstr ls mkdir_p xfork str2utf8 data2utf8 str2utf8xml data2utf8xml};

use XML::Structured;
use POSIX;
use Fcntl qw(:DEFAULT :flock);
use Encode;
use Storable ();
use IO::Handle;
use strict;

# should we do a slow fdatasync?
our $fdatasync_before_rename;

# the current debug level
my $debuglevel;

=head1 FUNCTIONS / METHODS

=cut

sub set_fdatasync_before_rename {
  $fdatasync_before_rename = 1;
  if (!defined(&File::Sync::fdatasync_fd)) {
    eval {
      require File::Sync;
    };
    warn($@) if $@;
    *File::Sync::fdatasync_fd = sub {} unless defined &File::Sync::fdatasync_fd;
  }
}

sub do_fdatasync {
  my ($fd) = @_;
  set_fdatasync_before_rename() unless defined &File::Sync::fdatasync_fd;
  File::Sync::fdatasync_fd($fd);
}

sub writexml {
  my ($fn, $fnf, $dd, $dtd) = @_;
  my $d = XMLout($dtd, $dd);
  local *F;
  open(F, '>', $fn) || die("$fn: $!\n");
  (syswrite(F, $d) || 0) == length($d) || die("$fn write: $!\n");
  do_fdatasync(fileno(F)) if defined($fnf) && $fdatasync_before_rename;
  close(F) || die("$fn close: $!\n");
  return unless defined $fnf;
  $! = 0;
  rename($fn, $fnf) || die("rename $fn $fnf: $!\n");
}

sub writestr {
  my ($fn, $fnf, $d) = @_;
  local *F;
  open(F, '>', $fn) || die("$fn: $!\n");
  if (length($d)) {
    (syswrite(F, $d) || 0) == length($d) || die("$fn write: $!\n");
  }
  do_fdatasync(fileno(F)) if defined($fnf) && $fdatasync_before_rename;
  close(F) || die("$fn close: $!\n");
  return unless defined $fnf;
  rename($fn, $fnf) || die("rename $fn $fnf: $!\n");
}

sub appendstr {
  my ($fn, $d) = @_;
  local *F;
  open(F, '>>', $fn) || die("$fn: $!\n");
  if (length($d)) {
    (syswrite(F, $d) || 0) == length($d) || die("$fn write: $!\n");
  }
  close(F) || die("$fn close: $!\n");
}

sub readstr {
  my ($fn, $nonfatal) = @_;
  local *F;
  if (!open(F, '<', $fn)) {
    die("$fn: $!\n") unless $nonfatal;
    return undef;
  }
  my $d = '';
  1 while sysread(F, $d, 8192, length($d));
  close F;
  return $d;
}

sub readxml {
  my ($fn, $dtd, $nonfatal) = @_;
  my $d = readstr($fn, $nonfatal);
  return $d unless defined $d;
  if ($d !~ /<.*?>/s) {
    die("$fn: not xml\n") unless $nonfatal;
    return undef;
  }
  return XMLin($dtd, $d) unless $nonfatal;
  eval { $d = XMLin($dtd, $d); };
  return $@ ? undef : $d;
}

sub fromxml {
  my ($d, $dtd, $nonfatal) = @_;
  return XMLin($dtd, $d) unless $nonfatal;
  eval { $d = XMLin($dtd, $d); };
  return $@ ? undef : $d;
}

sub toxml {
  my ($d, $dtd) = @_;
  return XMLout($dtd, $d);
}

sub touch($) {
  my ($file) = @_;
  if (-e $file) {
    utime(time, time, $file);
  } else {
    # create new file, mtime is anyway current
    local *F;
    open(F, '>>', $file) || die("$file: $!\n");
    close(F) || die("$file close: $!\n");
  }
}

sub ls {
  local *D;
  opendir(D, $_[0]) || return ();
  my @r = grep {$_ ne '.' && $_ ne '..'} readdir(D);
  closedir D;
  return @r;
}

sub mkdir_p {
  my ($dir) = @_;

  return 1 if -d $dir;
  my $pdir;
  if ($dir =~ /^(.+)\//) {
    $pdir = $1;
    mkdir_p($pdir) || return undef;
  }
  while (!mkdir($dir, 0777)) {
    my $e = $!;
    return 1 if -d $dir;
    if (defined($pdir) && ! -d $pdir) {
      mkdir_p($pdir) || return undef;
      next;
    }
    $! = $e;
    warn("mkdir: $dir: $!\n");
    return undef;
  }
  return 1;
}

=head2 mkdir_p_chown - create directory recursively and change ownership

 calls mkdir_p and changes ownership of the created directory to the
 supplied user and group if provided.
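 A minimal usage sketch (path, user and group are illustrative):

  BSUtil::mkdir_p_chown("/srv/obs/run", "obsrun", "obsrun")
    || die("mkdir_p_chown failed\n");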
=cut

sub mkdir_p_chown {
  my ($dir, $user, $group) = @_;

  if (!(-d $dir)) {
    mkdir_p($dir) || return undef;
  }
  return 1 unless defined($user) || defined($group);

  $user = -1 unless defined $user;
  $group = -1 unless defined $group;
  my $ouser = $user;
  my $ogroup = $group;
  if ($user !~ /^-?\d+$/ && !defined($user = getpwnam($user))) {
    warn "user $ouser unknown\n"; return undef
  }
  if ($group !~ /^-?\d+$/ && !defined($group = getgrnam($group))) {
    warn "group $ogroup unknown\n"; return undef
  }

  my @s = stat($dir);
  if ($s[4] != $user || $s[5] != $group) {
    if (!chown $user, $group, $dir) {
      warn "failed to chown $dir to $user:$group\n"; return undef;
    }
  }
  return 1;
}

sub drop_privs_to {
  my ($user, $group) = @_;

  if (defined($group)) {
    printlog("Drop privileges to group '$group'", 1);
    $group = getgrnam($group) unless $group =~ /^\d+$/;
    die("unknown group\n") unless defined $group;
    if ($) != $group || $( != $group) {
      ($), $() = ($group, $group);
      die("setgid: $!\n") if $) != $group;
    }
  }
  if (defined($user)) {
    printlog("Drop privileges to user '$user'", 1);
    $user = getpwnam($user) unless $user =~ /^\d+$/;
    die("unknown user\n") unless defined $user;
    if ($> != $user || $< != $user) {
      ($>, $<) = ($user, $user);
      die("setuid: $!\n") if $> != $user;
    }
  }
}

sub cleandir {
  my ($dir) = @_;

  my $ret = 1;
  return 1 unless -d $dir;
  for my $c (ls($dir)) {
    if (! -l "$dir/$c" && -d _) {
      cleandir("$dir/$c");
      $ret = undef unless rmdir("$dir/$c");
    } else {
      $ret = undef unless unlink("$dir/$c");
    }
  }
  return $ret;
}

sub linktree {
  my ($from, $to) = @_;
  return unless -d $from;
  mkdir_p($to);
  my @todo = sort(ls($from));
  while (@todo) {
    my $f = shift @todo;
    if (! -l "$from/$f" && -d _) {
      mkdir_p("$to/$f");
      unshift @todo, map {"$f/$_"} ls("$from/$f");
    } else {
      link("$from/$f", "$to/$f") || die("link $from/$f $to/$f: $!\n");
    }
  }
}

sub treeinfo {
  my ($dir) = @_;
  my @info;
  my @todo = sort(ls($dir));
  while (@todo) {
    my $f = shift @todo;
    my @s = lstat("$dir/$f");
    next unless @s;
    if (-d _) {
      push @info, "$f";
      unshift @todo, map {"$f/$_"} ls("$dir/$f");
    } else {
      push @info, "$f $s[9]/$s[7]/$s[1]";
    }
  }
  return \@info;
}

sub xfork {
  my $pid;
  while (1) {
    $pid = fork();
    last if defined $pid;
    die("fork: $!\n") if $! != POSIX::EAGAIN;
    sleep(5);
  }
  return $pid;
}

sub cp {
  my ($from, $to, $tof) = @_;
  local *F;
  local *T;
  open(F, '<', $from) || die("$from: $!\n");
  open(T, '>', $to) || die("$to: $!\n");
  my $buf;
  while (sysread(F, $buf, 8192)) {
    (syswrite(T, $buf) || 0) == length($buf) || die("$to write: $!\n");
  }
  close(F);
  close(T) || die("$to: $!\n");
  if (defined($tof)) {
    rename($to, $tof) || die("rename $to $tof: $!\n");
  }
}

sub checkutf8 {
  my ($oct) = @_;
  Encode::_utf8_off($oct);
  return 1 unless defined $oct;
  return 1 unless $oct =~ /[\200-\377]/;
  eval {
    Encode::_utf8_on($oct);
    encode('UTF-8', $oct, Encode::FB_CROAK);
  };
  return $@ ? 0 : 1;
}
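# Example of what str2utf8() below normalizes (bytes illustrative): a latin-1
# string such as "K\xf6nig" is re-encoded to valid UTF-8 ("K\xc3\xb6nig"),
# while input that is already valid UTF-8 passes through unchanged.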
return $oct; } sub data2utf8 { my ($d) = @_; if (ref($d) eq 'ARRAY') { for my $dd (@$d) { if (ref($dd) eq '') { $dd = str2utf8($dd); } else { data2utf8($dd); } } } elsif (ref($d) eq 'HASH') { for my $dd (keys %$d) { if (ref($d->{$dd}) eq '') { $d->{$dd} = str2utf8($d->{$dd}); } else { data2utf8($d->{$dd}); } } } } sub str2utf8xml { my ($oct) = @_; return $oct unless defined $oct; return $oct unless $oct =~ /[^\011\012\015\040-\176]/s; $oct = str2utf8($oct); Encode::_utf8_on($oct); # xml does not accept all utf8 chars, escape the illegal $oct =~ s/([\000-\010\013\014\016-\037\177])/sprintf("&#x%x;",ord($1))/sge; $oct =~ s/([\x{d800}-\x{dfff}\x{fffe}\x{ffff}])/sprintf("&#x%x;",ord($1))/sge; Encode::_utf8_off($oct); return $oct; } sub data2utf8xml { my ($d) = @_; if (ref($d) eq 'ARRAY') { for my $dd (@$d) { if (ref($dd) eq '') { $dd = str2utf8xml($dd); } else { data2utf8xml($dd); } } } elsif (ref($d) eq 'HASH') { for my $dd (keys %$d) { if (ref($d->{$dd}) eq '') { $d->{$dd} = str2utf8xml($d->{$dd}); } else { data2utf8xml($d->{$dd}); } } } } sub waituntilgone { my ($fn, $timeout) = @_; while (1) { return 1 unless -e $fn; return 0 if defined($timeout) && $timeout <= 0; select(undef, undef, undef, .1); $timeout -= .1 if defined $timeout; } } sub lockopen { my ($fg, $op, $fn, $nonfatal) = @_; local *F = $fg; while (1) { if (!open(F, $op, $fn)) { return undef if $nonfatal; die("$fn: $!\n"); } flock(F, LOCK_EX) || die("flock $fn: $!\n"); my @s = stat(F); return 1 if @s && $s[3]; close F; } } sub lockcheck { my ($op, $fn) = @_; local *F; while (1) { if (!open(F, $op, $fn)) { return -1; } if (!flock(F, LOCK_EX | LOCK_NB)) { close(F); return 0; } my @s = stat(F); close F; return 1 if @s && $s[3]; } } sub lockopenxml { my ($fg, $op, $fn, $dtd, $nonfatal) = @_; if (!lockopen($fg, $op, $fn, $nonfatal)) { die("$fn: $!\n") unless $nonfatal; return undef; } my $d = readxml($fn, $dtd, $nonfatal); if (!$d) { local *F = $fg; close F; } return $d; } sub lockcreatexml { my ($fg, $fn, $fnf, $dd, $dtd) = @_; local *F = $fg; writexml($fn, undef, $dd, $dtd); open(F, '<', $fn) || die("$fn: $!\n"); flock(F, LOCK_EX | LOCK_NB) || die("lock: $!\n"); if (!link($fn, $fnf)) { unlink($fn); close F; return undef; } unlink($fn); return 1; } # XXX: does that really belong here? # =head2 enabled Algorithm: each enable/disable has a score: +1 if it's a disable +2 if the arch matches +4 if the repo matches =cut sub enabled { my ($repoid, $disen, $default, $arch) = @_; # filter matching elements, check for shortcuts return $default unless $disen; my @dis = grep { (!defined($_->{'arch'}) || $_->{'arch'} eq $arch) && (!defined($_->{'repository'}) || $_->{'repository'} eq $repoid) } @{$disen->{'disable'} || []}; return 1 if !@dis && $default; my @ena = grep { (!defined($_->{'arch'}) || $_->{'arch'} eq $arch) && (!defined($_->{'repository'}) || $_->{'repository'} eq $repoid) } @{$disen->{'enable'} || []}; return @dis ? 0 : $default unless @ena; return @ena ? 1 : $default unless @dis; # have @dis and @ena, need to do score thing... my $disscore = 0; for (@dis) { my $score = 1; $score += 2 if defined($_->{'arch'}); $score += 4 if defined($_->{'repository'}); if ($score > $disscore) { return 0 if $score == 7; # can't max this! $disscore = $score; } } my $enascore = 0; for (@ena) { my $score = 0; $score += 2 if defined($_->{'arch'}); $score += 4 if defined($_->{'repository'}); if ($score > $enascore) { return 1 if $score > $disscore; $enascore = $score; } } return $enascore > $disscore ? 
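# Worked example (hypothetical flags, for illustration only): for repoid
# "standard" and arch "x86_64", a disable matching both repository and arch
# scores 1+2+4 = 7 and can never be overridden; an enable matching only the
# repository scores 4 and therefore wins against a bare disable, which
# scores just 1.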
1 : 0; } sub store { my ($fn, $fnf, $dd) = @_; if ($fdatasync_before_rename && defined($fnf)) { local *F; open(F, '>', $fn) || die("$fn: $!\n"); if (!Storable::nstore_fd($dd, \*F)) { die("nstore_fd $fn: $!\n"); } (\*F)->flush(); do_fdatasync(fileno(F)); close(F) || die("$fn close: $!\n"); } else { if (!Storable::nstore($dd, $fn)) { die("nstore $fn: $!\n"); } } return unless defined $fnf; $! = 0; rename($fn, $fnf) || die("rename $fn $fnf: $!\n"); } sub retrieve { my ($fn, $nonfatal) = @_; my $dd; if (!$nonfatal) { $dd = ref($fn) ? Storable::fd_retrieve($fn) : Storable::retrieve($fn); die("retrieve $fn: $!\n") unless $dd; } else { eval { $dd = ref($fn) ? Storable::fd_retrieve($fn) : Storable::retrieve($fn); }; if (!$dd && $nonfatal == 2) { if ($@) { warn($@); } else { warn("retrieve $fn: $!\n"); } } } return $dd; } sub tostorable { my ($d) = @_; return 'pst0'.Storable::nfreeze($d); } sub fromstorable { my ($d, $nonfatal) = @_; return Storable::thaw(substr($d, 4)) unless $nonfatal; eval { $d = Storable::thaw(substr($d, 4)); }; if ($@) { warn($@) if $nonfatal == 2; return undef; } return $d; } sub ping { my ($pingfile) = @_; local *F; if (sysopen(F, $pingfile, POSIX::O_WRONLY|POSIX::O_NONBLOCK)) { syswrite(F, 'x'); close(F); } } sub drainping { my ($ping) = @_; my $dummy; fcntl($ping, F_SETFL, POSIX::O_NONBLOCK); 1 while (sysread($ping, $dummy, 1024, 0) || 0) > 0; fcntl($ping, F_SETFL, 0); } sub waitping { my ($ping, $timeout) = @_; my $dummy; if (!defined($timeout)) { sysread($ping, $dummy, 1, 0); return; } fcntl($ping, F_SETFL, POSIX::O_NONBLOCK); while ($timeout > 0) { last if (sysread($ping, $dummy, 1024, 0) || 0) > 0; sleep(1); $timeout -= 1; } fcntl($ping, F_SETFL, 0); } sub restartexit { my ($arg, $name, $runfile, $pingfile) = @_; return unless $arg; # support option hash as arg $arg = '--stop' if ref($arg) && $arg->{stop}; $arg = '--restart' if ref($arg) && $arg->{restart}; return if ref($arg); if ($arg eq '--stop' || $arg eq '--exit') { if (!(-e "$runfile.lock") || lockcheck('>>', "$runfile.lock")) { print "$name not running.\n"; exit 0; } print "exiting $name...\n"; touch("$runfile.exit"); ping($pingfile) if $pingfile; waituntilgone("$runfile.exit"); exit(0); } if ($arg eq '--restart') { die("$name not running.\n") if !(-e "$runfile.lock") || lockcheck('>>', "$runfile.lock"); print "restarting $name...\n"; touch("$runfile.restart"); ping($pingfile) if $pingfile; waituntilgone("$runfile.restart"); exit(0); } } sub xsystem { my ($in, @args) = @_; local (*RIN, *WIN); local (*RERR, *WERR); local *P; if (defined($in)) { pipe(RIN, WIN) || die("stdin pipe: $!\n"); } pipe(RERR, WERR) || die("stderr pipe: $!\n"); my $pid; $pid = open(P, '-|'); die("fork: $!\n") unless defined $pid; if (!$pid) { close WIN if defined $in; close RERR; open(STDIN, defined($in) ? "<&RIN" : "&WERR"); eval { exec(@args); die("$args[0]: $!\n"); }; warn($@) if $@; exit 1; } close RIN if defined $in; close WERR; my ($indead, $outdead, $errdead); $indead = 1 unless defined $in; my ($out, $err) = ('', ''); my $stat; while (!($outdead && $errdead)) { my ($rin, $win) = ('', ''); vec($win, fileno(WIN), 1) = 1 unless $indead; vec($rin, fileno(P), 1) = 1 unless $outdead; vec($rin, fileno(RERR), 1) = 1 unless $errdead; my $nfound = select($rin, $win, undef, undef); if (!defined($nfound) || $nfound == -1) { next if $! == POSIX::EINTR; die("select: $!\n"); } next unless $nfound; if (!$indead && vec($win, fileno(WIN), 1)) { my $l = syswrite(WIN, $in); if (!defined($l) || $l < 0) { next if $! == POSIX::EINTR || $! 
== POSIX::EWOULDBLOCK; close(WIN); $indead = 1; } else { $in = substr($in, $l); if (length($in) <= 0) { close(WIN); $indead = 1; } } } if (!$outdead && vec($rin, fileno(P), 1)) { my $l = sysread(P, $out, 4096, length($out)); if (!defined($l) || $l <= 0) { next if !defined($l) && ($! == POSIX::EINTR || $! == POSIX::EWOULDBLOCK); $stat = close(P); $outdead = 1; } } if (!$errdead && vec($rin, fileno(RERR), 1)) { my $l = sysread(RERR, $err, 4096, length($err)); if (!defined($l) || $l <= 0) { next if !defined($l) && ($! == POSIX::EINTR || $! == POSIX::EWOULDBLOCK); close(RERR); $errdead = 1; } } } close WIN unless $indead; if (!$stat) { chomp $err; die(($err || "$args[0]: $?") . "\n"); } if (!wantarray) { chomp $err; warn("$err\n") if $err; return $out; } else { return ($out, $err); } } sub unify { my %h = map {$_ => 1} @_; return grep(delete($h{$_}), @_); } sub identical { my ($d1, $d2, $except, $subexcept) = @_; if (!defined($d1)) { return defined($d2) ? 0 : 1; } return 0 unless defined($d2); my $r = ref($d1); return 0 if $r ne ref($d2); if ($r eq '') { return 0 if $d1 ne $d2; } elsif ($r eq 'HASH') { my %k = (%$d1, %$d2); for my $k (keys %k) { next if $except && $except->{$k}; return 0 unless identical($d1->{$k}, $d2->{$k}, $subexcept, $subexcept); } } elsif ($r eq 'ARRAY') { return 0 unless @$d1 == @$d2; for (my $i = 0; $i < @$d1; $i++) { return 0 unless identical($d1->[$i], $d2->[$i], $subexcept, $subexcept); } } else { return 0; } return 1; } =head2 isotime - convert time to iso format BSUtil::isotime($time); =cut sub isotime { my ($t) = @_; my @lt = localtime($t || time()); return sprintf "%04d-%02d-%02d %02d:%02d:%02d", $lt[5] + 1900, $lt[4] + 1, @lt[3,2,1,0]; } =head2 getdebuglevel - get the current debug level BSUtil::getdebuglevel(); =cut sub getdebuglevel { return $debuglevel; } =head2 setdebuglevel - set the current debug level BSUtil::setdebuglevel($newlevel); =cut sub setdebuglevel { my ($level) = @_; my $oldlevel = $debuglevel; $debuglevel = $level; return $oldlevel; } =head2 printlog - print unified log messages BSUtil::printlog($message [, $level]); FORMAT: "YYYY-MM-DD hh:mm:ss [$pid] $message" =cut sub printlog { my ($msg, $level) = @_; return if $level && !($debuglevel && $debuglevel >= $level); $msg = "[debug $level] $msg" if $level; printf "%s: %-7s %s\n", isotime(time), "[$$]", $msg; } 1; open-build-service-2.9.4/src/backend/BSVerify.pm000066400000000000000000000500751332555733200214460ustar00rootroot00000000000000# # Copyright (c) 2006, 2007 Michael Schroeder, Novell Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program (see the file COPYING); if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA # ################################################################ # # parameter verification functions # package BSVerify; use strict; # keep in sync with src/api/app/model/project.rb sub verify_projid { my $projid = $_[0]; die("projid is empty\n") unless defined($projid) && $projid ne ''; die("projid '$projid' is illegal\n") if $projid =~ /[\/\000-\037]/; die("projid '$projid' is illegal\n") if ":$projid:" =~ /:[_\.:]/; die("projid '$projid' is illegal\n") unless $projid; die("projid '$projid' is too long\n") if length($projid) > 200; } sub verify_projkind { my $projkind = $_[0]; die("projkind '$projkind' is illegal\n") if $projkind ne 'standard' && $projkind ne 'maintenance' && $projkind ne 'maintenance_incident' && $projkind ne 'maintenance_release' } # NOTE: this method is used for source and build container names sub verify_packid { my $packid = $_[0]; die("packid is empty\n") unless defined($packid) && $packid ne ''; die("packid '$packid' is too long\n") if length($packid) > 200; if ($packid =~ /(? 200; } sub verify_jobid { my $jobid = $_[0]; die("jobid is empty\n") unless defined($jobid) && $jobid ne ''; die("jobid '$jobid' is illegal\n") if $jobid =~ /[\/\000-\037]/; die("jobid '$jobid' is illegal\n") if $jobid =~ /^[\.]/; } sub verify_arch { my $arch = $_[0]; die("arch is empty\n") unless defined($arch) && $arch ne ''; die("arch '$arch' is illegal\n") if $arch =~ /[\/:\.\000-\037]/; die("arch '$arch' is illegal\n") unless $arch; die("arch '$arch' is too long\n") if length($arch) > 200; verify_simple($arch); } sub verify_packid_repository { verify_packid($_[0]) unless $_[0] && $_[0] eq '_repository'; } sub verify_service { my $p = $_[0]; verify_filename($p->{'name'}) if defined($p->{'name'}); for my $param (@{$p->{'param'} || []}) { verify_filename($param->{'name'}); } } sub verify_patchinfo { # This verifies the absolute minimum required content of a patchinfo file my $p = $_[0]; verify_filename($p->{'name'}) if defined($p->{'name'}); my %allowed_categories = map {$_ => 1} qw{security recommended optional feature}; die("Invalid category defined in _patchinfo\n") if defined($p->{'category'}) && !$allowed_categories{$p->{'category'}}; for my $rt (@{$p->{'releasetarget'} || []}) { verify_projid($rt->{'project'}); verify_repoid($rt->{'repository'}) if defined $rt->{'repository'}; } } sub verify_simple { my $name = $_[0]; die("illegal characters\n") if $name =~ /[^\-+=\.,0-9:%{}\@#%A-Z_a-z~\200-\377]/s; } sub verify_filename { my $filename = $_[0]; die("filename is empty\n") unless defined($filename) && $filename ne ''; die("filename '$filename' is illegal\n") if $filename =~ /[\/\000-\037]/; die("filename '$filename' is illegal\n") if $filename =~ /^\./; } sub verify_url { my $url = $_[0]; die("url is empty\n") unless defined($url) && $url ne ''; die("illegal characters in url\n") if $url =~ /[^\041-\176\200-\377]/s; die("url does not start with a scheme\n") if $url !~ /^[a-zA-Z]+:/s; } sub verify_md5 { my $md5 = $_[0]; die("not a md5 sum\n") unless $md5 && $md5 =~ /^[0-9a-f]{32}$/s; } # can be a md5sum or a git id sub verify_srcmd5 { my $srcmd5 = $_[0]; die("not a srcmd5 sum\n") unless $srcmd5 && ($srcmd5 =~ /^[0-9a-f]{32}$/s || $srcmd5 =~ /^[0-9a-f]{40}$/s); } sub verify_rev { my $rev = $_[0]; die("revision is empty\n") unless defined($rev) && $rev 
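# Accepted forms (see the checks below): a 32-digit md5, a 40-digit git id,
# one of the symbolic revisions upload/build/latest/repository, or a plain
# decimal revision number.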
ne ''; return if $rev =~ /^[0-9a-f]{32}$/s; return if $rev =~ /^[0-9a-f]{40}$/s; # git id return if $rev eq 'upload' || $rev eq 'build' || $rev eq 'latest' || $rev eq 'repository'; die("bad revision '$rev'\n") unless $rev =~ /^\d+$/s; } sub verify_linkrev { my $rev = $_[0]; return if $rev && $rev eq 'base'; verify_rev($rev); } sub verify_port { my $port = $_[0]; die("port is empty\n") unless defined($port) && $port ne ''; die("bad port '$port'\n") unless $port =~ /^\d+$/s; die("illegal port '$port'\n") unless $port >= 1024; } sub verify_num { my $num = $_[0]; die("number is empty\n") unless defined($num) && $num ne ''; die("not a number: '$num'\n") unless $num =~ /^\d+$/; } sub verify_intnum { my $num = $_[0]; die("number is empty\n") unless defined($num) && $num ne ''; die("not a number: '$num'\n") unless $num =~ /^-?\d+$/; } sub verify_bool { my $bool = $_[0]; die("not boolean\n") unless defined($bool) && ($bool eq '0' || $bool eq '1'); } sub verify_prp { my $prp = $_[0]; die("not a prp: '$prp'\n") unless $prp =~ /^([^\/]*)\/(.*)$/s; my ($projid, $repoid) = ($1, $2); verify_projid($projid); verify_repoid($repoid); } sub verify_prpa { my $prpa = $_[0]; die("not a prpa: '$prpa'\n") unless $prpa =~ /^(.*)\/([^\/]*)$/s; my ($prp, $arch) = ($1, $2); verify_prp($prp); verify_arch($arch); } sub verify_resultview { my $view = $_[0]; die("unknown view parameter: '$view'\n") if $view ne 'summary' && $view ne 'status' && $view ne 'binarylist' && $view ne 'stats' && $view ne 'versrel'; } sub verify_workerid { } sub verify_disableenable { my ($disen) = @_; for my $d (@{$disen->{'disable'} || []}, @{$disen->{'enable'} || []}) { verify_repoid($d->{'repository'}) if exists $d->{'repository'}; verify_arch($d->{'arch'}) if exists $d->{'arch'}; } } sub verify_repo { my ($repo) = @_; verify_repoid($repo->{'name'}); for my $r (@{$repo->{'path'} || []}) { verify_projid($r->{'project'}); verify_repoid($r->{'repository'}); } for my $a (@{$repo->{'arch'} || []}) { verify_arch($a); } for my $rt (@{$repo->{'releasetarget'} || []}) { verify_projid($rt->{'project'}); verify_repoid($rt->{'repository'}); } my %archs = map {$_ => 1} @{$repo->{'arch'} || []}; for my $dod (@{$repo->{'download'} || []}) { verify_dod($dod); die("dod arch $dod->{'arch'} not in repo\n") unless $archs{$dod->{'arch'}}; die("dod arch $dod->{'arch'} listed more than once\n") if $archs{$dod->{'arch'}}++ > 1; } if ($repo->{'base'}) { die("repo contains a 'base' element\n"); } # what is this? 
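# Note: the 'base' element only occurs in expanded repository data (the
# $repo template in BSXML.pm marks it "expanded repo only"), so project
# meta sent by a client must never contain it.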
if ($repo->{'hostsystem'}) { verify_projid($repo->{'hostsystem'}->{'project'}); verify_repoid($repo->{'hostsystem'}->{'repository'}); } } sub verify_proj { my ($proj, $projid) = @_; if (defined($projid)) { die("name does not match data\n") unless $projid eq $proj->{'name'}; } verify_projid($proj->{'name'}); verify_projkind($proj->{'kind'}) if exists $proj->{'kind'}; my %got_pack; for my $pack (@{$proj->{'package'} || []}) { verify_packid($pack->{'name'}); die("package $pack->{'name'} listed more than once\n") if $got_pack{$pack->{'name'}}; $got_pack{$pack->{'name'}} = 1; } my %got; for my $repo (@{$proj->{'repository'} || []}) { verify_repo($repo); die("repository $repo->{'name'} listed more than once\n") if $got{$repo->{'name'}}; $got{$repo->{'name'}} = 1; } for my $link (@{$proj->{'link'} || []}) { verify_projid($link->{'project'}); if (exists($link->{'vrevmode'})) { die("bad vrevmode attribute: $link->{'vrevmode'}\n") unless $link->{'vrevmode'} && ($link->{'vrevmode'} eq 'extend' || $link->{'vrevmode'} eq 'unextend'); } } for my $f ('build', 'publish', 'debuginfo', 'useforbuild', 'lock', 'binarydownload', 'sourceaccess', 'access') { verify_disableenable($proj->{$f}) if $proj->{$f}; } die('project must not have a mountproject\n') if exists $proj->{'mountproject'}; if ($proj->{'maintenance'}) { for my $m (@{$proj->{'maintenance'}->{'maintains'} || []}) { verify_projid($m->{'project'}); } } } sub verify_pack { my ($pack, $packid) = @_; if (defined($packid)) { die("name does not match data\n") unless $packid eq $pack->{'name'}; } verify_projid($pack->{'project'}) if exists $pack->{'project'}; verify_packid($pack->{'name'}); verify_disableenable($pack); # obsolete for my $f ('build', 'publish', 'debuginfo', 'useforbuild', 'lock', 'binarydownload', 'sourceaccess', 'access') { verify_disableenable($pack->{$f}) if $pack->{$f}; } if ($pack->{'devel'}) { verify_projid($pack->{'devel'}->{'project'}) if exists $pack->{'devel'}->{'project'}; verify_packid($pack->{'devel'}->{'package'}) if exists $pack->{'devel'}->{'package'}; } } sub verify_link { my ($l) = @_; verify_projid($l->{'project'}) if exists $l->{'project'}; verify_packid($l->{'package'}) if exists $l->{'package'}; verify_rev($l->{'rev'}) if exists $l->{'rev'}; verify_rev($l->{'baserev'}) if exists $l->{'baserev'}; verify_simple($l->{'vrev'}) if defined $l->{'vrev'}; die("link must contain some target description \n") unless exists $l->{'project'} || exists $l->{'package'} || exists $l->{'rev'}; if (exists $l->{'cicount'}) { if ($l->{'cicount'} ne 'add' && $l->{'cicount'} ne 'copy' && $l->{'cicount'} ne 'local') { die("unknown cicount '$l->{'cicount'}'\n"); } } if (exists $l->{'missingok'}) { die("missingok in link must be '1' or 'true'\n") unless $l->{'missingok'} && ($l->{'missingok'} eq '1' || $l->{'missingok'} eq 'true'); } return unless $l->{'patches'} && $l->{'patches'}->{''}; for my $p (@{$l->{'patches'}->{''}}) { die("more than one type in patch\n") unless keys(%$p) == 1; my $type = (keys %$p)[0]; my $pd = $p->{$type}; if ($type eq 'branch') { die("branch link must have baserev\n") unless $l->{'baserev'}; die("branch link must not have other patches\n") if @{$l->{'patches'}->{''}} != 1; die("branch element contains data\n") if $pd; } elsif ($type eq 'add' || $type eq 'apply' || $type eq 'delete') { verify_filename($pd->{'name'}); } elsif ($type ne 'topadd') { die("unknown patch type '$type'\n"); } } } sub verify_aggregatelist { my ($al) = @_; for my $a (@{$al->{'aggregate'} || []}) { verify_projid($a->{'project'}); if 
(defined($a->{'nosources'})) { die("'nosources' element must be empty\n") if $a->{'nosources'} ne ''; } for my $p (@{$a->{'package'} || []}) { verify_packid($p); } for my $b (@{$a->{'binary'} || []}) { verify_filename($b); } for my $r (@{$a->{'repository'} || []}) { verify_repoid($r->{'source'}) if exists $r->{'source'}; verify_repoid($r->{'target'}) if exists $r->{'target'}; } } } sub verify_channel { my ($channel) = @_; for my $binaries (@{$channel->{'binaries'} || []}) { verify_projid($binaries->{'project'}) if defined $binaries->{'project'}; verify_arch($binaries->{'arch'}) if defined $binaries->{'arch'}; for my $binary (@{$binaries->{'binary'} || []}) { verify_filename($binary->{'name'}); verify_arch($binaries->{'binaryarch'}) if defined $binary->{'binaryarch'}; verify_projid($binary->{'project'}) if defined $binary->{'project'}; verify_packid($binary->{'package'}) if defined $binary->{'package'}; verify_packid($binary->{'arch'}) if defined $binary->{'arch'}; } } for my $rt (@{$channel->{'target'} || []}) { die("bad target specification\n") unless $rt->{'project'} || $rt->{'repository'}; verify_projid($rt->{'project'}) if $rt->{'project'}; verify_repoid($rt->{'repository'}) if $rt->{'repository'}; } } my %req_states = map {$_ => 1} qw {new revoked accepted superseded declined deleted review}; sub verify_request { my ($req) = @_; die("request must not contain a key\n") if exists $req->{'key'}; verify_num($req->{'id'}) if exists $req->{'id'}; die("request must contain a state\n") unless $req->{'state'}; die("request must contain a state name\n") unless $req->{'state'}->{'name'}; die("request must contain a state who\n") unless $req->{'state'}->{'who'}; die("request must contain a state when\n") unless $req->{'state'}->{'when'}; die("request contains unknown state '$req->{'state'}->{'name'}'\n") unless $req_states{$req->{'state'}->{'name'}}; verify_num($req->{'state'}->{'superseded_by'}) if exists $req->{'state'}->{'superseded_by'}; my $actions; if ($req->{'type'}) { die("unknown old-style request type\n") unless $req->{'type'} eq 'submit'; die("old-style request with action element\n") if $req->{'action'}; die("old-style request without submit element\n") unless $req->{'submit'}; my %oldsubmit = (%{$req->{'submit'}}, 'type' => 'submit'); $actions = [ \%oldsubmit ]; } else { die("new-style request with submit element\n") if $req->{'submit'}; $actions = $req->{'action'}; } die("request must contain an action\n") unless $actions && @$actions; my %pkgchange; for my $h (@{$req->{'history'} ||[]}) { die("history element has no 'who' attribute\n") unless $h->{'who'}; die("history element has no 'when' attribute\n") unless $h->{'when'}; die("history element has no 'name' attribute\n") unless $h->{'name'}; } for my $r (@$actions) { die("request action has no type\n") unless $r->{'type'}; if ($r->{'type'} eq 'delete') { die("delete target specification missing\n") unless $r->{'target'}; die("delete target project specification missing\n") unless $r->{'target'}->{'project'}; verify_projid($r->{'target'}->{'project'}); verify_packid($r->{'target'}->{'package'}) if exists $r->{'target'}->{'package'}; die("delete action has a source element\n") if $r->{'source'}; } elsif ($r->{'type'} eq 'maintenance_release') { die("maintenance_release source missing\n") unless $r->{'source'}; die("maintenance_release target missing\n") unless $r->{'target'}; verify_projid($r->{'source'}->{'project'}); verify_projid($r->{'target'}->{'project'}); } elsif ($r->{'type'} eq 'maintenance_incident') {
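# Incident actions are validated like release actions: both a source and a
# target project must be present and well-formed.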
die("maintenance_incident source missing\n") unless $r->{'source'}; die("maintenance_incident target missing\n") unless $r->{'target'}; verify_projid($r->{'source'}->{'project'}); verify_projid($r->{'target'}->{'project'}); } elsif ($r->{'type'} eq 'set_bugowner') { die("set_bugowner target missing\n") unless $r->{'target'}; verify_projid($r->{'target'}->{'project'}); verify_packid($r->{'target'}->{'package'}) if exists $r->{'target'}->{'package'}; } elsif ($r->{'type'} eq 'add_role') { die("add_role target missing\n") unless $r->{'target'}; verify_projid($r->{'target'}->{'project'}); verify_packid($r->{'target'}->{'package'}) if exists $r->{'target'}->{'package'}; } elsif ($r->{'type'} eq 'change_devel') { die("change_devel source missing\n") unless $r->{'source'}; die("change_devel target missing\n") unless $r->{'target'}; die("change_devel source with rev attribute\n") if exists $r->{'source'}->{'rev'}; verify_projid($r->{'source'}->{'project'}); verify_projid($r->{'target'}->{'project'}); verify_packid($r->{'source'}->{'package'}) if exists $r->{'source'}->{'package'}; verify_packid($r->{'target'}->{'package'}); } elsif ($r->{'type'} eq 'submit') { die("submit source missing\n") unless $r->{'source'}; die("submit target missing\n") unless $r->{'target'}; verify_projid($r->{'source'}->{'project'}); verify_projid($r->{'target'}->{'project'}); verify_packid($r->{'source'}->{'package'}); verify_packid($r->{'target'}->{'package'}); verify_rev($r->{'source'}->{'rev'}) if exists $r->{'source'}->{'rev'}; } else { die("unknown request action type '$r->{'type'}'\n"); } if ($r->{'type'} eq 'submit' || ($r->{'type'} eq 'delete' && exists($r->{'target'}->{'package'}))) { die("request contains multiple source changes for package \"$r->{'target'}->{'package'}\"\n") if $pkgchange{"$r->{'target'}->{'project'}/$r->{'target'}->{'package'}"}; $pkgchange{"$r->{'target'}->{'project'}/$r->{'target'}->{'package'}"} = 1; } } } sub verify_nevraquery { my ($q) = @_; verify_arch($q->{'arch'}); die("binary has no name\n") unless defined $q->{'name'}; die("binary has no version\n") unless defined $q->{'version'}; my $f = "$q->{'name'}-$q->{'version'}"; $f .= "-$q->{'release'}" if defined $q->{'release'}; verify_filename($f); verify_simple($f); } sub verify_attribute { my ($attribute) = @_; die("no namespace defined\n") unless defined $attribute->{'namespace'}; die("no name defined\n") unless defined $attribute->{'name'}; verify_simple($attribute->{'namespace'}); verify_simple($attribute->{'name'}); verify_simple($attribute->{'binary'}) if exists $attribute->{'binary'}; } sub verify_attributes { my ($attributes) = @_; for my $attribute (@{$attributes->{'attribute'} || []}) { verify_attribute($attribute); } } sub verify_frozenlinks { my ($frozenlinks) = @_; my %seen; for my $fp (@{$frozenlinks->{'frozenlink'} || []}) { my $xp = exists($fp->{'project'}) ? 
$fp->{'project'} : '/all'; verify_projid($fp->{'project'}) if exists $fp->{'project'}; die("project listed multiple times in frozenlinks\n") if $seen{$xp} || $seen{'/all'}; $seen{$xp} = 1; for my $p (@{$fp->{'package'} || []}) { verify_packid($p->{'name'}); verify_srcmd5($p->{'srcmd5'}); verify_simple($p->{'vrev'}) if defined $p->{'vrev'}; } } } sub verify_dod { my ($dod) = @_; verify_arch($dod->{'arch'}); verify_simple($dod->{'repotype'}); verify_url($dod->{'url'}); my $master = $dod->{'master'}; if ($master) { verify_url($master->{'url'}) if defined $master->{'url'}; verify_simple($master->{'sslfingerprint'}) if defined $master->{'sslfingerprint'}; } } sub verify_multibuild { my ($mb) = @_; die("multibuild cannot have both package and flavor elements\n") if $mb->{'package'} && $mb->{'flavor'}; for my $packid (@{$mb->{'package'} || []}) { verify_packid($packid); die("packid $packid is illegal in multibuild\n") if $packid =~ /:/; } for my $packid (@{$mb->{'flavor'} || []}) { verify_packid($packid); die("flavor $packid is illegal in multibuild\n") if $packid =~ /:/; } } our $verifiers = { 'project' => \&verify_projid, 'package' => \&verify_packid, 'repository' => \&verify_repoid, 'arch' => \&verify_arch, 'job' => \&verify_jobid, 'package_repository' => \&verify_packid_repository, 'filename' => \&verify_filename, 'md5' => \&verify_md5, 'srcmd5' => \&verify_srcmd5, 'rev' => \&verify_rev, 'linkrev' => \&verify_linkrev, 'bool' => \&verify_bool, 'num' => \&verify_num, 'intnum' => \&verify_intnum, 'port' => \&verify_port, 'prp' => \&verify_prp, 'prpa' => \&verify_prpa, 'resultview' => \&verify_resultview, 'jobid' => \&verify_md5, 'workerid' => \&verify_workerid, }; 1; open-build-service-2.9.4/src/backend/BSWatcher.pm000066400000000000000000001036331332555733200215760ustar00rootroot00000000000000# # Copyright (c) 2006, 2007 Michael Schroeder, Novell Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program (see the file COPYING); if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA # ################################################################ # # implementation of state change watchers. Can watch for file # changes, RPC results, and file download data. Handle with care. 
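# The reply*() and rpc() entry points fall back to plain BSServer/BSRPC when
# no job event is active, so the same handler code can run both in the
# normal server and in the event-driven (AJAX) one.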
# package BSWatcher; use BSServer; use BSServerEvents; use BSRPC; use BSEvents; use BSHTTP; use POSIX; use Socket; use Symbol; use XML::Structured; use Data::Dumper; use Digest::MD5 (); use strict; my %hostlookupcache; my %cookiestore; # our session store to keep iChain fast my $tossl; sub import { if (grep {$_ eq ':https'} @_) { require BSSSL; $tossl = \&BSSSL::tossl; BSRPC::import(':https'); } } sub reply { my $jev = $BSServerEvents::gev; return BSServer::reply(@_) unless $jev; deljob($jev); return BSServerEvents::reply(@_); } sub reply_file { my $jev = $BSServerEvents::gev; return BSServer::reply_file(@_) unless $jev; deljob($jev); return BSServerEvents::reply_file(@_); } sub reply_cpio { my $jev = $BSServerEvents::gev; return BSServer::reply_cpio(@_) unless $jev; deljob($jev); return BSServerEvents::reply_cpio(@_); } ########################################################################### # # job handling # # # we add the following elements to the connection event: # - redohandler # - args # sub redo_request { my ($jev) = @_; return if $jev->{'deljob_done'}; # job is already deleted local $BSServerEvents::gev = $jev; local $BSServer::request = $jev->{'request'}; my $conf = $jev->{'conf'}; eval { my @r = $jev->{'redohandler'}->(@{$jev->{'args'} || []}); if ($conf->{'stdreply'}) { $conf->{'stdreply'}->(@r); } elsif (@r && (@r != 1 || defined($r[0]))) { BSServerEvents::reply(@r); } }; if ($@) { print $@; BSServerEvents::reply_error($conf, $@); } } sub deljob { my ($jev) = @_; #print "deljob #$jev->{'id'}\n"; $jev->{'deljob_done'} = 1; filewatcher_deljob($jev); serialize_deljob($jev); rpc_deljob($jev); } ########################################################################### # # file watching # # state my %filewatchers; my %filewatchers_s; my %filewatchers_periodic; my $filewatchers_ev; my $filewatchers_ev_active; our $filewatchers_interval = 1; sub filewatcher_handler { # print "filewatcher_handler\n"; BSEvents::add($filewatchers_ev, $filewatchers_interval); for my $file (sort keys %filewatchers) { next unless $filewatchers{$file}; my $periodic = $filewatchers_periodic{$file}; my @s = stat($file); my $s = @s ? "$s[9]/$s[7]/$s[1]" : "-/-/-"; if ($s eq $filewatchers_s{$file}) { if ($periodic && $periodic->[1] + $periodic->[0] < time()) { print "periodic call for file $file!\n"; } else { next; } } else { print "file $file changed!\n"; } $filewatchers_s{$file} = $s; $periodic->[1] = time() if $periodic; my @jobs = @{$filewatchers{$file}}; for my $jev (@jobs) { redo_request($jev); } } } sub addfilewatcher { my ($file, $periodic) = @_; my $jev = $BSServerEvents::gev; return unless $jev; $jev->{'closehandler'} = \&deljob; if ($filewatchers{$file}) { #print "addfilewatcher to already watched $file\n"; if ($periodic) { $filewatchers_periodic{$file} ||= [ $periodic, time() ]; $filewatchers_periodic{$file}->[0] = $periodic if $filewatchers_periodic{$file}->[0] > $periodic; } push @{$filewatchers{$file}}, $jev unless grep {$_ eq $jev} @{$filewatchers{$file}}; return; } #print "addfilewatcher $file\n"; if (!$filewatchers_ev) { $filewatchers_ev = BSEvents::new('timeout', \&filewatcher_handler); } if (!$filewatchers_ev_active) { BSEvents::add($filewatchers_ev, $filewatchers_interval); $filewatchers_ev_active = 1; } my @s = stat($file); my $s = @s ? 
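# Note: the recorded watcher state is the string "mtime/size/inode" (or
# "-/-/-" for a missing file); a change of this triple, or an elapsed
# periodic interval, re-runs all requests waiting on the file.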
"$s[9]/$s[7]/$s[1]" : "-/-/-"; push @{$filewatchers{$file}}, $jev; $filewatchers_s{$file} = $s; $filewatchers_periodic{$file} = [ $periodic, time() ] if $periodic; } sub filewatcher_deljob { my ($jev) = @_; for my $file (keys %filewatchers) { next unless grep {$_ == $jev} @{$filewatchers{$file}}; @{$filewatchers{$file}} = grep {$_ != $jev} @{$filewatchers{$file}}; if (!@{$filewatchers{$file}}) { delete $filewatchers{$file}; delete $filewatchers_s{$file}; delete $filewatchers_periodic{$file}; } } if (!%filewatchers && $filewatchers_ev_active) { BSEvents::rem($filewatchers_ev); $filewatchers_ev_active = 0; } } ########################################################################### # # serialization # # state my %serializations; my %serializations_waiting; sub serialize { my ($file) = @_; my $jev = $BSServerEvents::gev; die("only supported in AJAX servers\n") unless $jev; $jev->{'closehandler'} = \&deljob; if ($serializations{$file}) { if ($serializations{$file} != $jev) { #print "adding to serialization queue of $file\n"; push @{$serializations_waiting{$file}}, $jev unless grep {$_ eq $jev} @{$serializations_waiting{$file}}; return undef; } } else { $serializations{$file} = $jev; } return {'file' => $file}; } sub serialize_end { my ($ser) = @_; return unless $ser; my $file = $ser->{'file'}; #print "serialize_end for $file\n"; delete $serializations{$file}; my @waiting = @{$serializations_waiting{$file} || []}; delete $serializations_waiting{$file}; while (@waiting) { my $jev = shift @waiting; #print "waking up $jev\n"; redo_request($jev); if ($serializations{$file}) { push @{$serializations_waiting{$file}}, @waiting; last; } } } sub serialize_deljob { my ($jev) = @_; for my $file (keys %serializations) { @{$serializations_waiting{$file}} = grep {$_ != $jev} @{$serializations_waiting{$file}}; delete $serializations_waiting{$file} unless @{$serializations_waiting{$file} || []}; serialize_end({'file' => $file}) if $jev == $serializations{$file}; } } ########################################################################### # # rpc implementation # # state my %rpcs; sub rpc_error { my ($ev, $err) = @_; $ev->{'rpcstate'} = 'error'; #print "rpc_error: $err\n"; my $uri = $ev->{'rpcuri'}; delete $rpcs{$uri}; close $ev->{'fd'} if $ev->{'fd'}; delete $ev->{'fd'}; my @jobs = @{$ev->{'joblist'} || []}; for my $jev (@jobs) { $jev->{'rpcdone'} = $jev->{'rpcoriguri'} || $uri; $jev->{'rpcerror'} = $err; redo_request($jev); delete $jev->{'rpcdone'}; delete $jev->{'rpcerror'}; delete $jev->{'rpcoriguri'}; } } sub rpc_result { my ($ev, $res) = @_; $ev->{'rpcstate'} = 'done'; my $uri = $ev->{'rpcuri'}; #print "got result for $uri\n"; delete $rpcs{$uri}; close $ev->{'fd'} if $ev->{'fd'}; delete $ev->{'fd'}; my @jobs = @{$ev->{'joblist'} || []}; for my $jev (@jobs) { $jev->{'rpcdone'} = $jev->{'rpcoriguri'} || $uri; $jev->{'rpcresult'} = $res; redo_request($jev); delete $jev->{'rpcdone'}; delete $jev->{'rpcresult'}; delete $jev->{'rpcoriguri'}; } } sub rpc_redirect { my ($ev, $location) = @_; unless ($location) { rpc_error($ev, "remote error: got status 302 but no location header"); return; } my $param = $ev->{'param'}; if (!$param->{'maxredirects'}) { unless (exists $param->{'maxredirects'}) { rpc_error($ev, "no redirects allowed"); } else { rpc_error($ev, "max number of redirects exhausted"); } return; } delete $rpcs{$ev->{'rpcuri'}}; close $ev->{'fd'} if $ev->{'fd'}; delete $ev->{'fd'}; #print "redirecting to: $location\n"; my @jobs = @{$ev->{'joblist'} || []}; for my $jev (@jobs) { $jev->{'rpcoriguri'} 
||= $ev->{'rpcuri'}; local $BSServerEvents::gev = $jev; rpc({%$param, 'uri' => $location, 'maxredirects' => $param->{'maxredirects'} - 1}); } } ########################################################################### # # rpc_recv_chunked_stream_handler # # do chunk decoding and forward to next handler # (should probably do this in BSServerEvents::stream_read_handler) # sub rpc_recv_chunked_stream_handler { my ($ev) = @_; my $rev = $ev->{'readev'}; #print "rpc_recv_chunked_stream_handler\n"; $ev->{'paused'} = 1; # always need more bytes! nextchunk: $ev->{'replbuf'} =~ s/^\r?\n//s; if ($ev->{'replbuf'} !~ /\r?\n/s) { return unless $rev->{'eof'}; BSServerEvents::stream_close($rev, $ev, undef, "rpc_recv_chunked_stream_handler: premature EOF"); return; } if ($ev->{'replbuf'} !~ /^([0-9a-fA-F]+)/) { BSServerEvents::stream_close($rev, $ev, undef, "rpc_recv_chunked_stream_handler: bad chunked data"); return; } my $cl = hex($1); # print "rpc_recv_chunked_stream_handler: chunk len $cl\n"; if ($cl < 0 || $cl >= 1000000) { BSServerEvents::stream_close($rev, $ev, undef, "rpc_recv_chunked_stream_handler: illegal chunk size: $cl"); return; } if ($cl == 0) { # wait till trailer is complete if ($ev->{'replbuf'} !~ /\n\r?\n/s) { return unless $rev->{'eof'}; BSServerEvents::stream_close($rev, $ev, undef, "rpc_recv_chunked_stream_handler: premature EOF"); return; } #print "rpc_recv_chunked_stream_handler: chunk EOF\n"; my $trailer = $ev->{'replbuf'}; $trailer =~ s/^(.*?\r?\n)/\r\n/s; # delete chunk header $trailer =~ s/\n\r?\n.*//s; # delete stuff after trailer $trailer =~ s/\r$//s; $trailer = substr($trailer, 2) if $trailer ne ''; $trailer .= "\r\n" if $trailer ne ''; $ev->{'chunktrailer'} = $trailer; BSServerEvents::stream_close($rev, $ev); return; } # split the chunk into 8192 sized subchunks if too big my $lcl = $cl > 8192 ? 8192 : $cl; $ev->{'replbuf'} =~ /^(.*?\r?\n)/s; if (length($1) + $lcl > length($ev->{'replbuf'})) { return unless $rev->{'eof'}; BSServerEvents::stream_close($rev, $ev, undef, "rpc_recv_chunked_stream_handler: premature EOF"); return; } my $data = substr($ev->{'replbuf'}, length($1), $lcl); my $nextoff = length($1) + $lcl; # handler returns false: cannot consume now, try later return unless $ev->{'datahandler'}->($ev, $rev, $data); $ev->{'replbuf'} = substr($ev->{'replbuf'}, $nextoff); if ($lcl < $cl) { # had to split the chunk $ev->{'replbuf'} = sprintf("%X\r\n", $cl - $lcl) . $ev->{'replbuf'}; } goto nextchunk if length($ev->{'replbuf'}); if ($rev->{'eof'}) { #print "rpc_recv_chunked_stream_handler: EOF\n"; BSServerEvents::stream_close($rev, $ev); } } sub rpc_recv_unchunked_stream_handler { my ($ev) = @_; my $rev = $ev->{'readev'}; #print "rpc_recv_unchunked_stream_handler\n"; my $cl = $rev->{'contentlength'}; $ev->{'paused'} = 1; # always need more bytes! 
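# Note: with a content-length header the remaining length is counted down
# in $rev->{'contentlength'}; without one, eof from the peer ends the body.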
my $data = $ev->{'replbuf'}; if (length($data) && (!defined($cl) || $cl)) { my $oldeof = $rev->{'eof'}; if (defined($cl)) { $data = substr($data, 0, $cl) if $cl < length($data); $cl -= length($data); $rev->{'eof'} = 1 if !$cl; } return unless $ev->{'datahandler'}->($ev, $rev, $data); delete $rev->{'eof'} unless $oldeof; $rev->{'contentlength'} = $cl; $ev->{'replbuf'} = ''; } if ($rev->{'eof'} && $cl) { BSServerEvents::stream_close($rev, $ev, undef, "rpc_recv_unchunked_stream_handler: premature EOF"); return; } if ($rev->{'eof'} || (defined($cl) && !$cl)) { #print "rpc_recv_unchunked_stream_handler: EOF\n"; BSServerEvents::stream_close($rev, $ev); } } ########################################################################### # # forward receiver methods # sub rpc_adddata { my ($jev, $data) = @_; $data = sprintf("%X\r\n", length($data)).$data."\r\n"; $jev->{'replbuf'} .= $data; if ($jev->{'paused'}) { delete $jev->{'paused'}; BSEvents::add($jev); } } sub rpc_recv_forward_close_handler { my ($ev, $err) = @_; #print "rpc_recv_forward_close_handler\n"; my $rev = $ev->{'readev'}; my $trailer = $ev->{'chunktrailer'} || ''; my @jobs = @{$rev->{'joblist'} || []}; for my $jev (@jobs) { $jev->{'replbuf'} .= "0\r\n$trailer\r\n"; if ($jev->{'paused'}) { delete $jev->{'paused'}; BSEvents::add($jev); } $jev->{'readev'} = {'eof' => 1, 'rpcuri' => $rev->{'rpcuri'}}; } # the stream rpc is finished! #print "stream rpc $rev->{'rpcuri'} is finished!\n"; delete $rpcs{$rev->{'rpcuri'}}; } sub rpc_recv_forward_data_handler { my ($ev, $rev, $data) = @_; my @stay; my @leave; my @jobs = @{$rev->{'joblist'} || []}; for my $jev (@jobs) { if (length($jev->{'replbuf'}) >= 16384) { push @stay, $jev; } else { push @leave, $jev; } } if ($rev->{'eof'}) { # must not hold back data at eof @leave = @jobs; @stay = (); } if (@stay && !@leave) { # too full! wait till there is more room #print "stay=".@stay.", leave=".@leave.", blocking\n"; $rev->{'paused'} = 1; return 0; } # advance our uri my $newuri = $rev->{'rpcuri'}; my $newpos = length($data); if ($newuri =~ /start=(\d+)/) { $newpos += $1; $newuri =~ s/start=\d+/start=$newpos/; } elsif ($newuri =~ /\?/) { $newuri .= '&' unless $newuri =~ /\?$/; $newuri .= "start=$newpos"; } else { $newuri .= "?start=$newpos"; } # mark it as in progress so that only other calls in progress can join $newuri .= "&inprogress" unless $newuri =~ /\&inprogress$/; #print "stay=".@stay.", leave=".@leave.", newpos=$newpos\n"; if (@leave && $rpcs{$newuri}) { my $nev = $rpcs{$newuri}; print "joining ".@leave." jobs with $newuri!\n"; for my $jev (@leave) { push @{$nev->{'joblist'}}, $jev unless grep {$_ == $jev} @{$nev->{'joblist'}}; $jev->{'readev'} = $nev; } $rev->{'joblist'} = [ @stay ]; for my $jev (@leave) { rpc_adddata($jev, $data); } @leave = (); } if (!@leave) { if (!@stay) { BSServerEvents::stream_close($rev, $ev); return 0; } # too full! wait till there is more room $rev->{'paused'} = 1; return 0; } my $olduri = $rev->{'rpcuri'}; $rpcs{$newuri} = $rev; delete $rpcs{$olduri}; $rev->{'rpcuri'} = $newuri; if (@stay) { # worst case: split of $rev->{'joblist'} = [ @leave ]; print "splitting ".@stay." 
jobs from $newuri!\n"; # put old output event on hold for my $jev (@stay) { delete $jev->{'readev'}; if (!$jev->{'paused'}) { BSEvents::rem($jev); } delete $jev->{'paused'}; } # this is scary $olduri =~ s/\&inprogress$//; eval { local $BSServerEvents::gev = $stay[0]; my $param = { 'uri' => $olduri, 'verbatim_uri' => 1, 'joinable' => 1, }; $param->{'receiver'} = $rev->{'param'}->{'receiver'} if $rev->{'param'}->{'receiver'}; rpc($param); die("could not restart rpc\n") unless $rpcs{$olduri}; }; if ($@ || !$rpcs{$olduri}) { # terminate all old rpcs my $err = $@ || "internal error\n"; $err =~ s/\n$//s; warn("$err\n"); for my $jev (@stay) { if ($jev->{'streaming'}) { # can't do much here, sorry local $BSServerEvents::gev = $jev; BSServerEvents::reply_error($jev->{'conf'}, $err); next; } $jev->{'rpcdone'} = $olduri; $jev->{'rpcerror'} = $err; redo_request($jev); delete $jev->{'rpcdone'}; delete $jev->{'rpcerror'}; } } else { my $nev = $rpcs{$olduri}; for my $jev (@stay) { push @{$nev->{'joblist'}}, $jev unless grep {$_ == $jev} @{$nev->{'joblist'}}; } } } for my $jev (@leave) { rpc_adddata($jev, $data); } return 1; } sub rpc_recv_forward_setup { my ($jev, $ev, @args) = @_; if (!$jev->{'streaming'}) { local $BSServerEvents::gev = $jev; BSServerEvents::reply(undef, @args); BSEvents::rem($jev); $jev->{'streaming'} = 1; delete $jev->{'timeouthandler'}; } $jev->{'handler'} = \&BSServerEvents::stream_write_handler; $jev->{'readev'} = $ev; if (length($jev->{'replbuf'})) { delete $jev->{'paused'}; BSEvents::add($jev, 0); } else { $jev->{'paused'} = 1; } } sub rpc_recv_forward { my ($ev, $chunked, $data, @args) = @_; push @args, 'Transfer-Encoding: chunked'; unshift @args, 'Content-Type: application/octet-stream' unless grep {/^content-type:/i} @args; $ev->{'rpcstate'} = 'streaming'; $ev->{'replyargs'} = \@args; # # setup output streams for all jobs # my @jobs = @{$ev->{'joblist'} || []}; for my $jev (@jobs) { rpc_recv_forward_setup($jev, $ev, @args); } # # setup input stream from rpc client # $ev->{'streaming'} = 1; my $wev = BSEvents::new('always'); # print "new rpc input stream $ev $wev\n"; $wev->{'replbuf'} = $data; $wev->{'readev'} = $ev; $ev->{'writeev'} = $wev; if ($chunked) { $wev->{'handler'} = \&rpc_recv_chunked_stream_handler; } else { $wev->{'handler'} = \&rpc_recv_unchunked_stream_handler; } $wev->{'datahandler'} = \&rpc_recv_forward_data_handler; $wev->{'closehandler'} = \&rpc_recv_forward_close_handler; $ev->{'handler'} = \&BSServerEvents::stream_read_handler; BSEvents::add($ev); BSEvents::add($wev); # do this last } ########################################################################### # # file receiver methods # sub rpc_recv_file_data_handler { my ($ev, $rev, $data) = @_; if ((syswrite($ev->{'fd'}, $data) || 0) != length($data)) { BSServerEvents::stream_close($rev, $ev, undef, "rpc_recv_file_data_handler: write error"); return 0; } $ev->{'ctx'}->add($data) if $ev->{'ctx'}; return 1; } sub rpc_recv_file_close_handler { my ($ev, $err) = @_; #print "rpc_recv_file_close_handler\n"; my $rev = $ev->{'readev'}; my $res = {}; if ($ev->{'fd'}) { my @s = stat($ev->{'fd'}); $res->{'size'} = $s[7] if @s; close $ev->{'fd'}; if ($ev->{'ctx'}) { $res->{'md5'} = $ev->{'ctx'}->hexdigest; delete $ev->{'ctx'}; } } delete $ev->{'fd'}; my $trailer = $ev->{'chunktrailer'} || ''; if ($err) { rpc_error($rev, $err); } else { rpc_result($rev, $res); } #print "file rpc $rev->{'rpcuri'} is finished!\n"; delete $rpcs{$rev->{'rpcuri'}}; } sub rpc_recv_file { my ($ev, $chunked, $data, $filename, $withmd5) = @_; 
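# Stream the response body into $filename; when $withmd5 is set, an MD5
# digest is accumulated on the fly and returned in the rpc result together
# with the final file size.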
#print "rpc_recv_file $filename\n"; my $fd = gensym; if (!open($fd, '>', $filename)) { rpc_error($ev, "$filename: $!"); return; } my $wev = BSEvents::new('always'); $wev->{'replbuf'} = $data; $wev->{'readev'} = $ev; $ev->{'writeev'} = $wev; $wev->{'fd'} = $fd; $wev->{'ctx'} = Digest::MD5->new if $withmd5; if ($chunked) { $wev->{'handler'} = \&rpc_recv_chunked_stream_handler; } else { $wev->{'handler'} = \&rpc_recv_unchunked_stream_handler; } $wev->{'datahandler'} = \&rpc_recv_file_data_handler; $wev->{'closehandler'} = \&rpc_recv_file_close_handler; $ev->{'handler'} = \&BSServerEvents::stream_read_handler; BSEvents::add($ev); BSEvents::add($wev); # do this last } ########################################################################### # # string receiver methods # sub rpc_recv_string_data_handler { my ($ev, $rev, $data) = @_; $ev->{'string'} .= $data; return 1; } sub rpc_recv_string_close_handler { my ($ev, $err) = @_; #print "rpc_recv_string_close_handler\n"; my $rev = $ev->{'readev'}; my $trailer = $ev->{'chunktrailer'} || ''; if ($err) { rpc_error($rev, $err); } else { rpc_result($rev, $ev->{'string'}); } #print "string rpc $rev->{'rpcuri'} is finished!\n"; delete $rpcs{$rev->{'rpcuri'}}; } sub rpc_recv_string { my ($ev, $chunked, $data) = @_; my $wev = BSEvents::new('always'); $wev->{'replbuf'} = $data; $wev->{'readev'} = $ev; $ev->{'writeev'} = $wev; if ($chunked) { $wev->{'handler'} = \&rpc_recv_chunked_stream_handler; } else { $wev->{'handler'} = \&rpc_recv_unchunked_stream_handler; } $wev->{'string'} = ''; $wev->{'datahandler'} = \&rpc_recv_string_data_handler; $wev->{'closehandler'} = \&rpc_recv_string_close_handler; $ev->{'handler'} = \&BSServerEvents::stream_read_handler; BSEvents::add($ev); BSEvents::add($wev); # do this last } ########################################################################### # # null receiver methods # sub rpc_recv_null { my ($ev, $chunked, $data) = @_; my $wev = BSEvents::new('always'); $wev->{'replbuf'} = $data; $wev->{'readev'} = $ev; $ev->{'writeev'} = $wev; if ($chunked) { $wev->{'handler'} = \&rpc_recv_chunked_stream_handler; } else { $wev->{'handler'} = \&rpc_recv_unchunked_stream_handler; } $wev->{'string'} = ''; $wev->{'datahandler'} = sub {1}; $wev->{'closehandler'} = \&rpc_recv_string_close_handler; $ev->{'handler'} = \&BSServerEvents::stream_read_handler; BSEvents::add($ev); BSEvents::add($wev); # do this last } ########################################################################### # # rpc methods # sub rpc_tossl { my ($ev) = @_; # print "switching to https\n"; fcntl($ev->{'fd'}, F_SETFL, 0); # in danger honor... eval { ($ev->{'param'}->{'https'} || $tossl)->($ev->{'fd'}, $ev->{'param'}->{'ssl_keyfile'}, $ev->{'param'}->{'ssl_certfile'}, 1); if ($ev->{'param'}->{'sslpeerfingerprint'}) { die("bad sslpeerfingerprint '$ev->{'param'}->{'sslpeerfingerprint'}'\n") unless $ev->{'param'}->{'sslpeerfingerprint'} =~ /^(.*?):(.*)$/s; my $pfp = tied($ev->{'fd'})->peerfingerprint($1); die("peer fingerprint does not match: $2 != $pfp\n") if $2 ne $pfp; } }; fcntl($ev->{'fd'}, F_SETFL, O_NONBLOCK); if ($@) { my $err = $@; $err =~ s/\n$//s; rpc_error($ev, $err); return undef; } return 1; } sub rpc_recv_handler { my ($ev) = @_; my $cs = 1024; # needs to be bigger than the ssl package size... $cs = 16384 if $ev->{'param'} && $ev->{'param'}->{'proto'} && $ev->{'param'}->{'proto'} eq 'https'; my $r = sysread($ev->{'fd'}, $ev->{'recvbuf'}, $cs, length($ev->{'recvbuf'})); if (!defined($r)) { if ($! == POSIX::EINTR || $! 
== POSIX::EWOULDBLOCK) { BSEvents::add($ev); return; } rpc_error($ev, "read error from $ev->{'rpcdest'}: $!"); return; } my $ans; $ev->{'rpceof'} = 1 if !$r; $ans = $ev->{'recvbuf'}; if ($ev->{'_need'}) { #shortcut for need more bytes... if (!$ev->{'rpceof'} && length($ans) < $ev->{'_need'}) { #printf "... %d/%d\n", length($ans), $ev->{'_need'}; BSEvents::add($ev); return; } delete $ev->{'_need'}; } if ($ans !~ /\n\r?\n/s) { if ($ev->{'rpceof'}) { rpc_error($ev, "EOF from $ev->{'rpcdest'}"); return; } BSEvents::add($ev); return; } if ($ans !~ s/^HTTP\/\d+?\.\d+?\s+?(\d+[^\r\n]*)/Status: $1/s) { rpc_error($ev, "bad answer from $ev->{'rpcdest'}"); return; } my $status = $1; $ans =~ /^(.*?)\n\r?\n(.*)$/s; my $headers = $1; $ans = $2; my %headers; BSHTTP::gethead(\%headers, $headers); if ($status =~ /^302[^\d]/) { rpc_redirect($ev, $headers{'location'}); return; } elsif ($status !~ /^200[^\d]/) { if ($status =~ /^(\d+) +(.*?)$/) { rpc_error($ev, "$1 remote error: $2"); } else { rpc_error($ev, "remote error: $status"); } return; } if ($ev->{'proxytunnel'}) { # CONNECT method worked. we now have a https connection return unless rpc_tossl($ev); $ev->{'param'}->{'proto'} = 'https'; $ev->{'sendbuf'} = $ev->{'proxytunnel'}; delete $ev->{'proxytunnel'}; delete $ev->{'recvbuf'}; $ev->{'rpcstate'} = 'sending'; $ev->{'type'} = 'write'; $ev->{'handler'} = \&rpc_send_handler; BSEvents::add($ev, 0); return; } my $param = $ev->{'param'}; BSRPC::updatecookies(\%cookiestore, $param->{'uri'}, $headers{'set-cookie'}) if $headers{'set-cookie'}; my $cl = $headers{'content-length'}; my $chunked = $headers{'transfer-encoding'} && lc($headers{'transfer-encoding'}) eq 'chunked' ? 1 : 0; if ($param->{'receiver'}) { #rpc_error($ev, "answer is neither chunked nor does it contain a content length\n") unless $chunked || defined($cl); $ev->{'contentlength'} = $cl if !$chunked; if ($param->{'receiver'} == \&BSHTTP::file_receiver) { rpc_recv_file($ev, $chunked, $ans, $param->{'filename'}, $param->{'withmd5'}); } elsif ($param->{'receiver'} == \&BSHTTP::cpio_receiver) { if (defined $param->{'tmpcpiofile'}) { rpc_recv_file($ev, $chunked, $ans, $param->{'tmpcpiofile'}); } else { rpc_error($ev, "need tmpcpiofile for cpio_receiver\n"); } } elsif ($param->{'receiver'} == \&BSServer::reply_receiver) { my $ct = $headers{'content-type'} || 'application/octet-stream'; my @args; push @args, "Status: $headers{'status'}" if $headers{'status'}; push @args, "Content-Type: $ct"; rpc_recv_forward($ev, $chunked, $ans, @args); } elsif ($param->{'receiver'} == \&BSHTTP::null_receiver) { rpc_recv_null($ev, $chunked, $ans); } else { rpc_error($ev, "unsupported receiver\n"); } return; } if ($chunked) { rpc_recv_string($ev, $chunked, $ans); return; } if ($ev->{'rpceof'} && $cl && length($ans) < $cl) { rpc_error($ev, "EOF from $ev->{'rpcdest'}"); return; } if (!$ev->{'rpceof'} && (!defined($cl) || length($ans) < $cl)) { $ev->{'_need'} = length($headers) + $cl if defined $cl; BSEvents::add($ev); return; } $ans = substr($ans, 0, $cl) if defined $cl; rpc_result($ev, $ans); } sub rpc_send_handler { my ($ev) = @_; my $l = length($ev->{'sendbuf'}); return unless $l; $l = 4096 if $l > 4096; my $r = syswrite($ev->{'fd'}, $ev->{'sendbuf'}, $l); if (!defined($r)) { if ($! == POSIX::EINTR || $! 
== POSIX::EWOULDBLOCK) { BSEvents::add($ev); return; } rpc_error($ev, "write error to $ev->{'rpcdest'}: $!"); return; } if ($r != length($ev->{'sendbuf'})) { $ev->{'sendbuf'} = substr($ev->{'sendbuf'}, $r) if $r; BSEvents::add($ev); return; } # print "done sending to $ev->{'rpcdest'}, now receiving\n"; delete $ev->{'sendbuf'}; $ev->{'recvbuf'} = ''; $ev->{'type'} = 'read'; $ev->{'rpcstate'} = 'receiving'; $ev->{'handler'} = \&rpc_recv_handler; BSEvents::add($ev); } sub rpc_connect_timeout { my ($ev) = @_; rpc_error($ev, "connect to $ev->{'rpcdest'}: timeout"); } sub rpc_connect_handler { my ($ev) = @_; my $err; #print "rpc_connect_handler\n"; $err = getsockopt($ev->{'fd'}, SOL_SOCKET, SO_ERROR); if (!defined($err)) { $err = "getsockopt: $!"; } else { $err = unpack("I", $err); if ($err == 0 || $err == POSIX::EISCONN) { $err = undef; } else { $! = $err; $err = "connect to $ev->{'rpcdest'}: $!"; } } if ($err) { rpc_error($ev, $err); return; } #print "rpc_connect_handler: connected!\n"; if ($ev->{'param'} && $ev->{'param'}->{'proto'} && $ev->{'param'}->{'proto'} eq 'https') { return unless rpc_tossl($ev); } $ev->{'rpcstate'} = 'sending'; delete $ev->{'timeouthandler'}; $ev->{'handler'} = \&rpc_send_handler; BSEvents::add($ev, 0); } my $tcpproto = getprotobyname('tcp'); # # This implements a subset of the BSRPC::rpc functionality with # the async ServerEvents mechanism. # # not supported are: # * data # * sender # * timeout (its timeouts are fixed) # * generic receivers, supported are only: # - BSHTTP::file_receiver # - BSHTTP::cpio_receiver (with tmpcpiofile set) # - BSHTTP::null_receiver # - BSServer::reply_receiver # # the following extra functionality is available: # * joinable - try to join with already running requests # * background - run the request detached, no result will be reported # sub rpc { my ($uri, $xmlargs, @args) = @_; my $jev = $BSServerEvents::gev; return BSRPC::rpc($uri, $xmlargs, @args) unless $jev; my @xhdrs; my $param = {'uri' => $uri}; if (ref($uri) eq 'HASH') { $param = $uri; $uri = $param->{'uri'}; @xhdrs = @{$param->{'headers'} || []}; } if ($param->{'background'}) { my $ev = BSEvents::new('never'); for (keys %$jev) { $ev->{$_} = $jev->{$_} unless $_ eq 'id' || $_ eq 'handler' || $_ eq 'fd' || $_ eq 'rpcerror'; } $ev->{'redohandler'} = sub { die("$ev->{'rpcerror'}\n") if $ev->{'rpcerror'}; return undef }; local $BSServerEvents::gev = $ev; rpc({%$param, 'background' => 0}, $xmlargs, @args); return; } $uri = BSRPC::createuri($param, @args); my $rpcuri = $uri; $rpcuri .= ";$jev->{'id'}" unless $param->{'joinable'}; if ($jev->{'rpcdone'} && $rpcuri eq $jev->{'rpcdone'}) { die("$jev->{'rpcerror'}\n") if exists $jev->{'rpcerror'}; my $ans = $jev->{'rpcresult'}; if ($xmlargs) { die("answer is not xml\n") if $ans !~ /<.*?>/s; return XMLin($xmlargs, $ans); } if ($param->{'receiver'} == \&BSHTTP::cpio_receiver && defined($param->{'tmpcpiofile'})) { local *CPIOFILE; open(CPIOFILE, '<', $param->{'tmpcpiofile'}) || die("open tmpcpiofile: $!\n"); unlink($param->{'tmpcpiofile'}); $ans = BSHTTP::cpio_receiver(BSHTTP::fd2req(\*CPIOFILE), $param); close CPIOFILE; } return $ans; } $jev->{'closehandler'} = \&deljob; if ($rpcs{$rpcuri}) { my $ev = $rpcs{$rpcuri}; print "rpc $rpcuri already in progress, ".@{$ev->{'joblist'} || []}." entries\n"; return undef if grep {$_ == $jev} @{$ev->{'joblist'}}; if ($ev->{'rpcstate'} eq 'streaming') { # this seems wrong, cannot join a living stream! # (we're lucky to change the url when streaming...)
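# Joining works here because a streaming rpc advances its uri with the
# forwarded offset (see rpc_recv_forward_data_handler), so a matching
# request attaches exactly at the current position; rpc_recv_forward_setup
# below primes its output stream.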
print "joining stream\n"; rpc_recv_forward_setup($jev, $ev, @{$ev->{'replyargs'} || []}); } push @{$ev->{'joblist'}}, $jev; return undef; } my $proxy = $param->{'proxy'}; my ($proto, $host, $port, $req, $proxytunnel) = BSRPC::createreq($param, $uri, $proxy, \%cookiestore, @xhdrs); if ($proto eq 'https' || $proxytunnel) { die("https not supported\n") unless $tossl || $param->{'https'}; } $param->{'proto'} = $proto; if (!$hostlookupcache{$host}) { # should do this async, but that's hard to do in perl my $hostaddr = inet_aton($host); die("unknown host '$host'\n") unless $hostaddr; $hostlookupcache{$host} = $hostaddr; } my $fd = gensym; socket($fd, PF_INET, SOCK_STREAM, $tcpproto) || die("socket: $!\n"); fcntl($fd, F_SETFL,O_NONBLOCK); setsockopt($fd, SOL_SOCKET, SO_KEEPALIVE, pack("l",1)); my $ev = BSEvents::new('write', \&rpc_send_handler); if ($proxytunnel) { $ev->{'proxytunnel'} = $req; $req = $proxytunnel; } $ev->{'fd'} = $fd; $ev->{'sendbuf'} = $req; $ev->{'rpcdest'} = "$host:$port"; $ev->{'rpcuri'} = $rpcuri; $ev->{'rpcstate'} = 'connecting'; $ev->{'param'} = $param; $ev->{'starttime'} = time(); push @{$ev->{'joblist'}}, $jev; $rpcs{$rpcuri} = $ev; #print "new rpc $uri\n"; if (!connect($fd, sockaddr_in($port, $hostlookupcache{$host}))) { if ($! == POSIX::EINPROGRESS) { $ev->{'handler'} = \&rpc_connect_handler; $ev->{'timeouthandler'} = \&rpc_connect_timeout; BSEvents::add($ev, 60); # 60s connect timeout return undef; } close $ev->{'fd'}; delete $ev->{'fd'}; delete $rpcs{$rpcuri}; die("connect to $host:$port: $!\n"); } $ev->{'rpcstate'} = 'sending'; BSEvents::add($ev); return undef; } sub rpc_deljob { my ($jev) = @_; for my $uri (keys %rpcs) { my $ev = $rpcs{$uri}; next unless $ev; next unless grep {$_ == $jev} @{$ev->{'joblist'}}; @{$ev->{'joblist'}} = grep {$_ != $jev} @{$ev->{'joblist'}}; if (!@{$ev->{'joblist'}}) { print "deljob: rpc $uri no longer needed\n"; BSServerEvents::stream_close($ev, $ev->{'writeev'}); delete $rpcs{$uri}; } } } ########################################################################### # # status query and setup functions # sub jobstatus { my ($ev) = @_; my $j = {'ev' => $ev->{'id'}}; $j->{'fd'} = fileno(*{$ev->{'fd'}}) if $ev->{'fd'}; my $req = $ev->{'request'}; if ($req) { $j->{'state'} = $req->{'state'} if $req->{'state'}; $j->{'starttime'} = $req->{'starttime'} if $req->{'starttime'}; $j->{'peer'} = $req->{'headers'}->{'x-peer'} if $req->{'headers'} && $req->{'headers'}->{'x-peer'}; $j->{'request'} = substr("$req->{'action'} $req->{'path'}?$req->{'query'}", 0, 1024) if $req->{'action'}; } return $j; } sub getstatus { my $ret = {}; my $jev = $BSServerEvents::gev; $ret->{'ev'} = $jev->{'id'}; my $req = $jev->{'request'}; $ret->{'starttime'} = $req->{'server'}->{'starttime'}; for my $filename (sort keys %filewatchers) { my $fw = {'filename' => $filename, 'state' => $filewatchers_s{$filename}}; for my $jev (@{$filewatchers{$filename}}) { push @{$fw->{'job'}}, jobstatus($jev); } push @{$ret->{'watcher'}}, $fw; } for my $uri (sort keys %rpcs) { my $ev = $rpcs{$uri}; my $r = {'uri' => substr($uri, 0, 1024), 'ev' => $ev->{'id'}}; $r->{'fd'} = fileno(*{$ev->{'fd'}}) if $ev->{'fd'}; $r->{'state'} = $ev->{'rpcstate'} if $ev->{'rpcstate'}; $r->{'starttime'} = $ev->{'starttime'} if $ev->{'starttime'}; for my $jev (@{$ev->{'joblist'} || []}) { push @{$r->{'job'}}, jobstatus($jev); } push @{$ret->{'rpc'}}, $r; } for my $filename (sort keys %serializations_waiting) { my $sz = {'filename' => $filename}; for my $jev (@{$serializations_waiting{$filename}}) { push 
@{$sz->{'job'}}, jobstatus($jev); } push @{$ret->{'serialize'}}, $sz; } for my $jev (BSServerEvents::getrequestevents($req->{'server'})) { push @{$ret->{'joblist'}->{'job'}}, jobstatus($jev); } return $ret; } # put our call data into the job event so that we can redo the request sub dispatches_call { my ($f, @args) = @_; my $jev = $BSServerEvents::gev; $jev->{'redohandler'} = $f; $jev->{'args'} = [ @args ]; return $f->(@args); } sub background { return BSServerEvents::background(@_); } 1; open-build-service-2.9.4/src/backend/BSXML.pm000066400000000000000000001006161332555733200206370ustar00rootroot00000000000000# # Copyright (c) 2006, 2007 Michael Schroeder, Novell Inc. # Copyright (c) 2008 Adrian Schroeter, Novell Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program (see the file COPYING); if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA # ################################################################ # # XML templates for the BuildService. See XML/Structured. # package BSXML; use strict; # # an explained example entry of this file # #our $pack = [ creates space # 'package' => # 'name', # 'project', # [], before the [] all strings become attributes to # 'title', from here on all strings become children like # 'description', # [[ 'person' => creates children, the [[ ]] syntax allows any number of them including zero # 'role', again role and userid attributes, both are required # 'userid', # ]], this block describes a construct # @flags, copies in the block of possible flag definitions # [ $repo ], refers to the repository construct and allows again any number of them (0-X) #]; closes the child with our $download = [ 'download' => 'arch', 'repotype', 'url', [], 'archfilter', [ 'master' => 'url', 'sslfingerprint', ], 'pubkey', ]; # same as download, but with project/repository our $doddata = [ 'doddata' => 'project', 'repository', @$download[1 .. $#$download], ]; our $repo = [ 'repository' => 'name', 'rebuild', 'block', 'linkedbuild', [ $download ], [[ 'releasetarget' => 'project', 'repository', 'trigger', ]], [[ 'path' => 'project', 'repository', ]], [ 'hostsystem' => 'project', 'repository', ], [ 'base' => # expanded repo only! 
'project', 'repository', ], [ 'arch' ], 'status', ]; our @disableenable = ( [[ 'disable' => 'arch', 'repository', ]], [[ 'enable' => 'arch', 'repository', ]], ); our @flags = ( [ 'lock' => @disableenable ], [ 'build' => @disableenable ], [ 'publish' => @disableenable ], [ 'debuginfo' => @disableenable ], [ 'useforbuild' => @disableenable ], [ 'binarydownload' => @disableenable ], [ 'sourceaccess' => @disableenable ], [ 'access' => @disableenable ], ); our @roles = ( [[ 'person' => 'userid', 'role', ]], [[ 'group' => 'groupid', 'role', ]], ); our $maintenance = [ 'maintenance' => [[ 'maintains' => 'project', ]], ]; our $proj = [ 'project' => 'name', 'kind', [], 'title', 'description', 'url', [[ 'link' => 'project', 'vrevmode', ]], 'remoteurl', 'remoteproject', 'mountproject', [ 'devel' => 'project', ], @roles, $maintenance, @flags, [ $repo ], ]; our $pack = [ 'package' => 'name', 'project', [], 'title', 'description', 'releasename', [ 'devel' => 'project', 'package', ], @roles, @disableenable, @flags, 'url', 'bcntsynctag', ]; our $packinfo = [ 'info' => 'repository', 'name', 'file', 'error', [ 'dep' ], [ 'prereq' ], [ 'buildtimeservice' ], [ 'imagetype' ], # kiwi [ 'imagearch' ], # kiwi 'nodbgpkgs', # kiwi 'nosrcpkgs', # kiwi [[ 'path' => 'project', 'repository', 'priority', ]], [[ 'containerpath' => 'project', 'repository', 'priority', ]], [[ 'extrasource' => 'project', 'package', 'srcmd5', 'file', ]], ]; our $linked = [ 'linked' => 'project', 'package', ]; our $aggregatelist = [ 'aggregatelist' => [[ 'aggregate' => 'project', [], 'nosources', [ 'package' ], [ 'binary' ], [[ 'repository' => 'target', 'source', ]], ]], ]; # former: kernel - 123 - 1 123: incident # now: sec-123 - 1 -1 our $patchinfo = [ 'patchinfo' => 'incident', # optional, gets replaced on with updateinfoid on release 'version', # optional, defaults to 1 [], [ 'package' ],# optional [ 'binary' ], # optional [[ 'releasetarget' => # optional 'project', 'repository', ]], [[ 'issue' => 'tracker', 'id', 'documented', [], '_content', ]], 'category', 'rating', 'name', # optional, old patchinfo name which will become part of incident string 'summary', 'description', 'message', # optional pop-up message 'swampid', # obsolete 'packager', 'stopped', 'zypp_restart_needed', 'reboot_needed', 'relogin_needed', ]; our $channel = [ 'channel' => [ 'product' => 'project', 'name', ], [[ 'target' => 'project', 'repository', 'id_template', # optional 'requires_issue', # optional [], 'disabled', # optional ]], [[ 'binaries' => 'project', 'repository', 'arch', [[ 'binary' => 'name', 'binaryarch', 'project', 'repository', 'package', 'arch', 'supportstatus', ]], ]], ]; our $projpack = [ 'projpack' => 'repoid', [[ 'project' => 'name', 'kind', [], 'title', 'description', 'config', 'patternmd5', [[ 'link' => 'project', 'vrevmode', ]], 'remoteurl', 'remoteproject', @flags, @roles, [ $repo ], [[ 'package' => 'name', 'releasename', 'rev', 'srcmd5', # commit id 'versrel', 'verifymd5', # tree id 'originproject', 'revtime', 'constraintsmd5', # md5sum of constraints file in srcmd5 [ $linked ], 'error', [ $packinfo ], $aggregatelist, $patchinfo, 'channelmd5', @flags, 'bcntsynctag', 'hasbuildenv', ]], 'missingpackages', ]], [[ 'remotemap' => 'project', 'kind', 'root', 'remoteurl', 'remoteproject', 'remoteroot', 'partition', 'proto', # project data not included [], 'config', @flags, @roles, [ $repo ], 'error', ]], [[ 'channeldata' => 'md5', $channel, ]], ]; our $linkinfo = [ 'linkinfo' => # information from link 'project', 'package', 'rev', 'srcmd5', 'baserev', 
'missingok', # expanded / unexpanded srcmd5 'xsrcmd5', 'lsrcmd5', 'error', 'lastworking', [ $linked ], ]; our $serviceinfo = [ 'serviceinfo' => # information in case a source service is part of package 'code', # can be "running", "failed", "succeeded" 'xsrcmd5', 'lsrcmd5', [], 'error', # contains error message (with new lines) in case of error ]; our $dir = [ 'directory' => 'name', 'count', # obsolete, the API sets this for some requests 'rev', 'vrev', 'srcmd5', 'tproject', # obsolete, use linkinfo 'tpackage', # obsolete, use linkinfo 'trev', # obsolete, use linkinfo 'tsrcmd5', # obsolete, use linkinfo 'lsrcmd5', # obsolete, use linkinfo 'error', 'xsrcmd5', # obsolete, use linkinfo $linkinfo, $serviceinfo, [[ 'entry' => 'name', 'md5', 'hash', 'size', 'mtime', 'error', 'id', 'originproject', # for package listing 'originpackage', # for package listing ]] ]; our $fileinfo = [ 'fileinfo' => 'filename', [], 'name', 'epoch', 'version', 'release', 'arch', 'source', 'summary', 'description', 'size', 'mtime', [ 'provides' ], [ 'requires' ], [ 'prerequires' ], [ 'conflicts' ], [ 'obsoletes' ], [ 'recommends' ], [ 'supplements' ], [ 'suggests' ], [ 'enhances' ], [[ 'provides_ext' => 'dep', [[ 'requiredby' => 'name', 'epoch', 'version', 'release', 'arch', 'project', 'repository', ]], ]], [[ 'requires_ext' => 'dep', [[ 'providedby' => 'name', 'epoch', 'version', 'release', 'arch', 'project', 'repository', ]], ]], ]; our $sourceinfo = [ 'sourceinfo' => 'package', 'rev', 'vrev', 'srcmd5', 'lsrcmd5', 'verifymd5', [], 'filename', 'error', 'originproject', 'originpackage', [ $linked ], 'revtime', 'changesmd5', 'name', 'version', 'release', [ 'subpacks' ], [ 'deps' ], [ 'prereqs' ], [ 'exclarch' ], [ 'badarch' ], ]; our $sourceinfolist = [ 'sourceinfolist' => [ $sourceinfo ], ]; our $buildinfo = [ 'buildinfo' => 'project', 'repository', 'package', 'srcserver', 'reposerver', 'downloadurl', [], 'job', 'arch', 'hostarch', # for cross build 'error', 'srcmd5', 'verifymd5', 'rev', 'disturl', 'reason', # just for the explain string of a build reason 'needed', # number of blocked 'revtime', # time of last commit 'readytime', 'specfile', # obsolete 'file', 'versrel', 'bcnt', 'release', 'debuginfo', 'constraintsmd5', [ 'prjconfconstraint' ], [ 'subpack' ], [ 'imagetype' ], # kiwi 'nodbgpkgs', # kiwi 'nosrcpkgs', # kiwi 'genmetaalgo', # internal [ 'dep' ], [[ 'bdep' => 'name', 'preinstall', 'vminstall', 'cbpreinstall', 'cbinstall', 'runscripts', 'notmeta', 'noinstall', 'installonly', 'epoch', 'version', 'release', 'arch', 'hdrmd5', 'project', 'repository', 'repoarch', 'binary', # filename 'package', 'srcmd5', ]], [ 'pdep' ], # obsolete [[ 'path' => 'project', 'repository', 'server', # internal 'url', # external ]], [[ 'syspath' => 'project', 'repository', 'server', # internal 'url', # external ]], [[ 'containerpath' => 'project', 'repository', 'server', # internal 'url', # external ]], 'containerannotation', # temporary hack 'expanddebug', 'followupfile', # for two-stage builds 'masterdispatched', # dispatched through a master dispatcher 'nounchanged', # do not check for "unchanged" builds [ 'preinstallimage' => 'project', 'repository', 'repoarch', 'package', 'filename', 'hdrmd5', [ 'binary' ], 'url', # external ], ]; our $jobstatus = [ 'jobstatus' => 'code', 'result', # succeeded, failed or unchanged 'details', [], 'starttime', 'endtime', 'lastduration', # optional 'workerid', 'hostarch', 'uri', # uri to reach worker 'arch', # our architecture 'job', # our jobname 'jobid', # md5 of job info file 'attempt', # number 
of attempts to build the job ]; our $buildreason = [ 'reason' => [], 'explain', # Readable reason 'time', # unix time from start build 'oldsource', # last build source md5 sum, if a source change was the reason [[ 'packagechange' => # list changed files which are used for building 'change', # kind of change (content/meta change, additional file or removed file) 'key', # file name ]], ]; our $buildstatus = [ 'status' => 'package', 'code', 'status', # obsolete, now code 'error', # obsolete, now details 'versrel', # for withversrel result call [], 'details', 'workerid', # last build data 'hostarch', 'readytime', 'starttime', 'endtime', 'job', # internal, job when building 'uri', # obsolete 'arch', # obsolete ]; our $builddepinfo = [ 'builddepinfo' => [[ 'package' => 'name', [], 'source', [ 'pkgdep' ], [ 'subpkg' ], ]], [[ 'cycle' => [ 'package' ], ]], ]; our $event = [ 'event' => 'type', [], 'project', 'repository', 'arch', 'package', 'job', 'worker', 'due', 'srcmd5', # for type=servicedispatch 'rev', # for type=servicedispatch 'linksrcmd5', # for type=servicedispatch 'projectservicesmd5', # for type=servicedispatch 'oldsrcmd5', # for type=servicedispatch 'details', # for type=dispatchdetails ]; our $events = [ 'events' => 'next', 'sync', [ $event ], ]; our $revision = [ 'revision' => 'rev', 'vrev', [], 'srcmd5', 'version', 'time', 'user', 'comment', 'requestid', ]; our $revision_acceptinfo = [ @$revision, [ 'acceptinfo' => 'rev', 'srcmd5', 'osrcmd5', 'xsrcmd5', 'oxsrcmd5', ], ]; our $revisionlist = [ 'revisionlist' => [ $revision ] ]; our $buildhist = [ 'buildhistory' => [[ 'entry' => 'rev', 'srcmd5', 'versrel', 'bcnt', 'time', 'duration', ]], ]; our $binaryversionlist = [ 'binaryversionlist' => [[ 'binary' => 'name', # should be filename instead. sigh. 
'sizek', 'error', 'hdrmd5', 'metamd5', 'leadsigmd5', ]], ]; our $packagebinaryversionlist = [ 'packagebinaryversionlist' => [[ 'binaryversionlist' => 'package', 'code', [[ 'binary' => 'name', 'sizek', 'error', 'hdrmd5', 'metamd5', 'leadsigmd5', ]], ]], ]; our $worker = [ 'worker' => 'hostarch', 'ip', 'port', 'registerserver', 'workerid', [ 'buildarch' ], [ 'hostlabel' ], 'sandbox', [ 'linux' => [], 'version', 'flavor' ], [ 'hardware' => [ 'cpu' => [ 'flag' ], ], 'processors', 'jobs', 'nativeonly', # don't allow usage via the helper script 'memory', # in MBytes 'swap', # in MBytes 'disk', # in MBytes ], 'owner', 'tellnojob', 'job', # set when worker is busy 'arch', # set when worker is busy 'jobid', # set when worker is busy 'reposerver', # set when worker is busy and job was masterdispatched ]; our $packstatuslist = [ 'packstatuslist' => 'project', 'repository', 'arch', [[ 'packstatus' => 'name', 'status', 'error', ]], [[ 'packstatussummary' => 'status', 'count', ]], ]; our $linkpatch = [ '' => [ 'add' => 'name', 'type', 'after', 'popt', 'dir', ], [ 'apply' => 'name', ], [ 'delete' => 'name', ], 'branch', 'topadd', ]; our $link = [ 'link' => 'project', 'package', 'rev', 'vrev', 'cicount', 'baserev', 'missingok', [ 'patches' => [ $linkpatch ], ], ]; our $workerstatus = [ 'workerstatus' => 'clients', [[ 'idle' => 'uri', 'workerid', 'hostarch', ]], [[ 'building' => 'uri', 'workerid', 'hostarch', 'project', 'repository', 'package', 'arch', 'starttime', ]], [[ 'down' => 'workerid', 'hostarch', ]], [[ 'dead' => 'workerid', 'hostarch', ]], [[ 'away' => 'workerid', 'hostarch', ]], [[ 'waiting' => 'arch', 'jobs', ]], [[ 'blocked' => 'arch', 'jobs', ]], [[ 'buildavg' => 'arch', 'buildavg', ]], [[ 'partition' => 'name', [[ 'daemon' => 'type', # scheduler/dispatcher/signer/publisher/warden 'arch', # scheduler only 'state', 'starttime', [ 'queue' => # scheduler only 'high', 'med', 'low', 'next', ], ]], ]], ]; our $workerstate = [ 'workerstate' => 'state', 'nextstate', # for exit/restart 'jobid', 'pid', # pid of building worker process ]; our $jobhistlay = [ 'package', 'rev', 'srcmd5', 'versrel', 'bcnt', 'readytime', 'starttime', 'endtime', 'code', 'uri', 'workerid', 'hostarch', 'reason', 'verifymd5', ]; our $jobhist = [ 'jobhist' => @$jobhistlay, ]; our $jobhistlist = [ 'jobhistlist' => [ $jobhist ], ]; our $ajaxjob = [ 'job' => 'ev', 'fd', 'starttime', 'peer', 'request', 'state', ]; our $ajaxstatus = [ 'ajaxstatus' => 'starttime', 'ev', [[ 'watcher' => 'filename', 'state', [ $ajaxjob ], ]], [[ 'rpc' => 'uri', 'state', 'ev', 'fd', 'starttime', [ $ajaxjob ], ]], [[ 'serialize' => 'filename', [ $ajaxjob ], ]], [ 'joblist' => [ $ajaxjob ], ], ]; our $serverstatus = [ 'serverstatus' => 'starttime', [[ 'job' => 'id', 'starttime', 'pid', 'peer', 'request', 'group', ]], ]; ##################### new api stuff our $binarylist = [ 'binarylist' => 'package', [[ 'binary' => 'filename', 'md5', 'size', 'mtime', ]], ]; our $summary = [ 'summary' => [[ 'statuscount' => 'code', 'count', ]], ]; our $schedulerstats = [ 'stats' => 'lastchecked', 'checktime', 'lastfinished', 'lastpublished', ]; our $result = [ 'result' => 'project', 'repository', 'arch', 'code', # pra state, can be "unknown", "broken", "scheduling", "blocked", "building", "finished", "publishing", "published" or "unpublished" 'state', # old name of 'code', to be removed 'details', 'dirty', # marked for re-scheduling if element exists, state might not be correct anymore [ $buildstatus ], [ $binarylist ], $summary, $schedulerstats, ]; our $resultlist = [ 
'resultlist' => 'state', 'retryafter', [ $result ], ]; our $opstatus = [ 'status' => 'code', 'origin', [], 'summary', 'details', [[ 'data' => 'name', '_content', ]], [ 'exception' => 'type', 'message', [ 'backtrace' => [ 'line' ], ], ], ]; my $rpm_entry = [ 'rpm:entry' => 'kind', 'name', 'epoch', 'ver', 'rel', 'flags', ]; our $pattern = [ 'pattern' => 'xmlns', # obsolete, moved to patterns 'xmlns:rpm', # obsolete, moved to patterns [], 'name', 'arch', [[ 'version' => 'epoch', 'ver', 'rel', ]], [[ 'summary' => 'lang', '_content', ]], [[ 'description' => 'lang', '_content', ]], 'default', 'uservisible', [[ 'category' => 'lang', '_content', ]], 'icon', 'script', [ 'rpm:provides' => [ $rpm_entry ], ], [ 'rpm:conflicts' => [ $rpm_entry ], ], [ 'rpm:obsoletes' => [ $rpm_entry ], ], [ 'rpm:requires' => [ $rpm_entry ], ], [ 'rpm:suggests' => [ $rpm_entry ], ], [ 'rpm:enhances' => [ $rpm_entry ], ], [ 'rpm:supplements' => [ $rpm_entry ], ], [ 'rpm:recommends' => [ $rpm_entry ], ], ]; our $patterns = [ 'patterns' => 'count', 'xmlns', 'xmlns:rpm', [], [ $pattern ], ]; our $ymp = [ 'metapackage' => 'xmlns:os', 'xmlns', [], [[ 'group' => 'recommended', 'distversion', [], 'name', 'summary', 'description', 'remainSubscribed', [ 'repositories' => [[ 'repository' => 'recommended', 'format', 'producturi', [], 'name', 'summary', 'description', 'url', ]], ], [ 'software' => [[ 'item' => 'type', 'recommended', 'architectures', 'action', [], 'name', 'summary', 'description', ]], ], ]], ]; our $binary_id = [ 'binary' => 'name', 'project', 'package', 'repository', 'version', 'release', 'arch', 'filename', 'filepath', 'baseproject', 'type', 'downloadurl', ]; our $pattern_id = [ 'pattern' => 'name', 'project', 'repository', 'arch', 'filename', 'filepath', 'baseproject', 'type', 'downloadurl', ]; our $request = [ 'request' => 'id', 'creator', 'type', # obsolete, still here to handle OBS pre-1.5 requests 'key', # cache key, not really in request 'retryafter', # timed out waiting for a key change [[ 'action' => 'type', # currently submit, delete, change_devel, add_role, maintenance_release, maintenance_incident, set_bugowner [ 'source' => 'project', 'package', 'rev', # belongs to package attribute 'repository', # for merge request ], [ 'target' => 'project', 'package', 'releaseproject', # for incident request 'repository', # for release and delete request ], [ 'person' => 'name', 'role', ], [ 'group' => 'name', 'role', ], [ 'options' => [], 'sourceupdate', # can be cleanup, update or noupdate 'updatelink', # can be true or false 'makeoriginolder', # can be true or false ], [ 'acceptinfo' => 'rev', 'srcmd5', 'osrcmd5', 'xsrcmd5', 'oxsrcmd5', ], ]], [ 'submit' => # this is old style, obsolete by request, but still supported [ 'source' => 'project', 'package', 'rev', ], [ 'target' => 'project', 'package', ], ], 'priority', [ 'state' => 'name', 'who', 'when', 'superseded_by', # set when state.name is "superseded" [], 'comment', ], [[ 'review' => 'state', # review state (new/accepted or declined) 'by_user', # this user shall review it 'by_group', # a member of this group shall review it # either user or group must be used, never both 'by_project', # any maintainer of this project can review it 'by_package', # any maintainer of this package can review it (requires by_project) 'who', # this user has reviewed it 'when', [], 'comment', [[ 'history' => 'who', 'when', [], 'comment', 'description', ]], ]], [[ 'history' => 'name', 'who', 'when', 'superseded_by', [], 'comment', 'description', ]], 'accept_at', 'title', 'description', ];
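# A minimal usage sketch for these templates (illustrative only: the
# variable $xmlstring below is a placeholder for any request XML document,
# it is not defined in this file):
#
#   use BSXML;
#   use XML::Structured;
#
#   # parse an XML string into a hash according to the $request template;
#   # XMLin dies if the data does not match the template
#   my $req = XMLin($BSXML::request, $xmlstring);
#   print "request $req->{'id'} is in state $req->{'state'}->{'name'}\n";
#
#   # convert the hash back to XML; attribute/element order is taken
#   # from the template
#   my $xml = XMLout($BSXML::request, $req);
#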
our $repositorystate = [ 'repositorystate' => [ 'blocked' ], ]; our $collection = [ 'collection' => 'matches', 'limited', [ $request ], [ $proj ], [ $pack ], [ $binary_id ], [ $pattern_id ], [ 'value' ], ]; our $quota = [ 'quota' => 'packages', [[ 'project' => 'name', 'packages', ]], ]; our $schedulerinfo = [ 'schedulerinfo' => 'arch', 'started', 'time', [], 'slept', 'booting', 'notready', [ 'queue' => 'high', 'med', 'low', 'next', ], 'projects', 'repositories', [[ 'worst' => 'project', 'repository', 'packages', 'time', ]], 'buildavg', 'avg', 'variance', ]; our $person = [ 'person' => 'login', 'email', 'realname', [ 'owner' => 'userid', ], 'state', [ 'globalrole' ], [ 'watchlist' => [[ 'project' => 'name', ]], ], ]; our $comps = [ 'comps' => [[ 'group' => [], 'id', [[ 'description' => 'xml:lang', '_content', ]], [[ 'name' => 'xml:lang', '_content', ]], [ 'packagelist' => [[ 'packagereq' => 'type', '_content', ]], ], ]], ]; our $dispatchprios = [ 'dispatchprios' => [[ 'prio' => 'project', 'repository', 'arch', 'adjust', ]], ]; # list of used services for a package or project our $services = [ 'services' => [[ 'service' => 'name', 'mode', # "localonly" is skipping this service on server side, "trylocal" is trying to merge changes directly in local files, "disabled" is just skipping it [[ 'param' => 'name', '_content' ]], ]], ]; # service type definitions our $servicetype = [ 'service' => 'name', 'hidden', # "true" to suppress it from service list in GUIs [], 'summary', 'description', [[ 'parameter' => 'name', [], 'description', 'required', # don't run without this parameter 'allowmultiple', # This parameter can be used multiple times [ 'allowedvalue' ], # list of possible values ]], ]; our $servicelist = [ 'servicelist' => [ $servicetype ], ]; our $updateinfoitem = [ 'update' => 'from', 'status', 'type', 'version', [], 'id', 'title', 'severity', 'release', [ 'issued' => 'date', ], [ 'updated' => 'date', ], 'reboot_suggested', [ 'references' => [[ 'reference' => 'href', 'id', 'title', 'type', ]], ], 'description', 'message', #optional popup message [ 'pkglist', [[ 'collection' => 'short', [], 'name', [[ 'package' => 'name', 'epoch', 'version', 'release', 'arch', 'src', 'supportstatus', # extension [], 'filename', [ 'sum' => # obsolete? 
'type', '_content', ], 'reboot_suggested', 'restart_suggested', 'relogin_suggested', ]], ]], ], 'patchinforef', # extension, "project/package" ]; our $updateinfo = [ 'updates' => 'xmlns', [ $updateinfoitem ], ]; our $deltapackage = [ 'newpackage' => 'name', 'epoch', 'version', 'release', 'arch', [[ 'delta' => 'oldepoch', 'oldversion', 'oldrelease', [], 'filename', 'sequence', 'size', [ 'checksum' => 'type', '_content', ], ]], ]; our $deltainfo = [ 'deltainfo' => [ $deltapackage ], ]; our $prestodelta = [ 'prestodelta' => [ $deltapackage ], ]; our $sourcediff = [ 'sourcediff' => 'key', [ 'old' => 'project', 'package', 'rev', 'srcmd5', ], [ 'new' => 'project', 'package', 'rev', 'srcmd5', ], [ 'files' => [[ 'file' => 'state', # added, deleted, changed [ 'old' => 'name', 'md5', 'size', 'mtime', ], [ 'new' => 'name', 'md5', 'size', 'mtime', ], [ 'diff' => 'binary', 'lines', 'shown', '_content', ], ]], ], [ 'issues' => [[ 'issue' => 'state', 'tracker', 'name', 'label', 'url', ]] ], ]; our $configuration = [ 'configuration' => [], 'title', #webui only 'description', #webui only 'name', #obsname 'anonymous', 'registration', 'default_access_disabled', 'default_tracker', 'allow_user_to_create_home_project', 'multiaction_notify_support', 'disallow_group_creation', 'change_password', 'cleanup_after_days', 'hide_private_options', 'gravatar', 'enforce_project_keys', 'download_on_demand', 'download_url', 'obs_url', 'api_url', 'ymp_url', 'errbit_url', 'bugzilla_url', 'http_proxy', 'no_proxy', 'admin_email', 'theme', 'cleanup_empty_projects', 'disable_publish_for_branches', [ 'schedulers' => [ 'arch' ], ], 'unlisted_projects_filter', 'unlisted_projects_filter_description' ]; our $issue_trackers = [ 'issue-trackers' => [[ 'issue-tracker' => [], 'name', 'description', 'kind', 'label', 'enable-fetch', 'regex', 'user', # 'password', commented out on purpose, should not reach backend 'show-url', 'url', 'issues-updated', ]], ]; our $appdataitem = [ 'application' => [ 'id' => 'type', '_content' ], 'pkgname', 'name', 'summary', [ 'icon' => 'type', [], 'name', [[ 'filecontent' => 'file', '_content' ]], ], [ 'appcategories' => [ 'appcategory' ] ], [ 'mimetypes' => [ 'mimetype' ] ], [ 'keywords' => [ 'keyword' ] ], [ 'url' => 'type', '_content' ] ]; our $appdata = [ 'applications' => 'version', [ $appdataitem ] ]; our $attribute = [ 'attribute' => 'namespace', 'name', 'binary', [ 'value' ], [[ 'issue' => 'name', 'tracker' ]], ]; our $attributes = [ 'attributes' => [ $attribute ], ]; our $size = [ 'size' => 'unit', [], '_content', ]; our $time = [ 'time' => 'unit', [], '_content', ]; # define constraints for build jobs in packages or projects. our @constraint = ( [[ 'hostlabel' => 'exclude', # true or false. default is false. [], '_content' # workers might get labels defined by admin, for example for benchmarking. ]], [ 'sandbox' => 'exclude', # true or false. default is false. 
[], '_content' # xen/kvm/zvm/lxc/emulator/chroot/secure ], [ 'linux' => [ 'version' => [], 'max' , 'min' , ], 'flavor', ], [ 'hardware' => [ 'cpu' => [ 'flag' ], ], 'processors', 'jobs', [ 'disk' => $size ], [ 'memory' => $size ], [ 'physicalmemory' => $size ], ] ); our $constraints = [ 'constraints' => @constraint, [[ 'overwrite' => [ 'conditions' => [ 'arch' ], [ 'package' ], ], @constraint, ]] ]; our $buildstatistics = [ 'buildstatistics' => [ 'disk' => [ 'usage' => [ 'size' => 'unit', [], '_content', ], 'io_requests', 'io_sectors', ], ], [ 'memory' => [ 'usage' => $size ], ], [ 'times' => [ 'total' => $time ], [ 'preinstall' => $time ], [ 'install' => $time ], [ 'main' => $time ], [ 'download' => $time ], ], [ 'download' => [], $size, 'binaries', 'cachehits', 'preinstallimage', ], ]; our $notifications = [ 'notifications' => 'next', 'sync', 'limit_reached', [[ 'notification' => 'type', 'time', [[ 'data' => 'key', '_content', ]], ]], ]; our $frozenlinks = [ 'frozenlinks' => [[ 'frozenlink' => 'project', [[ 'package' => 'name', 'srcmd5', 'vrev', ]], ]], ]; our $report = [ 'report' => [[ 'binary' => 'name', 'epoch', 'version', 'release', 'binaryarch', 'buildtime', 'disturl', 'supportstatus', 'project', 'repository', 'package', 'arch', # schedulerarch '_content', ]], ]; our $publishedpath = [ 'publishedpath' => 'project', 'repository', 'medium', [], 'path', 'url', ]; our $multibuild = [ 'multibuild' => [ 'package' ], # obsolete [ 'flavor' ], ]; our $pubkeyinfo = [ 'pubkey' => 'keyid', 'algo', 'keysize', 'expires', 'fingerprint', '_content', ]; our $keyinfo = [ 'keyinfo' => 'project', $pubkeyinfo, 'sslcert', ]; our $binannotation = [ 'annotation' => [[ 'repo' => 'url', 'project', 'repository', 'priority', ]], 'disturl', 'buildtime', 'package', # only in build job annotation 'epoch', # only in build job annotation 'version', # only in build job annotation 'release', # only in build job annotation 'binaryarch', # only in build job annotation 'hdrmd5', # only in build job annotation ]; our $availablebinaries = [ 'availablebinaries' => [[ 'packages' => [ 'arch' ], [ 'name' ], ]], [[ 'products' => [ 'arch' ], [ 'name' ], ]], [[ 'patterns' => [ 'arch' ], [ 'name' ], ]], ]; our $clouduploadjob = [ 'clouduploadjob' => 'name', [], 'state', # created, receiving, scheduled, uploading, succeeded, waiting, failed 'details', # error messages, upload result string 'progress', # percentage completed 'try', # retry count 'created', # when was this job created 'user', # who did this 'target', # where to upload to 'project', 'repository', 'package', 'arch', 'filename', # what to upload 'size', 'pid', # internal ]; our $clouduploadjoblist = [ 'clouduploadjoblist' => [ $clouduploadjob ], ]; 1; open-build-service-2.9.4/src/backend/BSXPath.pm000066400000000000000000000354731332555733200212330ustar00rootroot00000000000000# # Copyright (c) 2006, 2007 Michael Schroeder, Novell Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program (see the file COPYING); if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA # ################################################################ # # Simple XPath query functions to search parsed XML data. # package BSXPath; use Data::Dumper; use strict; sub boolop_eq { return $_[0] eq $_[1]; } sub boolop_not { return !$_[0]; } sub boolop { my ($cwd, $v1, $v2, $op, $negpol) = @_; #print Dumper($cwd).Dumper($v1).Dumper($v2); my @v1 = @$v1; my @v2 = @$v2; my @cwd = @$cwd; my @vr; while (@v1) { my $e1 = shift @v1; my $e2 = shift @v2; $e1 = '' if ref($e1) eq 'ARRAY' && !@$e1; $e2 = '' if ref($e2) eq 'ARRAY' && !@$e2; my $r = shift @cwd; if ($r->[4]) { push @vr, $r->[4]->boolop($e1, $e2, $op, $negpol); next; } if (ref($e1) ne '' && ref($e1) ne 'HASH' && ref($e1) ne 'ARRAY') { $e1 = $e1->value(); } if (ref($e2) ne '' && ref($e2) ne 'HASH' && ref($e2) ne 'ARRAY') { $e2 = $e2->value(); } if (ref($e1) eq 'HASH') { if (!exists($e1->{'_content'})) { push @vr, ''; next; } $e1 = $e1->{'_content'}; } if (ref($e2) eq 'HASH') { if (!exists($e2->{'_content'})) { push @vr, ''; next; } $e2 = $e2->{'_content'}; } if (!ref($e1) && !ref($e2)) { push @vr, $op->($e1, $e2) ? 'true' : ''; next; } if (!ref($e1)) { push @vr, (grep {$op->($e1, $_)} @$e2) ? 'true' : ''; next; } if (!ref($e2)) { push @vr, (grep {$op->($_, $e2)} @$e1) ? 'true' : ''; next; } my $x = ''; for my $e (@$e1) { next unless grep {$op->($e, $_)} @$e2; $x = 'true'; last; } push @vr, $x; } # print "multop ret: ".Dumper(\@vr); return \@vr; } sub op { my ($cwd, $v1, $v2, $op) = @_; my @v1 = @$v1; my @v2 = @{$v2 || []}; my @cwd = @$cwd; for my $vv (@v1) { my $vv2; $vv2 = shift @v2 if defined $v2; my $r = shift @cwd; if ($r->[4]) { $vv = $r->[4]->op($vv, $vv2, $op); next; } if (ref($vv) ne '' && ref($vv) ne 'HASH' && ref($vv) ne 'ARRAY') { $vv = $vv->value(); } if (ref($vv) eq 'HASH') { $vv = $vv->{'_content'}; } elsif (ref($vv) ne '') { $vv = ''; } if ($vv2) { if (ref($vv2) ne '' && ref($vv2) ne 'HASH' && ref($vv2) ne 'ARRAY') { $vv2 = $vv2->value(); } if (ref($vv2) eq 'HASH') { $vv2 = $vv2->{'_content'}; } elsif (ref($vv2) ne '') { $vv2 = ''; } } $vv = $op->($vv, $vv2); } return \@v1; } sub predicate { my ($cwd, $expr, $v) = @_; my @ncwd; my @r = @$cwd; for my $vv (@$v) { my $rr = shift @r; # flatten vv if (ref($vv) eq 'HASH' || ref($vv) eq '') { push @ncwd, [$rr->[0], $vv, 1, 1]; } elsif (ref($vv) eq 'ARRAY') { my $i = 1; my $s = @$vv; push @ncwd, [$rr->[0], $_, $i++, $s] for @$vv; } else { push @ncwd, [$rr->[0], $vv, 1, 1, $vv]; #my $vv2 = $vv->value(); #my $i = 1; #my $s = @$vv2; #push @ncwd, [$rr->[0], $_, $i++, $s] for @$vv2; } } my ($v2, $nexpr) = expr(\@ncwd, $expr, 0); die("internal error!\n") if @$v2 != @ncwd; #print Dumper($v2); for my $vv (@$v) { if ($ncwd[0]->[4]) { my $r = shift @ncwd; $vv = $r->[4]->predicate(shift @$v2, $expr); next; } my @nvv; while (1) { my $r = shift @ncwd; my $b = shift @$v2; $b = @$b ? 'true' : '' if ref($b) eq 'ARRAY'; if ($b =~ /^-?\d+$/) { push @nvv, $r->[1] if $r->[2] == $b; } else { push @nvv, $r->[1] if $b; } last if $r->[2] == $r->[3]; } $vv = \@nvv; } return ($v, $nexpr); } sub pathstep { my ($cwd, $v, $c) = @_; for my $vv (@$v) { if (ref($vv) eq 'HASH') { if ($c eq '*') { $vv = [ map {ref($vv->{$_}) eq 'ARRAY' ? @{$vv->{$_}} : $vv->{$_}} grep {$_ ne '_content'} sort keys %$vv ]; } else { $vv = exists($vv->{$c}) ? 
$vv->{$c} : []; } } elsif (ref($vv) eq 'ARRAY') { if ($c eq '*') { my @nvv; for my $d (@$vv) { next unless ref($d) eq 'HASH'; push @nvv, map {ref($d->{$_}) eq 'ARRAY' ? @{$d->{$_}} : $d->{$_}} grep {$_ ne '_content'} sort keys %$d; } $vv = \@nvv; } else { $vv = [ map {ref($_->{$c}) eq 'ARRAY' ? @{$_->{$c}} : $_->{$c}} grep {ref($_) eq 'HASH' && exists($_->{$c})} @$vv ]; } } elsif (ref($vv) eq '') { $vv = []; } else { $vv = $vv->step($c); } } return $v; } sub limit { my ($cwd, $v) = @_; my @ncwd; my $changed; my @v = @$v; for my $r (@$cwd) { my $vv = $r->[1]; my $lv = shift @v; if (ref($vv) ne '' && ref($vv) ne 'HASH' && ref($vv) ne 'ARRAY') { my $vv2 = $vv->limit($lv); if ($vv2 != $vv) { push @ncwd, [ @$r ]; $ncwd[-1]->[1] = $vv2; $changed = 1; next; } } push @ncwd, $r; } return $changed ? \@ncwd : $cwd; } sub expr { my ($cwd, $expr, $lev, $negpol) = @_; $lev ||= 0; # calculate next value my ($v, $v2); $expr =~ s/^\s+//; my $t = substr($expr, 0, 1); if ($t eq '(') { ($v, $expr) = expr($cwd, substr($expr, 1), 0, $negpol); die("missing ) in expression\n") unless $expr =~ s/^\)//; } elsif ($t eq '-') { ($v, $expr) = expr($cwd, substr($expr, 1), 6, $negpol); $v = op($cwd, $v, undef, sub {-$_[0]}); } elsif ($t eq "'") { die("missing string terminator\n") unless $expr =~ /^\'([^\']*)\'(.*)$/s; $v = $1; $expr = $2; while ($expr =~ /^(\'[^\']*)\'(.*)$/s) { $v .= $1; $expr = $2; } $v = [ ($v) x scalar(@$cwd) ]; } elsif ($t eq '"') { die("missing string terminator\n") unless $expr =~ /^\"([^\"]*)\"(.*)$/s; $v = $1; $expr = $2; while ($expr =~ /^(\"[^\"]*)\"(.*)$/s) { $v .= $1; $expr = $2; } $v = [ ($v) x scalar(@$cwd) ]; } elsif ($expr =~ /^([0-9]+(?:\.[0-9]*)?)(.*?)$/s) { $v = 0 + $1; $v = [ ($v) x scalar(@$cwd) ]; $expr = $2; } elsif ($t eq '/' && $expr =~ /^\/(\/.*)$/s) { # unary // $expr = $1; die("unary // op not implemented yet\n"); } elsif ($t eq '/') { # unary / $v = [ map {$_->[0]} @$cwd ]; } elsif ($t eq '.') { if ($expr =~ /^\.\./) { die(".. op not implemented yet\n"); } else { $v = [ map {$_->[1]} @$cwd ]; $expr = substr($expr, 1); } } elsif ($expr =~ /^([-_a-zA-Z0-9]+)\s*\((.*?)$/s) { my $f = $1; $expr = $2; my @args; while ($expr !~ s/^\)//) { ($v, $expr) = expr($cwd, $expr, 0, $f eq 'not' ? 
!$negpol : $negpol); push @args, $v; last if $expr =~ s/^\)//; die("$f: bad argument separator\n") unless $expr =~ s/^,//; } if ($f eq 'not') { die("$f: one argument required\n") unless @args == 1; push @args, [ (1) x scalar(@$cwd) ]; $v = boolop($cwd, @args, \&boolop_not, $negpol); } elsif ($f eq 'starts-with') { unshift @args, [ map {$_->[1]} @$cwd ] if @args == 1; die("$f: one or two arguments required\n") unless @args == 2; $v = boolop($cwd, @args, sub {substr($_[0], 0, length($_[1])) eq $_[1]}, $negpol); } elsif ($f eq 'contains') { unshift @args, [ map {$_->[1]} @$cwd ] if @args == 1; die("$f: at least two arguments required\n") unless @args >= 2; if (@args > 2) { my $arg1 = shift @args; for my $a (@args) { die("multi arg contains only works with strings\n") if grep {ref($_) || $_ ne $a->[0]} @$a; } my $arg2 = $args[0]; @args = map {$_->[0]} @args; $v = boolop($cwd, $arg1, $arg2, sub {!grep {index($_[0], $_) == -1} @args}, $negpol); } else { $v = boolop($cwd, @args, sub {index($_[0], $_[1]) != -1}, $negpol); } } elsif ($f eq 'compare') { unshift @args, [ map {$_->[1]} @$cwd ] if @args == 1; die("$f: one or two arguments required\n") unless @args == 2; $v = boolop($cwd, @args, sub {$_[0] cmp $_[1]}, $negpol); } elsif ($f eq 'ends-with') { unshift @args, [ map {$_->[1]} @$cwd ] if @args == 1; die("$f: one or two arguments required\n") unless @args == 2; $v = boolop($cwd, @args, sub {substr($_[0], -length($_[1])) eq $_[1]}, $negpol); } elsif ($f eq 'equals-ic') { unshift @args, [ map {$_->[1]} @$cwd ] if @args == 1; die("$f: one or two arguments required\n") unless @args == 2; $v = boolop($cwd, @args, sub {lc($_[0]) eq lc($_[1])}, $negpol); } elsif ($f eq 'starts-with-ic') { unshift @args, [ map {$_->[1]} @$cwd ] if @args == 1; die("$f: one or two arguments required\n") unless @args == 2; $v = boolop($cwd, @args, sub {substr(lc($_[0]), 0, length($_[1])) eq lc($_[1])}, $negpol); } elsif ($f eq 'ends-with-ic') { unshift @args, [ map {$_->[1]} @$cwd ] if @args == 1; die("$f: one or two arguments required\n") unless @args == 2; $v = boolop($cwd, @args, sub {substr(lc($_[0]), -length($_[1])) eq lc($_[1])}, $negpol); } elsif ($f eq 'contains-ic') { unshift @args, [ map {$_->[1]} @$cwd ] if @args == 1; die("$f: at least two arguments required\n") unless @args >= 2; if (@args > 2) { my $arg1 = shift @args; for my $a (@args) { die("multi arg contains-ic only works with strings\n") if grep {ref($_) || $_ ne $a->[0]} @$a; } my $arg2 = $args[0]; @args = map {lc($_->[0])} @args; $v = boolop($cwd, $arg1, $arg2, sub {!grep {index(lc($_[0]), $_) == -1} @args}, $negpol); } else { $v = boolop($cwd, @args, sub {index(lc($_[0]), lc($_[1])) != -1}, $negpol); } } elsif ($f eq 'position') { die("$f: no arguments required\n") unless @args == 0; $v = [ map {$_->[2]} @$cwd ]; } elsif ($f eq 'last') { $v = [ map {$_->[3]} @$cwd ]; } else { die("unknown function: $f\n"); } } elsif ($expr =~ /^(\@?(?:[-_a-zA-Z0-9]+|\*))(.*?)$/s) { # path component my $c = $1; $expr = $2; $c =~ s/^\@//; $v = [ map {$_->[1]} @$cwd ]; $v = pathstep($cwd, $v, $c); } else { die("syntax error: bad primary: $expr\n"); } # got primary, now go for ops while (1) { $expr =~ s/^\s+//; if ($expr =~ /^or/) { return ($v, $expr) if $lev > 1; ($v2, $expr) = expr($cwd, substr($expr, 2), 1, $negpol); $v = boolop($cwd, $v, $v2, sub {$_[0] || $_[1]}, $negpol); } elsif ($expr =~ /^and/) { return ($v, $expr) if $lev > 2; my $cwd2 = limit($cwd, $v); ($v2, $expr) = expr($cwd2, substr($expr, 3), 2, $negpol); $v = boolop($cwd, $v, $v2, sub {$_[0] && $_[1]}, 
$negpol); } elsif ($expr =~ /^=/) { return ($v, $expr) if $lev > 3; ($v2, $expr) = expr($cwd, substr($expr, 1), 3, $negpol); $v = boolop($cwd, $v, $v2, \&boolop_eq, $negpol); } elsif ($expr =~ /^!=/) { return ($v, $expr) if $lev > 3; ($v2, $expr) = expr($cwd, substr($expr, 2), 3, $negpol); $v = boolop($cwd, $v, $v2, sub {$_[0] ne $_[1]}, $negpol); } elsif ($expr =~ /^<=/) { return ($v, $expr) if $lev > 3; ($v2, $expr) = expr($cwd, substr($expr, 2), 3, $negpol); $v = boolop($cwd, $v, $v2, sub {$_[0] <= $_[1]}, $negpol); } elsif ($expr =~ /^>=/) { return ($v, $expr) if $lev > 3; ($v2, $expr) = expr($cwd, substr($expr, 2), 3, $negpol); $v = boolop($cwd, $v, $v2, sub {$_[0] >= $_[1]}, $negpol); } elsif ($expr =~ /^</) { return ($v, $expr) if $lev > 3; ($v2, $expr) = expr($cwd, substr($expr, 1), 3, $negpol); $v = boolop($cwd, $v, $v2, sub {$_[0] < $_[1]}, $negpol); } elsif ($expr =~ /^>/) { return ($v, $expr) if $lev > 3; ($v2, $expr) = expr($cwd, substr($expr, 1), 3, $negpol); $v = boolop($cwd, $v, $v2, sub {$_[0] > $_[1]}, $negpol); } elsif ($expr =~ /^\+/) { return ($v, $expr) if $lev > 4; ($v2, $expr) = expr($cwd, substr($expr, 1), 4, $negpol); $v = op($cwd, $v, $v2, sub {$_[0] + $_[1]}); } elsif ($expr =~ /^-/) { return ($v, $expr) if $lev > 4; ($v2, $expr) = expr($cwd, substr($expr, 1), 4, $negpol); $v = op($cwd, $v, $v2, sub {$_[0] - $_[1]}); } elsif ($expr =~ /^\*/) { return ($v, $expr) if $lev > 5; ($v2, $expr) = expr($cwd, substr($expr, 1), 5, $negpol); $v = op($cwd, $v, $v2, sub {$_[0] * $_[1]}); } elsif ($expr =~ /^div/) { return ($v, $expr) if $lev > 5; ($v2, $expr) = expr($cwd, substr($expr, 3), 5, $negpol); $v = op($cwd, $v, $v2, sub {$_[0] / $_[1]}); } elsif ($expr =~ /^mod/) { return ($v, $expr) if $lev > 5; ($v2, $expr) = expr($cwd, substr($expr, 3), 5, $negpol); $v = op($cwd, $v, $v2, sub {$_[0] % $_[1]}); } elsif ($expr =~ /^\|/) { die("union op not implemented yet\n"); } elsif ($expr =~ /^\/(\@?(?:[-_a-zA-Z0-9]+|\*))(.*?)$/s) { my $c = $1; $expr = $2; $c =~ s/^\@//; $v = pathstep($cwd, $v, $c); #print "following $c\n".Dumper($v); } elsif ($expr =~ /^\/\//s) { $expr = substr($expr, 1); die("// op not implemented yet\n"); } elsif ($expr =~ /^\[/) { ($v, $expr) = predicate($cwd, substr($expr, 1), $v); die("missing ] in predicate\n") if $expr eq ''; die("syntax error in predicate\n") unless $expr =~ s/^\]//; } else { return ($v, $expr); } } } sub select { my ($data, $expr) = @_; my $v; ($v, $expr) = BSXPath::expr([[$data, $data, 1, 1]], $expr); die("junk at end of expr: $expr\n") if $expr ne ''; $v = $v->[0]; if (ref($v) ne '' && ref($v) ne 'HASH' && ref($v) ne 'ARRAY') { $v = $v->value(); } return $v; } sub match { my ($data, $expr) = @_; my $v; ($v, $expr) = predicate([[$data, $data, 1, 1]], $expr, [$data]); die("junk at end of expr: $expr\n") if $expr ne ''; $v = $v->[0]; if (ref($v) ne '' && ref($v) ne 'HASH' && ref($v) ne 'ARRAY') { $v = $v->value(); } return $v; } sub valuematch { my ($data, $expr) = @_; my $v; ($v, $expr) = BSXPath::expr([[$data, $data, 1, 1]], $expr); die("junk at end of expr: $expr\n") if $expr ne ''; my @v = @$v; my @r; while (@v) { $v = shift @v; if (ref($v) ne '' && ref($v) ne 'HASH' && ref($v) ne 'ARRAY') { $v = $v->value(); } if (ref($v) eq '') { push @r, $v; } elsif (ref($v) eq 'HASH') { push @r, $v->{'_content'} if exists $v->{'_content'}; } elsif (ref($v) eq 'ARRAY') { unshift @v, @$v; } else { die("illegal return type\n"); } } return \@r; } 1; open-build-service-2.9.4/src/backend/BSXPathKeys.pm000066400000000000000000000252701332555733200220610ustar00rootroot00000000000000# #
Copyright (c) 2006, 2007 Michael Schroeder, Novell Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program (see the file COPYING); if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA # ################################################################ # # Abstract data implementation for the BSXPath engine. Data is # identified via keys. # package BSXPathKeys; use BSXPath; use Data::Dumper; use strict; # # needs: # db->values($path) -> array of values; # db->keys($path, $value) -> array of keys; # db->fetch($key) -> data; # # # node types: # # value defined # -> concrete node element # keys/other must also be set, define value set # # keys defined # -> abstract node element # limited to keys # # all other # -> abstract node element, unlimited # sub node { my ($db, $path, $limit) = @_; my $v = bless {}; $v->{'db'} = $db; $v->{'path'} = $path; $v->{'limit'} = $limit; return $v; } sub selectpath { my ($v, $path) = @_; $v = [ $v ] unless ref($v) eq 'ARRAY'; my @v = @$v; my $c; while(1) { last if !defined($path) || $path eq ''; ($c, $path) = split('/', $path, 2); for my $vv (splice(@v)) { next unless ref($vv) eq 'HASH'; $vv = $vv->{$c}; next unless defined($vv); push @v, ref($vv) eq 'ARRAY' ? @$vv : $vv; } } return @v; } sub value { my ($self) = @_; my @v; if (exists($self->{'value'})) { return [ $self->{'value'} ]; # hmm, what about other? } my $db = $self->{'db'}; my $path = $self->{'path'}; if (!exists($self->{'keys'})) { if (defined($path)) { push @v, $db->values($path); } else { push @v, $db->keys(); } } else { die("413 search limit reached\n") if $self->{'limit'} && @{$self->{'keys'}} > $self->{'limit'}; for my $k (@{$self->{'keys'}}) { my $v = $db->fetch($k); next unless defined $v; push @v, selectpath($v, $path); } } die("413 search limit reached\n") if $self->{'limit'} && @v > $self->{'limit'}; return \@v; } sub step { my ($self, $c) = @_; return [] if exists $self->{'value'}; # can't step concrete value my $v = bless {}; $v->{'db'} = $self->{'db'}; $v->{'keys'} = $self->{'keys'} if $self->{'keys'}; $v->{'limit'} = $self->{'limit'} if $self->{'limit'}; if ($self->{'path'} eq '') { $v->{'path'} = "$c"; } else { $v->{'path'} = "$self->{'path'}/$c"; } return $v; } sub toconcrete { my ($self) = @_; my $vv = bless {}; $vv->{'db'} = $self->{'db'}; $vv->{'limit'} = $self->{'limit'} if $self->{'limit'}; if ($self->{'keys'}) { $vv->{'keys'} = $self->{'keys'}; $vv->{'value'} = 'true'; $vv->{'other'} = ''; } else { $vv->{'keys'} = []; $vv->{'value'} = ''; $vv->{'other'} = 'true'; } return $vv; } sub boolop { my ($self, $v1, $v2, $op, $negpol) = @_; if (ref($v1) ne ref($self) && ref($v2) ne ref($self)) { return $op->($v1, $v2) ? 
'true' : ''; } #print "boolop ".Dumper($v1).Dumper($v2)."---\n"; #print "negated!\n" if $negpol; if (ref($v1) eq ref($self) && ref($v2) eq ref($self)) { $v1 = toconcrete($v1) unless exists $v1->{'value'}; $v2 = toconcrete($v2) unless exists $v2->{'value'}; my $v = bless {}; $v->{'db'} = $v1->{'db'}; $v->{'limit'} = $v1->{'limit'} if $v1->{'limit'}; my @k; my %k1 = map {$_ => 1} @{$v1->{'keys'}}; my %k2 = map {$_ => 1} @{$v2->{'keys'}}; if ($op->($v1->{'other'}, $v2->{'other'})) { push @k, grep {$k2{$_}} @{$v1->{'keys'}} if !$op->($v1->{'value'}, $v2->{'value'}); push @k, grep {!$k2{$_}} @{$v1->{'keys'}} if !$op->($v1->{'value'}, $v2->{'other'}); push @k, grep {!$k1{$_}} @{$v2->{'keys'}} if !$op->($v1->{'other'}, $v2->{'value'}); $v->{'value'} = ''; $v->{'other'} = 'true'; } else { push @k, grep {$k2{$_}} @{$v1->{'keys'}} if $op->($v1->{'value'}, $v2->{'value'}); push @k, grep {!$k2{$_}} @{$v1->{'keys'}} if $op->($v1->{'value'}, $v2->{'other'}); push @k, grep {!$k1{$_}} @{$v2->{'keys'}} if $op->($v1->{'other'}, $v2->{'value'}); $v->{'value'} = 'true'; $v->{'other'} = ''; } $v->{'keys'} = \@k; return $v; } if (ref($v1) eq ref($self)) { my $v = bless {}; $v->{'db'} = $v1->{'db'}; $v->{'limit'} = $v1->{'limit'} if $v1->{'limit'}; my $db = $v1->{'db'}; if (exists($v1->{'value'})) { $v->{'keys'} = $v1->{'keys'}; $v->{'value'} = $op->($v1->{'value'}, $v2) ? 'true' : ''; $v->{'other'} = $op->($v1->{'other'}, $v2) ? 'true' : ''; return $v; } my @k; my %k = map {$_ => 1} @{$v1->{'keys'} || []}; if ($v1->{'keys'} && !@{$v1->{'keys'}}) { @k = (); } elsif ($op == \&BSXPath::boolop_eq) { if ($v1->{'keys'} && $v1->{'path'} && $db->{"fetch_$v1->{'path'}"}) { # have super-fast select_path_from_key function for my $k (@{$v1->{'keys'}}) { my @d = $db->{"fetch_$v1->{'path'}"}->($db, $k); next unless @d; if (!$negpol) { next unless grep {$_ eq $v2} @d; } else { next if grep {$_ eq $v2} @d; } push @k, $k; } } else { @k = $db->keys($v1->{'path'}, $v2, $v1->{'keys'}); @k = grep {$k{$_}} @k if $v1->{'keys'}; #die("413 search limit reached\n") if $v1->{'limit'} && @k > $v1->{'limit'}; $negpol = 0; } } elsif ($op == \&BSXPath::boolop_not && $v1->{'keys'} && !exists($v1->{'value'})) { for my $k (@{$v1->{'keys'}}) { my $vv = $db->fetch($k); next unless defined $vv; my @p = selectpath($vv, $v1->{'path'}); if (!$negpol) { next unless !@p || grep {!$_} @p; } else { next if !@p || grep {!$_} @p; } push @k, $k; } } else { my @values = $db->values($v1->{'path'}, $v1->{'keys'}); if ($v1->{'keys'} && @values > @{$v1->{'keys'}}) { for my $k (@{$v1->{'keys'}}) { my $vv = $db->fetch($k); next unless defined $vv; if (!$negpol) { next unless grep {$op->($_, $v2)} selectpath($vv, $v1->{'path'}); } else { next if grep {$op->($_, $v2)} selectpath($vv, $v1->{'path'}); } push @k, $k; } } else { for my $vv (@values) { if (!$negpol) { next unless $op->($vv, $v2); } else { next if $op->($vv, $v2); } if ($v1->{'keys'}) { push @k, grep {$k{$_}} $db->keys($v1->{'path'}, $vv, $v1->{'keys'}); } else { push @k, $db->keys($v1->{'path'}, $vv, $v1->{'keys'}); } die("413 search limit reached\n") if $v1->{'limit'} && @k > $v1->{'limit'}; } } } $v->{'keys'} = \@k; $v->{'value'} = $negpol ? '' : 'true'; $v->{'other'} = $negpol ? 'true' : ''; #print "==> ".Dumper($v)."<===\n"; return $v; } if (ref($v2) eq ref($self)) { my $v = bless {}; $v->{'db'} = $v1->{'db'}; $v->{'limit'} = $v1->{'limit'} if $v1->{'limit'}; my $db = $v1->{'db'}; if (exists($v2->{'value'})) { $v->{'keys'} = $v2->{'keys'}; $v->{'value'} = $op->($v1, $v2->{'value'}) ? 
'true' : ''; $v->{'other'} = $op->($v1, $v2->{'other'}) ? 'true' : ''; return $v; } my @k; my %k = map {$_ => 1} @{$v2->{'keys'} || []}; if ($v2->{'keys'} && !@{$v2->{'keys'}}) { @k = (); } elsif ($op == \&BSXPath::boolop_eq) { @k = $db->keys($v2->{'path'}, $v1, $v2->{'keys'}); @k = grep {$k{$_}} @k if $v2->{'keys'}; #die("413 search limit reached\n") if $v2->{'limit'} && @k > $v2->{'limit'}; $negpol = 0; } else { my @values = $db->values($v2->{'path'}, $v2->{'keys'}); if ($v2->{'keys'} && @values > @{$v2->{'keys'}}) { for my $k (@{$v2->{'keys'}}) { my $vv = $db->fetch($k); next unless defined $vv; if (!$negpol) { next unless grep {$op->($v1, $_)} selectpath($vv, $v2->{'path'}); } else { next if grep {$op->($v1, $_)} selectpath($vv, $v2->{'path'}); } push @k, $k; } } else { for my $vv (@values) { if (!$negpol) { next unless $op->($v1, $vv); } else { next if $op->($v1, $vv); } if ($v2->{'keys'}) { push @k, grep {$k{$_}} $db->keys($v2->{'path'}, $vv, $v2->{'keys'}); } else { push @k, $db->keys($v2->{'path'}, $vv, $v2->{'keys'}); } } } } $v->{'keys'} = \@k; $v->{'value'} = $negpol ? '' : 'true'; $v->{'other'} = $negpol ? 'true' : ''; return $v; } } sub op { my ($self, $v1, $v2, $op) = @_; if (ref($v1) ne ref($self) && ref($v2) ne ref($self)) { return $op->($v1, $v2); } die("op not implemented for abstract elements\n"); } sub predicate { my ($self, $v, $expr) = @_; if (ref($v) ne ref($self)) { $v = @$v ? 'true' : '' if ref($v) eq 'ARRAY'; if ($v =~ /^-?\d+$/) { die("enumeration not implemented for abstract elements\n"); } else { return $v ? $self : []; } } $v = toconcrete($v) unless exists $v->{'value'}; my $vv = bless {}; $vv->{'db'} = $self->{'db'}; $vv->{'path'} = $self->{'path'}; $vv->{'limit'} = $self->{'limit'} if $self->{'limit'}; my @k; if ($v->{'value'}) { @k = @{$v->{'keys'}}; } elsif ($v->{'other'}) { my %k = map {$_ => 1} @{$v->{'keys'}}; @k = grep {!$k{$_}} $self->{'db'}->keys(); } if (@k && $self->{'keys'}) { my %k = map {$_ => 1} @{$self->{'keys'}}; @k = grep {$k{$_}} @k; } if ($self->{'path'}) { # postprocess matched keys for my $k (splice(@k)) { my $db = $self->{'db'}; my $kv = $db->fetch($k); next unless $kv; $kv = [ selectpath($kv, $self->{'path'}) ]; next unless @$kv; ($kv, undef) = BSXPath::predicate([[$kv, $kv, 1, 1]], $expr, [$kv]); push @k, $k if @{$kv->[0]}; } } $vv->{'keys'} = \@k; return $vv; } sub keymatch { my ($self, $expr) = @_; my $v; ($v, $expr) = BSXPath::predicate([[$self, $self, 1, 1]], $expr, [$self]); die("junk at end of expr: $expr\n") if $expr ne ''; return $v->[0]->{'keys'} || []; } sub limit { my ($self, $v) = @_; if (ref($v) ne ref($self)) { return $self; } return $self if $self->{'value'}; if ($v->{'value'}) { my @k = @{$v->{'keys'}}; my $vv = bless {}; $vv->{'db'} = $self->{'db'}; $vv->{'limit'} = $self->{'limit'} if $self->{'limit'}; $vv->{'path'} = $self->{'path'}; if (@k && $self->{'keys'}) { my %k = map {$_ => 1} @{$self->{'keys'}}; @k = grep {$k{$_}} @k; } $vv->{'keys'} = \@k; return $vv; } else { return $self; } } 1; open-build-service-2.9.4/src/backend/DESIGN000066400000000000000000000026511332555733200203100ustar00rootroot00000000000000 System layout ------------- 1) one source server Maintains the source repository and project/package configurations. It is the only interface for the api stack and it may forward requests to one of the repository servers. 2) N repository servers a repository server provides access to the repositories of some projects/packages/architectures.
It also maintains the build client worker pool, sending jobs to the clients and informing the schedulers about finished jobs. 3) N*A schedulers A scheduler maintains all projects/packages of the corresponding repository server, but for just one architecture. It is responsible for starting jobs in the right order, collecting the built binary packages and creating the external repository if a project's repository is completely built. The schedulers need to run on the same system as the repository servers. 4) M build clients The build clients contact the repository servers for jobs, build the jobs and send the result back to the repository server. The simplest setup is one source server, one repository server and schedulers for every involved architecture. Code conventions ---------------- standard variable names: $proj project structure $projid project name ($proj->{'name'}) $repoid repository name $arch architecture $pack package structure $packid package name $prp "$projid/$repoid" $prpa "$projid/$repoid/$arch" open-build-service-2.9.4/src/backend/License000066400000000000000000000003341332555733200207150ustar00rootroot00000000000000 All files are under the GPL v2 license as described in the COPYING file. Contributions to this directory do require a signed copyright assignment. Please contact Adrian Schroeter for details. open-build-service-2.9.4/src/backend/Makefile000066400000000000000000000025011332555733200210460ustar00rootroot00000000000000include ../../Makefile.include OBS_BACKEND_DATA_SUBDIRS := build events info jobs log projects repos run sources trees workers OBS_BACKEND_DATA_DIR := /srv/obs prepare_dirs: $(INSTALL) -d -m 755 $(DESTDIR)$(OBS_BACKEND_PREFIX) $(INSTALL) -d -m 755 $(DESTDIR)$(OBS_BACKEND_DATA_DIR) bs_config: [ -f ./BSConfig.pm ] || cp ./BSConfig.pm.template ./BSConfig.pm install: prepare_dirs install_data_dirs bs_config # prevent errors while copying if there is a previous installation rm -rf $(DESTDIR)$(OBS_BACKEND_PREFIX)/build cp -a ./* $(DESTDIR)$(OBS_BACKEND_PREFIX) rm -rf $(DESTDIR)$(OBS_BACKEND_PREFIX)/build rm -rf $(DESTDIR)$(OBS_BACKEND_PREFIX)/Makefile rm -rf $(DESTDIR)$(OBS_BACKEND_PREFIX)/t rm -rf $(DESTDIR)$(OBS_BACKEND_PREFIX)/testdata rm -rf $(DESTDIR)$(OBS_BACKEND_PREFIX)/examples # just for check section, it is a %%ghost ln -sf /usr/lib/build $(DESTDIR)$(OBS_BACKEND_PREFIX)/build install_data_dirs: prepare_dirs $(foreach data_dir,$(OBS_BACKEND_DATA_SUBDIRS), \ $(shell $(INSTALL) -d -m 755 $(DESTDIR)$(OBS_BACKEND_DATA_DIR)/$(data_dir) ) \ ) test_unit: bs_config clean_cover rm -rf t/tmp/* PERL5OPT=-MDevel::Cover LANG=C prove -Ibuild -I. -v t/*.t cover: test_unit cover -ignore_re '^((build|XML|t)/|/usr/bin/prove$$)' -outputdir /srv/www/htdocs clean_cover: rm -rf cover_db/ .PHONY: test_unit cover open-build-service-2.9.4/src/backend/README000066400000000000000000000017301332555733200202710ustar00rootroot00000000000000To set up your own backend from git: 1) Copy BSConfig.pm.template to BSConfig.pm 2) Change BSConfig.pm to match your local paths and server names. Currently the server runs with user "obsrun" and group "obsrun", so you have to add them to your system. 3) Initialize build code (provides needed package format parsers): # git submodule init # git submodule update 4) run a src server: ./bs_srcserver 5) run a repo server: ./bs_repserver 6) run a job scheduler for every arch you need ./bs_sched i586 ./bs_sched x86_64 ...
7) on your build clients: create a directory /root/bs copy BSBuild.pm BSConfig.pm BSHTTP.pm BSRPC.pm BSServer.pm BSDispatch.pm BSConfiguration.pm BSKiwiXML.pm BSCando.pm BSUtil.pm BSXML.pm XML/Structured.pm bs_worker into the directory. create a work directory, e.g. /BUILD/root_1 create a state dir, e.g. /var/run/worker_1 run ./bs_worker --root /BUILD/root_1 --id /1 --statedir /var/run/worker_1 open-build-service-2.9.4/src/backend/XML/000077500000000000000000000000001332555733200200505ustar00rootroot00000000000000open-build-service-2.9.4/src/backend/XML/Structured.pm000066400000000000000000000323261332555733200225600ustar00rootroot00000000000000 package XML::Structured; use vars qw($VERSION @ISA @EXPORT); require Exporter; @ISA = qw(Exporter); @EXPORT = qw(XMLin XMLinfile XMLout); $VERSION = '1.1'; use Encode; use strict; our $bytes; sub import { $bytes = 1 if grep {$_ eq ':bytes'} @_; __PACKAGE__->export_to_level(1, grep {$_ ne ':bytes'} @_); } sub _workin { my ($how, $out, $ain, @in) = @_; my @how = @$how; my $am = shift @how; my %known = map {ref($_) ? (!@$_ ? () : (ref($_->[0]) ? $_->[0]->[0] : $_->[0] => $_)) : ($_=> $_)} @how; for my $a (keys %$ain) { die("unknown attribute: $a\n") unless $known{$a}; if (ref($known{$a})) { die("attribute '$a' must be element\n") if @{$known{$a}} > 1 || ref($known{$a}->[0]); push @{$out->{$a}}, $ain->{$a}; } else { die("attribute '$a' must be singleton\n") if exists $out->{$a}; $out->{$a} = $ain->{$a}; Encode::_utf8_off($out->{$a}) if $bytes; } } while (@in) { my ($e, $v) = splice(@in, 0, 2); my $ke = $known{$e}; if ($e eq '0') { next if $v =~ /^\s*$/s; die("element '$am' contains content\n") unless $known{'_content'}; Encode::_utf8_off($v) if $bytes; $v =~ s/\s+$/ /s; $v =~ s/^\s+/ /s; if (exists $out->{'_content'}) { $out->{'_content'} =~ s/ $//s if $v =~ /^ /s; $out->{'_content'} .= $v; } else { $out->{'_content'} = $v; } next; } if (!$ke && $known{''}) { $ke = $known{''}; $v = [{}, $e, $v]; $e = ''; } die("unknown element: $e\n") unless $ke; if (!ref($ke)) { push @$v, '0', '' if @$v == 1; die("element '$e' contains attributes @{[keys %{$v->[0]}]}\n") if %{$v->[0]}; die("element '$e' has subelements\n") if $v->[1] ne '0'; die("element '$e' must be singleton\n") if exists $out->{$e}; Encode::_utf8_off($v->[2]) if $bytes; $out->{$e} = $v->[2]; } elsif (@$ke == 1 && !ref($ke->[0])) { push @$v, '0', '' if @$v == 1; die("element '$e' contains attributes\n") if %{$v->[0]}; die("element '$e' has subelements\n") if $v->[1] ne '0'; Encode::_utf8_off($v->[2]) if $bytes; push @{$out->{$e}}, $v->[2]; } else { if (@$ke == 1) { push @{$out->{$e}}, {}; _workin($ke->[0], $out->{$e}->[-1], @$v); } else { die("element '$e' must be singleton\n") if exists $out->{$e}; $out->{$e} = {}; _workin($ke, $out->{$e}, @$v); } } } if (exists $out->{'_content'}) { $out->{'_content'} =~ s/^ //s; $out->{'_content'} =~ s/ $//s; } } sub _escape { my ($d) = @_; $d =~ s/&/&amp;/sg; $d =~ s/</&lt;/sg; $d =~ s/>/&gt;/sg; $d =~ s/"/&quot;/sg; return $d; } sub _workout { my ($how, $d, $indent) = @_; my @how = @$how; my $am = _escape(shift @how); my $ret = "$indent<$am"; my $inelem; my %d2 = %$d; my $gotel = 0; if ($am eq '') { $ret = ''; $gotel = $inelem = 1; $indent = substr($indent, 2); } for my $e (@how) { if (!$inelem && !ref($e) && $e ne '_content') { next unless exists $d2{$e}; $ret .= _escape(" $e=").'"'._escape($d2{$e}).'"'; delete $d2{$e}; next; } $inelem = 1; next if ref($e) && !@$e; # magic inelem marker my $en = $e; $en = $en->[0] if ref($en); $en = $en->[0] if ref($en); next unless exists $d2{$en}; my $ee =
_escape($en);
    if (!ref($e) && $e eq '_content' && !$gotel) {
      $gotel = 2;	# special marker to strip indent
      $ret .= ">"._escape($d2{$e})."</$am>\n";
      delete $d2{$e};
      next;
    }
    $ret .= ">\n" unless $gotel;
    $gotel = 1;
    if (!ref($e)) {
      die("'$e' must be scalar\n") if ref($d2{$e});
      if ($e eq '_content') {
        my $c = $d2{$e};
        $ret .= "$indent "._escape("$c\n");
        delete $d2{$e};
        next;
      }
      if (defined($d2{$e})) {
        $ret .= "$indent <$ee>"._escape($d2{$e})."</$ee>\n";
      } else {
        $ret .= "$indent <$ee/>\n";
      }
      delete $d2{$e};
      next;
    } elsif (@$e == 1 && !ref($e->[0])) {
      die("'$en' must be array\n") unless UNIVERSAL::isa($d2{$en}, 'ARRAY');
      for my $se (@{$d2{$en}}) {
        $ret .= "$indent <$ee>"._escape($se)."</$ee>\n";
      }
      delete $d2{$en};
    } elsif (@$e == 1) {
      die("'$en' must be array\n") unless UNIVERSAL::isa($d2{$en}, 'ARRAY');
      for my $se (@{$d2{$en}}) {
        die("'$en' must be array of hashes\n") unless UNIVERSAL::isa($se, 'HASH');
        $ret .= _workout($e->[0], $se, "$indent ");
      }
      delete $d2{$en};
    } else {
      die("'$en' must be hash\n") unless UNIVERSAL::isa($d2{$en}, 'HASH');
      $ret .= _workout($e, $d2{$en}, "$indent ");
      delete $d2{$en};
    }
  }
  die("excess hash entries: ".join(', ', sort keys %d2)."\n") if %d2;
  if ($gotel == 2 && $ret =~ s/\n$//s) {
    $ret .= "</$am>\n" unless $am eq '';
  } elsif ($gotel) {
    $ret .= "$indent</$am>\n" unless $am eq '';
  } else {
    $ret .= " />\n";
  }
  return $ret;
}

package XML::Structured::saxparser;

sub new {
  return bless [];
}

sub start_document {
  my ($self) = @_;
  $self->[0] = [];
}

sub start_element {
  my ($self, $e) = @_;
  my %as = map {$_->{'Name'} => $_->{'Value'}} values %{$e->{'Attributes'} || {}};
  push @{$self->[0]}, $e->{'Name'}, [ $self->[0], \%as ];
  $self->[0] = $self->[0]->[-1];
}

sub end_element {
  my ($self) = @_;
  $self->[0] = shift @{$self->[0]};
}

sub characters {
  my ($self, $c) = @_;
  my $cl = $self->[0];
  if (@$cl > 2 && $cl->[-2] eq '0') {
    $cl->[-1] .= $c->{'Data'};
  } else {
    push @$cl, '0' => $c->{'Data'};
  }
}

sub end_document {
  my ($self) = @_;
  return $self->[0];
}

package XML::Structured;

my $xmlinparser;

sub _xmlparser {
  my ($str) = @_;
  my $p = new XML::Parser(Style => 'Tree');
  return $p->parse($str);
}

sub _saxparser {
  my ($str) = @_;
  my $handler = new XML::Structured::saxparser;
  my $sp = XML::SAX::ParserFactory->parser('Handler' => $handler);
  if (ref(\$str) eq 'GLOB' || UNIVERSAL::isa($str, 'IO::Handle')) {
    return $sp->parse_file($str);
  }
  return $sp->parse_string($str);
}

sub _chooseparser {
  eval { require XML::SAX; };
  my $saxok;
  if (!$@) {
    $saxok = 1;
    my $parsers = XML::SAX->parsers();
    return \&_saxparser if $parsers && @$parsers && (@$parsers > 1 || $parsers->[0]->{'Name'} ne 'XML::SAX::PurePerl');
  }
  eval { require XML::Parser; };
  return \&_xmlparser unless $@;
  return \&_saxparser if $saxok;
  die("XML::Structured needs either XML::SAX or XML::Parser\n");
}

sub XMLin {
  my ($dtd, $str) = @_;
  $xmlinparser = _chooseparser() unless defined $xmlinparser;
  my $d = $xmlinparser->($str);
  my $out = {};
  $d = ['', [{}, @$d]] if $dtd->[0] eq '';
  die("document element must be '$dtd->[0]', was '$d->[0]'\n") if $d->[0] ne $dtd->[0];
  _workin($dtd, $out, @{$d->[1]});
  return $out;
}

sub XMLinfile {
  my ($dtd, $fn) = @_;
  local *F;
  open(F, '<', $fn) || die("$fn: $!\n");
  my $out = XMLin($dtd, *F);
  close F;
  return $out;
}

sub XMLout {
  my ($dtd, $d) = @_;
  die("parameter is not a hash\n") unless UNIVERSAL::isa($d, 'HASH');
  if ($dtd->[0] eq '') {
    die("excess hash elements\n") if keys %$d > 1;
    for my $el (@$dtd) {
      return _workout($el, $d->{$el->[0]}, '') if ref($el) && $d->{$el->[0]};
    }
    die("no match for alternative\n");
  }
  return _workout($dtd,
$d, '');
}

1;

__END__

=head1 NAME

XML::Structured - simple conversion API from XML to perl structures and back

=head1 SYNOPSIS

  use XML::Structured;

  $dtd = [
      'element' =>
          'attribute1',
          'attribute2',
          [],
          'element1',
        [ 'element2' ],
        [ 'element3' =>
            ...
        ],
        [[ 'element4' =>
            ...
        ]],
  ];

  $hashref = XMLin($dtd, $xmlstring);
  $hashref = XMLinfile($dtd, $filename_or_glob);
  $xmlstring = XMLout($dtd, $hashref);

=head1 DESCRIPTION

The XML::Structured module provides a way to convert xml data into a predefined perl data structure and back to xml. Unlike with modules like XML::Simple it is an error if the xml data does not match the provided skeleton (the "dtd"). Another advantage is that the order of the attributes and elements is taken from the dtd when converting back to xml.

=head2 XMLin()

The XMLin() function takes the dtd and a string as arguments and returns a hash reference containing the data.

=head2 XMLinfile()

This function works like C<XMLin()>, but takes a filename or a file descriptor glob as second argument.

=head2 XMLout()

C<XMLout()> provides the reverse operation to C<XMLin()>, it takes a dtd and a hash reference as arguments and returns an XML string.

=head1 The DTD

The dtd parameter specifies the structure of the allowed xml data. It consists of nested perl arrays.

=head2 simple attributes and elements

The very simple example for a dtd is:

  $dtd = [
      'user' =>
          'login',
          'password',
  ];

This dtd will accept/create XML like:

  <user login="foo" password="bar" />

XMLin doesn't care if "login" or "password" are attributes or elements, so

  <user login="foo">
    <password>bar</password>
  </user>

is also valid input (but doesn't get re-created by C<XMLout()>).

=head2 multiple elements of the same name

If an element may appear multiple times, it must be declared as an array in the dtd:

  $dtd = [
      'user' =>
          'login',
        [ 'favorite_fruits' ],
  ];

XMLin will create an array reference as value in this case, even if the xml data contains only one element. Valid XML looks like:

  <user login="foo">
    <favorite_fruits>apple</favorite_fruits>
    <favorite_fruits>peach</favorite_fruits>
  </user>

As attributes may not appear multiple times, XMLout will create elements for this case. Note also that all attributes must come before the first element, thus the first array in the dtd ends the attribute list. As an example, the following dtd

  $dtd = [
      'user' =>
          'login',
        [ 'favorite_fruits' ],
          'password',
  ];

will create xml like:

  <user login="foo">
    <favorite_fruits>apple</favorite_fruits>
    <favorite_fruits>peach</favorite_fruits>
    <password>bar</password>
  </user>

"login" is translated to an attribute and "password" to an element.

You can use an empty array reference to force the end of the attribute list, e.g.:

  $dtd = [
      'user' =>
        [],
          'login',
          'password',
  ];

will translate to

  <user>
    <login>foo</login>
    <password>bar</password>
  </user>

instead of

  <user login="foo" password="bar" />

=head2 sub-elements

sub-elements are elements that also contain attributes or other elements. They are specified in the dtd as arrays with more than one element. Here is an example:

  $dtd = [
      'user' =>
          'login',
        [ 'address' =>
              'street',
              'city',
        ],
  ];

Valid xml for this dtd looks like:
  <user login="foo">
    <address street="broadway 7" city="new york" />
  </user>

It is sometimes useful to specify such dtds in multiple steps:

  $addressdtd = [
      'address' =>
          'street',
          'city',
  ];

  $dtd = [
      'user' =>
          'login',
        $addressdtd,
  ];

=head2 multiple sub-elements with the same name

As with simple elements, one can allow sub-elements to occur multiple times. C<XMLin> creates an array of hash references in this case. The dtd specification uses an array reference to an array for this case, for example:

  $dtd = [
      'user' =>
          'login',
        [[ 'address' =>
              'street',
              'city',
        ]],
  ];

Or, with the $addressdtd definition used in the previous example:

  $dtd = [
      'user' =>
          'login',
        [ $addressdtd ],
  ];

Accepted XML is:

  <user login="foo">
    <address street="broadway 7" city="new york" />
    <address street="rodeo drive 42" city="beverly hills" />
  </user>
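For illustration, a complete round trip with this dtd might look like the following sketch ($xmlstring is assumed to hold a document like the one just shown; the field values are made up):

  use XML::Structured;

  my $addressdtd = [
      'address' =>
          'street',
          'city',
  ];
  my $dtd = [
      'user' =>
          'login',
        [ $addressdtd ],
  ];

  my $user = XMLin($dtd, $xmlstring);
  print $user->{'address'}->[0]->{'city'}, "\n";   # first address sub-element
  print XMLout($dtd, $user);                       # back to an xml string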
=head2 the _content pseudo-element

All of the non-whitespace parts between elements get collected into a single "_content" element. As example,

  <user login="foo">
    <address street="broadway 7" city="new york" />
    hello
    <address street="rodeo drive 42" city="beverly hills" />
    world
  </user>

would set the _content element to C<hello world> (the dtd must allow a _content element, of course). If the dtd is

  $dtd = [
      'user' =>
          'login',
        [ $addressdtd ],
          '_content',
  ];

the xml string created by XMLout() will be:
  <user login="foo">
    <address street="broadway 7" city="new york" />
    <address street="rodeo drive 42" city="beverly hills" />
    hello world
  </user>

The exact input cannot be re-created, as the positions and the fragmentation of the content data are lost.

=head1 SEE ALSO

B<XML::Structured> requires either L<XML::Parser> or L<XML::SAX>.

=head1 COPYRIGHT

Copyright 2006 Michael Schroeder E<lt>mls@suse.deE<gt>

This library is free software; you can redistribute it and/or modify it under the same terms as Perl itself.

=cut
open-build-service-2.9.4/src/backend/bs_admin000077500000000000000000001136161332555733200211160ustar00rootroot00000000000000#!/usr/bin/perl -w
#
# Copyright (c) 2008 Adrian Schroeter, Novell Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program (see the file COPYING); if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#
################################################################
#
# The Admin Tool
#

BEGIN {
  my ($wd) = $0 =~ m-(.*)/- ;
  $wd ||= '.';
  unshift @INC, "$wd/build";
  unshift @INC, "$wd";
}

use strict;

use POSIX;
use Data::Dumper;
use Getopt::Long;
use Digest::MD5 ();
use XML::Structured ':bytes';

use Build;
use BSConfiguration;
use BSFileDB;
use BSWatcher;
use BSUtil;
use BSXML;
use BSKiwiXML;
use BSProductXML;
use BSDB;
use BSDBIndex;
use BSSolv;
use BSSrcrep;
use BSRevision;

my $nosharedtrees;
$nosharedtrees = $BSConfig::nosharedtrees if defined($BSConfig::nosharedtrees);
my $new_full_handling = 1;
$new_full_handling = $BSConfig::new_full_handling if defined($BSConfig::new_full_handling);

my $reporoot = "$BSConfig::bsdir/build";
my $eventroot = "$BSConfig::bsdir/events";
my $projectsdir = "$BSConfig::bsdir/projects";
my $srcrepdir = "$BSConfig::bsdir/sources";
my $configfile = "$BSConfig::bsdir/configuration.xml";
my $treesdir = $nosharedtrees ? "$BSConfig::bsdir/trees" : $srcrepdir;
my $sourcedb = "$BSConfig::bsdir/db/source";
my $rundir = "$BSConfig::bsdir/run";

sub echo_help {
  print "\n The Open Build Service Admin Tool
 =====================================

 *** This tool is only intended to be used by experienced admins on
 *** the backend server!

 General options
 ===============

 --help
   Gives this help output.

 Job Controlling
 ===============

 --shutdown-scheduler <architecture>
   Stops the scheduler nicely, dumping out its current state for fast startup.

 --check-project <project> <architecture>
 --check-project <project> <repository> <architecture>
 --check-all-projects <architecture>
   Check the status of a project and its repositories again.

 --deep-check-project <project> <architecture>
 --deep-check-project <project> <repository> <architecture>
   Check the status of a project and its repositories again.
   This deep check also includes the sources, in case of lost events.

 --check-package <project> <package> <architecture>
   Check the status of a package in all repositories.

 --publish-repository <project> <repository>
   Creates an event for the publisher. The scheduler is NOT scanning for new packages.
   The publisher may skip the event if nothing has changed.
   Use --republish-repository when you want to enforce a publish.

 --unpublish-repository <project> <repository>
   Removes the prepared :repo collection and lets the publisher remove the result.
   This also updates the search database.
   WARNING: this works also for locked projects!

 --prefer-publish-event <name>
   Prefers a publish event to be next. <name> is the file name inside of the publish event directory.
 --republish-repository <project> <repository>
   Force a repository to be published again.

 --rebuild-full-tree <project> <repository> <architecture>
   Rebuild the content of the :full/ directory.

 --clone-repository <source project> <source repository> <destination repository>
 --clone-repository <source project> <source repository> <destination project> <destination repository>
   Clone an existing repo into another existing repository.
   Useful for creating snapshots.

 --rescan-repository <project> <repository> <architecture>
   Asks the scheduler to scan a repository for new packages and add
   them to the cache file.

 --force-check-project <project> <repository> <architecture>
   Enforces the check of a repository, even when it is currently blocked
   due to the amount of calculation time.

 --create-patchinfo-from-updateinfo <updateinfo file> <rpm pool directory>
   Creates a patchinfo submission based on an updateinfo information.

 Maintenance Tasks
 =================

 Note: the --update-*-db calls are usually only needed when corrupt data
       has been created, for example after a file system corruption.

 --update-source-db [<project>]
   Update the index for all source files.

 --update-request-db
   Updates the index for all requests.

 --remove-old-sources <days> <keep> (--debug)
   WARNING: this is an experimental feature atm. It may trash your data,
            but you have a backup anyway, right?
   Removes sources older than <days> days, but keeps <keep> number of revisions.
   --debug for debug output

 Debug Options
 =============

 --dump-cache <project> <repository> <architecture>
   Dumps out the content of a binary cache file.
   This shows all the content of a repository, including all provides
   and requires.

 --dump-state <architecture>

 --dump-project-from-state <project> <architecture>
   Dump the state of a project.

 --dump-relsync <file>
   To dump the content of :relsync files.

 --set-relsync <file> <key> <value>
   Modify key content in a :relsync file.

 --check-meta-xml <project>
 --check-meta-xml <project> <package>
   Parses a project or package xml file and prints out error messages
   in case of errors.

 --check-product-xml <file>
   Parses a product xml file and prints out error messages in case of
   errors. It does expand all xi:include references and validates the
   result.

 --check-product-group-xml <file>
   Parses a group xml file from a product definition and prints out
   error messages in case of errors.

 --check-kiwi-xml <file>
 --check-kiwi-xml <project> <package>
   Parses a kiwi xml file and prints out error messages in case of errors.

 --check-constraints <file>
 --check-constraints <project> <package>
   Validates a _constraints file.

 --check-pattern-xml <file>
   Parses a pattern xml file and prints out error messages in case of errors.

 --check-request-xml <file>
   Parses a request xml file and prints out error messages in case of errors.

 --parse-build-desc <file> [<arch> [<buildconfigfile>]]
   Parse a spec, dsc or kiwi file with the Build script parser.

 --show-scheduler-architectures
   Show all architectures which are configured in configuration.xml to be
   supported by this instance.

 --show-delta-file <file>
   Show all instructions of an OBS delta file.

 --show-delta-store <file>
   Show delta store statistics.

 --dump-memstats <architecture> [what]

 Backend Configuration
 =====================

 --query-config <variable name>

 Dispatcher Maintenance
 ======================

 --list-badhosts
   List all marked badhosts from bs_dispatch.

 --drop-badhosts
   Drop information about badhosts in bs_dispatch.

";
}

my $emptymd5 = 'd41d8cd98f00b204e9800998ecf8427e';

#### FIXME: this function is copied from the src server. We should maybe move it to some util module.
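# A few illustrative invocations of the commands documented above (the
# project, repository and architecture names here are placeholders, not
# names from a real instance):
#
#   ./bs_admin --check-project MyProject standard x86_64
#   ./bs_admin --publish-repository MyProject standard
#   ./bs_admin --dump-cache MyProject standard x86_64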
sub findfile { my ($rev, $repoid, $ext, $files) = @_; $files = BSRevision::lsrev($rev) unless ref $files; my $packid = $rev->{'package'}; return ($files->{"$packid-$repoid.$ext"}, "$packid-$repoid.$ext") if defined($repoid) && $files->{"$packid-$repoid.$ext"}; return ($files->{"$packid.$ext"}, "$packid.$ext") if $files->{"$packid.$ext"} && defined($repoid); my @files = grep {/\.$ext$/} keys %$files; @files = grep {/^\Q$packid\E/i} @files if @files > 1; return ($files->{$files[0]}, $files[0]) if @files == 1; if (@files > 1) { if (!defined($repoid)) { # return (undef, undef); @files = sort @files; return ($files->{$files[0]}, $files[0]); } @files = grep {/^\Q$packid-$repoid\E/i} @files if @files > 1; return ($files->{$files[0]}, $files[0]) if @files == 1; } return (undef, undef); } #### end of copy from src server sub find_latest_file { my ($project, $package, $type) = @_; my $rev = BSRevision::getrev_local($project, $package); if (!$rev || $rev->{'srcmd5'} eq 'empty') { return ( "Refered to non existing $type in $project $package" ); } my $files = BSRevision::lsrev($rev); # FIXME: handle source links # $files = handlelinks($projid, $pinfo, $files, $rev) if ref($files) && $files->{'_link'}; if (!ref $files) { return( "could not get file list for $project $package" ); } my ($md5, $file) = findfile($rev, undef, $type, $files); return ($md5, $file); } sub dump_nStore { my ($file, @sel) = @_; my $cache = BSUtil::retrieve($file); $cache = $cache->{$_} for @sel; print Dumper($cache); return $cache } sub dump_cache { my ($project, $repo, $arch) = @_; my $full = "$reporoot/$project/$repo/$arch/:full"; return dump_solv("$full.solv") if -e "$full.solv"; return dump_nStore("$full.cache") if -e "$full.cache"; die("neither $full.cache nor $full.solv exists\n"); } sub dump_solv { my ($fn) = @_; my $pool = BSSolv::pool->new(); my $repo = $pool->repofromfile(0, $fn); my %names = $repo->pkgnames(); my $r = {}; for my $p (values %names) { $r->{$pool->pkg2name($p)} = $pool->pkg2data($p); } print Dumper($r); } sub clone_repository { my ($srcproject, $srcrepo, $destproject, $destrepo, $dovolatile) = @_; my $srcdir = "$reporoot/$srcproject/$srcrepo"; my $destdir = "$reporoot/$destproject/$destrepo"; my $tmpdir = "$BSConfig::bsdir/tmp"; die("Destination repo must get created by scheduler first!\n") unless -d $destdir; mkdir_p($tmpdir) || die("mkdir_p $tmpdir: $!\n"); $tmpdir .= "/bs_admin.$$"; if (-d $tmpdir) { system('rm', '-rf', $tmpdir) && die("removing of $tmpdir failed!\n"); } if (-d "$tmpdir.old") { system('rm', '-rf', "$tmpdir.old") && die("removing of $tmpdir.old failed!\n"); } print "cloning $srcproject / $srcrepo\n"; system('cp', '-al', $srcdir, $tmpdir) && die("cloning failed!\n"); # remove :repoinfo, as the new repo is not published yet unlink("$tmpdir/:repoinfo"); # remove jobhistory files for my $a (ls($tmpdir)) { unlink("$tmpdir/$a/:jobhistory"); # the new repo might get published system('rm', '-rf', "$tmpdir/$a/:repo", "$tmpdir/$a/:repodone"); } if ($dovolatile && $new_full_handling) { for my $a (ls($tmpdir)) { next unless -d "$tmpdir/$a/:full"; system('rm', '-rf', "$tmpdir/$a/_volatile"); system('cp', '-al', "$tmpdir/$a/:full", "$tmpdir/$a/_volatile") && die("volatile cloning failed!\n"); } } print "exchanging with $destproject / $destrepo\n"; rename($destdir, "$tmpdir.old") || die("rename $destdir $tmpdir.old: $!\n"); rename($tmpdir, $destdir) || die("rename $tmpdir $destdir: $!\n"); print "tell schedulers about the change "; my @archs = grep {-d "$destdir/$_"} ls($destdir); for my $a (@archs) { 
print "$a, "; write_event($destproject, $destrepo, $a, 'scanrepo'); } print "\nremoving old tree in $tmpdir.old\n"; system('rm', '-rf', "$tmpdir.old") && die("removing of $tmpdir.old failed!\n"); print "finished. Have a nice day.\n"; } sub update_request_db { my $requestdb = "$BSConfig::bsdir/db/request"; my $requestdir = "$BSConfig::bsdir/requests"; mkdir_p($requestdb) unless -d $requestdb; my $db = BSDB::opendb($requestdb, ''); $db->{'noindex'} = {'id' => 1}; my @allrequests = ls($requestdir); my $i = 0; my $count = @allrequests; for my $rid (@allrequests) { next if $rid eq ".nextid"; $i++; print "$i / $count \r"; my $req = readxml("$requestdir/$rid", $BSXML::request, 1); print "WARNING: unable to parse request: $rid!\n" unless $req; $db->updateindex($rid, {}, $req || {}); } } sub insert_request_db { my ($file) = @_; my $requestdb = "$BSConfig::bsdir/db/request"; my $requestdir = "$BSConfig::bsdir/requests"; mkdir_p($requestdb) unless -d $requestdb; my $db = BSDB::opendb($requestdb, ''); $db->{'noindex'} = {'id' => 1}; my @rid = split ('/',$file); my $rid = $rid[-1]; my $req = readxml("$requestdir/$rid", $BSXML::request, 1); print "WARNING: unable to parse request: $rid!\n" unless $req; $db->updateindex($rid, {}, $req || {}); } sub check_xml_file { my ($file, $type) = @_; print "parsing $file\n"; my $xmldesc = readxml("$file", $type, 0); if ( defined($xmldesc) ) { print "Succesfull parsed file !\n"; } else { die("ERROR: Unable to parse xml file !\n"); } } sub check_product_xml_file { my ($file) = @_; print "parsing $file\n"; my $xmldesc = BSProductXML::readproductxml("$file", 0, 1 ); if ( defined($xmldesc) ) { print "Succesfull parsed file !\n"; } else { die("ERROR: Unable to parse xml file !\n"); } } sub check_kiwi_xml { my ($project, $package) = @_; my ($md5, $file) = find_latest_file($project, $package, 'kiwi'); if (defined($md5) && defined($file)) { my $f = "$srcrepdir/$package/$md5-$file"; check_xml_file($f, $BSKiwiXML::kiwidesc); } else { die("ERROR: No kiwi config file found in $project / $package !\n"); } } sub check_constraints_xml { my ($project, $package) = @_; my ($md5, $file) = find_latest_file($project, $package, '_constraints'); if (defined($md5) && defined($file)) { my $f = "$srcrepdir/$package/$md5-$file"; check_xml_file($f, $BSXML::constraints); } else { die("ERROR: No _constraints file found in $project / $package !\n"); } } sub check_meta_xml { my ($project, $package) = @_; my $file; my $metadesc; if (defined($package)){ $file = "$projectsdir/${project}.pkg/${package}.xml"; $metadesc = readxml("$file", $BSXML::pack, 0); } else { $file = "$projectsdir/$project.xml"; $metadesc = readxml("$file", $BSXML::proj, 0); } if (defined($metadesc)) { print "Succesfull parsed $file !\n"; } else { die("ERROR: Unable to parse Meta XML in $file !\n"); } } sub write_event { my ($project, $repo, $arch, $event, $package, $job) = @_; my $evname = "${event}"; $evname .= "::$project" if defined $project; $evname .= "::$package" if defined $package; $evname .= "::$repo" if defined $repo; $evname = "${event}:::".Digest::MD5::md5_hex($evname) if length($evname) > 200; my $ev = { 'type' => $event }; $ev->{'project'} = $project if defined $project; $ev->{'package'} = $package if defined $package; $ev->{'repository'} = $repo if defined $repo; $ev->{'job'} = $job if defined $job; writexml("$eventroot/$arch/.$evname$$", "$eventroot/$arch/$evname", $ev, $BSXML::event); BSUtil::ping("$eventroot/$arch/.ping"); } sub write_publish_event { my ($project, $repo) = @_; my $evname = "${project}::${repo}"; 
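  # note: unlike write_event() above, publish events are not per-architecture;
  # they are written to $eventroot/publish, where the publisher picks them up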
my $ev = { 'type' => "publish" }; $ev->{'project'} = $project; $ev->{'repository'} = $repo; writexml("$eventroot/publish/.$evname$$", "$eventroot/publish/$evname", $ev, $BSXML::event); BSUtil::ping("$eventroot/publish/.ping"); } sub prefer_publish_event { my ($name) = @_; rename( "$eventroot/publish/$name", "$eventroot/publish/_$name" ) || die("rename of $eventroot/publish/$name failed: $!"); BSUtil::touch("$rundir/bs_publish.rescan"); } sub scan_repo { my ($project, $repo, $arch) = @_; write_event( $project, $repo, $arch, 'scanrepo' ); } sub wipe_notyet { my ($project, $repo, $arch) = @_; write_event( $project, $repo, $arch, 'wipenotyet' ); } sub dump_state { my ($arch) = @_; write_event( undef, undef, $arch, 'dumpstate' ); } sub dump_memstats { my ($arch, $job) = @_; write_event( undef, undef, $arch, 'memstats', undef, $job ); } sub shutdown_scheduler { my ($arch) = @_; write_event( '', undef, $arch, 'exitcomplete' ); } sub rebuild_full_tree { my ($project, $repo, $arch) = @_; write_event($project, $repo, $arch, 'useforbuild'); } sub check_project { my ($project, $repo, $arch, $deep, $admin) = @_; if (defined $deep) { write_event($project, $repo, $arch, 'package'); if (defined $admin) { write_event($project, $repo, $arch, 'admincheck'); }; } else { if (defined $admin) { write_event($project, $repo, $arch, 'admincheck'); } else { write_event($project, $repo, $arch, 'recheck'); } } } sub check_package { my ($project, $package, $arch) = @_; write_event($project, undef, $arch, 'package', $package); } # make stdout non-buffered $| = 1; # # Argument parsing # if ( @ARGV < 1 ){ echo_help(); exit(1); } while (@ARGV) { my $arg = shift @ARGV; if ($arg eq "--help") { echo_help(); exit(0); } if ($arg eq "--check-meta-xml") { die("ERROR: need at least a project name as argument!\n") if @ARGV < 1; my $project = shift @ARGV; if (@ARGV == 1) { my $package = shift @ARGV; check_meta_xml($project, $package); } else { check_meta_xml($project); } } elsif ($arg eq "--check-product-group-xml") { die("ERROR: need a file name as argument!\n") if @ARGV != 1; my $file = shift @ARGV; check_xml_file($file, $BSProductXML::group); } elsif ($arg eq "--check-product-xml") { die("ERROR: need a file name as argument!\n") if @ARGV != 1; my $file = shift @ARGV; check_product_xml_file($file); } elsif ($arg eq "--check-pattern-xml") { die("ERROR: need a file name as argument!\n") if @ARGV != 1; my $file = shift @ARGV; check_xml_file($file, $BSXML::pattern); } elsif ($arg eq "--check-request-xml") { die("ERROR: need a file name !\n") if @ARGV != 1; my $file = shift @ARGV; check_xml_file($file, $BSXML::request); } elsif ($arg eq "--update-request-db") { BSUtil::drop_privs_to($BSConfig::bsuser, $BSConfig::bsgroup); if (@ARGV == 1) { my $file = shift @ARGV; insert_request_db($file); } else { update_request_db(); } } elsif ($arg eq "--update-source-db") { BSUtil::drop_privs_to($BSConfig::bsuser, $BSConfig::bsgroup); my @prjs = BSRevision::lsprojects_local(); if (@ARGV == 1) { @prjs = (shift @ARGV); } for my $projid (@prjs) { for my $packid (BSRevision::lspackages_local($projid)) { print "$projid/$packid\n"; my $rev = BSRevision::getrev_local($projid, $packid); BSRevision::updatelinkinfodb($projid, $packid, $rev, BSRevision::lsrev($rev)); } } } elsif ($arg eq "--check-kiwi-xml") { die("ERROR: need either file name or project and package as argument!\n") if @ARGV < 1; if (@ARGV == 1){ my $file = shift @ARGV; check_xml_file($file, $BSKiwiXML::kiwidesc); } else { my $project = shift @ARGV; my $package = shift @ARGV; 
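      # two-argument form: look up the newest kiwi file of the given
      # project/package pair in the source repository (see check_kiwi_xml)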
check_kiwi_xml($project, $package); } } elsif ($arg eq "--check-constraints") { die("ERROR: need either file name or project and package as argument!\n") if @ARGV < 1; if (@ARGV == 1){ my $file = shift @ARGV; check_xml_file($file, $BSXML::constraints); } else { my $project = shift @ARGV; my $package = shift @ARGV; check_constraints_xml($project, $package); } } elsif ($arg eq "--show-scheduler-architectures") { my $c = readxml($configfile, $BSXML::configuration); if ($c->{'schedulers'} && @{$c->{'schedulers'}->{'arch'} || []}) { print join(' ', @{$c->{'schedulers'}->{'arch'}})."\n"; } } elsif ($arg eq "--parse-build-desc") { die("ERROR: need a file name as argument (spec, dsc or kiwi)!\n") if @ARGV < 1; my $file = shift @ARGV; my $cfile; my $arch; my $cf = $cfile = $arch = undef; $arch = shift @ARGV if @ARGV > 0; if (@ARGV > 0) { $cfile = shift @ARGV if @ARGV == 1; $cf = Build::read_config( $arch, $cfile ); }; $cf->{'arch'} = $arch if $arch; my $ret = Build::parse($cf, $file); print Dumper($ret); } elsif ($arg eq "--parse-hdrmd5") { die("ERROR: need a file name as argument (rpm or deb)!\n") if @ARGV != 1; my $file = shift @ARGV; my $ret = Build::queryhdrmd5($file); print Dumper($ret); } elsif ($arg eq "--dump-cache") { if (@ARGV == 1) { my $fullfile = shift @ARGV; die("ERROR: invalid filename (must end with .cache or .solv)\n") if $fullfile !~ /\.(?:solv|cache)$/; dump_solv($fullfile) if $fullfile =~ /\.solv$/; dump_nStore($fullfile) if $fullfile =~ /\.cache$/; } else { die("ERROR: need project, repository and architecture as argument!\n") if @ARGV < 3; my $project = shift @ARGV; my $repo = shift @ARGV; my $arch = shift @ARGV; dump_cache($project, $repo, $arch); } } elsif ($arg eq "--dump-relsync" || $arg eq '--dump') { die("ERROR: need file as argument!\n") if @ARGV < 1; my $file = shift @ARGV; dump_nStore($file, splice @ARGV); } elsif ($arg eq "--set-relsync") { die("ERROR: need file as argument!\n") if @ARGV < 1; my $file = shift @ARGV; my $s = dump_nStore($file); my $key = shift @ARGV; my $value = shift @ARGV; if (defined($key) && defined($value)){ $s->{$key} = $value; print "\nChanged to:\n"; print Dumper($s); BSUtil::store($file, undef, $s); } } elsif ($arg eq "--dump-state") { die("ERROR: need architecture as argument!\n") if @ARGV < 1; my $arch = shift @ARGV; dump_state($arch); } elsif ($arg eq "--dump-memstats") { die("ERROR: need architecture as argument!\n") if @ARGV < 1; my $arch = shift @ARGV; my $job = shift @ARGV if @ARGV; dump_memstats($arch, $job); } elsif ($arg eq "--dump-project-from-state") { die("ERROR: need project as argument!\n") if @ARGV < 1; my $project = shift @ARGV; die("ERROR: need architecture as argument!\n") if @ARGV < 1; my $arch = shift @ARGV; if (! 
-e "$rundir/bs_sched.$arch.state") { print "Error: no dumped scheduler state, use --dump-state first.\n"; exit(1); } my $schedstate = BSUtil::retrieve("$rundir/bs_sched.$arch.state", 2); if (defined($schedstate->{'remoteprojs'}->{$project})) { print "remotemap:\n"; print Dumper($schedstate->{'remoteprojs'}->{$project}); } if (defined($schedstate->{'projpacks'}->{$project})) { print "projpack:\n"; print Dumper($schedstate->{'projpacks'}->{$project}); } } elsif ($arg eq "--shutdown-scheduler") { die("ERROR: need architecture as argument!\n") if @ARGV < 1; my $arch = shift @ARGV; shutdown_scheduler( $arch ); } elsif ( $arg eq "--check-project" ) { die("ERROR: need at least project and architecture as argument!\n") if @ARGV < 2; my $project = shift @ARGV; my $repo; $repo = shift @ARGV if @ARGV == 2; my $arch = shift @ARGV; check_project($project, $repo, $arch); } elsif ( $arg eq "--check-all-projects" ) { die("ERROR: need architecture as argument!\n") if @ARGV < 1; my $arch = shift @ARGV; check_project(undef, undef, $arch); } elsif ( $arg eq "--check-package" ) { die("ERROR: need project, package and architecture as argument!\n") if @ARGV < 3; my $project = shift @ARGV; my $package = shift @ARGV; my $arch = shift @ARGV; check_package($project, $package, $arch); } elsif ( $arg eq "--rebuild-full-tree" ) { die("ERROR: need project ,repository and architecture as argument!\n") if @ARGV < 3; my $project = shift @ARGV; my $repo = shift @ARGV; my $arch = shift @ARGV; rebuild_full_tree($project, $repo, $arch); } elsif ( $arg eq "--deep-check-project" ) { die("ERROR: need at least project and architecture as argument!\n") if @ARGV < 2; my $project = shift @ARGV; my $repo; $repo = shift @ARGV if @ARGV == 2; my $arch = shift @ARGV; check_project($project, $repo, $arch, 1); } elsif ( $arg eq "--publish-repository" || $arg eq "--unpublish-repository" || $arg eq "--republish-repository" ) { die("ERROR: need project and repository as argument!\n") if @ARGV != 2; my $project = shift @ARGV; my $repo = shift @ARGV; my $repodir = "$reporoot/$project/$repo/"; if ( $arg eq "--republish-repository" ) { # clear the repository state to force republishing my $repoinfo = BSUtil::retrieve("$repodir/:repoinfo", 1) || {}; if ($repoinfo->{'state'}) { delete $repoinfo->{'state'}; BSUtil::store("$repodir/.:repoinfo", "$repodir/:repoinfo", $repoinfo); } } if ( $arg eq "--unpublish-repository" ) { # remove :repo for my $a (ls($repodir)) { next unless -e "$repodir/$a/:repodone"; system('rm', '-rf', "$repodir/$a/:repo", "$repo/$a/:repodone"); } } write_publish_event($project, $repo); } elsif ($arg eq "--prefer-publish-event") { die("ERROR: need event file name as argument!\n") if @ARGV != 1; my $name = shift @ARGV; prefer_publish_event( $name ); } elsif ( $arg eq "--clone-repository" ) { my $dovolatile; if (@ARGV && $ARGV[0] eq '--volatile') { $dovolatile = 1; shift @ARGV; } die("ERROR: need source project & repository and destination project & repository as argument!\n") if @ARGV < 3; my $srcproject = shift @ARGV; my $srcrepo = shift @ARGV; my $destproject; my $destrepo; if (@ARGV == 1) { $destrepo = shift @ARGV; $destproject = $srcproject; } else { $destproject = shift @ARGV; $destrepo = shift @ARGV; } clone_repository($srcproject, $srcrepo, $destproject, $destrepo, $dovolatile); } elsif ($arg eq "--rescan-repository") { die("ERROR: need project, repository and architecture as argument!\n") if @ARGV < 3; my $project = shift @ARGV; my $repo = shift @ARGV; my $arch = shift @ARGV; wipe_notyet($project, $repo, $arch); scan_repo( 
$project, $repo, $arch ); } elsif ($arg eq "--force-check-project") { die("ERROR: need project, repository and architecture as argument!\n") if @ARGV < 3; my $project = shift @ARGV; my $repo = shift @ARGV; my $arch = shift @ARGV; wipe_notyet($project, $repo, $arch); check_project($project, $repo, $arch, undef, 1); # with adminhighprio } elsif ($arg eq "--show-delta-file") { die("ERROR: need delta file as argument!\n") if @ARGV < 1; die("ERROR: not a OBS delta file!\n") unless BSSolv::isobscpio($ARGV[0]); my $store = $ARGV[0]; $store =~ s/[^\/]*$/deltastore/s; if (-e $store) { BSSolv::obscpioinstr($ARGV[0], $store); } else { BSSolv::obscpioinstr($ARGV[0]); } shift @ARGV; } elsif ($arg eq "--cat-delta-file") { die("ERROR: need delta file as argument!\n") if @ARGV < 1; my $store = $ARGV[0]; $store =~ s/[^\/]*$/deltastore/s; local *F; BSSolv::obscpioopen($ARGV[0], $store, \*F, "$srcrepdir/:upload") || die("ARGV[0]: $!\n"); my $chunk; print $chunk while read(F, $chunk, 4096); close F; shift @ARGV; } elsif ($arg eq "--show-delta-store") { die("ERROR: need delta file/store as argument!\n") if @ARGV < 1; my $store = $ARGV[0]; $store .= '/deltastore' if -d $store; $store =~ s/[^\/]*\.obscpio$/deltastore/s; BSSolv::obscpiostorestats($store); shift @ARGV; } elsif ($arg eq "--create-patchinfo-from-updateinfo") { my $uf = shift @ARGV; my $pooldirecotory = shift @ARGV; my $updateinfo = readxml($uf, $BSXML::updateinfoitem); my $patchinfo= {}; $patchinfo->{'incident'} = $updateinfo->{'id'}; $patchinfo->{'summary'} = $updateinfo->{'title'}; $patchinfo->{'description'} = $updateinfo->{'description'}; $patchinfo->{'version'} = $updateinfo->{'version'}; $patchinfo->{'category'} = $updateinfo->{'type'}; $patchinfo->{'packager'} = $updateinfo->{'from'}; $patchinfo->{'rating'} = 'low'; $patchinfo->{'issue'} = []; for my $ref (@{$updateinfo->{'references'}->{'reference'} || []}) { my $b; if ($ref->{'type'} eq 'bugzilla') { $b = { 'id' => $ref->{'id'}, 'tracker' => 'bnc' }; } elsif ($ref->{'type'} eq 'cve') { $b = { 'id' => $ref->{'id'}, 'tracker' => 'CVE' }; } else { die("Unhandled type $ref->{'type'}"); }; push @{$patchinfo->{'issue'}}, $b; }; delete $patchinfo->{'issue'} unless @{$patchinfo->{'issue'}} > 0; my $id = "$patchinfo->{'incident'}-$patchinfo->{'version'}"; mkdir($id); writexml("._patchinfo", "$id/_patchinfo", $patchinfo, $BSXML::patchinfo); for my $file (@{$updateinfo->{'pkglist'}->{'collection'}[0]->{'package'} || []}) { system( "find $pooldirecotory -name $file->{'filename'} | xargs -I {} cp {} $id/" ) && die( "$file->{'filename'} not found in $pooldirecotory" ); } system( "rpm -qp --qf '%{SOURCERPM}\n' $id/*rpm|while read i; do find $pooldirecotory -name \$i | xargs -I {} cp {} $id/; done" ); my $ufc; $ufc->{'update'} = []; push @{$ufc->{"update"}}, $updateinfo; writexml("$id/.updateinfo.xml", "$id/updateinfo.xml", $ufc, $BSXML::updateinfo); } elsif ($arg eq "--remove-old-sources" ) { die("ERROR: need age (in days) and count of revisions to keep as argument!\n") if @ARGV < 2; my $days = shift @ARGV; my $min_revs = shift @ARGV; die("ERROR: second argument must be >=1!\n") if $min_revs <1; my $debug = 0; if ( @ARGV == 1 ) { if ( shift @ARGV eq "--debug") { $debug = 1; } } elsif ( @ARGV > 1 ) { die("ERROR: too much parameters!\n"); } my $mastertimestamp = time - $days*60*60*24; my %deletehashes; #key: hash value: @files my %keephashes; my @revfiles; my %treesfiles; my $deletedbytes = 0; # get all .rev and .mrev files and fill hashes with files to delete or not do delete my @projectdirs; opendir(D, 
$projectsdir) || die ($!); foreach my $prjdir (readdir(D)) { next if $prjdir =~ /^\.{1,2}$/; if ( -d $projectsdir.'/'.$prjdir ) { opendir(E, $projectsdir.'/'.$prjdir) || die($!); foreach my $file (readdir(E)) { if ( $file =~ /\.(mrev|rev)(\.del){0,1}$/ ) { push @revfiles, "$projectsdir/$prjdir/$file"; open(F, '<', $projectsdir.'/'.$prjdir.'/'.$file) || die($!); my @lines = ; close(F); my @keeplines; if (scalar(@lines) < $min_revs) { @keeplines = splice(@lines, -scalar(@lines)); } else { @keeplines = splice(@lines, -$min_revs); } # remove lines to keep from normal timestamp checking and put them directly into hash foreach my $line (@keeplines) { my ($hash, $time) = ( split(/\|/, $line))[2,4]; push @{$keephashes{$hash}}, { project => $prjdir, file => $projectsdir.'/'.$prjdir.'/'.$file }; } foreach my $line (@lines) { my ($hash, $time) = ( split(/\|/, $line) )[2,4]; if ( $time < $mastertimestamp) { push @{$deletehashes{$hash}}, { project => $prjdir, file => $projectsdir.'/'.$prjdir.'/'.$file }; } else { push @{$keephashes{$hash}}, { project => $prjdir, file => $projectsdir.'/'.$prjdir.'/'.$file }; } } } } closedir(E); } } closedir(D); if ($debug) { print "all hashes to keep (must be at least one per project):\n"; foreach my $hash (keys %keephashes) { foreach my $entry (@{$keephashes{$hash}}) { print "project: ", $entry->{project}, ", file: ", $entry->{file}, " hash: ", $hash, "\n"; } } print "\n"; } # get all files from treesdir my @treesdirs; opendir(D, $treesdir) || die($!); push @treesdirs, map { $treesdir."/".$_ } readdir(D); closedir(D); opendir(D, $srcrepdir) || die($!); push @treesdirs, map { $srcrepdir."/".$_ } readdir(D); closedir(D); @treesdirs = grep { $_ !~ /\.{1,2}$/ } @treesdirs; if ($debug) { print "all treesdirs:\n", join("\n", @treesdirs); print "\n\n"; } foreach my $dir (@treesdirs) { if ( -d $dir ) { if ( $dir =~ /$srcrepdir/ ) { opendir(F, $dir) || die($!); foreach my $file (readdir(F)) { if ( $file =~ /(.+)-MD5SUMS$/ ) { my $MD5SUM = $1; $treesfiles{$MD5SUM} = $dir.'/'.$file if $file =~ /-MD5SUMS$/; } } closedir(F); } else { opendir(E, $dir) || die($!); foreach my $package (readdir(E)) { if ( -d $dir.'/'.$package ) { opendir(F, $dir.'/'.$package) || die($!); foreach my $file (readdir(F)) { if ( $file =~ /(.+)-MD5SUMS$/ ) { my $MD5SUM = $1; $treesfiles{$MD5SUM} = $dir.'/'.$package.'/'.$file if $file =~ /-MD5SUMS$/; } } closedir(F); } # if } # foreach closedir(E); } # else } # if -d $dir } #foreach if ($debug) { print "all treesfiles:\n"; foreach my $key (keys %treesfiles) { print $treesfiles{$key}, "\n"; } print "\n"; } # get all dir names in srcrepdir # fetch all filenames in subdirectories my %sourcefiles; opendir(D, $srcrepdir) || die($!); foreach my $dir (readdir(D)) { next if $dir =~ /^\.{1,2}$/; if ( -d $srcrepdir.'/'.$dir ) { opendir(E, $srcrepdir.'/'.$dir) || die($!); foreach my $file (readdir(E)) { next if $file =~ /^\.{1,2}$/; next if $file eq 'deltastore'; $sourcefiles{$file} = "$srcrepdir/$dir/$file"; } closedir(E); } } closedir(D); if ($debug) { print "all sourcefiles:\n"; foreach my $key (keys %sourcefiles) { print $sourcefiles{$key}, "\n"; } print "\n"; } my %deletefiles; # create array with files to delete from srcrepdir foreach my $file (keys %deletehashes) { next if !defined $treesfiles{$file}; open(F, '<', $treesfiles{$file}) || die($!); while () { my ($hash, $desc) = split(/\s+/, $_); $deletefiles{$hash} = $hash."-".$desc; } close(F); } if ($debug) { print "files to delete:\n"; foreach my $key (keys %deletefiles) { print $deletefiles{$key}, "\n"; } 
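      # (each entry has the form "<md5>-<filename>", matching the on-disk
      # file names under the source repository)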
print "\n"; } my %keepfiles; # look if keephashes contains links to revision that would get deleted foreach my $file (keys %keephashes) { open(F, '<', $treesfiles{$file}) || die($!); while () { my ($hash, $desc) = split(/\s+/, $_); if ( /_link/ ) { my ($hash, $desc) = split(/\s+/, $_); next if !defined( $sourcefiles{$hash.'-'.$desc}); # open link file to look if it links to a file that will be deleted my $link; eval { $link = readxml($sourcefiles{$hash.'-'.$desc}, $BSXML::link); } ; if ($@) { warn "$@ whilst processing $treesfiles{$file}"; next; } next if !defined $link->{"package"} || !defined $link->{"project"} || !defined $link->{"rev"}; my $revision = BSRevision::getrev_local($link->{"project"}, $link->{"package"}, $link->{"rev"}); next if !defined($revision) || !defined($revision->{"time"}); if ($revision->{"time"} < $mastertimestamp) { # delete the hash with the link to be able to rewrite .rev files delete ($deletehashes{$revision->{"srcmd5"}}); next unless (-e $treesfiles{$revision->{"srcmd5"}}); open(F, '<', $treesfiles{$revision->{"srcmd5"}}) or die($!); foreach my $line () { $keepfiles{$hash} = $hash."-".$desc; } close(F); } } else { $keepfiles{$hash} = $hash."-".$desc; } } close(F); } if ($debug) { print "files to keep:\n"; foreach my $key (keys %keepfiles) { print $keepfiles{$key}, "\n"; } print "\n"; } my @deletefiles; my @keepfiles = map {$_ } %keepfiles; foreach my $file (keys %deletefiles) { push @deletefiles, $deletefiles{$file} if !grep(/$file/, @keepfiles); } if ($debug) { print "files to delete without kept ones:\n"; print join("\n", @deletefiles); print "\n"; } if (scalar(@deletefiles) == 0) { print "nothing to delete\n"; } else { my $deleted = 0; my $dr = 0; # delete result # delete files! print "starting deletion process: \n" if $debug; foreach my $f (keys %sourcefiles) { print "\nfile:\t$sourcefiles{$f}" if $debug; next if !grep(/$f/, @deletefiles); if ( -e $sourcefiles{$f} ) { $deletedbytes = $deletedbytes + (stat($sourcefiles{$f}))[7] if (stat($sourcefiles{$f}))[3] == 1; $dr = unlink $sourcefiles{$f} || warn "Could not unlink $sourcefiles{$f}: $!"; if ($dr) { print " deleted\n" if $debug; $deleted++; } } } # find treefiles without references my @utreefiles; foreach my $tfile (keys %treesfiles) { } if ($deleted > 0) { # rewrite rev files foreach my $revfile (@revfiles) { my @revfile; open(F, '<', $revfile) or die($!); foreach my $line () { my ($hash) = ( split(/\|/, $line) )[2]; # do not rewrite hashes from %deletehashes, to not overwrite files uploaded as the deletion runs push @revfile, $line if (!defined $deletehashes{$hash} || defined $keephashes{$hash}); } close(F); open(F, '>', $revfile) or die($!); print F @revfile; close(F); } } # some checking needed to reread everything? 
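  # for reference: each line in a .rev/.mrev file is a BSFileDB record of the
  # form rev|vrev|srcmd5|version|time|user|comment|requestid, which is why the
  # code above picks fields 2 (srcmd5) and 4 (time) after splitting on '|'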
printf "\nDeleted %d files, Freed %.3f KB.\n", $deleted, $deletedbytes/1024; } } elsif ( $arg eq "--query-config" ) { my $var=shift @ARGV; my $val; eval "\$val = \$BSConfig::$var"; print "$val\n"; } elsif ( $arg eq "--drop-badhosts" ) { BSUtil::touch("$rundir/bs_dispatch.dropbadhosts"); } elsif ( $arg eq "--list-badhosts" ) { if ( -f "$rundir/dispatch.badhosts" ) { my $result = {}; my $badhosts = BSUtil::retrieve("$rundir/dispatch.badhosts"); print Dumper($badhosts); #for my $key (keys(%{$badhosts})) { # if ($key =~ s#^([^/]+/[^/]+/[^/]+/[^/]+)#$1# ) { # $result->{$key}=1; # } #} #for my $key (sort(keys($result))) { print "$key\n" } } else { print "No badhosts found\n"; } } else { echo_help(); exit(1) } } open-build-service-2.9.4/src/backend/bs_archivereq000077500000000000000000000024101332555733200221500ustar00rootroot00000000000000#!/usr/bin/perl BEGIN { my ($wd) = $0 =~ m-(.*)/- ; $wd ||= '.'; unshift @INC, "$wd/build"; unshift @INC, "$wd"; } use Date::Parse; use Data::Dumper; use XML::Structured ':bytes'; use BSConfig; use BSDB; use BSUtil; use BSXML; use strict; BSUtil::drop_privs_to($BSConfig::bsuser, $BSConfig::bsgroup); my $requestsdir = "$BSConfig::bsdir/requests"; my $oldrequestsdir = "$BSConfig::bsdir/requests.old"; my $reqindexdb = "$BSConfig::bsdir/db/request"; my $db = BSDB::opendb($reqindexdb, ''); $db->{'noindex'} = {'id' => 1}; my @r = sort {$a <=> $b} ls($requestsdir); my %donestates = map {$_ => 1} qw{ accept accepted deleted declined rejected revoked superseded }; my $now = time; my $cut = 366 * 24 * 3600; mkdir_p($oldrequestsdir); for my $id (@r) { next if $id =~ /^\./; print "- $id\n"; my $req = readxml("$requestsdir/$id", $BSXML::request, 1); next unless $req && $req->{'state'}; my $state = $req->{'state'}->{'name'}; next unless $donestates{$state || ''}; my $t = str2time($req->{'state'}->{'when'}); next unless $t; next if $t + $cut > $now; print "state $state, ".localtime($t)."\n"; $db->updateindex($id, $req, {}); rename("$requestsdir/$id", "$oldrequestsdir/$id") || die("rename $requestsdir/$id $oldrequestsdir/$id: $!\n"); } open-build-service-2.9.4/src/backend/bs_check_consistency000077500000000000000000000512361332555733200235270ustar00rootroot00000000000000#!/usr/bin/perl # # Copyright (c) 2011 Ruediger Oertel, SUSE LINUX Products GmbH # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program (see the file COPYING); if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA # ################################################################ # # The bs validator, a filesystem check for /bs # Combining instable storage and fragile filesystem implementations # is a bad idea. Try to find out what junk ended up in the tree today. 
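# Typical runs (illustrative): "bs_check_consistency --check-all" for a full
# scan, or "bs_check_consistency --do-check-project-build <project> --fix" to
# check a single project's build tree and drop broken files that the
# scheduler can re-create.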
# BEGIN { my ($wd) = $0 =~ m-(.*)/- ; $wd ||= '.'; unshift @INC, "$wd/build"; unshift @INC, "$wd"; } $| = 1; my $check_jobs = 0; my $check_db = 0; my $check_event = 0; my $check_worker = 0; my $check_build = 0; my $do_check_meta = 0; my $do_check_signatures = 0; my $check_one_project = ""; my $check_one_package = ""; my $fix; # undef|1=soft|2=hard use POSIX; use Data::Dumper; use Getopt::Long; use Storable (); use XML::Structured ':bytes'; use Build; our $nosharedtrees; use BSConfig; use BSFileDB; use BSWatcher; use BSUtil; use BSXML; use BSKiwiXML; use BSProductXML; use BSDB; use BSDBIndex; use BSSolv; use BSVerify; sub usage { my $basedir = "$BSConfig::bsdir"; print "bs_check_consistency: check integrity of the buildservice data tree\n"; print " --check-jobs check consistency for $basedir/jobs\n"; print " --check-db check consistency for $basedir/db\n"; print " --check-event check consistency for $basedir/events\n"; print " --check-worker check consistency for $basedir/workers\n"; print " --check-request check consistency for $basedir/requests\n"; print " --check-build check consistency for $basedir/build\n"; print " (WARNING: takes very long)\n"; print " --check-all (all of the above)\n"; print "\n"; print " --do-check-meta check content of .meta files (otherwise ignored for speed)\n"; print " --do-check-signatures check signatures of .rpm files (otherwise just the magic is checked)\n"; print " --do-check-project-build PROJECT check build tree of one project\n"; print "\n"; print " --do-check-package-source PROJECT PACKAGE check source references of one package\n"; print "\n"; print " --fix fix data which can be re-created, scheduler cold start needed\n"; print " --forced-fix fix data by breaking data which can be manually fixed by admin\n"; print "\n"; print "not yet implemented:\n"; print " --check-projects\n"; print " --check-sources\n"; print " no checking yet for .deb files yet\n"; } while (@ARGV) { my $arg = shift @ARGV; if ($arg eq "--help") { usage(); exit(0); } elsif ($arg eq "--check-jobs") { $check_jobs = 1; } elsif ($arg eq "--check-db") { $check_db = 1; } elsif ($arg eq "--check-event") { $check_event = 1; } elsif ($arg eq "--check-worker") { $check_worker = 1; } elsif ($arg eq "--check-build") { $check_build = 1; } elsif ($arg eq "--check-all") { $check_jobs = 1; $check_db = 1; $check_event = 1; $check_worker = 1; $check_build = 1; } elsif ($arg eq "--do-check-project-build") { $check_build = 1; $check_one_project = shift @ARGV; } elsif ($arg eq "--do-check-package-source") { $check_package_source = 1; $check_one_project = shift @ARGV; $check_one_package = shift @ARGV; } elsif ($arg eq "--do-check-meta") { $do_check_meta = 1; } elsif ($arg eq "--do-check-signatures") { $do_check_signatures = 1; } elsif ($arg eq "--fix") { $fix = 1; } elsif ($arg eq "--forced-fix") { $fix = 2; } else { usage(); exit 1; } } unless ($check_jobs || $check_db || $check_event || $check_worker || $check_build || $check_package_source) { usage(); exit 1; } # to drop files which can be recreated sub warnORremove { my ($file, $msg) = @_; if ($fix) { print "FIXING: $msg ($file)"; unlink($file) } else { warn("WARNING: $msg ($file)"); } } # when removing files will damage data, but should be manually fixable. 
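# (both helpers only warn by default: warnORremove() above unlinks when --fix
# or --forced-fix is given, warnORbreak() below unlinks only with --forced-fix)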
sub warnORbreak { my ($file, $msg) = @_; if ($fix == 2) { warn("REMOVING, MIGHT BREAK STUFF: $msg ($file)"); unlink($file) } else { warn("WARNING: $msg ($file)"); } } sub is_rpm_package { my ($pack) = @_; open(FILE,$pack); my $tag; sysread(FILE,$tag,4); close (FILE); warnORremove($pack, "broken rpm") unless $tag eq "\xed\xab\xee\xdb"; if ($do_check_signatures) { # just check integrity, no gpg verification open(CHKSIG,'-|',"rpm --checksig --nosignature $pack 2>&1"); my $chksig = ; close (CHKSIG); warnORremove($pack, "broken signature") unless $chksig =~ /md5 OK$/ || $chksig =~ /md5 gpg OK$/ || $chksig =~ /md5 pgp OK$/; } } sub check_package_meta { my ($pra, $prap, $metafile) = @_; return unless $do_check_meta; if (-f $metafile) { open (META,"$metafile"); while () { warnORremove($metafile, "broken meta file") unless /^[0-9a-f]{32} ..*$/ || /^fake to detect source changes/; } close (META); } else { warn ("ERROR: $metafile strange entry"); } } sub check_package_dir { # used in package build dir and :full and :repo my ($basedir) = @_; opendir(PACK,$basedir); my @entry = readdir(PACK); closedir (PACK); for my $ent (sort(@entry)) { next if $ent eq "." || $ent eq ".."; if ($ent eq ":bininfo") { open (BIN,"$basedir/$ent"); while (my $bini = ) { chomp($bini); if ($bini =~ /^[0-9a-f]{32} \(..*\)$/) { warnORremove("$basedir/$ent", "bininfo missing rpm $2") unless grep {$_ eq $2} @entry; } } close (BIN); } elsif ($ent =~ /.meta$/) { next unless $do_check_meta; open (META,"$basedir/$ent"); while () { warnORremove("$basedir/$ent", "broken meta file") unless /^[0-9a-f]{32} ..*$/; } close (META); } elsif ($ent eq "history" || $ent eq "logfile" || $ent =~ /\.desktopfiles$/) { # okay } elsif ($ent eq "reason") { eval { my $rdx = readxml("$basedir/$ent",$BSXML::buildreason,0) } || warnORremove("$basedir/$ent", "broken xml file, $@"); } elsif ($ent eq "status") { eval { my $rdx = readxml("$basedir/$ent",$BSXML::buildstatus,0) } || warnORremove("$basedir/$ent", "broken xml file, $@"); } elsif ($ent =~ /\.rpm$/) { is_rpm_package("$basedir/$ent"); } elsif ($ent =~ /\.sha256$/ || $ent =~ /\.md5$/ || $ent =~ /\.bz2$/ || $ent =~ /\.iso$/ || $ent =~ /\.tar\.gz$/) { # fixme to be done } elsif ($ent =~ /updateinfo.xml$/) { eval { my $rdx = readxml("$basedir/$ent",$BSXML::updateinfo,0); } || warnORremove("$basedir/$ent", "broken xml file, $@"); } elsif ($ent eq ".updateinfo" || $ent eq ".updateinfodata") { eval { my $rdx = Storable::retrieve("$basedir/$ent") || {}; } || warnORremove("$basedir/$ent", "broken storable file, $@"); } elsif ($ent =~ /^.waiting_for_/) { # okay, might check for 0-size } elsif (-d "$basedir/$ent" && $ent =~ /-Media[0-9]*$/) { # okay, unpacked media tree } elsif ($ent =~ /\.deb$/ || $ent =~ /\.diff\.gz$/ || $ent =~ /\.dsc$/ || $ent =~ /\.changes$/) { # okay, debian stuff # FIXME: not checked yet } elsif ($ent eq ".errors") { # seems to be some legacy file } elsif ($ent eq ":lastcache") { # unchecked for now } elsif ($ent eq ":full.useforbuild") { # unchecked for now } elsif ($ent eq ":full.metacache") { # unchecked for now } elsif ($ent eq ".bininfo") { # unchecked for now } elsif ($ent eq ".checksums") { # unchecked for now } elsif ($ent eq ".channelinfo") { # unchecked for now } elsif ($ent eq "_statistics") { # unchecked for now } elsif ($ent eq "_buildenv") { # unchecked for now } elsif ($ent =~ /^pesign-/) { # unchecked for now } elsif ($ent =~ /[-.]appdata\.xml$/) { # unchecked for now } elsif ($ent =~ /\.applications$/) { # unchecked for now } elsif ($ent eq ".meta.success") { # unchecked 
for now } elsif ($ent eq ".nosourceaccess") { # just a flag file } elsif ($ent eq "rpmlint.log") { # unchecked for now } elsif ($ent eq ".preinstallimages") { # unchecked for now } elsif ($ent =~ /-Build/) { # kiwi build result } else { warnORbreak("$basedir/$ent", "strange file in package dir"); } } } $nosharedtrees = $BSConfig::nosharedtrees if defined($BSConfig::nosharedtrees); my $jobsroot = "$BSConfig::bsdir/jobs"; my $dbroot = "$BSConfig::bsdir/db"; my $buildroot = "$BSConfig::bsdir/build"; my $eventroot = "$BSConfig::bsdir/events"; my $projectsdir = "$BSConfig::bsdir/projects"; my $srcrepdir = "$BSConfig::bsdir/sources"; my $treesdir = $nosharedtrees ? "$BSConfig::bsdir/trees" : $srcrepdir; my $workersroot = "$BSConfig::bsdir/workers"; my $sourcedb = "$BSConfig::bsdir/db/source"; my $requestsroot = "$BSConfig::bsdir/requests"; my $srcrevlay = [qw{rev vrev srcmd5 version time user comment requestid}]; my $eventlay = [qw{number time type project package repository arch}]; if (-d $projectsdir && $check_package_source) { my $metafile; eval { $file = readxml("$projectsdir/$check_one_project.pkg/$check_one_package.xml",$BSXML::pack,0); } || warn ("ERROR: package meta of check_one_project/$check_one_package is not parseable"); for my $suffix ("rev","mrev","rev.del","mrev.del") { my $file = "$projectsdir/$check_one_project.pkg/$check_one_package.$suffix"; if (-e $file) { eval { my @revs = BSFileDB::fdb_getall($file, $srcrevlay) } || warn ("ERROR: revisions in $suffix file of $check_one_project/$check_one_package are not parsable: $@)"); } } } if (-d $jobsroot && $check_jobs) { print "PROGRESS: checking jobs\n"; for my $file ("$jobsroot/dispatchprios","$jobsroot/load") { if (-f $file) { eval { my $dp = Storable::retrieve($file) || {}; } || warnORremove($file, "broken, $@"); } else { warn ("WARNING: $file missing"); } } opendir (DIR, $jobsroot); my @dir_a = readdir(DIR); closedir(DIR); for my $dir (sort(@dir_a)) { next if $dir =~ /^\./; next unless -d "$jobsroot/$dir"; my $jobsdir = "$jobsroot/$dir"; print "PROGRESS: working on $jobsdir\n"; opendir(JDIR,$jobsdir); my @file_a = readdir(JDIR); closedir(JDIR); for my $file (sort(@file_a)) { next if $file =~ /^\./; if ($file =~ /:status$/) { my $job = $file; $job =~ s/:status$//; unless (-f "$jobsdir/$job") { warnORremove("$jobsdir/$file", "status file without existing job"); } next unless -e "$jobsdir/$file"; my $status; eval { $status = readxml("$jobsdir/$file",$BSXML::jobstatus,0); } || warnORremove("$jobsdir/$file", "broken status $@"); warn ("WARNING: $jobsdir/$file is dispatching") if $status->{'code'} eq "dispatching"; } elsif ($file =~ /:dir$/) { warn ("ERROR: $jobsdir/$file is not a directory") unless -d "$jobsdir/$file"; my $curjob = "$jobsdir/$file"; opendir (JOB,$curjob); my @jfile_a = readdir(JOB); closedir(JOB); for my $jfile (sort(@jfile_a)) { next if ($jfile =~ /^\./); if ($jfile = "meta" && -f "$curjob/$jfile") { open(META,"$curjob/$jfile"); while () { warnORremove("$curjob/$jfile", "broken entry $_") unless /^[0-9a-f]{32} ..*$/; } close (META); } } } else { next unless -e "$jobsdir/$file"; # this better be a job file my $jobentry; eval { $jobentry = readxml("$jobsdir/$file",$BSXML::buildinfo,0); } || warnORremove("$jobsdir/$file", "broken buildinfo"); } } } } if (-d "$dbroot" && $check_db) { opendir(DIR,$dbroot); my @dbdir_a = readdir(DIR); closedir(DIR); for my $dbdir (sort(@dbdir_a)) { next if $dbdir =~ /^\./; if ($dbdir eq "published" || $dbdir eq "source") { # request db dir is obsolete print "PROGRESS: checking $dbdir 
db\n"; if (-d "$dbroot/$dbdir") { } else { warn "ERROR: $dbroot/$dbdir is not a directory"; } my $progress; opendir (DBP, "$dbroot/$dbdir"); while (my $dbp = readdir(DBP)) { next if $dbp =~ /^\./; unless (-d "$dbroot/$dbdir/$dbp") { warnORbreak ("$dbroot/$dbdir/$dbp", "is not a directory"); } if ($progress ne substr($dbp,0,1)) { $progress = substr($dbp,0,1); print "DBPROGRESS: $progress"; } if ($dbp =~ /^[0-9a-f][0-9a-f]$/) { opendir (DBPP, "$dbroot/$dbdir/$dbp"); while (my $dbpp = readdir(DBPP)) { next if $dbpp =~ /^\./; if ($dbpp !~ /^[0-9a-f]{30}$/) { warnORbreak("$dbroot/$dbdir/$dbp/$dbpp", "strange published entry"); } eval { my $publ = Storable::retrieve("$dbroot/$dbdir/$dbp/$dbpp") || {}; } || warnORbreak("$dbroot/$dbdir/$dbp/$dbpp", "broken, $@"); } closedir (DBPP); } else { warn ("ERROR: $dbroot/$dbdir/$dbp strange entry"); } } closedir (DBP); print "\n"; } else { warn ("WARNING: $dbroot/$dbdir unknown db dir entry"); } } } if (-d "$eventroot" && $check_event) { print "PROGRESS: checking events dir\n"; opendir(EVT,$eventroot); while (my $evt = readdir(EVT)) { next if $evt =~ /^\./; if (-f "$eventroot/$evt") { next if $evt eq "lastevents"; next if $evt eq "lastnotifications"; warnORremove("$eventroot/$evt", "strange file"); } elsif (-d "$eventroot/$evt") { opendir(EVTT,"$eventroot/$evt"); while (my $evtt = readdir(EVTT)) { next if $evtt =~ /^\./; eval { my $event = readxml("$eventroot/$evt/$evtt",$BSXML::event,0); } || warnORremove("$eventroot/$evt/$evtt", "broken event $@"); } closedir(EVTT); } else { warn ("ERROR: $eventroot/$evt strange directory entry"); } } closedir(EVT); } if (-d "$workersroot" && $check_worker) { print "PROGRESS: checking workers dir\n"; opendir(WRK,$workersroot); while (my $wrk = readdir(WRK)) { next if $wrk =~ /^\./; if (-f "$workersroot/$wrk") { warnORremove("$workersroot/$wrk", "strange file"); } elsif (-d "$workersroot/$wrk") { opendir(WRKR,"$workersroot/$wrk"); while (my $wrkr = readdir(WRKR)) { next if $wrkr =~ /^\./; if (-f "$workersroot/$wrk/$wrkr") { # todo, check for valid arch next if ($wrk eq "disable"); eval { my $worker = readxml("$workersroot/$wrk/$wrkr",$BSXML::worker,0); } || warnORremove("$workersroot/$wrk/$wrkr", "broken worker entry $@"); } elsif (-e "$workersroot/$wrk/$wrkr") { warnORremove("$workersroot/$wrk/$wrkr", "strange directory entry"); } } closedir(WRKR); } else { warn ("ERROR: $workersroot/$wrk strange directory entry"); } } closedir(EVT); } if (-d "$buildroot" && $check_build) { print "PROGRESS: checking build dir\n"; opendir(PRJ,$buildroot); my @prjlist = readdir(PRJ); closedir (PRJ); for my $prj (sort (@prjlist)) { next if $prj =~ /^\./; if (-f "$buildroot/$prj") { next if $prj eq "_repoid"; warn ("ERROR: $buildroot/$prj strange file"); } elsif (-d "$buildroot/$prj") { next if $check_one_project && $prj ne $check_one_project; # we have a project, look for repositories my $proj = "$buildroot/$prj"; opendir (REP, $proj); my @rep_a = readdir(REP); closedir(REP); for my $rep (sort(@rep_a)) { next if $rep =~ /^\./; if (-f "$proj/$rep") { warn ("WARNING: $proj/$rep strange file"); } elsif (-d "$proj/$rep") { # we have a repository, start checking opendir (ARCH,"$proj/$rep"); my @arch_a = readdir(ARCH); closedir (ARCH); for my $arch (sort(@arch_a)) { next if $arch eq "." 
|| $arch eq ".."; if (-f "$proj/$rep/$arch") { if ($arch eq ":repoinfo") { eval { my $dp = Storable::retrieve("$proj/$rep/$arch") || {}; } || warnORremove("$proj/$rep/$arch", "broken, $@"); } elsif ($arch eq ".finishedlock") { # okay, should be 0 byte } else { warn ("ERROR: $proj/$rep/$arch strange file"); } } elsif (-d "$proj/$rep/$arch") { my $pra = "$proj/$rep/$arch"; print "PROGRESS: running on $pra\n"; opendir(PRAP,$pra); my @prap_a = readdir(PRAP); closedir(PRAP); for my $prap (sort(@prap_a)) { next if $prap eq "." || $prap eq ".."; if (-f "$pra/$prap") { if ($prap eq ":depends" || $prap eq ":full.cache" || $prap eq ":relsync" || $prap eq ":relsync.max" || $prap eq ":relsync.sent" || $prap eq ":repoinfo") { eval { my $dp = Storable::retrieve("$pra/$prap") || {}; } || warnORremove("$pra/$prap", "broken, $@"); } elsif ($prap eq ":packstatus") { eval { my $dp = Storable::retrieve("$pra/$prap") || {}; } || eval { my $dp = readxml("$pra/$prap",$BSXML::packstatuslist,0); } || warnORremove("$pra/$prap", "is not a perl storable and not a packstatuslist"); } elsif ($prap eq ":jobhistory") { eval { my @hist = BSFileDB::fdb_getall("$pra/$prap", $BSXML::jobhistlay) } || warnORbreak("$pra/$prap", "is not parsable"); } elsif ($prap eq ":schedulerstate") { # FIXME check 1 line content } elsif ($prap eq ":repodone" || $prap eq ":schedulerstate.dirty") { # empty files, ignore them } elsif ($prap eq ":repostate") { eval { my $repstate = readxml("$pra/$prap",$BSXML::repositorystate,0); } || warnORremove("$pra/$prap", "broken repostate $@"); } elsif ($prap eq ":full.solv") { # FIXME #my $pool = BSSolv::pool->new(); #eval { # my $cache = $pool->repofromfile($proj,"$pra/$prap"); #} || warn ("ERROR: $pra/$prap broken file"); } elsif ($prap eq ".errors") { # seems to be some legacy file } elsif ($prap eq ":bininfo" || $prap eq ":bininfo.merge" ) { # unchecked for now } elsif ($prap eq ":lastfailures") { # unchecked for now } elsif ($prap eq ":lastcheck") { # unchecked for now } elsif ($prap eq ":full.metacache") { # unchecked for now } elsif ($prap eq ":full.metacache.merge") { # unchecked for now } elsif ($prap eq ":full.xcache") { # unchecked for now } elsif ($prap eq ":full.useforbuild") { # unchecked for now } elsif ($prap eq ".bininfo") { # unchecked for now } elsif ($prap eq ".checksums") { # unchecked for now } elsif ($prap eq ".preinstallimages") { # unchecked for now } elsif ($prap eq ":packstatus.finished") { # unchecked for now } elsif ($prap eq ".meta.success") { # unchecked for now check_package_meta($pra, $prap, "$pra/$prap/.meta.success"); } elsif ($prap eq "_deltas") { # unchecked for now } else { warn ("ERROR: strange file $pra/$prap"); } } elsif (-d "$pra/$prap") { if ($prap eq ":logfiles.fail" || $prap eq ":logfiles.success") { # fine, probably no need to check } elsif ($prap eq ":meta") { next unless $do_check_meta; opendir(MTA,"$pra/$prap"); while (my $mta = readdir(MTA)) { next if $mta eq "." 
|| $mta eq "..";
        warnORremove("$pra/$prap/$mta", "meta file without existing package") unless -d "$pra/$mta";
        check_package_meta($pra, $prap, "$pra/$prap/$mta");
      }
      closedir (MTA);
    } elsif ($prap eq ":full") {
      check_package_dir("$pra/$prap") unless -l "$pra/$prap";
    } else {
      # probably a package, start checking
      check_package_dir("$pra/$prap");
    }
  } else {
    warn ("ERROR: $pra/$prap strange directory entry in repository") if -e "$pra/$prap";
  }
}
} else {
  warn ("ERROR: $proj/$rep/$arch strange directory entry in architecture");
}
}
} else {
  warn ("ERROR: $proj/$rep strange directory entry in project");
}
}
} else {
  warn ("ERROR: $buildroot/$prj strange directory entry in build dir");
}
}
}
print "check finished\n";
open-build-service-2.9.4/src/backend/bs_cleanup000077500000000000000000000647321332555733200214610ustar00rootroot00000000000000
#!/usr/bin/perl -w
#
# Copyright (c) 2015-2016 Ericsson AB
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program (see the file COPYING); if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#
################################################################
#
# Admin tool for finding and removing old source files in one or more packages
#

use strict;

use File::Find;
use Data::Dumper;
use File::Basename;
use Getopt::Long;
use File::Path qw(make_path);
use File::Copy;
use XML::Simple;

our $OBS_SRC_DIR = "/srv/obs/sources";
our $OBS_TREES_DIR = "/srv/obs/trees";
our $OBS_PROJ_DIR = "/srv/obs/projects";
our $MD5_PATTERN = "[0-9a-fA-F]{32}";

our @include_pkgs = ();
our @exclude_pkgs = ();
our @remove_old_sources = ();
our $service_cleanup = 0;
our @full_cleanup = ();
our $remove_deleted_project = 0;
our $debug = 0;
our $search = 0;
our $move_dst = 0;
our $help = 0;
our $remove = 0;
our $total_size = 0;

sub print_help {
  my $help = <<"END_HELP";
*** EXPERIMENTAL ***
Admin tool for finding and/or removing old source files in one or more packages.

usage: $0 |MODE| |OPTIONS|

MODE (mandatory)
--remove-old-sources <days> <num>
    Find and remove sources older than <days> days, but keep <num> number
    of revisions. Functionality is based on bs_admin script.
    Service generated files are not removed.
--service-cleanup
    Find and remove unrevisioned service files.
--full-cleanup <days> <num>
    Combines both commands above.
    NOTE when using search-option with full-cleanup: service file cleanup
    search checks only against current revisions. It doesn't take into
    account what revisions will be deleted.
--remove-deleted-project <project>
    Remove files of a deleted project.

OPTIONS (optional)
--help|-h
    Shows this help menu and exit.
--debug|-d
    Debug output prints enabled.
--search|-s
    Search old source files to be removed. Default mode unless otherwise specified.
--remove|-rm
    Remove old source files.
--move|-mv <folder>
    Move old source files to <folder> folder.
--exclude-packages <pkg1> <pkg2> ...
    Excludes one or more packages from source file search.
    Otherwise all packages are included to search.
--exclude-packages <file>
    Reads list of packages from <file> which will be excluded from the search.
    Packages must be separated by whitespace in the file.
--include-packages <pkg1> <pkg2> ...
    Includes one or more packages to source file search.
    Otherwise all packages are included to search.
--include-packages <file>
    Reads list of packages from <file> which will be included to the search.
    Packages must be separated by whitespace in the file.
END_HELP
  print $help;
}

sub read_pkg_file {
  my $pkg_list = shift;
  my $pkg_file = pop @$pkg_list;
  open my $fh, '<', "$pkg_file" or die "unable to read $pkg_file!\n";
  while (my $line = <$fh>) {
    push @$pkg_list, split(/\s+/, $line);
  }
  close $fh;
}

sub format_size {
  my $size = shift;
  my $exp = 0;
  my $units = [qw(B KB MB GB TB PB)];
  for (@$units) {
    last if $size < 1024;
    $size /= 1024;
    $exp++;
  }
  return wantarray ? ($size, $units->[$exp]) : sprintf("%.2f %s", $size, $units->[$exp]);
}

sub is_rev_in_proj {
  my $proj = shift;
  my $pkg = shift;
  my $revs = shift;
  my $rev_file = "$OBS_PROJ_DIR/$proj.pkg/$pkg.rev";
  return 0 unless -e $rev_file;
  open my $fh, '<', "$rev_file" or die "unable to read $rev_file!\n";
  my $rev_found = 0;
  while (my $line = <$fh>) {
    my @fields = split('\|', $line);
    my $rev = $fields[2];
    if (grep {$_ eq $rev} @$revs) {
      print " Revision $rev found in $rev_file\n" if $debug;
      $rev_found = 1;
      last;
    }
  }
  close $fh;
  return $rev_found;
}

sub find_link {
  my $revs = shift;
  my $md5 = shift;
  my $found = 0;
  foreach my $rev (@$revs) {
    my @rev_files = ();
    eval {
      find(sub { if ($File::Find::name =~ /$rev-MD5SUMS/) { push @rev_files, $File::Find::name; } }, $OBS_TREES_DIR);
    };
    foreach my $rev_file (@rev_files) {
      if (-e $rev_file) {
        my $rev_path = dirname($rev_file);
        print " revision link $rev is linked to $rev_file\n" if $debug;
        my @f = split('/', $rev_path);
        my $pkg_link = $f[@f - 1];
        my $proj_link = $f[@f - 2];
        my $revs_link = get_trees_rev($rev_path, $md5);
        if (exists $revs_link->{"LSERVICE"}) {
          $found += is_rev_in_proj($proj_link, $pkg_link, $revs_link->{"LSERVICE"});
        }
        if (exists $revs_link->{"LINK"}) {
          print " link to link detected in $rev_file\n" if $debug;
          next;
        }
        last if $found != 0
      }
    }
  }
  return $found;
}

sub get_trees_rev {
  my $tree = shift;
  my $md5 = shift;
  my $revs;
  opendir(TREE, $tree) or die $!;
  while (my $md5file = readdir(TREE)) {
    if ($md5file =~ /$MD5_PATTERN-MD5SUMS/) {
      open my $fh, '<', "$tree/$md5file" or die "unable to read $tree/$md5file!\n";
      my $rev;
      my $md5_match = 0;
      my $is_link = 0;
      while (my $line = <$fh>) {
        if ($line =~ /$md5/) {
          $md5_match = 1;
        } elsif ($line =~ /($MD5_PATTERN)\s+\/LSERVICE/) {
          $rev = $1;
          last;
        } elsif ($line =~ /($MD5_PATTERN)\s+\/LINK/) {
          $rev = $1;
          $is_link = 1;
          last;
        }
      }
      close $fh;
      if ($md5_match == 1 && defined $rev) {
        if ($is_link == 1) {
          push @{$revs->{"LINK"}}, $rev;
          print " found revision link: $rev from $md5file\n" if $debug;
        } else {
          push @{$revs->{"LSERVICE"}}, $rev;
          print " found revision: $rev from $md5file\n" if $debug;
        }
      }
    }
  }
  closedir(TREE);
  return $revs;
}

sub search_revisions {
  my $service_file = shift;
  my $pkg = shift;
  (my $md5) = ($service_file =~ /($MD5_PATTERN).*/);
  opendir(TREES_DIR, $OBS_TREES_DIR) or die $!;
  my $found = 0;
  while (my $project = readdir(TREES_DIR)) {
    next unless -d "$OBS_TREES_DIR/$project/$pkg";
    print " search revisions: $OBS_TREES_DIR/$project/$pkg\n" if $debug;
    my $revs = get_trees_rev("$OBS_TREES_DIR/$project/$pkg", $md5);
    if (exists $revs->{"LSERVICE"}) {
      $found += is_rev_in_proj($project, $pkg, $revs->{"LSERVICE"});
    }
    if (exists $revs->{"LINK"}) {
      $found += find_link($revs->{"LINK"}, $md5);
    }
    last if ($found != 0)
  }
  closedir(TREES_DIR);
  if ($found == 0) {
    my $size = -s "$OBS_SRC_DIR/$pkg/$service_file" || 0;
    $total_size += $size;
    if ($remove) {
      printf
" delete unrevisioned file: $OBS_SRC_DIR/$pkg/$service_file size: %.2f %s\n", format_size($size); unlink "$OBS_SRC_DIR/$pkg/$service_file" or die "error deleting file $OBS_SRC_DIR/$pkg/$service_file!\n"; } elsif ($move_dst) { unless (-d "$move_dst/$pkg") { make_path("$move_dst/$pkg", {error => \my $err}); if (@$err) { die "error creating directory $move_dst/$pkg!\n"; } } print " move unrevisioned file: $OBS_SRC_DIR/$pkg/$service_file to $move_dst/$pkg/$service_file\n"; move("$OBS_SRC_DIR/$pkg/$service_file", "$move_dst/$pkg/$service_file") or die "move failed! $!\n"; } else { printf " found unrevisioned file: $OBS_SRC_DIR/$pkg/$service_file size: %.2f %s\n", format_size($size); } } } sub check_pkg { my $pkg = shift; print "\ncheck package: $OBS_SRC_DIR/$pkg\n" if $debug; opendir(SRC_PKG_DIR, "$OBS_SRC_DIR/$pkg") or die $!; while (my $src_file = readdir(SRC_PKG_DIR)) { next if ($src_file =~ m/^\./); if ($src_file =~ /$MD5_PATTERN-_service/) { print " service file: $OBS_SRC_DIR/$pkg/$src_file\n" if $debug; search_revisions($src_file, $pkg); } } closedir(SRC_PKG_DIR); } sub search_srcs { opendir(SRC_DIR, $OBS_SRC_DIR) or die $!; while (my $pkg = readdir(SRC_DIR)) { next if ($pkg =~ m/^\./); if (@exclude_pkgs) { if (grep {$_ eq $pkg} @exclude_pkgs) { print "\nskip package: $OBS_SRC_DIR/$pkg\n" if $debug; next; } } if (@include_pkgs) { unless (grep {$_ eq $pkg} @include_pkgs) { print "\nskip package: $OBS_SRC_DIR/$pkg\n" if $debug; next; } } if (-d "$OBS_SRC_DIR/$pkg") { check_pkg($pkg); } } closedir(SRC_DIR); } # taken from BSUtils sub ls { local *D; opendir(D, $_[0]) || return (); my @r = grep {$_ ne '.' && $_ ne '..'} readdir(D); closedir D; return @r; } # taken from BSRevision:lsprojects_local sub lsprojects_local { my ($deleted) = @_; if ($deleted) { my @projids = grep {s/\.pkg$//} ls("$OBS_PROJ_DIR/_deleted"); @projids = grep {! 
-e "$OBS_PROJ_DIR/$_.xml"} @projids;
    return sort @projids;
  }
  local *D;
  return () unless opendir(D, $OBS_PROJ_DIR);
  my @projids = grep {s/\.xml$//} readdir(D);
  closedir(D);
  return sort @projids;
}

# functionality based on bs_admin script
sub remove_old_srcs {
  my $days = shift;
  my $min_revs = shift;
  my $deleted_prj = shift || "";
  my $deleted_projects = shift || 0;
  my $proj_dir = $OBS_PROJ_DIR;
  my @projids;
  if ($deleted_projects) {
    $min_revs = 0;
    $days = 0;
    $proj_dir = "$OBS_PROJ_DIR/_deleted";
    @projids = ($deleted_prj);
  } else {
    @projids = lsprojects_local();
    die("ERROR: second argument must be >=1!\n") if $min_revs < 1;
  }
  my $mastertimestamp = time - $days*60*60*24;
  my %deletehashes;
  my %keephashes;
  my @revfiles;
  my %treesfiles;
  my $deletedbytes = 0;
  # get all .rev and .mrev files and fill hashes with files to delete or not to delete
  my @projectdirs;
  foreach my $prjid (@projids) {
    my $prjdir = "$prjid.pkg";
    if ( -d $proj_dir.'/'.$prjdir ) {
      opendir(E, $proj_dir.'/'.$prjdir) || die($!);
      foreach my $file (readdir(E)) {
        if ( $file =~ /\.(rev)(\.del){0,1}$/ ) {
          push @revfiles, "$proj_dir/$prjdir/$file";
          open(F, '<', $proj_dir.'/'.$prjdir.'/'.$file) || die($!);
          my @lines = <F>;
          close(F);
          my @keeplines;
          if (!$deleted_projects) {
            if (scalar(@lines) < $min_revs) {
              @keeplines = splice(@lines, -scalar(@lines));
            } else {
              @keeplines = splice(@lines, -$min_revs);
            }
          }
          # remove lines to keep from normal timestamp checking and put them directly into hash
          foreach my $line (@keeplines) {
            my ($hash, $time) = ( split(/\|/, $line))[2,4];
            push @{$keephashes{$hash}}, { project => $prjdir, file => $proj_dir.'/'.$prjdir.'/'.$file };
          }
          foreach my $line (@lines) {
            my ($hash, $time) = ( split(/\|/, $line) )[2,4];
            if ( $time < $mastertimestamp || $deleted_projects) {
              push @{$deletehashes{$hash}}, { project => $prjdir, file => $proj_dir.'/'.$prjdir.'/'.$file };
            } else {
              push @{$keephashes{$hash}}, { project => $prjdir, file => $proj_dir.'/'.$prjdir.'/'.$file };
            }
          }
        }
      }
      closedir(E);
    }
  }
  if ($debug) {
    print "all hashes to keep (must be at least one per project):\n";
    foreach my $hash (keys %keephashes) {
      foreach my $entry (@{$keephashes{$hash}}) {
        print "project: ", $entry->{project}, ", file: ", $entry->{file}, " hash: ", $hash, "\n";
      }
    }
    print "\n";
    print "all hashes to remove:\n";
    foreach my $hash (keys %deletehashes) {
      foreach my $entry (@{$deletehashes{$hash}}) {
        print "project: ", $entry->{project}, ", file: ", $entry->{file}, " hash: ", $hash, "\n";
      }
    }
    print "\n";
  }
  # get all files from OBS_TREES_DIR
  my @treesdirs;
  opendir(D, $OBS_TREES_DIR) || die($!);
  push @treesdirs, map { $OBS_TREES_DIR."/".$_ } readdir(D);
  closedir(D);
  opendir(D, $OBS_SRC_DIR) || die($!);
  push @treesdirs, map { $OBS_SRC_DIR."/".$_ } readdir(D);
  closedir(D);
  @treesdirs = grep { $_ !~ /\.{1,2}$/ } @treesdirs;
  if ($debug) {
    print "all treesdirs:\n", join("\n", @treesdirs);
    print "\n\n";
  }
  foreach my $dir (@treesdirs) {
    if ( -d $dir ) {
      if ( $dir =~ /$OBS_SRC_DIR/ ) {
        opendir(F, $dir) || die($!);
        foreach my $file (readdir(F)) {
          if ( $file =~ /(.+)-MD5SUMS$/ ) {
            my $MD5SUM = $1;
            $treesfiles{$MD5SUM} = $dir.'/'.$file if $file =~ /-MD5SUMS$/;
          }
        }
        closedir(F);
      } else {
        opendir(E, $dir) || die($!);
        foreach my $package (readdir(E)) {
          if ( -d $dir.'/'.$package ) {
            opendir(F, $dir.'/'.$package) || die($!);
            foreach my $file (readdir(F)) {
              if ( $file =~ /(.+)-MD5SUMS$/ ) {
                my $MD5SUM = $1;
                $treesfiles{$MD5SUM} = $dir.'/'.$package.'/'.$file if $file =~ /-MD5SUMS$/;
              }
            }
            closedir(F);
          } # if
        } # foreach
        closedir(E);
      } # else
    } # if -d $dir
  } #foreach
  if ($debug) {
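    # %treesfiles maps a revision md5sum to the path of the MD5SUMS file
    # describing that revision. For illustration only (an assumed example,
    # not taken from a real instance), such a file lists one "<md5> <name>"
    # pair per line, e.g.
    #   d41d8cd98f00b204e9800998ecf8427e mypackage.tar.gz
    # and may contain special names like /LSERVICE or /LINK pointing to a
    # service or link revision (these are the patterns matched in
    # get_trees_rev above).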
print "all treesfiles:\n";
    foreach my $key (keys %treesfiles) {
      print $treesfiles{$key}, "\n";
    }
    print "\n";
  }

  my %deletefiles;
  # create a hash with the files to delete from OBS_SRC_DIR
  foreach my $rev (keys %deletehashes) {
    next if !defined $treesfiles{$rev};
    my $package_path = dirname($treesfiles{$rev});
    my $project = basename(dirname($package_path));
    my $package = basename($package_path);
    if (@exclude_pkgs) {
      if (grep {$_ eq $package} @exclude_pkgs) {
        print "revision $rev is skipped for package $package\n" if $debug;
        next;
      }
    }
    if (@include_pkgs) {
      unless (grep {$_ eq $package} @include_pkgs) {
        print "revision $rev is skipped for package $package\n" if $debug;
        next;
      }
    }
    open(F, '<', $treesfiles{$rev}) || die($!);
    while (<F>) {
      my ($hash, $desc) = split(/\s+/, $_);
      my $key = (scalar keys %deletefiles) + 1;
      $deletefiles{$key} = { "project" => $project, "package" => $package, "hash" => $hash, "desc" => $desc, "revision" => $rev };
    }
    close(F);
  }
  if ($debug) {
    print "files to delete:\n";
    foreach my $key (keys %deletefiles) {
      print "file: ".$deletefiles{$key}->{"hash"}."-".$deletefiles{$key}->{"desc"}."\n".
            " project: ".$deletefiles{$key}->{"project"}."\n".
            " package: ".$deletefiles{$key}->{"package"}."\n".
            " revision: ".$deletefiles{$key}->{"revision"}."\n";
    }
    print "\n";
  }
  my %keepfiles;
  # look if keephashes contains links to revision that would get deleted
  print "check kept revisions are not linked to deleted ones:\n" if $debug;
  foreach my $file (keys %keephashes) {
    print "check files ".$treesfiles{$file}."\n" if $debug;
    #my $package = basename(dirname($treesfiles{$file}));
    my $package_path = dirname($treesfiles{$file});
    my $project = basename(dirname($package_path));
    my $package = basename($package_path);
    open(F, '<', $treesfiles{$file}) || die($!);
    while (<F>) {
      my ($hash, $desc) = split(/\s+/, $_);
      #$keepfiles{$hash} = $hash."-".$desc;
      my $key = (scalar keys %keepfiles) + 1;
      $keepfiles{$key} = { "project" => $project, "package" => $package, "hash" => $hash, "desc" => $desc, "revision" => $file };
      if ($desc eq "_link") {
        # search link files from sources
        my $link_file = "$OBS_SRC_DIR/$package/$hash-$desc";
        next unless -e $link_file;
        print "read link file: $link_file\n";
        # open link file to look if it links to a file that will be deleted
        my $link;
        eval { $link = XMLin($link_file); };
        if ($@) {
          warn "$@ whilst processing $treesfiles{$file}";
          next;
        }
        next if !defined $link->{"package"} || !defined $link->{"project"} || !defined $link->{"rev"};
        if (exists $deletehashes{$link->{"rev"}}) {
          print "keep revision ".$link->{"rev"}." from ".$link->{"project"}."/".$link->{"package"}." which is linked from $link_file\n" if $debug;
          delete ($deletehashes{$link->{"rev"}});
          foreach my $k (keys %deletefiles) {
            if ($deletefiles{$k}->{"package"} eq $link->{"package"} and
                $deletefiles{$k}->{"project"} eq $link->{"project"} and
                $deletefiles{$k}->{"revision"} eq $link->{"rev"}) {
              my $fname = $deletefiles{$k}->{"hash"}."-".$deletefiles{$k}->{"desc"};
              print "keep file $fname linked to revision ".$link->{"rev"}."\n" if $debug;
              delete $deletefiles{$k};
            }
          }
        }
      }
    }
    close(F);
  }
  if ($debug) {
    print "\nfiles to keep:\n";
    foreach my $key (keys %keepfiles) {
      print "file: ".$keepfiles{$key}->{"hash"}."-".$keepfiles{$key}->{"desc"}."\n".
            " project: ".$keepfiles{$key}->{"project"}."\n".
            " package: ".$keepfiles{$key}->{"package"}."\n".
            " revision: ".$keepfiles{$key}->{"revision"}."\n";
    }
    print "\n";
  }
  # keep files which are used in kept revisions
  foreach my $del_key (keys %deletefiles) {
    foreach my $keep_key (keys %keepfiles) {
      if ($deletefiles{$del_key}->{"package"} eq $keepfiles{$keep_key}->{"package"} and
          $deletefiles{$del_key}->{"hash"} eq $keepfiles{$keep_key}->{"hash"}) {
        delete $deletefiles{$del_key};
        last;
      }
    }
  }
  if ($debug) {
    print "files to delete without kept ones:\n";
    foreach my $k (keys %deletefiles) {
      print "file: ".$deletefiles{$k}->{"hash"}."-".$deletefiles{$k}->{"desc"}."\n".
            " project: ".$deletefiles{$k}->{"project"}."\n".
            " package: ".$deletefiles{$k}->{"package"}."\n".
            " revision: ".$deletefiles{$k}->{"revision"}."\n";
    }
  }
  if (scalar(keys %deletefiles) == 0) {
    print "\ndidn't find any files to remove!\n";
    if ($deleted_projects) {
      print "deleting project rev files\n";
      foreach my $revfile (@revfiles) {
        print "\nfile:\t $revfile" if $debug;
        if (unlink $revfile || warn "Could not unlink $revfile: $!") {
          print "deleted\n" if $debug;
        }
      }
    }
    return;
  }
  print "\nstarting cleanup process: \n";
  foreach my $key (keys %deletefiles) {
    my $path = "$OBS_SRC_DIR/".$deletefiles{$key}->{"package"}."/".$deletefiles{$key}->{"hash"}."-".$deletefiles{$key}->{"desc"};
    next unless -e $path;
    my $size = -s $path || 0;
    $total_size += $size;
    if ($remove) {
      printf "deleted file: $path size: %.2f %s\n", format_size($size);
      unlink $path or die "error deleting file $path!\n";
    } elsif ($move_dst) {
      my $dst = $move_dst."/".$deletefiles{$key}->{"package"};
      unless (-d $dst) {
        make_path($dst, {error => \my $err});
        if (@$err) {
          die "error creating directory $dst!\n";
        }
      }
      print "move $path to $dst\n";
      move($path, $dst) or die "move failed! $!\n";
    } else {
      printf "found file $path size: %.2f %s\n", format_size($size);
    }
  }
  # rewrite rev files
  if ($remove || $move_dst) {
    print "\nupdate revision files:\n" if $debug;
    print "\ndelete not needed revision files:\n" if $debug && $deleted_projects;
    if (!$deleted_projects) {
      foreach my $key (keys %deletefiles) {
        my $path = "$OBS_SRC_DIR/".$deletefiles{$key}->{"package"}."/".$deletefiles{$key}->{"hash"}."-".$deletefiles{$key}->{"desc"};
        unless (-e $path) {
          my $revfile = "$proj_dir/".$deletefiles{$key}->{"project"}.".pkg/".$deletefiles{$key}->{"package"}.".rev";
          my $del_hash = $deletefiles{$key}->{"revision"};
          open(F, '<', $revfile) or die($!);
          my @revisions = ();
          foreach my $line (<F>) {
            my ($hash) = ( split(/\|/, $line) )[2];
            if ($hash =~ $del_hash) {
              print "remove $hash from $revfile\n" if $debug;
            } else {
              push @revisions, $line;
            }
          }
          close(F);
          open(F, '>', $revfile) or die($!);
          print F @revisions;
          close(F);
        }
      }
    } else {
      # remove revfiles, not rewrite them
      print "deleting project rev files\n";
      foreach my $revfile (@revfiles) {
        print "\nfile:\t $revfile" if $debug;
        if (unlink $revfile || warn "Could not unlink $revfile: $!") {
          print " deleted\n" if $debug;
        }
      }
    }
  }
  if ($deleted_projects) {
    if (-d "$OBS_PROJ_DIR/_deleted/$deleted_prj.pkg") {
      opendir(D, "$OBS_PROJ_DIR/_deleted/$deleted_prj.pkg");
      foreach my $f (grep {$_ ne '.' && $_ ne '..'} readdir(D)) {
        unlink("$OBS_PROJ_DIR/_deleted/$deleted_prj.pkg/$f");
      }
      closedir(D);
    }
    rmdir("$OBS_PROJ_DIR/_deleted/$deleted_prj.pkg");
    if (-d "$OBS_TREES_DIR/$deleted_prj") {
      opendir(D, "$OBS_TREES_DIR/$deleted_prj");
      foreach my $p (grep {$_ ne '.' && $_ ne '..'} readdir(D)) {
        if (-d "$OBS_TREES_DIR/$deleted_prj/$p") {
          opendir(F, "$OBS_TREES_DIR/$deleted_prj/$p");
          foreach my $f (grep {$_ ne '.'
&& $_ ne '..'} readdir(F)) {
            print "deleting file from treesdir: $OBS_TREES_DIR/$deleted_prj/$p/$f\n" if $debug;
            unlink("$OBS_TREES_DIR/$deleted_prj/$p/$f");
          }
          closedir(F);
          rmdir("$OBS_TREES_DIR/$deleted_prj/$p");
        }
      }
      closedir(D);
      rmdir("$OBS_TREES_DIR/$deleted_prj");
    }
    rmdir("$OBS_PROJ_DIR/_deleted/$deleted_prj.pkg");
  }
}

my $no_args = @ARGV;

my $res = GetOptions(
  'remove-old-sources=i{2,2}' => \@remove_old_sources,
  'service-cleanup' => \$service_cleanup,
  'full-cleanup=i{2,2}' => \@full_cleanup,
  'remove-deleted-project=s{1,1}' => \$remove_deleted_project,
  'include-packages=s{1,}' => \@include_pkgs,
  'exclude-packages=s{1,}' => \@exclude_pkgs,
  'debug|d' => \$debug,
  'search|s' => \$search,
  'remove|rm' => \$remove,
  'move|mv=s' => \$move_dst,
  'help|h' => \$help
);

if ($help || $no_args == 0 || $res != 1) {
  print_help;
  exit 0;
}

unless ($move_dst || $remove) {
  $search = 1;
}

if ($move_dst) {
  unless (-d $move_dst) {
    make_path($move_dst, {error => \my $err});
    if (@$err) {
      die "error creating directory $move_dst!\n";
    }
  }
}

if (@include_pkgs == 1 && -e $include_pkgs[0]) {
  read_pkg_file(\@include_pkgs);
}
if (@exclude_pkgs == 1 && -e $exclude_pkgs[0]) {
  read_pkg_file(\@exclude_pkgs);
}

if (@remove_old_sources == 2) {
  remove_old_srcs($remove_old_sources[0], $remove_old_sources[1], "", 0);
} elsif ($service_cleanup) {
  search_srcs;
} elsif (@full_cleanup == 2) {
  remove_old_srcs($full_cleanup[0], $full_cleanup[1], "", 0);
  search_srcs;
} elsif ($remove_deleted_project) {
  if (! grep(/$remove_deleted_project/, lsprojects_local(1)) ) {
    die("$remove_deleted_project is not a deleted project!\n");
  }
  remove_old_srcs(0, 0, $remove_deleted_project, 1);
} else {
  die "mode missing!\n";
}

if ($total_size != 0 && !$move_dst) {
  printf "cleanup files total size: %.2f %s\n", format_size($total_size);
}
open-build-service-2.9.4/src/backend/bs_clouduploadserver000077500000000000000000000304301332555733200235640ustar00rootroot00000000000000
#!/usr/bin/perl -w
#
# Copyright (c) 2017 SUSE Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program (see the file COPYING); if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#
################################################################
#
# Cloud upload server. Creates jobs and returns their status.
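#
# A minimal sketch of how a client could talk to this server, assuming the
# defaults defined below (port 5452 and the /cloudupload routes at the end of
# this file). The sub name is illustrative only and is not part of the server:
#
#   sub example_poll_upload_status {
#     my ($jobid) = @_;
#     # GET /cloudupload/<jobid> returns the job as a BSXML::clouduploadjob
#     my $job = BSRPC::rpc({
#       'uri' => "http://localhost:5452/cloudupload/$jobid",
#       'timeout' => 10,
#     }, $BSXML::clouduploadjob);
#     # states used by this server: created, receiving, scheduled,
#     # uploading, succeeded, failed
#     return $job->{'state'};
#   }
#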
# BEGIN { my ($wd) = $0 =~ m-(.*)/- ; $wd ||= '.'; unshift @INC, "$wd"; } use XML::Structured ':bytes'; use Data::Dumper; use POSIX; use Fcntl qw(:DEFAULT :flock); use Symbol; use BSServer; use BSWatcher; use BSHandoff; use BSStdServer; use BSConfiguration; use BSUtil; use BSXML; use BSHTTP; use strict; my $port = 5452; $port = $1 if $BSConfig::clouduploadserver =~ /:(\d+)$/; my $rundir = $BSConfig::rundir || "$BSConfig::bsdir/run"; my $jobdir = "$BSConfig::bsdir/cloudupload"; my $eventdir = "$BSConfig::bsdir/events"; my $pubkeyfile = "/etc/obs/cloudupload/_pubkey"; my $ajaxsocket = "$rundir/bs_clouduploadserver.ajax"; my $maxchild = $BSConfig::cloudupload_maxchild || $BSConfig::cloudupload_maxchild || 5; my $jobdonedir = "$jobdir/done"; sub usage { my ($ret) = @_; print <{'name'} ||= 1; die("weird next content\n") unless keys(%$job) == 1; for my $id (ls($jobdir)) { next unless $id =~ /^(\d+)$/; $job->{'name'} = $id + 1 if $job->{'name'} <= $id; } for my $id (ls("$jobdir/done")) { next unless $id =~ /^(\d+)$/; $job->{'name'} = $id + 1 if $job->{'name'} <= $id; } writexml("$jobdir/.next$$", "$jobdir/next", $job, $BSXML::clouduploadjob); close F; } sub nextjobid { local *F; my $job = BSUtil::lockopenxml(\*F, '<', "$jobdir/next", $BSXML::clouduploadjob); die unless keys(%$job) == 1; my $jobid = $job->{'name'}; die unless $jobid; $job->{'name'} = $jobid + 1; writexml("$jobdir/.next$$", "$jobdir/next", $job, $BSXML::clouduploadjob); close F; return $jobid; } sub pingworker { BSUtil::ping("$eventdir/clouduploadworker/.ping"); } sub cloudupload_create { my ($cgi) = @_; my $jobskelxml = BSServer::read_data(10000000); my $jobskel = BSUtil::fromxml($jobskelxml, $BSXML::clouduploadjob); my $job = {}; for (qw{user target project repository package arch filename size}) { die("$_ is missing from job skeleton\n") unless defined $jobskel->{$_}; $job->{$_} = $jobskel->{$_}; } my $targetdata = delete $jobskel->{'details'}; $targetdata = '' unless defined $targetdata; $targetdata = pack('H*', $targetdata); my $jobid = nextjobid(); $job->{'name'} = $jobid; $job->{'state'} = 'created'; $job->{'details'} = 'waiting to receive image'; $job->{'created'} = time(); mkdir_p($jobdir); writexml("$jobdir/.$jobid$$", "$jobdir/$jobid", $job, $BSXML::clouduploadjob); writestr("$jobdir/.$jobid.data$$", "$jobdir/$jobid.data", $targetdata); return ($job, $BSXML::clouduploadjob); } sub cloudupload_upload { my ($cgi, $jobid) = @_; local *F; my $job = BSUtil::lockopenxml(\*F, '<', "$jobdir/$jobid", $BSXML::clouduploadjob); die("job is not in created state\n") unless $job->{'state'} eq 'created'; $job->{'state'} = 'receiving'; $job->{'pid'} = $$; writexml("$jobdir/.$jobid$$", "$jobdir/$jobid", $job, $BSXML::clouduploadjob); close F; my $uploaded; eval { $uploaded = BSServer::read_file("$jobdir/.$jobid.file$$"); die unless $uploaded; }; my $error = $@; $job = BSUtil::lockopenxml(\*F, '<', "$jobdir/$jobid", $BSXML::clouduploadjob); die("job is not in receiving state\n") unless $job->{'state'} eq 'receiving'; delete $job->{'pid'}; delete $job->{'details'}; $error = "size mismatch: $uploaded->{'size'} != $job->{'size'}" if !$error && $uploaded->{'size'} != $job->{'size'}; if ($error) { unlink("$jobdir/.$jobid.file$$"); chomp $error; $job->{'state'} = 'failed'; $job->{'details'} = $error; mkdir_p($jobdonedir); writexml("$jobdonedir/.$jobid$$", "$jobdonedir/$jobid", $job, $BSXML::clouduploadjob); unlink("$jobdir/$jobid.data"); unlink("$jobdir/$jobid"); close F; die("$error\n"); } rename("$jobdir/.$jobid.file$$", "$jobdir/$jobid.file") 
|| die("rename $jobdir/.$jobid.file$$ $jobdir/$jobid.file: $!\n"); $job->{'state'} = 'scheduled'; writexml("$jobdir/.$jobid$$", "$jobdir/$jobid", $job, $BSXML::clouduploadjob); close F; pingworker(); return $BSStdServer::return_ok; } sub cloudupload_status { my ($cgi, $jobid) = @_; my ($job) = readxml("$jobdir/$jobid", $BSXML::clouduploadjob, 1) || readxml("$jobdonedir/$jobid", $BSXML::clouduploadjob, 1); die("404 no such job\n") unless $job; return ($job, $BSXML::clouduploadjob); } sub archivejob { my ($jobid, $job) = @_; mkdir_p($jobdonedir); rename("$jobdir/$jobid.log", "$jobdonedir/$jobid.log"); writexml("$jobdonedir/.$jobid$$", "$jobdonedir/$jobid", $job, $BSXML::clouduploadjob); unlink("$jobdir/$jobid.log"); unlink("$jobdir/$jobid.data"); unlink("$jobdir/$jobid.file"); unlink("$jobdir/$jobid.result"); BSUtil::cleandir("$jobdir/$jobid.dir"); rmdir("$jobdir/$jobid.dir"); unlink("$jobdir/$jobid"); } sub cloudupload_kill { my ($cgi, $jobid) = @_; local *F; my $job = BSUtil::lockopenxml(\*F, '<', "$jobdir/$jobid", $BSXML::clouduploadjob, 1); if (!$job) { die("404 no such job\n") unless readxml("$jobdonedir/$jobid", $BSXML::clouduploadjob, 1); return $BSStdServer::return_ok; } if ($job->{'state'} eq 'succeeded' || $job->{'state'} eq 'failed') { close F; return $BSStdServer::return_ok; } my $pid = $job->{'pid'}; if ($pid && $pid > 1) { kill -15, $pid; pingworker(); # trigger process reap } unlink("$jobdir/.$jobid.file$pid") if $pid && $job->{'state'} eq 'receiving'; delete $job->{'pid'}; $job->{'state'} = 'failed'; $job->{'details'} = 'killed'; archivejob($jobid, $job); close F; return $BSStdServer::return_ok; } sub cloudupload_joblist { my ($cgi) = @_; my @res; my @jobids = @{$cgi->{'name'} || []}; if (!@jobids) { push @jobids, grep {/^\d+$/} ls($jobdir); push @jobids, grep {/^\d+$/} ls($jobdonedir); @jobids = BSUtil::unify(sort(@jobids)); } for my $jobid (@jobids) { my $job = readxml("$jobdir/$jobid", $BSXML::clouduploadjob, 1) || readxml("$jobdonedir/$jobid", $BSXML::clouduploadjob, 1); push @res, $job if $job; } return ({'clouduploadjob' => \@res}, $BSXML::clouduploadjoblist); } sub cloudupload_log { my ($cgi, $jobid) = @_; my ($job) = readxml("$jobdir/$jobid", $BSXML::clouduploadjob, 1) || readxml("$jobdonedir/$jobid", $BSXML::clouduploadjob, 1); if ($BSStdServer::isajax && $BSServerEvents::gev->{'streaming'}) { my $eof; $eof = 1 if !$job || ($job->{'state'} ne 'scheduled' && $job->{'state'} ne 'uploading'); BSServerEvents::reply_file_grown($eof); return undef; } die("404 no such job\n") unless $job; my $state = $job->{'state'}; if (!$BSStdServer::isajax && !$cgi->{'view'} && !($state eq 'succeeded' || $state eq 'failed')) { BSHandoff::handoff("/cloudupload/$jobid/_log", undef, BSRPC::args($cgi, 'start', 'end', 'nostream')); } if ($BSStdServer::isajax) { BSWatcher::addfilewatcher("$jobdir/$jobid"); BSWatcher::addfilewatcher("$jobdir/$jobid.log"); } my $fd = gensym; if (!open($fd, '<', "$jobdir/$jobid.log")) { if (!open($fd, '<', "$jobdonedir/$jobid.log")) { return undef if $BSStdServer::isajax && $state eq 'created' || $state eq 'receiving'; die("$jobdir/$jobid.log: $!\n"); } } my @s = stat($fd); if ($cgi->{'view'} && $cgi->{'view'} eq 'entry') { my $entry = {'name' => '_log', 'size' => $s[7], 'mtime' => $s[9]}; return ({'entry' => [ $entry ]}, $BSXML::dir); } my $start = $cgi->{'start'} || 0; my $end = $cgi->{'end'}; $start = $s[7] + $start if $start < 0; $start = 0 if $start < 0; die("start out of range: $start\n") if $start > $s[7]; if (!$BSStdServer::isajax || $state eq 'succeeded' 
|| $state eq 'failed') { $end = $s[7] if !defined($end) || $end > $s[7]; } $end = $start if defined($end) && $end < $start; my $len = defined($end) ? $end - $start : undef; if ($cgi->{'nostream'} && $BSStdServer::isajax && $start == $s[7] && (!defined($len) || $len > 0) && ($state eq 'scheduled' || $state eq 'uploading')) { # no new data present, wait close $fd; return undef; } defined(sysseek($fd, $start, Fcntl::SEEK_SET)) || die("sysseek: $!\n"); if ($BSStdServer::isajax) { my $param = {'filename' => $fd, 'chunked' => 1}; $param->{'filegrows'} = 1 if !$cgi->{'nostream'} && ($state eq 'scheduled' || $state eq 'uploading'); $param->{'maxbytes'} = $len if defined $len; BSServerEvents::reply_file($param, 'Content-Type: text/plain'); } else { BSWatcher::reply_file($fd, 'Content-Type: text/plain', "Content-Length: $len"); close $fd; } return undef; } sub cloudupload_pubkey { my ($cgi) = @_; BSWatcher::reply_file($pubkeyfile, 'Content-Type: text/plain'); } sub workerstatus { my ($cgi) = @_; my @daemonarchs = qw{clouduploadserver clouduploadworker}; @daemonarchs = (@{$cgi->{'arch'}}) if $cgi->{'arch'}; my @daemons; for my $arch (@daemonarchs) { my $lock; my $daemondata = {'state' => 'dead', 'type' => $arch}; if ($arch eq 'clouduploadserver') { my $req = $BSServer::request; $daemondata->{'starttime'} = $req->{'server'}->{'starttime'} if $req && $req->{'server'}; if ($req && $req->{'conf'} && $req->{'conf'}->{'handoffpath'}) { $lock = "$req->{'conf'}->{'handoffpath'}.lock"; } $daemondata->{'state'} = 'running' unless $lock; } elsif ($arch eq 'clouduploadworker') { $lock = "$rundir/bs_clouduploadworker.lock"; } else { next; } if ($lock && open(F, '<', $lock)) { if (!flock(F, LOCK_EX | LOCK_NB)) { my @s = stat(F); $daemondata->{'state'} = 'running'; $daemondata->{'starttime'} ||= $s[9] if @s; } close F; } push @daemons, $daemondata; } my $partition = { 'daemon' => \@daemons }; my $ret = {'partition' => [ $partition ]}; return ($ret, $BSXML::workerstatus); } sub hello { my ($cgi) = @_; return "\n"; } sub getajaxstatus { my ($cgi) = @_; BSHandoff::handoff('/ajaxstatus') if !$BSStdServer::isajax; my $r = BSWatcher::getstatus(); return ($r, $BSXML::ajaxstatus); } sub run { initjobid(); BSServer::server(@_); } # define server my $dispatches = [ '/' => \&hello, '!rw :' => undef, '!- GET:' => undef, '!- HEAD:' => undef, '!- POST:/cloudupload' => \&cloudupload_create, '!- PUT:/cloudupload/$job' => \&cloudupload_upload, 'POST:/cloudupload/$job cmd=kill' => \&cloudupload_kill, '/cloudupload/_pubkey' => \&cloudupload_pubkey, '/cloudupload/$job' => \&cloudupload_status, '/cloudupload/$job/_log nostream:bool? start:intnum? end:num? view:?' => \&cloudupload_log, '/cloudupload name:num*' => \&cloudupload_joblist, '/serverstatus' => \&BSStdServer::serverstatus, '/ajaxstatus' => \&getajaxstatus, '/workerstatus arch*' => \&workerstatus, ]; my $dispatches_ajax = [ '/' => \&hello, '/ajaxstatus' => \&getajaxstatus, '/cloudupload/$job/_log nostream:bool? start:intnum? end:num? view:?' 
=> \&cloudupload_log, ]; my $conf = { 'port' => $port, 'dispatches' => $dispatches, 'setkeepalive' => 1, 'maxchild' => $maxchild, 'run' => \&run, }; my $aconf = { 'socketpath' => $ajaxsocket, 'dispatches' => $dispatches_ajax, }; BSStdServer::server('bs_clouduploadserver', \@ARGV, $conf, $aconf); open-build-service-2.9.4/src/backend/bs_clouduploadworker000077500000000000000000000142751332555733200236000ustar00rootroot00000000000000#!/usr/bin/perl BEGIN { my ($wd) = $0 =~ m-(.*)/- ; $wd ||= '.'; unshift @INC, "$wd/build"; unshift @INC, "$wd"; } use XML::Structured ':bytes'; use Data::Dumper; use POSIX; use Fcntl qw(:DEFAULT :flock); use BSConfiguration; use BSUtil; use BSStdRunner; use BSRunner; use BSXML; use strict; my $bsdir = $BSConfig::bsdir || "/srv/obs"; my $jobdir = "$BSConfig::bsdir/cloudupload"; my $eventdir = "$BSConfig::bsdir/events"; my $rundir = $BSConfig::rundir || "$BSConfig::bsdir/run"; my $maxchild = 4; $maxchild = $BSConfig::clouduploadworker_maxchild if defined $BSConfig::clouduploadworker_maxchild; my $myeventdir = "$eventdir/clouduploadworker"; my $jobdonedir = "$jobdir/done"; sub lsjobs { return sort {$a <=> $b} grep {/^\d+$/} ls($jobdir); } sub reap { my ($chld, $chld_user) = @_; my $pid; while (($pid = waitpid(-1, POSIX::WNOHANG)) > 0) { my $user = delete $chld->{$pid}; delete $chld_user->{$user}->{$pid} if $chld_user->{$user}; } } sub archivejob { my ($jobid, $job) = @_; mkdir_p($jobdonedir); rename("$jobdir/$jobid.log", "$jobdonedir/$jobid.log"); writexml("$jobdonedir/.$jobid$$", "$jobdonedir/$jobid", $job, $BSXML::clouduploadjob); unlink("$jobdir/$jobid.log"); unlink("$jobdir/$jobid.data"); unlink("$jobdir/$jobid.file"); unlink("$jobdir/$jobid.result"); BSUtil::cleandir("$jobdir/$jobid.dir"); rmdir("$jobdir/$jobid.dir"); unlink("$jobdir/$jobid"); } sub startupload { my ($job, $logfp) = @_; my $jobid = $job->{'name'}; my $pid; BSUtil::printlog("uploading job $job->{'name'} [$job->{'user'}, $job->{'target'}]\n"); if (!($pid = xfork())) { if (-e "$jobdir/$jobid.dir") { BSUtil::cleandir("$jobdir/$jobid.dir"); rmdir("$jobdir/$jobid.dir"); rename("$jobdir/$jobid.dir", "$jobdir/$jobid.dir.gone$$") if -e "$jobdir/$jobid.dir"; } mkdir_p("$jobdir/$jobid.dir"); chdir("$jobdir/$jobid.dir") || die("chdir $jobdir/$jobid.dir: $!\n"); my @args = ($job->{'user'}, $job->{'target'}, "$jobdir/$jobid.file", "$jobdir/$jobid.data", $job->{'filename'}, "$jobdir/$jobid.result"); print "calling clouduploader @args\n"; open(STDIN, '<', '/dev/null') || die("/dev/null: $!\n"); open(STDOUT, '>&', $logfp); open(STDERR, '>&STDOUT'); close($logfp); exec("clouduploader", @args); die("clouduploader: $!\n"); } my $ex = 1; # assume fail while (1) { if (waitpid($pid, 0)) { $ex = $?; last; } last if $! 
!= POSIX::EINTR; } local *JOBLOCK; $job = BSUtil::lockopenxml(\*JOBLOCK, '<', "$jobdir/$jobid", $BSXML::clouduploadjob, 1); if ($job) { if ($job->{'state'} eq 'uploading') { my $result = readstr("$jobdir/$jobid.result", 1); if ($ex) { $job->{'state'} = 'failed'; $result ||= "exit status ".($ex >> 8); } else { $job->{'state'} = 'succeeded'; } delete $job->{'details'}; $job->{'details'} = $result if defined $result; delete $job->{'pid'}; archivejob($jobid, $job); } close JOBLOCK; } } sub checkjob { my ($jobid, $job) = @_; die("wrong job name $job->{'name'}\n") if $job->{'name'} ne $jobid; die("no target\n") unless $job->{'target'}; die("no user\n") unless $job->{'user'}; die("no target data\n") unless -e "$jobdir/$jobid.data"; } sub run { my ($conf) = @_; my $ping = $conf->{'ping'}; my $maxchild = $conf->{'maxchild'}; my $maxchild_user = $conf->{'maxchild_user'}; my %chld; my %chld_user; my $pid; while(1) { BSUtil::drainping($ping); reap(\%chld, \%chld_user) if %chld; my @jobs = lsjobs(); my $now = time(); my $havedelayed; for my $jobid (@jobs) { last if keys(%chld) >= $maxchild; my $job = readxml("$jobdir/$jobid", $BSXML::clouduploadjob, 1); next unless $job; if ($job->{'due'} && $job->{'due'} > $now) { $havedelayed = 1; next; } if ($job->{'state'} ne 'scheduled' && $job->{'state'} ne 'waiting') { next; } my $user = $job->{'user'}; if ($maxchild_user) { if (keys(%{$chld_user{$user} || {}}) >= $maxchild_user) { $havedelayed = 1; next; } } # create a new job local *JOBLOCK; $job = BSUtil::lockopenxml(\*JOBLOCK, '<', "$jobdir/$jobid", $BSXML::clouduploadjob, 1); next unless $job; if ($job->{'state'} ne 'scheduled' && $job->{'state'} ne 'waiting') { close JOBBLOCK; next; } # make sure job is sane eval { checkjob($jobid, $job) }; if ($@) { warn("bad job: $jobid $@\n"); close JOBBLOCK; next; } # open and lock the log file local *LOGLOCK; my $haveloglock; if (open(LOGLOCK, '>>', "$jobdir/$jobid.log")) { $haveloglock = 1 if flock(LOGLOCK, LOCK_EX | LOCK_NB); close(LOGLOCK) unless $haveloglock; } if (!$haveloglock) { # could not get lock, start a new file unlink("$jobdir/.$jobid.log.$$"); BSUtil::lockopen(\*LOGLOCK, '>>', "$jobdir/.$jobid.log.$$"); rename("$jobdir/.$jobid.log.$$", "$jobdir/$jobid.log") || die("rename $jobdir/.$jobid.log.$$ $jobdir/$jobid.log: $!\n"); } if (!($pid = xfork())) { POSIX::setsid() > 0 || die("setsid: $!\n"); # creates new session and process group close JOBLOCK; close($conf->{'runlock'}); startupload($job, \*LOGLOCK); BSUtil::ping($ping); # trigger process reap exit(0); } close LOGLOCK; $job->{'pid'} = $pid; $job->{'state'} = 'uploading'; writexml("$jobdir/.$jobid$$", "$jobdir/$jobid", $job, $BSXML::clouduploadjob); close JOBLOCK; $chld{$pid} = $user; $chld_user{$user}->{$pid} = undef; } reap(\%chld, \%chld_user) if %chld; for my $fc (sort %{$conf->{'filechecks'} || {}}) { next unless -e $fc; $conf->{'filechecks'}->{$fc}->($conf, $fc); } if ($havedelayed) { BSUtil::waitping($ping, 10); } else { if ($conf->{'testmode'} && !%chld) { print "test mode, all jobs processed, exiting...\n"; last; } print "waiting for an event...\n"; BSUtil::waitping($ping); } } } my $conf = { 'eventdir' => $myeventdir, 'dispatches' => [], 'maxchild' => $maxchild, 'maxchild_user' => 2, 'run' => \&run, }; BSStdRunner::run('bs_clouduploadworker', \@ARGV, $conf); open-build-service-2.9.4/src/backend/bs_deltastore000077500000000000000000000130301332555733200221650ustar00rootroot00000000000000#!/usr/bin/perl -w # # Copyright (c) 2016 SUSE LLC. 
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program (see the file COPYING); if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA # ################################################################ # # The source delta generator. # BEGIN { my ($wd) = $0 =~ m-(.*)/- ; $wd ||= '.'; unshift @INC, "$wd/build"; unshift @INC, "$wd"; } use strict; use XML::Structured ':bytes'; use POSIX; use Fcntl qw(:DEFAULT :flock); use Digest::MD5 (); use BSStdServer; use BSConfiguration; use BSUtil; use BSSolv; use Getopt::Long (); sub parse_options { my %opts; if (!Getopt::Long::GetOptionsFromArray(\@_, \%opts, 'testmode|test-mode', 'stop|exit', 'restart', 'logfile=s', )) { print_usage(); die("Invalid option(s)\n"); } return \%opts; } sub print_usage { $0 =~ /([^\/]+$)/; print "Usage: $1 [options] Options: --testmode|--test-mode - run only for one event --stop|--exit - graceful shutdown daemon --restart - restart daemon --logfile file - redirect output to logfile "; } # copy @ARGV to keep it untouched in case of restart my $options = parse_options(@ARGV); BSUtil::mkdir_p_chown($BSConfig::bsdir, $BSConfig::bsuser, $BSConfig::bsgroup); # Open logfile if requested BSStdServer::openlog($options->{'logfile'}, $BSConfig::bsuser, $BSConfig::bsgroup); BSUtil::drop_privs_to($BSConfig::bsuser, $BSConfig::bsgroup); BSUtil::set_fdatasync_before_rename() unless $BSConfig::disable_data_sync || $BSConfig::disable_data_sync; my $eventdir = "$BSConfig::bsdir/events"; my $rundir = $BSConfig::rundir || "$BSConfig::bsdir/run"; my $myeventdir = "$eventdir/deltastore"; sub deltastore { my ($projid, $packid, $file) = @_; my $srcrep = "$BSConfig::bsdir/sources"; my $uploaddir = "$BSConfig::bsdir/upload"; BSUtil::printlog("generating src delta for $projid/$packid/$file"); mkdir_p($uploaddir); my $tmp = "$uploaddir/deltastore.$$"; unlink($tmp); unlink("$tmp.in"); die("cannot get rid of $tmp") if -e $tmp; link("$srcrep/$packid/$file", "$tmp.in") || die("link $srcrep/$packid/$file $tmp.in: $!\n"); if (BSSolv::isobscpio("$tmp.in")) { BSUtil::printlog(" - already delta cpio"); unlink("$tmp.in"); return; } if (!BSSolv::makeobscpio("$tmp.in", "$srcrep/$packid/deltastore", $tmp)) { BSUtil::printlog(" - delta creation error"); unlink("$tmp.in"); unlink($tmp); return; } unlink("$tmp.in"); if (1) { if ($file =~ /^([0-9a-f]{32})-/) { my $md5 = $1; BSUtil::printlog(" - verifying re-expansion..."); local *F; BSSolv::obscpioopen($tmp, "$srcrep/$packid/deltastore", \*F, $uploaddir) || die("BSSolv::obscpioopen failed\n"); my $ctx = Digest::MD5->new; $ctx->addfile(*F); close F; my $rmd5 = $ctx->hexdigest(); die(" - md5sum mismatch: $md5 $rmd5\n") if $md5 ne $rmd5; } } if (!rename($tmp, "$srcrep/$packid/$file")) { BSUtil::printlog(" - rename $tmp $srcrep/$packid/$file: $!"); unlink("$tmp.in"); unlink($tmp); return; } } $| = 1; $SIG{'PIPE'} = 'IGNORE'; BSUtil::restartexit($options, 'deltastore', "$rundir/bs_deltastore", "$myeventdir/.ping"); BSUtil::printlog("starting source delta generator"); mkdir_p($rundir); open(RUNLOCK, 
'>>', "$rundir/bs_deltastore.lock") || die("$rundir/bs_deltastore.lock: $!\n"); flock(RUNLOCK, LOCK_EX | LOCK_NB) || die("deltastore is already running!\n"); utime undef, undef, "$rundir/bs_deltastore.lock"; mkdir_p($myeventdir); if (!-p "$myeventdir/.ping") { POSIX::mkfifo("$myeventdir/.ping", 0666) || die("$myeventdir/.ping: $!"); chmod(0666, "$myeventdir/.ping"); } sysopen(PING, "$myeventdir/.ping", POSIX::O_RDWR) || die("$myeventdir/.ping: $!"); while(1) { # drain ping pipe BSUtil::drainping(\*PING); # check for events my @events = ls($myeventdir); @events = grep {!/^\./} @events; for my $event (@events) { last if -e "$rundir/bs_deltastore.exit"; last if -e "$rundir/bs_deltastore.restart"; my $ev = readxml("$myeventdir/$event", $BSXML::event, 1); if (!$ev || !$ev->{'type'} || $ev->{'type'} ne 'deltastore') { unlink("$myeventdir/$event"); next; } if (!defined($ev->{'project'}) || !defined($ev->{'package'}) || !defined($ev->{'job'})) { unlink("$myeventdir/$event"); next; } eval { deltastore($ev->{'project'}, $ev->{'package'}, $ev->{'job'}); }; if ($@) { warn($@); } else { unlink("$myeventdir/$event"); } } if ($options->{testmode}) { close(RUNLOCK); BSUtil::printlog("Test mode, exiting..."); exit(0); } # check for restart/exit if (-e "$rundir/bs_deltastore.exit") { close(RUNLOCK); unlink("$rundir/bs_deltastore.exit"); BSUtil::printlog("exiting..."); exit(0); } if (-e "$rundir/bs_deltastore.restart") { close(RUNLOCK); unlink("$rundir/bs_deltastore.restart"); BSUtil::printlog("restarting..."); exec($0); die("$0: $!\n"); } BSUtil::printlog("waiting for an event..."); BSUtil::waitping(\*PING); } open-build-service-2.9.4/src/backend/bs_dispatch000077500000000000000000001311461332555733200216270ustar00rootroot00000000000000#!/usr/bin/perl -w # # Copyright (c) 2006, 2007 Michael Schroeder, Novell Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program (see the file COPYING); if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA # ################################################################ # # The Job Dispatcher # BEGIN { my ($wd) = $0 =~ m-(.*)/- ; $wd ||= '.'; # FIXME: currently the bs_dispatcher makes assumptions on being in a # properly set up working dir, e.g. with subdirs 'worker' and # 'build'. Either that is cleaned up or this stays in, for the sake # of startproc and others being able to start a bs_srcserver without # knowing that it has to be started in the right directory.... 
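  # For illustration (an assumption derived from the getcodemd5('worker', ...)
  # and getcodemd5('build', ...) calls further down, not an authoritative
  # layout): the working dir is expected to contain at least
  #   ./worker - the code that is shipped to the build hosts ("workercode")
  #   ./build  - the obs-build scripts ("buildcode")
  # so the chdir below makes those relative paths resolvable.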
chdir "$wd"; unshift @INC, "build"; unshift @INC, "."; } use POSIX; use Data::Dumper; use Digest::MD5 (); use List::Util; use Fcntl qw(:DEFAULT :flock); use XML::Structured ':bytes'; use Storable; use BSConfiguration; use BSRPC; use BSUtil; use BSXML; use BSCando; use BSDispatcher::Constraints; use strict; my $nosrcchangescale = 3; # -4.77 my %powerpkgs; if ($BSConfig::powerpkgs) { my $i = 1; for (@{$BSConfig::powerpkgs || []}) { $powerpkgs{$_} = $i++; } } my %secure_sandboxes; if ($BSConfig::secure_sandboxes) { %secure_sandboxes = map {$_ => 1} @$BSConfig::secure_sandboxes; } else { # we just define xen, kvm and zvm as entirely secure sandboxes atm # chroot, emulator, lxc are currently considered as not safe $secure_sandboxes{$_} = 1 for qw{xen kvm zvm}; } my $testmode; while (@ARGV) { if ($ARGV[0] eq '--test-mode' || $ARGV[0] eq '--testmode') { shift @ARGV; $testmode = 1; } elsif ($ARGV[0] eq '--test-constraints') { shift @ARGV; my $package = shift @ARGV; my $architecture = shift @ARGV; my $workerinfo_file = shift @ARGV; my $constraints_file = shift @ARGV; my $constraintsprj_file = shift @ARGV; my $workerinfo = readxml($workerinfo_file, $BSXML::worker); my $jobinfo = { 'arch' => $architecture, 'package' => $package }; my $constraints; if ($constraints_file) { $constraints = readxml($constraints_file, $BSXML::constraints); $constraints = overwriteconstraints($jobinfo, $constraints); } if ($constraintsprj_file) { my @lines = map { [ split(' ', $_) ] } split("\n", readstr($constraintsprj_file)); my $prjconfconstraint = BSDispatcher::Constraints::list2struct($BSXML::constraints, \@lines); if ($prjconfconstraint) { $constraints = $constraints ? BSDispatcher::Constraints::mergeconstraints($prjconfconstraint, $constraints) : $prjconfconstraint; } } die("No parseable workerinfo\n") unless keys %$workerinfo; die("No parseable constraints\n") unless keys %$constraints; my $o = BSDispatcher::Constraints::oracle($workerinfo, $constraints); exit 0 if defined($o) && $o > 0; exit 1; } else { last; } } BSUtil::set_fdatasync_before_rename() unless $BSConfig::disable_data_sync || $BSConfig::disable_data_sync; my $bsdir = $BSConfig::bsdir || "/srv/obs"; BSUtil::mkdir_p_chown($bsdir, $BSConfig::bsuser, $BSConfig::bsgroup) || die("unable to create $bsdir\n"); BSUtil::drop_privs_to($BSConfig::bsuser, $BSConfig::bsgroup); my $port = 5252; #'RR' $port = $1 if $BSConfig::reposerver =~ /:(\d+)$/; # strip helpers from cando my %cando = %BSCando::cando; for my $arch (values %cando) { $arch = [ @{$arch || []} ]; # make a copy so we can modify s/:.*// for @$arch; } my %harchcando; # can the harch build an arch? 
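# The loop below expands BSCando::cando entries of the form "arch" or
# "arch:helper" into %harchcando keys "$harch/$arch" whose value is the
# helper suffix ('' for a native build). Hypothetical lookups, for
# illustration only -- the concrete pairs depend on BSCando:
#   $harchcando{"x86_64/x86_64"}  # '' : native, no helper needed
#   $harchcando{"x86_64/i586"}    # e.g. 'linux32' if such an entry exists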
for my $harch (keys %BSCando::cando) { for my $arch (@{$BSCando::cando{$harch}}) { if ($arch =~ /^([^:]+):(.+)$/) { $harchcando{"$harch/$1"} = $2; } else { $harchcando{"$harch/$arch"} = ''; } } } # 4h build will add .5 to the load # 4h idle will half the load my $decay = log(.5)/(4*3600); my $rundir = $BSConfig::rundir || "$BSConfig::bsdir/run"; my $workersdir = "$BSConfig::bsdir/workers"; my $jobsdir = "$BSConfig::bsdir/jobs"; my $eventdir = "$BSConfig::bsdir/events"; my $reporoot = "$BSConfig::bsdir/build"; sub getcodemd5 { my ($dir, $cache) = @_; my $md5 = ''; my %new; my $doclean; my @files = grep {!/^\./} ls($dir); push @files, map {"Build/$_"} grep {!/^\./} ls("$dir/Build"); push @files, map {"emulator/$_"} grep {!/^\./} ls("$dir/emulator"); $cache ||= {}; for my $file (sort @files) { next unless -f "$dir/$file"; my @s = stat _; my $id = "$s[9]/$s[7]/$s[1]"; $new{$id} = 1; if ($cache->{$id}) { $md5 .= "$cache->{$id} $file\n"; next; } $cache->{$id} = Digest::MD5::md5_hex(readstr("$dir/$file")); $md5 .= "$cache->{$id} $file\n"; $doclean = 1; } if ($doclean) { for (keys %$cache) { delete $cache->{$_} unless $new{$_}; } } return Digest::MD5::md5_hex($md5); } my $workerdircache = {}; my $builddircache = {}; my $workercode; my $buildcode; my $lastcodechecktime; my %badhost; my $badhostchanged = 1; my %newestsrcchange; my %infocache; my %constraintscache; my %lastbuild; # last time a job was build in that prpa my %masterdispatched; # we masterdispatched those, prpa => [ starttime, ... ] sub checkbadhost { my (@attempts) = @_; my %hosts; for my $attempt (@attempts) { my $host = $attempt; $host =~ s/:\d+$//; $hosts{$host} = 1; } my $nhosts = keys %hosts; $nhosts = 1 unless $nhosts; return (@attempts > 5 + 20 / $nhosts) ? 1 : 0; } sub assignjob { my ($job, $idlename, $arch) = @_; local *F; BSUtil::printlog("assignjob $arch/$job -> $idlename"); my $jobstatus = { 'code' => 'dispatching', }; if (!BSUtil::lockcreatexml(\*F, "$jobsdir/$arch/.dispatch.$$", "$jobsdir/$arch/$job:status", $jobstatus, $BSXML::jobstatus)) { BSUtil::printlog("job lock failed!"); return 'badjob'; } # got the lock, re-check if job is still there if (! 
-e "$jobsdir/$arch/$job") { unlink("$jobsdir/$arch/$job:status"); close F; BSUtil::printlog("job disappeared!"); return 'badjob'; } # prepare job data my $infoxml = readstr("$jobsdir/$arch/$job"); my $jobid = Digest::MD5::md5_hex($infoxml); my $info = XMLin($BSXML::buildinfo, $infoxml); my $now = time(); if (!$lastcodechecktime || $now - $lastcodechecktime > 20 || $now - $lastcodechecktime < 0) { $workercode = getcodemd5('worker', $workerdircache); $buildcode = getcodemd5('build', $builddircache); $lastcodechecktime = $now; } # get the worker data my $worker = readxml("$workersdir/idle/$idlename", $BSXML::worker, 1); if (!$worker) { unlink("$jobsdir/$arch/$job:status"); close F; BSUtil::printlog("worker is gone!"); return undef; } $worker->{'hardware'}->{'nativeonly'} = undef if $worker->{'hardware'} && exists($worker->{'hardware'}->{'nativeonly'}); # assign job to worker my @args = ("port=$port", "workercode=$workercode", "buildcode=$buildcode"); push @args, "registerserver=$worker->{'registerserver'}" if $worker->{'registerserver'}; my $attempt = 0; if ($badhost{"$info->{'arch'}/$job"}) { my $id = "$info->{'arch'}/$job"; my @attempts = grep {s/^\Q$id\E\///} keys %badhost; $attempt = scalar(@attempts) + 1; my $msg = "gave up after $attempt failed build attempts..."; push @args, "nobadhost=$msg" if checkbadhost(@attempts); } eval { BSRPC::rpc({ 'uri' => "http://$worker->{'ip'}:$worker->{'port'}/build", 'timeout' => 10 + int(length($infoxml) / 100000), 'request' => "PUT", 'headers' => [ "Content-Type: text/xml" ], 'data' => $infoxml, }, undef, @args); }; if ($@) { my $err = $@; BSUtil::printlog("rpc error: $@"); unlink("$jobsdir/$arch/$job:status"); close F; if ($err =~ /cannot build anything/) { return undef; } if ($err =~ /cannot build this repository/) { $badhost{"$info->{'project'}/:repo:$info->{'repository'}/$info->{'arch'}/$idlename"} = time(); $badhostchanged = 1; return 'badhost'; } if ($err =~ /cannot build this package/) { $badhost{"$info->{'project'}/$info->{'package'}/$info->{'arch'}/$idlename"} = time(); $badhostchanged = 1; return 'badhost'; } if ($err =~ /bad job/) { return 'badjob'; } mkdir_p("$workersdir/down"); rename("$workersdir/idle/$idlename", "$workersdir/down/$idlename"); # broken client or rebooting return undef; } # update jobstatus data $jobstatus->{'code'} = 'building'; $jobstatus->{'uri'} = "http://$worker->{'ip'}:$worker->{'port'}"; $jobstatus->{'workerid'} = $worker->{'workerid'} if defined $worker->{'workerid'}; $jobstatus->{'starttime'} = time(); $jobstatus->{'hostarch'} = $worker->{'hostarch'}; $jobstatus->{'jobid'} = $jobid; $jobstatus->{'attempt'} = $attempt if $attempt; # update worker data with build job info $worker->{'job'} = $job; $worker->{'jobid'} = $jobid; $worker->{'arch'} = $arch; $worker->{'reposerver'} = $info->{'reposerver'} if $info->{'masterdispatched'}; # update worker dir for my $oldstate (qw{idle down away dead}) { unlink("$workersdir/$oldstate/$idlename"); } mkdir_p("$workersdir/building"); writexml("$workersdir/building/.$idlename", "$workersdir/building/$idlename", $worker, $BSXML::worker); if ($info->{'masterdispatched'}) { # we did out job. 
delete job push @{$masterdispatched{"$info->{'project'}/$info->{'repository'}/$info->{'arch'}"}}, $jobstatus->{'starttime'}; unlink("$jobsdir/$arch/$job"); unlink("$jobsdir/$arch/$job:status"); close F; return 'assigned'; } # write new status and release lock writexml("$jobsdir/$arch/.$job:status", "$jobsdir/$arch/$job:status", $jobstatus, $BSXML::jobstatus); close F; return 'assigned'; } sub sendeventtoserver { my ($server, $ev) = @_; my @args; for ('type', 'project', 'package', 'repository', 'arch', 'job', 'worker') { push @args, "$_=$ev->{$_}" if defined $ev->{$_}; } my $param = { 'uri' => "$server/event", 'request' => 'POST', 'timeout' => 10, }; BSRPC::rpc($param, undef, @args); } sub staleness { my ($prpa, $now, $ic, $jobs) = @_; my $projid = (split('/', $prpa))[0]; my $lb = $lastbuild{$prpa}; return 0 unless $lb; $lb = $now if $lb > $now; my $newestsrcchange = $newestsrcchange{$projid}; if (!defined $newestsrcchange) { $newestsrcchange = 0; for (@$jobs) { my $job = $ic->{$_}; $newestsrcchange = $job->{'revtime'} if $job && $job->{'revtime'} && $job->{'revtime'} > $newestsrcchange; } $newestsrcchange ||= $lb; $newestsrcchange{$projid} = $newestsrcchange; } my $ret = ($lb - $newestsrcchange) / (($now - $lb) * 40 + 5000000); $ret = 0 if $ret < 0; #BSUtil::printlog("staleness $prpa: $ret"); return $ret; } sub overwrite { my ($dst, $src) = @_; for my $k (sort keys %$src) { next if $k eq "conditions"; my $d = $src->{$k}; if (!exists($dst->{$k}) || !ref($d) || ref($d) ne 'HASH') { $dst->{$k} = $d; } else { overwrite($dst->{$k}, $d); } } } sub overwriteconstraints { my ($info, $constraints) = @_; # use condition specific constraints to merge it properly for my $o (@{$constraints->{'overwrite'}||[]}) { next unless $o && $o->{'conditions'}; if ($o->{'conditions'}->{'arch'}) { next unless grep {$_ eq $info->{'arch'}} @{$o->{'conditions'}->{'arch'}}; } if ($o->{'conditions'}->{'package'}) { my $packagename = $info->{'package'}; my $shortpackagename = $info->{'package'}; $shortpackagename =~ s/\..*//; next unless grep {$_ eq $packagename or $_ eq $shortpackagename} @{$o->{'conditions'}->{'package'}}; } # conditions are matching, overwrite... $constraints = Storable::dclone($constraints); overwrite($constraints, $o); } return $constraints; } sub getconstraints { my ($info, $constraintsmd5) = @_; my $param = { 'uri' => "$BSConfig::srcserver/source/$info->{'project'}/$info->{'package'}/_constraints", 'timeout' => 300, }; my $constraintsxml; eval { $constraintsxml = BSRPC::rpc($param, undef, "expand=1", "rev=$info->{'srcmd5'}"); die("huh? 
constraints md5 does not match\n") unless Digest::MD5::md5_hex($constraintsxml) eq $constraintsmd5; }; if ($@) { warn($@); return [ time() + 600 ]; # better luck next time } return undef unless $constraintsxml; return BSUtil::fromxml($constraintsxml, $BSXML::constraints, 1); } # normalizes an xml size element to mega bytes my %syncedjobs; my %lastmastersync; my $lastmdloadsync; sub dispatchslave { my $synced = 0; my @archs = grep {!/^\./} sort(ls($jobsdir)); my %projid2repocache; my %building; my %building_time; for my $arch (@archs) { next unless -d "$jobsdir/$arch"; my $now = time(); my $added = 0; my $deleted = 0; # if we only work on a partition get the filtered jobs from the # source server if ($BSConfig::partition && ($lastmastersync{$arch} || 0) + 600 < $now) { $lastmastersync{$arch} = $now; my $res; eval { $res = BSRPC::rpc({ 'uri' => "$BSConfig::srcserver/jobs/$arch", 'timeout' => 60, }, $BSXML::dir, "partition=$BSConfig::partition"); }; if ($@) { warn($@); next; } $syncedjobs{$arch} = { map {$_->{'name'} => 1} @{$res->{'entry'} || []} }; } my @jobs = sort(ls($jobsdir)); my @b = grep {!/^\./} ls("$jobsdir/$arch"); my %locked = map {$_ => 1} grep {/:status$/} @b; my %notlocked = map {$_ => 1} grep {!$locked{$_}} @b; my %seen; my @crossb = grep {/:cross$/} @b; if (@crossb) { my %crossarchs; for (@crossb) { push @{$crossarchs{$2}}, $1 if /^(.*):([^:]+):cross$/; } for my $crossarch (sort keys %crossarchs) { my %cj = map {$_ => 1} ls("$jobsdir/$crossarch"); # delete orphaned markers for (grep {!$cj{$_}} @{$crossarchs{$crossarch}}) { BSUtil::printlog(" - deleting orphaned cross marker $arch/$_:${crossarch}:cross"); unlink("$jobsdir/$arch/$_:${crossarch}:cross"); } } @b = grep {!/:cross$/} @b; } for my $job (grep {!/:(?:dir|status|new)$/} @b) { next if $locked{"$job:status"}; $seen{$job} = 1; next if $syncedjobs{$arch}->{$job}; my $infoxml = readstr("$jobsdir/$arch/$job", 1); next unless $infoxml; my $info = BSUtil::fromxml($infoxml, $BSXML::buildinfo, 1); next unless $info && $info->{'file'} && $info->{'file'} ne '_aggregate'; $info->{'masterdispatched'} = Digest::MD5::md5_hex($infoxml); $infoxml = XMLout($BSXML::buildinfo, $info); undef $info; eval { BSRPC::rpc({ 'uri' => "$BSConfig::masterdispatcher/jobs/$arch/$job", 'request' => 'PUT', 'timeout' => 10 + int(length($infoxml) / 100000), 'headers' => [ "Content-Type: text/xml" ], 'data' => $infoxml, }, undef); }; if ($@) { if ($@ =~ /already exists/) { $syncedjobs{$arch}->{$job} = 1; } warn($@); next; } $added++; $synced++; $syncedjobs{$arch}->{$job} = 1; } for my $job (sort(keys %{$syncedjobs{$arch} || {}})) { next if $seen{$job}; $synced++; eval { BSRPC::rpc({ 'uri' => "$BSConfig::masterdispatcher/jobs/$arch/$job", 'request' => 'DELETE', 'timeout' => 60, }, undef); }; if ($@) { warn($@); next; } $deleted++; $synced++; delete $syncedjobs{$arch}->{$job}; } BSUtil::printlog("$arch: added $added, deleted $deleted") if $added || $deleted; # adapt the load my $load = {}; for my $job (keys %locked) { my $jn = $job; $jn =~ s/:status$//; next unless $notlocked{$jn}; $jn =~ s/-[0-9a-f]{32}$//s; my ($projid, $repoid, $packid) = split('::', $jn); if (!defined($packid)) { my $info = readxml("$jobsdir/$arch/$job", $BSXML::buildinfo, 1); next unless $info && $info->{'file'} && $info->{'file'} ne '_aggregate'; ($projid, $repoid, $packid) = ($info->{'project'}, $info->{'repository'}, $info->{'package'}); next unless defined $packid; } my $prpa = "$projid/$repoid/$arch"; $building{$prpa} ||= 0; $building{$prpa} += 1; $building_time{$prpa} = $now; } } # 
upload the mdload from time to time my $now = time(); if (!$lastmdloadsync || $lastmdloadsync + 120 < $now) { BSUtil::printlog("uploading load to master dispatcher"); $lastmdloadsync = $now; my $load = BSUtil::retrieve("$jobsdir/load", 1) || {}; for my $prpa (keys %$load) { $load->{$prpa}->[2] = $building_time{$prpa} || $now; $load->{$prpa}->[3] = $building{$prpa} || 0; } eval { BSRPC::rpc({ 'uri' => "$BSConfig::masterdispatcher/jobs/_mdload", 'request' => 'POST', 'data' => BSUtil::tostorable($load), 'timeout' => 60, }, undef); }; if ($@) { warn($@); } } return $synced; } # add a 30 second penalty for badhost events sub addbadhosttofinished { my ($ev, $now) = @_; return unless $ev->{'project'} && $ev->{'repository'} && $ev->{'arch'}; $now ||= time(); my ($hostarch, $workerid) = split(':', $ev->{'worker'}, 2); my @l = ($ev->{'project'}, $ev->{'repository'}, $ev->{'arch'}, $ev->{'package'}, $now - 30, $now, 'badhost', $workerid, $hostarch); s/([\000-\037%|=\177-\237])/sprintf("%%%02X", ord($1))/ge for @l; BSUtil::appendstr("$jobsdir/finished", join('|', @l)."\n"); } my %lastrepoeventsent; sub forwardevents { if (%lastrepoeventsent) { my $now = time(); delete $lastrepoeventsent{$_} for grep {$lastrepoeventsent{$_} + 180 < $now} keys(%lastrepoeventsent); } for my $evname (ls("$eventdir/repository")) { next if $evname =~ /^\./; next if $lastrepoeventsent{$evname}; my $ev = readxml("$eventdir/repository/$evname", $BSXML::event, 1); next unless $ev; eval { sendeventtoserver($BSConfig::srcserver, $ev); }; if ($@) { warn($@); } else { unlink("$eventdir/repository/$evname"); $lastrepoeventsent{$evname} = time(); } } for my $evname (ls("$eventdir/dispatch")) { next if $evname =~ /^\./; my $ev = readxml("$eventdir/dispatch/$evname", $BSXML::event, 1); next unless $ev; next if $ev->{'due'} && time() < $ev->{'due'}; delete $ev->{'due'}; eval { if ($ev->{'type'} eq 'built') { # resend to rep server } elsif ($ev->{'type'} eq 'badhost') { BSUtil::printlog("badhost event: $ev->{'project'}/$ev->{'package'}/$ev->{'arch'}/$ev->{'worker'}"); if ($BSConfig::masterdispatcher && $BSConfig::masterdispatcher ne $BSConfig::reposerver) { sendeventtoserver($BSConfig::masterdispatcher, $ev) unless $ev->{'package'} eq '_deltas'; # XXX } else { my $now = time(); $badhost{"$ev->{'project'}/$ev->{'package'}/$ev->{'arch'}/$ev->{'worker'}"} = $now; $badhost{"$ev->{'arch'}/$ev->{'job'}"} = $now; $badhost{"$ev->{'arch'}/$ev->{'job'}/$ev->{'worker'}"} = $now; $badhostchanged = 1; addbadhosttofinished($ev, $now); } } else { sendeventtoserver($BSConfig::srcserver, $ev); } }; if ($@) { warn($@); } else { unlink("$eventdir/dispatch/$evname"); } } } sub filechecks { if (-e "$rundir/bs_dispatch.exit") { my $state = { 'infocache' => \%infocache, 'badhost' => \%badhost, 'newestsrcchange' => \%newestsrcchange, }; BSUtil::store("$rundir/bs_dispatch.state.new", "$rundir/bs_dispatch.state", $state); close(RUNLOCK); unlink("$rundir/bs_dispatch.exit"); BSUtil::printlog("exiting..."); exit(0); } if (-e "$rundir/bs_dispatch.restart") { my $state = { 'infocache' => \%infocache, 'badhost' => \%badhost, 'newestsrcchange' => \%newestsrcchange, }; BSUtil::store("$rundir/bs_dispatch.state.new", "$rundir/bs_dispatch.state", $state); close(RUNLOCK); unlink("$rundir/bs_dispatch.restart"); BSUtil::printlog("restarting..."); exec($0); die("$0: $!\n"); } if (-e "$rundir/bs_dispatch.dumpstate") { my $state = { 'infocache' => \%infocache, 'badhost' => \%badhost, 'newestsrcchange' => \%newestsrcchange, }; BSUtil::store("$rundir/bs_dispatch.state.new", 
"$rundir/bs_dispatch.state", $state); unlink("$rundir/bs_dispatch.dumpstate"); BSUtil::printlog("dumped state to $rundir/bs_dispatch.state ..."); } if (-e "$rundir/bs_dispatch.dropbadhosts") { unlink("$rundir/bs_dispatch.dropbadhosts"); BSUtil::printlog("removing all badhost entries..."); %badhost = (); $badhostchanged = 1; } } # check all workers against the job constains so that the # users get feedback sub checkconstraints { my ($info, $arch, $job, $constraints, $workercache) = @_; my @all_workers; BSUtil::printlog("checkconstraints $arch/$job"); $workercache ||= {}; my $sumworkers = 0; my $downworkers = 0; my $buildarch = $info->{'hostarch'} || $arch; my %workerseen; for my $workerstate (qw{idle building away down}) { my @workernames = sort(BSUtil::ls("$workersdir/$workerstate")); for my $workername (@workernames) { next if $workerseen{$workername}; my ($harch) = split(':', $workername, 2); next unless exists($harchcando{"$harch/$buildarch"}); my $worker = $workercache->{"$workerstate/$workername"} || readxml("$workersdir/$workerstate/$workername", $BSXML::worker, 1); next unless $worker; $workercache->{"$workerstate/$workername"} = $worker; my $helper = $harchcando{"$harch/$buildarch"}; next if $helper && $worker->{hardware} && exists($worker->{hardware}->{nativeonly}); $workerseen{$workername} = 1; push @all_workers, $worker; next if $BSConfig::dispatch_constraint && !$BSConfig::dispatch_constraint->($info, $worker, $constraints); next unless !$constraints || BSDispatcher::Constraints::oracle($worker, $constraints) > 0; $sumworkers++; $downworkers++ if $workerstate eq 'down'; } } # return if more than half of the workers satisfy the constraint return 0 if $sumworkers - $downworkers > 5 && ($sumworkers - $downworkers) > scalar(@all_workers) / 2; my $details = "waiting for $sumworkers compliant workers"; $details .= " ($downworkers of them down)" if $downworkers; if (!$sumworkers) { $details = "no compliant workers"; my $hint = ''; if ($BSConfig::dispatch_constraint) { if (!grep {$BSConfig::dispatch_constraint->($info, $_, $constraints)} @all_workers) { $hint .= " dispatch_constraint"; } } for my $cpart (sort(keys(%{$constraints || {}}))) { my $cconstraint = { $cpart => $constraints->{$cpart} }; next if (grep {BSDispatcher::Constraints::oracle($_, $cconstraint) > 0} @all_workers); if ($cpart eq 'hardware' || $cpart eq 'linux') { my $hhint = ''; for my $ccpart (sort(keys(%{$constraints->{$cpart} || {}}))) { my $ccconstraint = { $cpart => { $ccpart => $constraints->{$cpart}->{$ccpart} } }; next if (grep {BSDispatcher::Constraints::oracle($_, $ccconstraint) > 0} @all_workers); $hhint .= " $cpart:$ccpart"; } $hint .= $hhint ? 
$hhint : " $cpart"; next; } $hint .= " $cpart"; } $details .= " (constraints mismatch hint:$hint)" if $hint; } if (!$info->{'scheduleinfo'} || $info->{'scheduleinfo'} ne $details) { $info->{'scheduleinfo'} = $details; print "setting dispatch details to: $details\n"; if ($sumworkers) { setdispatchdetails($info, $arch, $job, $details); } else { failjob($info, $arch, $job, "package build was not possible:\n\n$details\n\nPlease adapt your constraints.\n"); } } return 1; } $| = 1; $SIG{'PIPE'} = 'IGNORE'; BSUtil::restartexit($ARGV[0], 'dispatcher', "$rundir/bs_dispatch"); BSUtil::printlog("starting build service dispatcher"); # get lock mkdir_p($rundir); open(RUNLOCK, '>>', "$rundir/bs_dispatch.lock") || die("$rundir/bs_dispatch.lock: $!\n"); flock(RUNLOCK, LOCK_EX | LOCK_NB) || die("dispatcher is already running!\n"); utime undef, undef, "$rundir/bs_dispatch.lock"; my $dispatchprios; my $dispatchprios_project; my $dispatchprios_id = ''; if (-s "$rundir/bs_dispatch.state") { BSUtil::printlog("reading old state..."); my $state = BSUtil::retrieve("$rundir/bs_dispatch.state", 2); unlink("$rundir/bs_dispatch.state"); %infocache = %{$state->{'infocache'}} if $state && $state->{'infocache'}; %badhost = %{$state->{'badhost'}} if $state && $state->{'badhost'}; $badhostchanged = 1; %newestsrcchange = %{$state->{'newestsrcchange'}} if $state && $state->{'newestsrcchange'}; } if ($BSConfig::masterdispatcher && $BSConfig::masterdispatcher ne $BSConfig::reposerver) { BSUtil::printlog("running is dispatch slave mode"); } if ($testmode) { forwardevents(); print "Test mode, dispatcher is exiting.."; exit(0); } while (1) { if (-s "$jobsdir/finished") { local *F; if (open(F, '<', "$jobsdir/finished")) { unlink("$jobsdir/finished"); my $load = BSUtil::retrieve("$jobsdir/load", 1) || {}; while () { next unless /\n$/s; my @s = split('\|', $_); s/%([a-fA-F0-9]{2})/chr(hex($1))/ge for @s; my ($projid, $repoid, $arch, $packid, $start, $end, $result, $workerid, $hostarch) = @s; next unless $start =~ /^[0-9]+$/s; next unless $end=~ /^[0-9]+$/s; next if $end <= $start; my $prpa = "$projid/$repoid/$arch"; $load->{$prpa} = [0, 0] unless $load->{$prpa}; my $l = $load->{$prpa}; if ($l->[0] < $end) { my $d = $end - $l->[0]; $l->[1] *= exp($decay * $d); $l->[1] += (1 - exp($decay * ($end - $start))); $l->[0] = $end; } else { my $d = $l->[0] - $end; $l->[1] += (1 - exp($decay * ($end - $start))) * exp($decay * $d); } } close F; my $prunetime = time() - 50 * 86400; for (keys %$load) { delete $load->{$_} if $load->{$_}->[0] < $prunetime; } BSUtil::store("$jobsdir/load.new", "$jobsdir/load", $load); } } if ($BSConfig::masterdispatcher && $BSConfig::masterdispatcher ne $BSConfig::reposerver) { my $synced = dispatchslave(); forwardevents(); sleep(1) unless $synced; filechecks(); next; } my @dispatchprios_s = stat("$jobsdir/dispatchprios"); if (!@dispatchprios_s) { $dispatchprios = undef; $dispatchprios_project = undef; $dispatchprios_id = ''; } elsif ($dispatchprios_id ne "$dispatchprios_s[9]/$dispatchprios_s[7]/$dispatchprios_s[1]") { $dispatchprios_id = "$dispatchprios_s[9]/$dispatchprios_s[7]/$dispatchprios_s[1]"; $dispatchprios = BSUtil::retrieve("$jobsdir/dispatchprios", 1); $dispatchprios_project = undef; if ($dispatchprios) { # create dispatchprios_project hash $dispatchprios_project = {}; for (@{$dispatchprios->{'prio'} || []}) { $dispatchprios_project->{$_->{'project'}} ||= [] if defined $_->{'project'}; } my @p = keys %$dispatchprios_project; push @p, ':all:'; for (@{$dispatchprios->{'prio'} || []}) { if 
(defined($_->{'project'})) { push @{$dispatchprios_project->{$_->{'project'}}}, $_; } else { for my $p (@p) { push @{$dispatchprios_project->{$p}}, $_; } } } } } my $load = BSUtil::retrieve("$jobsdir/load", 1) || {}; my $now = time(); for my $prpa (sort keys %$load) { my $l = $load->{$prpa}; my $ll = $l->[1]; $ll *= exp($decay * ($now - $l->[0])) if $now > $l->[0]; $load->{$prpa} = $ll; $lastbuild{$prpa} = $l->[0]; } # adapt load for masterdispatched prpas my $mdload = BSUtil::retrieve("$jobsdir/mdload", 1) || {}; for my $prpa (sort keys %$mdload) { my $l = $mdload->{$prpa}; my $ll = $l->[1]; $ll *= exp($decay * ($now - $l->[0])) if $now > $l->[0]; $load->{$prpa} = $ll; $lastbuild{$prpa} = $l->[0]; if ($l->[3]) { $load->{$prpa} += $l->[3]; $lastbuild{$prpa} = $l->[2]; } } if (%masterdispatched) { for my $prpa (sort keys %masterdispatched) { my $md = $masterdispatched{$prpa}; if ($mdload->{$prpa}) { shift(@$md) while @$md && $md->[0] < $mdload->{$prpa}->[2]; } if (@$md) { $load->{$prpa} += @$md; $lastbuild{$prpa} = $now; } else { delete $masterdispatched{$prpa}; } } } my %workerload; for (grep {!/^\./} ls("$workersdir/building")) { my $host = $_; $host =~ s/:\d+$//; $workerload{$host}->{$_} = 1; } my @idle = grep {!/^\./} ls("$workersdir/idle"); my %idlearch; my %workerinfo; my %workerinfo_mtime; for my $idle (@idle) { my ($harch) = split(':', $idle, 2); my $host = $idle; $host =~ s/:\d+$//; $workerload{$host}->{$idle} = 0; for (@{$cando{$harch} || []}) { push @{$idlearch{$_}}, $idle; } } #BSUtil::printlog("finding jobs"); my %jobs; my %maybesrcchange; my @archs = sort keys %idlearch; my %archdone; my %crossarchlist; while (@archs) { my $arch = shift @archs; next if $archdone{$arch}; $archdone{$arch} = 1; my $ic = $infocache{$arch}; $infocache{$arch} = $ic = {} unless $ic; my @b = grep {!/^\./} ls("$jobsdir/$arch"); my @crossb = grep {/:cross$/} @b; if (@crossb) { my %crossarchs; for (@crossb) { push @{$crossarchs{$2}}, $1 if /^(.*):([^:]+):cross$/; } for my $crossarch (sort keys %crossarchs) { next unless $idlearch{$arch}; my %cj = map {$_ => 1} ls("$jobsdir/$crossarch"); # deltete orphaned marker for (@{$crossarchs{$crossarch}}) { next if $cj{$_}; BSUtil::printlog(" - deleting orphaned cross marker $arch/$_:${crossarch}:cross"); unlink("$jobsdir/$arch/$_:${crossarch}:cross"); } push @archs, $crossarch; $crossarchlist{$crossarch}->{$arch} = 1; } @b = grep {!/:cross$/} @b; } my %locked = map {$_ => 1} grep {/:status$/} @b; my %notlocked = map {$_ => 1} grep {!$locked{$_}} @b; for (grep {!$notlocked{$_} && !$locked{$_}} keys (%{$infocache{$arch} || {}})) { delete $infocache{$arch}->{$_}; } # adapt load for my $job (keys %locked) { my $jn = $job; $jn =~ s/:status$//; next unless $notlocked{$jn}; $jn =~ s/-[0-9a-f]{32}$//s; my ($projid, $repoid, $packid) = split('::', $jn); if (!defined($packid)) { my $info = $ic->{$job} || readxml("$jobsdir/$arch/$job", $BSXML::buildinfo, 1); next unless $info && $info->{'file'} && $info->{'file'} ne '_aggregate'; $ic->{$job} = $info; ($projid, $repoid, $packid) = ($info->{'project'}, $info->{'repository'}, $info->{'package'}); next unless defined $packid; } my $prpa = "$projid/$repoid/$arch"; $load->{$prpa} ||= 0; $load->{$prpa} += 1; $lastbuild{$prpa} = $now; } @b = grep {!/:(?:dir|status|new)$/} @b; @b = grep {!$locked{"$_:status"}} @b; for my $job (@b) { my $info = $ic->{$job}; if (!$info) { my $jn = $job; $jn =~ s/-[0-9a-f]{32}$//s; my ($projid, $repoid, $packid) = split('::', $jn); if (defined($packid)) { $info = {'project' => $projid, 'repository' => 
$repoid, 'package' => $packid, 'arch' => $arch}; } else { $info = readxml("$jobsdir/$arch/$job", $BSXML::buildinfo, 1); next unless $info && $info->{'file'} && $info->{'file'} ne '_aggregate'; $ic->{$job} = $info; } } my $prpa = "$info->{'project'}/$info->{'repository'}/$info->{'arch'}"; push @{$jobs{$prpa}}, $job; $info = $ic->{$job}; if (!$info) { $maybesrcchange{$prpa} = 1; } elsif ($info->{'reason'} && ($info->{'reason'} eq 'new build' || $info->{'reason'} eq 'source change')) { # only count direct changes as source change, not changes because of # a change in a linked package if ($info->{'reason'} eq 'new build' || !$info->{'revtime'} || $info->{'readytime'} - $info->{'revtime'} < 24 * 3600) { $maybesrcchange{$prpa} = 1; } } } } # calculate and distribute project load if (%$load) { my %praload; for my $prpa (keys %$load) { my $pra = $prpa; $pra =~ s/\/.*\//\//s; $praload{$pra} += $load->{$prpa}; } for my $prpa (keys %jobs) { my $pra = $prpa; $pra =~ s/\/.*\//\//s; next unless $praload{$pra}; $load->{$prpa} = rand(.01) unless $load->{$prpa}; $load->{$prpa} = ($load->{$prpa} + $praload{$pra}) / 2; } } #BSUtil::printlog("calculating scales"); my %scales; my @jobprpas = keys %jobs; for my $prpa (@jobprpas) { $load->{$prpa} = rand(.01) unless $load->{$prpa}; my $sc = 0; if ($BSConfig::dispatch_adjust) { my @prios = @{$BSConfig::dispatch_adjust || []}; while (@prios) { my ($match, $adj) = splice(@prios, 0, 2); $sc += $adj if $prpa =~ /^$match/s; } } if ($dispatchprios) { my ($project, $repository, $arch) = split('/', $prpa, 3); for (@{$dispatchprios_project->{$project} || $dispatchprios_project->{':all:'} || []}) { next unless defined($_->{'adjust'}); next if defined($_->{'project'}) && $_->{'project'} ne $project; next if defined($_->{'repository'}) && $_->{'repository'} ne $repository; next if defined($_->{'arch'}) && $_->{'arch'} ne $arch; $sc = 0 + $_->{'adjust'}; } } # clamp $sc = -10000 if $sc < -10000; $sc = 10000 if $sc > 10000; $scales{$prpa} = exp(-$sc * (log(10.)/10.)); } if (1) { #BSUtil::printlog("writing debug data"); # write debug data if (@jobprpas) { BSUtil::store("$rundir/.dispatch.data", "$rundir/dispatch.data", { 'load' => $load, 'scales' => \%scales, 'jobs' => \%jobs, 'powerpkgs' => \%powerpkgs, }); } } my %didsrcchange; my $assigned = 0; my %extraload; # the following helps a lot... #BSUtil::printlog("fast src change load adapt"); for my $prpa (@jobprpas) { next if $maybesrcchange{$prpa}; my $arch = (split('/', $prpa))[2]; my $ic = $infocache{$arch} || {}; $didsrcchange{$prpa} = 1; $load->{$prpa} *= $nosrcchangescale; $load->{$prpa} += staleness($prpa, $now, $ic, $jobs{$prpa} || []); } @jobprpas = sort {$scales{$a} * $load->{$a} <=> $scales{$b} * $load->{$b}} @jobprpas; my %checkconstraintscache; #BSUtil::printlog("assigning jobs"); while (@jobprpas) { my $prpa = shift @jobprpas; my $arch = (split('/', $prpa))[2]; if (!@{$idlearch{$arch} || []}) { next unless $crossarchlist{$arch}; # where can be also build that? next unless grep {@{$idlearch{$_} || []}} keys %{$crossarchlist{$arch}}; } my @b = @{$jobs{$prpa} || []}; next unless @b; #printf "%s %d %d\n", $prpa, $scales{$prpa} * $load->{$prpa}, scalar(@b); my $nextload = @jobprpas ? 
$scales{$jobprpas[0]} * $load->{$jobprpas[0]} : undef; # sort all jobs, src change jobs first my @srcchange; my $ic = $infocache{$arch}; $ic = $infocache{$arch} = {} unless $ic; for my $job (@b) { my $info = $ic->{$job}; if (!$info) { $info = readxml("$jobsdir/$arch/$job", $BSXML::buildinfo, 1); next unless $info && $info->{'file'} && $info->{'file'} ne '_aggregate'; $ic->{$job} = $info; } # clean up job a bit for (qw{bdep subpack}) { delete $info->{$_}; } if (!$info->{'readytime'}) { my @s = stat("$jobsdir/$arch/$job"); $info->{'readytime'} = $s[9]; } if ($info->{'reason'} && ($info->{'reason'} eq 'new build' || $info->{'reason'} eq 'source change')) { # only count direct changes as source change, not changes because of # a change in a linked package if ($info->{'reason'} eq 'new build' || !$info->{'revtime'} || $info->{'readytime'} - $info->{'revtime'} < 24 * 3600) { push @srcchange, $job; $newestsrcchange{$info->{'project'}} = $info->{'readytime'} if ($newestsrcchange{$info->{'project'}} || 0) < $info->{'readytime'}; } } } @b = grep {$ic->{$_}} @b; @b = List::Util::shuffle(@b); @b = sort {($ic->{$b}->{'needed'} || 0) <=> ($ic->{$a}->{'needed'} || 0) || ($ic->{$a}->{'readytime'} || 0) <=> ($ic->{$b}->{'readytime'} || 0)} @b; my %powerjobs; if (%powerpkgs && $BSConfig::powerhosts) { for my $job (@b) { my $jn = $job; $jn =~ s/-[0-9a-f]{32}$//s; my ($projid, $repoid, $packid) = split('::', $jn); $powerjobs{$job} = $powerpkgs{$packid} if $powerpkgs{$packid}; } if (%powerjobs) { # bring em to front! my @nb = grep {!$powerjobs{$_}} @b; @b = grep {$powerjobs{$_}} @b; @b = sort {$powerjobs{$a} <=> $powerjobs{$b}} @b; push @b, @nb; } } my %srcchange = map {$_ => 1} @srcchange; if (@srcchange) { # bring em to front! @b = ((grep {$srcchange{$_}} @b), (grep {!$srcchange{$_}} @b)); } my @preinstalljobs = grep {($ic->{$_}->{'file'} || '') eq '_preinstallimage'} @b; if (@preinstalljobs) { # bring em to front! 
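# (_preinstallimage jobs are hoisted below even ahead of the source-change
#  jobs sorted above: other builds can only profit from a preinstall image
#  once it has actually been built)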
my %preinstalljobs = map {$_ => 1} @preinstalljobs; @b = ((grep {$preinstalljobs{$_}} @b), (grep {!$preinstalljobs{$_}} @b)); if (!$didsrcchange{$prpa}) { $srcchange{$_} = 1 for @preinstalljobs; } } my $rerun; for my $job (@b) { my $info = $ic->{$job}; next unless $info && $info->{'file'} && $info->{'file'} ne '_aggregate'; if (!$srcchange{$job} && !$didsrcchange{$prpa}) { $didsrcchange{$prpa} = 1; $load->{$prpa} *= $nosrcchangescale; $load->{$prpa} += staleness($prpa, $now, $ic, \@b); if (defined($nextload) && $scales{$prpa} * $load->{$prpa} > $nextload) { $rerun = 1; last; } } my @idle = List::Util::shuffle(@{$idlearch{$info->{'hostarch'} || $arch} || []}); last unless @idle; if (@idle > 1) { # sort by worker load my %idleload; for my $idle (@idle) { my $host = $idle; $host =~ s/:\d+$//; my $wl = $workerload{$host}; if ($wl && %$wl) { $idleload{$idle} = (grep {$_ == 0} values(%$wl)) / keys(%$wl); } else { $idleload{$idle} = 1; } } @idle = sort {$idleload{$b} <=> $idleload{$a}} @idle; } my %poweridle; if ($powerjobs{$job}) { # reduce to powerhosts for my $idle (splice @idle) { my $idlehost = (split(':', $idle, 2))[1]; push @idle, $idle if grep {$idlehost =~ /^$_/} @$BSConfig::powerhosts; } if (!@idle) { BSUtil::printlog("job can not be assigned on $arch due to lack of powerhosts: $job"); next; } } my $tries = 0; my $haveassigned; my ($project, $repository, $arch) = split('/', $prpa, 3); my $lastoracle = 0; my $lastoracleidle; my $constraints; if ($info->{'constraintsmd5'}) { my $constraintsmd5 = $ic->{$job}->{'constraintsmd5'}; if (!exists($constraintscache{$constraintsmd5})) { $constraintscache{$constraintsmd5} = getconstraints($info, $constraintsmd5); } $constraints = $constraintscache{$constraintsmd5}; if (!ref($constraints)) { BSUtil::printlog("job has bad constraints file: $job"); next; } if (ref($constraints) eq 'ARRAY') { delete($constraintscache{$constraintsmd5}) if $constraints->[0] < $now; next; } $constraints = overwriteconstraints($info, $constraints) if $constraints->{'overwrite'}; } if ($info->{'prjconfconstraint'}) { my @l = map { [ split(' ', $_) ] } @{$info->{'prjconfconstraint'}}; my $prjconfconstraint = BSDispatcher::Constraints::list2struct($BSXML::constraints, \@l, "$arch/$job"); if ($prjconfconstraint) { $constraints = $constraints ? BSDispatcher::Constraints::mergeconstraints($prjconfconstraint, $constraints) : $prjconfconstraint; } } undef $constraints if $constraints && !%$constraints; push @idle, '__lastoracle' if $constraints; for my $idle (@idle) { if ($idle eq '__lastoracle') { last unless $lastoracleidle; $idle = $lastoracleidle; $lastoracleidle = '__lastoracle'; } next if $badhost{"$project/$info->{'package'}/$arch/$idle"}; next if $badhost{"$project/:repo:$info->{'repository'}/$arch/$idle"}; my ($harch, $hname) = split(':', $idle, 2); my $worker = $workerinfo{$idle}; if (!$worker) { my @s = stat("$workersdir/idle/$idle"); next unless @s; $worker = readxml("$workersdir/idle/$idle", $BSXML::worker, 1); if (!$worker) { for (@{$cando{$harch} || []}) { $idlearch{$_} = [ grep {$_ ne $idle} @{$idlearch{$_}} ]; } next; } $workerinfo{$idle} = $worker; $workerinfo_mtime{$idle} = $s[7]; } if ($BSConfig::dispatch_constraint) { next if !$BSConfig::dispatch_constraint->($info, $worker, $constraints); } # is a helper needed for personality change? 
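# (e.g. an i586 build on an x86_64 host needs a personality-switching helper,
#  "linux32"-style, the exact helper name being an illustrative assumption; a
#  worker announcing <nativeonly/> in its <hardware> section cannot do that
#  switch, so it has to be skipped below)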
if ($harchcando{"$harch/$arch"} && $worker->{hardware} && exists($worker->{hardware}->{nativeonly})) { next; # worker is not supporting the needed personality change } if ($constraints) { my $ora = BSDispatcher::Constraints::oracle($worker, $constraints); next unless defined($ora) && $ora > 0; if ($ora < 1) { if ($lastoracleidle && $lastoracleidle eq '__lastoracle') { my $widle = $now - $workerinfo_mtime{$idle}; my $jwait = $now - $info->{'readytime'}; $widle = 0 if $widle < 0; $jwait = 0 if $jwait < 0; next if $widle / 60 < 1 - $ora && $jwait / 300 < 1 - $ora; } else { if ($ora > $lastoracle) { $lastoracleidle = $idle; $lastoracle = $ora; } next; } } } last unless -e "$jobsdir/$arch/$job"; last if $assigned && $tries >= 5; $tries++; my $res = assignjob($job, $idle, $arch); my $host = $idle; $host =~ s/:\d+$//; if (!$res) { for (@{$cando{$harch} || []}) { $idlearch{$_} = [ grep {$_ ne $idle} @{$idlearch{$_}} ]; } delete $workerload{$host}->{$idle}; next; } last if $res eq 'badjob'; next if $res ne 'assigned'; for (@{$cando{$harch} || []}) { $idlearch{$_} = [ grep {$_ ne $idle} @{$idlearch{$_}} ]; } $assigned++; $jobs{$prpa} = [ grep {$_ ne $job} @{$jobs{$prpa}} ]; $load->{$prpa} += 1; $workerload{$host}->{$idle} = 1; $haveassigned = 1; last; } # we have jobs that could not be assigned and have constraints. # check if non idle workers could build these job or if there are no # workers that met the constraints if (!$haveassigned && $constraints) { if ($info->{'allworkercheck'}++ % 5 == 0) { if (!$info->{'allworkerchecktime'} || $now - $info->{'allworkerchecktime'} > 300) { $info->{'allworkerchecktime'} = $now; my $r = checkconstraints($info, $arch, $job, $constraints, \%checkconstraintscache); # disable check for a day if all is well $info->{'allworkerchecktime'} = $now + 3600 * 24 unless $r; } } } # Tricky, still increase load so that we don't assign # too many non-powerjobs. But only do that once for each powerjob. if (!$haveassigned && $powerjobs{$job} && !$extraload{"$arch/$job"}) { $load->{$prpa} += 1; $extraload{"$arch/$job"} = 1; } # Check if load changes changed our order. If yes, re-sort and start over. if (defined($nextload) && $scales{$prpa} * $load->{$prpa} > $nextload) { $rerun = 1; last; } } if ($rerun) { # our load was changed so much that the order has changed. put us back # on the queue in the correct position. 
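# A hypothetical example of the re-queueing below: if our scaled load just
# grew from 0.4 to 1.7 while the remaining queue holds scaled loads
# (0.6, 1.2, 2.0), the code shifts the 0.6 and 1.2 entries into @front and
# re-inserts us before 2.0, so those two prpas get dispatched first.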
my $newload = $scales{$prpa} * $load->{$prpa}; my @front; push @front, shift(@jobprpas) while @jobprpas && $scales{$jobprpas[0]} * $load->{$jobprpas[0]} < $newload; unshift @jobprpas, @front, $prpa; } last if $assigned >= 50; } forwardevents(); sleep(1) unless $assigned; BSUtil::printlog("assigned $assigned jobs") if $assigned; if (%badhost) { my $now = time(); for (keys %badhost) { if ($badhost{$_} + 24*3600 < $now) { BSUtil::printlog("deleting badhost $_"); delete $badhost{$_}; $badhostchanged = 1; } } } if ($badhostchanged) { if (%badhost) { BSUtil::store("$rundir/.dispatch.badhosts", "$rundir/dispatch.badhosts", \%badhost); } else { unlink("$rundir/dispatch.badhosts"); } undef $badhostchanged; } filechecks(); } sub setdispatchdetails { my ($info, $arch, $job, $details) = @_; if ($info->{'masterdispatched'}) { my $param = { 'uri' => "$info->{'reposerver'}/jobs/$arch/$job", 'request' => 'POST', 'timeout' => 60, }; eval { BSRPC::rpc($param, undef, "cmd=setdispatchdetails", "details=$details"); }; warn($@) if $@; } else { my $ev = { type => 'dispatchdetails', job => $job, details => $details }; my $evname = "dispatchdetails:$job"; mkdir_p("$eventdir/$arch"); writexml("$eventdir/$arch/.$evname.$$", "$eventdir/$arch/$evname", $ev, $BSXML::event); BSUtil::ping("$eventdir/$arch/.ping"); } } sub failjob { my ($info, $arch, $job, $message) = @_; my $param = { 'uri' => "$info->{'reposerver'}/jobs/$arch/$job", 'request' => 'POST', 'timeout' => 60, }; eval { BSRPC::rpc($param, undef, "cmd=fail", "message=$message"); }; warn($@) if $@; } open-build-service-2.9.4/src/backend/bs_dodup000077500000000000000000000440031332555733200211360ustar00rootroot00000000000000#!/usr/bin/perl -w BEGIN { my ($wd) = $0 =~ m-(.*)/- ; $wd ||= '.'; unshift @INC, "$wd/build"; unshift @INC, "$wd"; } use POSIX; use Data::Dumper; use Digest; use Digest::MD5 (); use Encode; use Fcntl qw(:DEFAULT :flock); use XML::Structured ':bytes'; use BSConfiguration; use BSRPC ':https'; use BSHTTP; use BSXML; use BSUtil; use File::Temp (); use Build::Repo; use Build::Rpmmd; use Build::Deb; use Build::Rpm; use strict; my $bsdir = $BSConfig::bsdir || "/srv/obs"; my $reporoot = "$bsdir/build"; my $rundir = "$bsdir/run"; my $eventdir = "$bsdir/events"; my $dodsdir = "$bsdir/dods"; my $timeout_small = 60; my $timeout_large = 300; my $checkinterval_ok = 60 * 60; my $checkinterval_error = 10 * 60; BSUtil::mkdir_p_chown($bsdir, $BSConfig::bsuser, $BSConfig::bsgroup) || die("unable to create $bsdir\n"); BSUtil::drop_privs_to($BSConfig::bsuser, $BSConfig::bsgroup); my $proxy; $proxy = $BSConfig::proxy if defined($BSConfig::proxy); sub fetch { my ($url, $peerfp, $timeout, $filename, $withmd5) = @_; my $param = { 'uri' => $url, 'maxredirects' => 5, }; $param->{'withmd5'} = 1 if $withmd5; $param->{'sslpeerfingerprint'} = $peerfp if $peerfp; $param->{'timeout'} = $timeout if $timeout; $param->{'proxy'} = $proxy; if ($filename) { $param->{'receiver'} = \&BSHTTP::file_receiver; $param->{'filename'} = $filename; } #print "-- $url\n"; my $r; eval { $r = BSRPC::rpc($param); }; die("$url: $@") if $@; return $r; } sub chkverify { my ($file, $sum) = @_; die unless $sum =~ /^(.+?):(.+)$/; my ($type, $res) = ($1, $2); my %resmap = ('md5' => 'MD5', 'sha1' => 'SHA-1', 'sha256' => 'SHA-256', 'sha512' => 'SHA-512'); die("unknown checksum $type\n") unless $resmap{$type}; my $ctx = Digest->new($resmap{$type}); die("cannot create checksum object for type $type\n") unless $ctx; local *F; open(F, '<', $file) || die("$file: $!\n"); $ctx->addfile(\*F); close F; my $chk = 
$ctx->hexdigest(); die("checksum mismatch for $file: $chk != $res\n") unless lc($chk) eq lc($res); } sub gpgverify { my ($data, $sig, $pubkey) = @_; my $tempdir = File::Temp->newdir(); writestr("$tempdir/pubkey", undef, $pubkey); system('gpg2', '-q', '--homedir', $tempdir, '--import', "$tempdir/pubkey") && die("gpg2 key import failed: $?\n"); writestr("$tempdir/data", undef, $data); writestr("$tempdir/data.asc", undef, $sig); system('gpgv', '-q', '--homedir', $tempdir, '--keyring', "$tempdir/pubring.gpg", "$tempdir/data.asc", "$tempdir/data") && die("signature verification failed: $?\n"); } # uncompress file in-place sub uncompress { my ($file, $reffile, $appendfile) = @_; $reffile ||= $file; if ($reffile =~ /\.cz$/) { # can be gz or xz, need to probe local *F; open(F, '<', $file) || die("$file: $!\n"); my $probe; sysread(F, $probe, 5); close F; $reffile = $probe && $probe eq "\xFD7zXZ" ? '.xz' : '.gz'; } if ($reffile =~ /\.(gz|xz)$/) { local *F; my $decmp = $1 eq 'gz' ? 'gunzip' : 'xzdec'; my $nfile = $appendfile ? $appendfile : "$file.$$"; my $pid; if (!($pid = BSUtil::xfork())) { open(STDOUT, $appendfile ? '>>' : '>', $nfile) || die("$nfile: $!\n"); exec($decmp, '-dc', $file); die("$decmp: $!\n"); } waitpid($pid, 0) == $pid || die("waitpid: $!\n"); die("gunzip: $?\n") if $?; if (!$appendfile) { rename($nfile, $file) || die("rename $nfile, $file\n"); } } die("bzip2 in unimplemented\n") if $reffile =~ /\.bz2$/; } sub mastercheck { my ($doddata, $urlpath, $data) = @_; my $master = $doddata->{'master'}; return unless $master && $master->{'url'}; return if $master->{'url'} eq $doddata->{'url'}; # mirror is master my $masterurl = $master->{'url'}; $masterurl .= '/' unless $masterurl =~ /\/$/; my $masterdata = fetch("$masterurl$urlpath", $master->{'sslfingerprint'}, $timeout_small); die("mirror is out of date\n") unless $data eq $masterdata; } sub signaturecheck { my ($doddata, $url, $sslfingerprint, $data, $strip) = @_; return unless $doddata->{'pubkey'}; my $data_asc = fetch($url, $sslfingerprint, $timeout_small); if ($strip) { # remove stable key sig $data_asc =~ s/-----END PGP SIGNATURE-----\n.*$/-----END PGP SIGNATURE-----\n/s; } gpgverify($data , $data_asc, $doddata->{'pubkey'}); } sub getsslfingerprint { my ($doddata) = @_; my $master = $doddata->{'master'}; return undef unless $master; return undef if $master->{'url'} && $master->{'url'} ne $doddata->{'url'}; return $master->{'sslfingerprint'}; # mirror is master } sub dod_susetags { my ($doddata, $cookie, $file) = @_; my $url = $doddata->{'url'}; my $sslfingerprint = getsslfingerprint($doddata); my $descrdir = 'suse/setup/descr'; my $datadir = 'suse'; $url .= '/' unless $url =~ /\/$/; my $content = fetch("${url}content", $sslfingerprint, $timeout_small); my $newcookie = Digest::MD5::md5_hex("$url\n$content"); return undef if ($cookie || '') eq $newcookie; mastercheck($doddata, 'content', $content); signaturecheck($doddata, "${url}content.asc", $sslfingerprint, $content); my ($packages, $packages_sum); for (split("\n", $content)) { next unless /^META (\S+) (\S+) (packages(?:.gz)?)$/s; next unless $1 eq 'MD5' || $1 eq 'SHA1' || $1 eq 'SHA256' || $1 eq 'SHA512'; $packages = $3; $packages_sum = lc($1).":$2"; } die("no packages file in META section of content file\n") unless $packages; fetch("${url}$descrdir/$packages", $sslfingerprint, $timeout_large, $file); chkverify($file, $packages_sum); uncompress($file, $packages); return ($newcookie, $url); } sub dod_rpmmd { my ($doddata, $cookie, $file) = @_; my $url = $doddata->{'url'}; my 
$sslfingerprint = getsslfingerprint($doddata); $url .= '/' unless $url =~ /\/$/; my $repomd = fetch("${url}repodata/repomd.xml", $sslfingerprint, $timeout_small); my $newcookie = Digest::MD5::md5_hex("$url\n$repomd"); return undef if ($cookie || '') eq $newcookie; mastercheck($doddata, 'repodata/repomd.xml', $repomd); signaturecheck($doddata, "${url}repodata/repomd.xml.asc", $sslfingerprint, $repomd); writestr("$file.repomd", undef, $repomd); my @files; Build::Rpmmd::parse_repomd("$file.repomd", \@files); unlink("$file.repomd"); my $primaryfile = (grep {$_->{'type'} eq 'primary' && defined($_->{'location'})} @files)[0]; die("no primary file in repomd.xml\n") unless $primaryfile; die("primary file has no checksum\n") if $doddata->{'pubkey'}&& !$primaryfile->{'checksum'}; fetch("${url}$primaryfile->{'location'}", $sslfingerprint, $timeout_large, $file); chkverify($file, $primaryfile->{'checksum'}) if $primaryfile->{'checksum'}; uncompress($file, $primaryfile->{'location'}); return ($newcookie, $url); } # same parser as in build package: # distribution: //[components] # flat repo: /.[/subdir] # components: comp1,comp2... (main if empty) sub dod_deb { my ($doddata, $cookie, $file) = @_; my $url = $doddata->{'url'}; my $sslfingerprint = getsslfingerprint($doddata); my @components; my $baseurl = $url; if ($url =~ /^(.*\/)\.(\/.*)?$/) { # flat repo $baseurl = $1; @components = ('.'); $url = defined($2) ? "$1$2" : $1; $url .= '/' unless $url =~ /\/$/; } else { if ($url =~ /([^\/]+)$/) { @components = split(/[,+]/, $1); $url =~ s/([^\/]+)$//; } push @components, 'main' unless @components; $url .= '/' unless $url =~ /\/$/; $baseurl = $url; $url =~ s/([^\/]+\/)$/dists\/$1/; $baseurl =~ s/([^\/]+\/)$//; } my $release = fetch("${url}Release", $sslfingerprint, $timeout_small); my $newcookie = Digest::MD5::md5_hex("$baseurl\n".join(',',@components)."\n$release"); return undef if ($cookie || '') eq $newcookie; mastercheck($doddata, 'Release', $release); signaturecheck($doddata, "${url}Release.gpg", $sslfingerprint, $release, 1); my %files; my %csums = ('md5sum' => 'md5', 'sha1' => 'sha1', 'sha256' => 'sha256', 'sha512' => 'sha512'); my $csum; for (split("\n", $release)) { $csum = $csums{lc($1)} if /^(\S+):/; next unless $csum; next unless /^ (\S+) +\d+ +(.*)$/s; next if $files{$2} && length($files{$2}) > length("$csum:$1"); # bigger is better... $files{$2} = "$csum:$1"; } writestr($file, undef, ''); my $basearch = Build::Deb::basearch($doddata->{'arch'}); for my $component (@components) { my $pfile = $component eq '.' ? 
'Packages.gz' : "$component/binary-$basearch/Packages.gz"; die("$pfile not in Release\n") if $doddata->{'pubkey'} && !$files{$pfile}; my $tmp = "$file.tmp"; fetch("$url$pfile", $sslfingerprint, $timeout_large, $tmp); chkverify($tmp, $files{$pfile}) if $files{$pfile}; uncompress($tmp, "Packages.gz", $file); unlink($tmp); } return ($newcookie, $baseurl); } sub dod_arch { my ($doddata, $cookie, $file) = @_; my $url = $doddata->{'url'}; my $sslfingerprint = getsslfingerprint($doddata); $url .= '/' unless $url =~ /\/$/; die("cannot determine repo name\n") unless $url =~ /.*\/([^\/]+)\/os\//; my $reponame = $1; my $r = fetch("${url}$reponame.db", $sslfingerprint, $timeout_large, $file, 1); die unless $r->{'md5'}; my $newcookie = Digest::MD5::md5_hex("$url\n$r->{'md5'}"); return undef if ($cookie || '') eq $newcookie; return ($newcookie, $url); } sub dod_mdk { my ($doddata, $cookie, $file) = @_; my $url = $doddata->{'url'}; my $sslfingerprint = getsslfingerprint($doddata); $url .= '/' unless $url =~ /\/$/; my $r = fetch("${url}media_info/synthesis.hdlist.cz", $sslfingerprint, $timeout_large, $file, 1); die unless $r->{'md5'}; my $newcookie = Digest::MD5::md5_hex("$url\n$r->{'md5'}"); return undef if ($cookie || '') eq $newcookie; uncompress($file, 'synthesis.hdlist.cz'); return ($newcookie, $url); } my %handler = ( 'arch' => \&dod_arch, 'deb' => \&dod_deb, 'susetags' => \&dod_susetags, 'rpmmd' => \&dod_rpmmd, 'mdk' => \&dod_mdk, ); sub cmppkg { my ($op, $p) = @_; # reconstruct evr my $evr = $p->{'epoch'} ? "$p->{'epoch'}:$p->{'version'}" : $p->{'version'}; $evr .= "-$p->{'release'}" if defined $p->{'release'}; my $oevr = $op->{'epoch'} ? "$op->{'epoch'}:$op->{'version'}" : $op->{'version'}; $oevr .= "-$op->{'release'}" if defined $op->{'release'}; if ($p->{'path'} =~ /\.deb$/) { return Build::Deb::verscmp($oevr, $evr); } else { return Build::Rpm::verscmp($oevr, $evr); } } sub addpkg { my ($cache, $p, $archfilter) = @_; return unless $p->{'location'} && $p->{'name'} && $p->{'arch'}; return if $archfilter && !$archfilter->{$p->{'arch'}}; if ($BSConfig::dodupblacklist) { return if grep {$p->{'name'} =~ /^$_/s } @$BSConfig::dodupblacklist; } $p->{'path'} = delete $p->{'location'}; my $key = "$p->{'name'}.$p->{'arch'}"; return if $cache->{$key} && cmppkg($cache->{$key}, $p) > 0; # highest version only $cache->{$key} = $p; } sub parsemetadata { my ($doddata, $file, $baseurl) = @_; my $cache = {}; my $archfilter; if ($doddata->{'archfilter'}) { $archfilter = { map {$_ => 1} split(',', $doddata->{'archfilter'}) }; for (qw{noarch all any}) { $archfilter->{$_} = 1 unless delete $archfilter->{"-$_"}; } } Build::Repo::parse($doddata->{'repotype'}, $file, sub { addpkg($cache, $_[0], $archfilter) }, 'addselfprovides' => 1, 'normalizedeps' => 1, 'withchecksum' => 1, 'testcaseformat' => 1); $baseurl =~ s/\/$//; $cache->{'/url'} = $baseurl; BSUtil::store("$file.parsed", $file, $cache); } sub sendscanrepo { my ($projid, $repoid, $arch) = @_; my $ev= { 'type' => 'scanrepo', 'project' => $projid, 'repository' => $repoid, }; my $evname = "scanrepo:${projid}::$repoid"; $evname = "scanrepo:::".Digest::MD5::md5_hex($evname) if length($evname) > 200; writexml("$eventdir/$arch/.$evname.$$", "$eventdir/$arch/$evname", $ev, $BSXML::event); BSUtil::ping("$eventdir/$arch/.ping"); } sub update_dod { my ($doddata, $unparsed) = @_; my $projid = $doddata->{'project'}; my $repoid = $doddata->{'repository'}; my $arch = $doddata->{'arch'}; die("bad doddata\n") unless $projid && $repoid && $arch; my $repotype = $doddata->{'repotype'} 
|| ''; die("unknown repotype '$repotype'\n") unless $handler{$repotype}; print "updating metadata for $repotype repo at $doddata->{'url'}\n"; die("scheduler does not exist for arch '$arch'\n") unless -e "$eventdir/$arch/.ping"; my $repodir = "$reporoot/$projid/$repoid/$arch/:full"; mkdir_p($repodir) unless -d $repodir; my $cookie = readstr("$repodir/doddata.cookie", 1); chomp $cookie if $cookie; $cookie =~ s/^(\d+ )//s if $cookie; # strip lastcheck time my $newfile = "$repodir/doddata.new.$$"; unlink($newfile); my $now = time(); my ($newcookie, $baseurl) = $handler{$repotype}->($doddata, $cookie, $newfile); if ($newcookie) { if (!$unparsed) { eval { parsemetadata($doddata, $newfile, $baseurl) }; if ($@) { unlink($newfile); die($@); } } rename($newfile, "$repodir/doddata") || die("rename $newfile $repodir/doddata: $!\n"); writestr("$repodir/.doddata.cookie", "$repodir/doddata.cookie", "$now $newcookie\n"); sendscanrepo($projid, $repoid, $arch); } else { print "repository is unchanged\n"; $cookie = '' unless defined $cookie; writestr("$repodir/.doddata.cookie", "$repodir/doddata.cookie", "$now $cookie\n"); unlink($newfile); } } sub scan_dodsdir { my ($startup, $olddoddatas) = @_; print "scanning doddatas directory...\n"; my %newdoddatas; my %ids = map {$_->{'id'} => $_} values(%{$olddoddatas || {}}); for my $f (sort(grep {!/^\./s} ls($dodsdir))) { my @s = stat("$dodsdir/$f"); next unless @s; my $id = "$s[9]/$s[7]/$s[1]"; my $olddoddata = $ids{$id}; if ($olddoddata) { my $prpa = "$olddoddata->{'project'}/$olddoddata->{'repository'}/$olddoddata->{'arch'}"; $newdoddatas{$prpa} = $olddoddata; next; } my $doddata = readxml("$dodsdir/$f", $BSXML::doddata, 1); next unless $doddata; $doddata->{'id'} = $id; my $prpa = "$doddata->{'project'}/$doddata->{'repository'}/$doddata->{'arch'}"; if ($startup) { # get lastcheck from old cookie my $cookie = readstr("$reporoot/$prpa/:full/doddata.cookie", 1) || ''; $doddata->{'lastcheck'} = $1 if $cookie =~ /^(\d+) /s; } $doddata->{'lastcheck'} ||= 0; $newdoddatas{$prpa} = $doddata; } return %newdoddatas; } sub check_dod { my ($doddata) = @_; my $prpa = "$doddata->{'project'}/$doddata->{'repository'}/$doddata->{'arch'}"; BSUtil::printlog("checking $prpa..."); eval { update_dod($doddata) }; my $now = time(); $doddata->{'lastcheck'} = $now; if ($@) { warn($@); $doddata->{'haderror'} = 1; # update lastcheck time in cookie my $cookie = readstr("$reporoot/$prpa/:full/doddata.cookie", 1) || ''; chomp $cookie; $cookie =~ s/^(\d+ )//s if $cookie; # strip lastcheck time mkdir_p("$reporoot/$prpa/:full"); writestr("$reporoot/$prpa/:full/.doddata.cookie", "$reporoot/$prpa/:full/doddata.cookie", "$now $cookie\n"); } } sub check_exitrestart { if (-e "$rundir/bs_dodup.exit") { close(RUNLOCK); unlink("$rundir/bs_dodup.exit"); BSUtil::printlog("exiting..."); exit(0); } if (-e "$rundir/bs_dodup.restart") { close(RUNLOCK); unlink("$rundir/bs_dodup.restart"); BSUtil::printlog("restarting..."); exec($0); die("$0: $!\n"); } } sub daemon { my $startup = 1; my %doddatas; while (1) { if ($startup || -e "$dodsdir/.changed") { unlink("$dodsdir/.changed"); %doddatas = scan_dodsdir($startup, \%doddatas); print "checking state of dod entries...\n"; $startup = 0; } # find next dods to check my %nextcheck; for my $prpa (keys %doddatas) { my $doddata = $doddatas{$prpa}; $nextcheck{$prpa} = $doddata->{'lastcheck'} + ($doddata->{'haderror'} ? 
$checkinterval_error : $checkinterval_ok); } # check em for my $prpa (sort {$nextcheck{$a} <=> $nextcheck{$b} || $a cmp $b} keys %doddatas) { last if $nextcheck{$prpa} > time(); check_dod($doddatas{$prpa}); check_exitrestart(); } # good work! now rest a bit for (1 .. 10) { sleep(1); check_exitrestart(); } } } if (!@ARGV || (@ARGV == 1 && ($ARGV[0] eq '--restart' || $ARGV[0] eq '--exit' || $ARGV[0] eq '--stop'))) { $| = 1; $SIG{'PIPE'} = 'IGNORE'; BSUtil::restartexit($ARGV[0], 'dodup', "$rundir/bs_dodup"); BSUtil::printlog("starting build service DoD updater"); mkdir_p($rundir); open(RUNLOCK, '>>', "$rundir/bs_dodup.lock") || die("$rundir/bs_dodup.lock: $!\n"); flock(RUNLOCK, LOCK_EX | LOCK_NB) || die("dodup is already running!\n"); utime undef, undef, "$rundir/bs_dodup.lock"; daemon(); } if (@ARGV == 2 && $ARGV[0] eq '--dodfile') { check_dod(readxml($ARGV[1], $BSXML::doddata)); exit(0); } my $opt_pubkeyfile; my $opt_master; my $opt_masterfp; my $opt_unparsed; my $opt_archfilter; while (@ARGV) { if (@ARGV > 1 && $ARGV[0] eq '--unparsed') { shift @ARGV; $opt_unparsed = 1; next; } if (@ARGV > 2 && $ARGV[0] eq '--pubkey') { (undef, $opt_pubkeyfile) = splice(@ARGV, 0, 2); next; } if (@ARGV > 2 && $ARGV[0] eq '--master') { (undef, $opt_master) = splice(@ARGV, 0, 2); next; } if (@ARGV > 2 && $ARGV[0] eq '--masterfingerprint') { (undef, $opt_masterfp) = splice(@ARGV, 0, 2); next; } if (@ARGV > 2 && $ARGV[0] eq '--archfilter') { (undef, $opt_archfilter) = splice(@ARGV, 0, 2); next; } last; } my ($prpa, $repotype, $url) = @ARGV; my ($projid, $repoid, $arch) = split('/', ($prpa || ''), 3); die("Usage: bs_dodup [--stop|--restart]\n bs_dodup --dodfile <dodfile>\n bs_dodup [--pubkey <pubkeyfile>] <prpa> <repotype> <url>\n") unless @ARGV == 3 && defined($arch); my $doddata = { 'project' => $projid, 'repository' => $repoid, 'arch' => $arch, 'repotype' => $repotype, 'url' => $url, }; $doddata->{'archfilter'} = $opt_archfilter if $opt_archfilter; $doddata->{'master'}->{'url'} = $opt_master if $opt_master; $doddata->{'master'}->{'sslfingerprint'} = $opt_masterfp if $opt_masterfp; $doddata->{'pubkey'} = readstr($opt_pubkeyfile) if defined $opt_pubkeyfile; update_dod($doddata, $opt_unparsed); open-build-service-2.9.4/src/backend/bs_getbinariesproxy000077500000000000000000000267101332555733200234250ustar00rootroot00000000000000#!/usr/bin/perl -w # # Copyright (c) 2013 OBS Team # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program (see the file COPYING); if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA # ################################################################ # # A little binary proxy to reduce reposerver load # BEGIN { my ($wd) = $0 =~ m-(.*)/- ; $wd ||= '.'; chdir($wd); unshift @INC, "$wd/build"; unshift @INC, "$wd"; } use XML::Structured ':bytes'; use POSIX; use Digest::MD5 (); use Data::Dumper; use Storable (); use Symbol; use BSConfiguration; use BSRPC ':https'; use BSServer; use BSStdServer; use BSUtil; use Build; use strict; my @binsufs = qw{rpm deb pkg.tar.gz pkg.tar.xz}; my $binsufsre = join('|', map {"\Q$_\E"} @binsufs); my $gettimeout = 3600; my $port = 5254; $port = $1 if $BSConfig::getbinariesproxyserver && $BSConfig::getbinariesproxyserver =~ /:(\d+)$/; my $cachedir = "$BSConfig::bsdir/getbinariesproxycache"; my $cachesize = 1024 * 1024 * 1024; # default: 1G $cachesize = $BSConfig::getbinariesproxyserver_cachesize * 1024 * 1024 if $BSConfig::getbinariesproxyserver_cachesize; my $cachetmpdir = "$cachedir/tmp"; my $maxopen; # max number of fds we can open sub set_maxopen() { my $fd = POSIX::open('/dev/null', O_RDONLY); die("cannot open /dev/null: $!\n") unless defined $fd; my @fd = ($fd); while (1) { my $fd = POSIX::dup($fd[0]); last unless defined $fd; push @fd, $fd; last if @fd >= 65536; } POSIX::close($_) for @fd; $maxopen = @fd; print "could open $maxopen file descriptors\n"; } sub manage_cache { my ($prunesize, $cacheold, $cachenew) = @_; # get the lock local *F; BSUtil::lockopen(\*F, '+>>', "$cachedir/content", 1) || return; my $content; if (-s F) { seek(F, 0, 0); $content = Storable::fd_retrieve(\*F); } $content ||= []; my %content = map {$_->[0] => $_->[1]} @$content; # put cacheold, cachenew at the top if ($cacheold && @$cacheold) { splice(@$content, 0, 0, @$cacheold); $content{$_->[0]} = $_->[1] for @$cacheold; } if ($cachenew) { for my $c (reverse @$cachenew) { my $path = pop(@$c); my $cacheid = $c->[0]; my $cachefile = "$cachedir/".substr($cacheid, 0, 2)."/$cacheid"; mkdir_p("$cachedir/".substr($cacheid, 0, 2)); unlink("$cachefile.$$"); next unless link($path, "$cachefile.$$"); rename("$cachefile.$$", $cachefile) || die("rename $cachefile.$$ $cachefile: $!\n"); my $mpath = "$path.meta"; $mpath = "$1.meta" if $path =~ /^(.*)\.(?:$binsufsre)$/; if (-s $mpath) { unlink("$cachefile.meta.$$"); if (link($mpath, "$cachefile.meta.$$")) { rename("$cachefile.meta.$$", "$cachefile.meta") || die("rename $cachefile.meta.$$ $cachefile.meta: $!\n"); } else { unlink("$cachefile.meta"); } } else { unlink("$cachefile.meta"); } unshift @$content, $c; $content{$c->[0]} = $c->[1]; } } # prune cache for my $c (@$content) { if (!defined delete $content{$c->[0]}) { $c = undef; next; } $prunesize -= $c->[1]; if ($prunesize < 0) { my $cacheid = $c->[0]; my $cachefile = "$cachedir/".substr($cacheid, 0, 2)."/$cacheid"; unlink($cachefile); unlink("$cachefile.meta"); $c = undef; next; } } @$content = grep {defined $_} @$content; Storable::nstore($content, "$cachedir/content.new"); rename("$cachedir/content.new", "$cachedir/content") || die("rename $cachedir/content.new $cachedir/content"); close F; } sub getbinaries { my ($cgi, $projid, $repoid, $arch) = @_; my $server = $cgi->{'server'}; my $nometa = $cgi->{'nometa'}; my $metaonly = $cgi->{'metaonly'}; die("nometa and metaonly?\n") if $nometa && $metaonly; mkdir_p($cachedir); mkdir_p($cachetmpdir); 
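# Each entry of the 'binaries' CGI parameter is either a plain "@<name>" or
# carries cache-validation data as "<hdrmd5>[<metamd5>]:<sizek>:<suffix>@<name>"
# (a hypothetical example: "<32-hex-hdrmd5>:42:rpm@screen" for screen.rpm with
# a 42k download size); entries without version data are resolved below via a
# /getbinaryversions call to the repo server.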
set_maxopen() unless defined $maxopen; my @binaries = split(',', $cgi->{'binaries'}); my %bv; my @missingbvs; for my $bin (@binaries) { die("bad binary specification\n") unless $bin =~ /^(?:([0-9a-f]{32})([0-9a-f]{32})?:(\d*):(\S+))?\@(.+)/s; $bin = $5; if (!$1) { push @missingbvs, $bin; next; } $bv{$bin}->{'hdrmd5'} = $1; $bv{$bin}->{'metamd5'} = $2 if $2; $bv{$bin}->{'sizek'} = $3 || 0; $bv{$bin}->{'name'} = "$bin.$4"; } # get missing bvs from server if (@missingbvs) { print "missingbvs: @missingbvs\n"; my @args; push @args, "project=$projid"; push @args, "repository=$repoid"; push @args, "arch=$arch"; push @args, "nometa" if $nometa; push @args, "binaries=".join(',', @missingbvs); my $bvl; eval { $bvl = BSRPC::rpc({ 'uri' => "$server/getbinaryversions", 'timeout' => $gettimeout, }, $BSXML::binaryversionlist, @args); }; warn($@) if $@; for (@{$bvl->{'binary'} || []}) { if ($_->{'error'}) { $bv{$_->{'name'}} = $_; } else { next unless $_->{'name'} =~ /(.*)\.(?:$binsufsre)$/; $bv{$1} = $_; } } } my @cpio; # check the cache my $downloadsizek = 0; my @cacheold; my @cachenew; my @downloadbins; my $openfds = 0; my $tmpprefix = $$.'_'; for my $bin (@binaries) { my $bv = $bv{$bin}; if (!$bv) { push @downloadbins, $bin; next; } if ($bv->{'error'}) { push @cpio, {'name' => $bin, 'error' => $bv->{'error'}}; next; } my $cacheid = Digest::MD5::md5_hex("$projid/$repoid/$arch/$bv->{'hdrmd5'}"); my $cachefile = "$cachedir/".substr($cacheid, 0, 2)."/$cacheid"; my $usecache; my @s; my $binfd; my $metafd; if ($metaonly) { $usecache = 1; } else { my $fd = gensym; my $tmpname = "$cachetmpdir/$tmpprefix$bv->{'name'}"; if (link($cachefile, $tmpname)) { # check hdrmd5 to be sure we got the right bin my $id; eval { $id = Build::queryhdrmd5($tmpname); }; if ($id && $id eq $bv->{'hdrmd5'} && open($fd, '<', $tmpname)) { $binfd = $fd; $usecache = 1; unlink($tmpname); @s = stat($fd); die unless @s; } else { unlink($tmpname); } } } if ($usecache && !$nometa && $bv->{'metamd5'}) { my $fd = gensym; if (open($fd, '<', "$cachefile.meta")) { my $ctx = Digest::MD5->new; $ctx->addfile($fd); seek($fd, 0, 0) || die; if ($ctx->hexdigest() ne $bv->{'metamd5'}) { $usecache = undef; close $fd; close $binfd unless $metaonly; } else { $metafd = $fd; } } else { close $binfd unless $metaonly; $usecache = undef; } } @s = stat($cachefile) if !@s && $usecache; push @cacheold, [$cacheid, $s[7]] if @s && $usecache; if (!$usecache) { push @downloadbins, $bin; $downloadsizek += $bv->{'sizek'}; } else { push @cpio, {'name' => $bv->{'name'}, 'filename' => $binfd} if $binfd; push @cpio, {'name' => "$bin.meta", 'filename' => $metafd} if $metafd; $openfds++ if $binfd; $openfds++ if $metafd; } die("too many files to send\n") if $openfds + 16 > $maxopen; # sorry } # get files not in cache if (@downloadbins) { print "downloading: @downloadbins\n"; my $toomany; my %downloadbins = map {$_ => 1} @downloadbins; if ($downloadsizek * 1024 * 100 > $cachesize) { manage_cache($cachesize - $downloadsizek * 1024); } my @args; push @args, "project=$projid"; push @args, "repository=$repoid"; push @args, "arch=$arch"; my $res = BSRPC::rpc({ 'uri' => "$server/getbinaries", 'directory' => $cachetmpdir, 'map' => $tmpprefix, 'timeout' => $gettimeout, 'receiver' => \&BSHTTP::cpio_receiver, }, undef, @args, 'binaries='.join(',', @downloadbins)); my %havemeta; for my $r (@$res) { if ($r->{'name'} =~ /^\Q$tmpprefix\E(.*)\.($binsufsre)$/) { my $n = $1; my $suf = $2; next unless $downloadbins{$n}; my @s = stat("$cachetmpdir/$r->{'name'}"); die unless @s; my $id = 
Build::queryhdrmd5("$cachetmpdir/$r->{'name'}"); $r->{'hdrmd5'} = $id; if (!$metaonly && !$toomany) { my $fd = gensym; open($fd, '<', "$cachetmpdir/$r->{'name'}") || die; push @cpio, {'name' => "$n.$suf", 'filename' => $fd}; $openfds++; } my $cacheid = Digest::MD5::md5_hex("$projid/$repoid/$arch/$id"); push @cachenew, [$cacheid, $s[7], "$cachetmpdir/$r->{'name'}"]; } elsif ($r->{'name'} =~ /^\Q$tmpprefix\E(.*)\.meta$/) { my $n = $1; next unless $downloadbins{$n}; if (!$nometa && !$toomany) { my $fd = gensym; open($fd, '<', "$cachetmpdir/$r->{'name'}") || die; push @cpio, {'name' => "$n.meta", 'filename' => $fd}; $openfds++; } } $toomany = 1 if $openfds + 16 > $maxopen; # sorry } manage_cache($cachesize, \@cacheold, \@cachenew); # clean up for my $r (@$res) { unlink("$cachetmpdir/$r->{'name'}"); } die("too many files to send\n") if $toomany; } BSServer::reply_cpio(\@cpio); return undef; } sub getpreinstallimage { my ($cgi, $prpa, $hdrmd5) = @_; mkdir_p($cachedir); mkdir_p($cachetmpdir); set_maxopen() unless defined $maxopen; my $cacheid = Digest::MD5::md5_hex("$prpa/$hdrmd5"); my $cachefile = "$cachedir/".substr($cacheid, 0, 2)."/$cacheid"; my $cachefilemeta = readstr("$cachefile.meta", 1) || ''; if ($cachefilemeta eq "$hdrmd5 :preinstallimage\n") { # got it my $fd = gensym; if (open($fd, '<', $cachefile)) { # re-check to make races unlikely $cachefilemeta = readstr("$cachefile.meta", 1) || ''; if ($cachefilemeta eq "$hdrmd5 :preinstallimage\n") { # use it! my @s = stat($fd); # put entry on top manage_cache($cachesize, [ [$cacheid, $s[7]] ], undef) if @s; BSServer::reply_file($fd, "Content-Type: application/octet-stream"); return undef; } close($fd); } } # not in cache, fetch print "downloading: $prpa/$cgi->{'path'}\n"; manage_cache($cachesize - $cgi->{'sizek'} * 1024) if $cgi->{'sizek'}; my $tmpfilename = "$cachetmpdir/$$".'_preinstallimage'; unlink($tmpfilename); my $res = BSRPC::rpc({ 'uri' => "$cgi->{'server'}/build/$prpa/$cgi->{'path'}", 'timeout' => $gettimeout, 'receiver' => \&BSHTTP::file_receiver, 'filename' => $tmpfilename, }); my $fd = gensym; open($fd, '<', $tmpfilename) || die("$tmpfilename: $!\n"); my @s = stat($tmpfilename); die("stat: $!\n") unless @s; writestr("$tmpfilename.meta", undef, "$hdrmd5 :preinstallimage\n"); manage_cache($cachesize, undef, [ [$cacheid, $s[7], $tmpfilename] ]); unlink("$tmpfilename.meta"); unlink($tmpfilename); BSServer::reply_file($fd, "Content-Type: application/octet-stream"); return undef; } sub hello { my ($cgi) = @_; return "\n"; } my $dispatches = [ '/' => \&hello, '/getbinaries $project $repository $arch binaries: nometa:bool? metaonly:bool? server:' => \&getbinaries, '/getpreinstallimage $prpa $hdrmd5:md5 path: sizek:num? server:' => \&getpreinstallimage, ]; my $conf = { 'port' => $port, 'dispatches' => $dispatches, 'setkeepalive' => 1, 'maxchild' => 40, }; set_maxopen() unless @ARGV && ($ARGV[0] eq '--test' || $ARGV[0] eq '--stop' || $ARGV[0] eq '--exit'); BSStdServer::server('bs_getbinariesproxy', \@ARGV, $conf); open-build-service-2.9.4/src/backend/bs_mergechanges000077500000000000000000000130551332555733200224560ustar00rootroot00000000000000#!/usr/bin/perl -w # # Copyright (c) 2014 Adrian Schroeter, SUSE LLC # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or 3 as # published by the Free Software Foundation. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program (see the file COPYING); if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA # ################################################################ BEGIN { my ($wd) = $0 =~ m-(.*)/- ; $wd ||= '.'; unshift @INC, "$wd/build"; unshift @INC, "$wd"; } use strict; use POSIX; use Data::Dumper; use Getopt::Long; use Date::Parse; sub echo_help { print "\n The SUSE changes merge tool =========================== openSUSE package sources contain a .changes file providing a full changelog. A stripped version of that gets attached to rpm %changes to avoid an overly large rpm db. But the full version is kept with the package sources. The classic diff3 merge approach often fails on these files, so this merge tool reads the entire files and sorts the entries according to their dates. If entries with the same date differ, it fails. The classic diff3 merge and manual conflict resolution is the only way then. The tool takes any number of files and merges them into one. The first file must be the common ancestor of the other files. "; } # # Argument parsing # if (@ARGV < 1) { echo_help(); exit(1); } my @files; my $force; my $verbose; while (@ARGV) { my $arg = shift @ARGV; if ($arg eq "--help") { echo_help(); exit(0); } if ($arg eq "--verbose") { $verbose = 1; } elsif ($arg eq "--force") { $force = 1; } elsif ($arg =~ /^-/) { die("Unknown switch $arg"); } else { push @files, $arg; } } die("Give at least one file") if @files < 1; # init my $seperator = "-------------------------------------------------------------------"; my %entries; # utils sub time2mystr { my ($time) = @_; my @lt = gmtime($time); # ctime(3) format output return strftime("%a %b %e %H:%M:%S UTC %Y", @lt); } sub findsim { my ($ent, $allents) = @_; return undef unless $ent->{'fileno'}; my %sim; my @in = grep {!$_->{'fileno'}} @$allents; for my $ent2 (@in) { return $ent2 if $ent2->{'text'} eq $ent->{'text'}; } for my $ent2 (@in) { my @w = split(' ', $ent2->{'text'}); my @wc = grep {$ent->{'text'} =~ /\Q$_\E/} @w; $sim{$ent2} = @wc ?
@wc / @w : 0; } my @sorted = sort {$sim{$a} <=> $sim{$b}} @in; return $sorted[-1] if @sorted && $sim{$sorted[-1]} > .75; return undef; } sub decide { my ($ent, $nfiles) = @_; return $ent unless $ent->{'sim'}; my @sim = @{$ent->{'sim'}}; my @changed = grep {$_->{'text'} ne $ent->{'text'}} @sim; if (!@changed) { return $sim[-1] if @sim == $nfiles - 1; return undef if @sim == $nfiles - 2; } if (@changed && @sim == $nfiles - 1) { return $changed[-1] unless grep {$_->{'text'} ne $changed[-1]->{'text'}} @changed; } die("Conflicting entries for $ent->{time}\n"); } sub setentry { my ($time, $timestr, $email, $text, $fileno) = @_; my $ent = {'time' => $timestr, 'email' => $email, 'text' => $text, 'fileno' => $fileno}; if ($entries{$time}) { my $siment = findsim($ent, $entries{$time}); if ($siment) { push @{$siment->{'sim'}}, $ent; return; } } push @{$entries{$time}}, $ent; } # read all files into a hash my $fileno = 0; my $nfiles = @files; while (@files) { my $file = shift @files; local *F; open(F, '<', $file) || die("Unable to open $file"); my @lines = <F>; close F; print "Read file $file\n" if $verbose; my $init; my $email; my $time; my $timestr; my $text = ""; my $cycle = 0; foreach my $line (@lines) { chomp($line); if (!$init) { die("no ---- separator in first line\n") unless $line eq $seperator; $init = 1; next; } if (!$time) { ($timestr, $email) = split(' - ', $line, 2); $time = str2time($timestr); die("unable to parse time $line\n") unless $time; die("unable to find email in $line\n") unless $email; print "Read ".time2mystr($time)."($time) for $line\n" if $verbose; next; } if ($line eq $seperator) { my @lt = gmtime($time); # check for the special case, we had many entries at 00:00:00 on same day in the past ... # ignoring the hour due to timezone issues, but do not accept it anymore for current entries # we take this as one blob.
# Accept this only for entries in 2006 and before with 00 minutes and 00 seconds if ($lt[5] > 106 || $lt[1] != 0 || $lt[0] != 0) { setentry($time, $timestr, $email, $text, $fileno); $text = ""; $time = undef; $email = undef; next; } } # must be text $text .= "$line\n"; } # last entry setentry($time, $timestr, $email, $text, $fileno); $fileno++; } print "Merged output:\n===========\n" if $verbose; # output the hash for my $time (sort {$b <=> $a} keys %entries) { my %seen; for my $ent (@{$entries{$time}}) { $ent = decide($ent, $nfiles); next unless $ent && $ent->{'fileno'}; # ignore old stuff next if $seen{$ent->{text}}; print "$seperator\n$ent->{time} - $ent->{email}\n$ent->{text}"; $seen{$ent->{text}} = 1; } } open-build-service-2.9.4/src/backend/bs_mkarchrepo000077500000000000000000000054271332555733200221630ustar00rootroot00000000000000#!/usr/bin/perl -w BEGIN { my ($wd) = $0 =~ m-(.*)/- ; $wd ||= '.'; unshift @INC, "$wd/build"; unshift @INC, "$wd"; } use strict; use Digest; use Digest::MD5; use MIME::Base64; use BSUtil; use Build::Arch; my %todo = ( 'desc' => [ 'FILENAME' => 'filename', 'NAME' => 'pkgname', 'BASE' => 'pkgbase', 'VERSION' => 'pkgver', 'DESC' => 'pkgdesc', 'GROUPS' => 'group', 'CSIZE' => 'filesize', 'ISIZE' => 'size', 'MD5SUM' => 'filemd5', 'SHA256SUM' => 'filesha256', 'PGPSIG' => 'pgpsig', 'URL' => 'url', 'LICENSE' => 'license', 'ARCH' => 'arch', 'BUILDDATE' => 'builddate', 'PACKAGER' => 'packager', 'REPLACES' => 'replaces', ], 'depends' => [ 'DEPENDS' => 'depend', 'CONFLICTS' => 'conflict', 'PROVIDES' => 'provides', 'OPTDEPENDS' => 'optdepend', ], 'files' => [ 'FILES' => 'files', ], ); die("Usage: bs_mkarchrepo <reponame> <dir>\n") unless @ARGV == 2; my $reponame = $ARGV[0]; my $dir = $ARGV[1]; my @pkgs = grep {/\.pkg\.tar\.(?:gz|xz)$/} ls($dir); my $rdir = "$dir/$reponame.db"; mkdir_p($rdir); my @pd; for my $pkg (@pkgs) { if (!open(F, '<', "$dir/$pkg")) { warn("$dir/$pkg: $!\n"); next; } my @s = stat(F); next unless @s; my $ctx = Digest::MD5->new; $ctx->addfile(\*F); close F; if (!open(F, '<', "$dir/$pkg")) { warn("$dir/$pkg: $!\n"); next; } my $ctxsha256 = Digest->new('SHA-256'); $ctxsha256->addfile(\*F) if $ctxsha256; close F; my $vars; my $files; eval { $vars = Build::Arch::queryvars("$dir/$pkg"); $files = Build::Arch::queryfiles("$dir/$pkg"); }; warn($@) if $@; next unless $vars; $vars->{'files'} = $files if $files; $vars->{'filename'} = [ $pkg ]; $vars->{'filesize'} = [ $s[7] ]; $vars->{'filemd5'} = [ $ctx->hexdigest ]; $vars->{'filesha256'} = [ $ctxsha256->hexdigest ] if $ctxsha256; my $sig = readstr("$dir/$pkg.sig", 1); if ($sig && length($sig) <= 16384) { $sig = encode_base64($sig, ''); $vars->{'pgpsig'} = [ $sig ]; } my $d = "$vars->{'pkgname'}->[0]-$vars->{'pkgver'}->[0]"; if (!mkdir("$rdir/$d")) { warn("$d: $!\n"); next; } for my $t (sort keys %todo) { my @t = @{$todo{$t}}; open(F, '>', "$rdir/$d/$t") || die("$rdir/$d/$t: $!\n"); while (@t) { my ($tag, $source) = splice(@t, 0, 2); next unless @{$vars->{$source} || []}; print F "\%$tag\%\n"; print F "$_\n" for @{$vars->{$source}}; print F "\n"; } close(F) || die("close: $!\n"); } push @pd, $d; } if (@pd) { unshift @pd, '--'; } else { push @pd, '-T', '/dev/null'; } system('tar', '-czf', "$dir/$reponame.db.tar.gz", '--exclude', 'files', '-C', "$rdir", @pd); system('tar', '-czf', "$dir/$reponame.files.tar.gz", '-C', "$rdir", @pd); BSUtil::cleandir($rdir); rmdir($rdir); open-build-service-2.9.4/src/backend/bs_notar000077500000000000000000000370371332555733200211530ustar00rootroot00000000000000#!/usr/bin/perl -w # # Copyright (c)
2017 SUSE Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program (see the file COPYING); if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA # ################################################################ # # Notary interfacing # BEGIN { my ($wd) = $0 =~ m-(.*)/- ; $wd ||= '.'; unshift @INC, "$wd/build"; unshift @INC, "$wd"; } use JSON::XS (); use MIME::Base64 (); use Digest::SHA (); use Data::Dumper; use BSConfiguration; use BSRPC ':https'; use BSUtil; use BSASN1; use BSPGP; use strict; my $targets_expire_delta = 3 * 366 * 24 * 3600; # 3 years my $notary_timeout = 300; my $registry_timeout = 300; my @signargs; sub keydata2asn1 { my ($keydata) = @_; die("need an rsa pubkey\n") unless ($keydata->{'algo'} || '') eq 'rsa'; my $pubkey = BSASN1::asn1_sequence(BSASN1::asn1_integer_mpi($keydata->{'mpis'}->[0]->{'data'}), BSASN1::asn1_integer_mpi($keydata->{'mpis'}->[1]->{'data'})); $pubkey = BSASN1::asn1_pack($BSASN1::BIT_STRING, pack('C', 0).$pubkey); return BSASN1::asn1_sequence(BSASN1::asn1_sequence($BSASN1::oid_rsaencryption, BSASN1::asn1_null()), $pubkey); } sub rfc3339time { my ($t) = @_; my @gt = gmtime($t || time()); return sprintf "%04d-%02d-%02dT%02d:%02d:%02dZ", $gt[5] + 1900, $gt[4] + 1, @gt[3,2,1,0]; } sub canonical_json { my ($d) = @_; return JSON::XS->new->utf8->canonical->encode($d); } sub sign { my ($data) = @_; return BSUtil::xsystem($data, $BSConfig::sign, @signargs, '-O', '-h', 'sha256'); } sub mktbscert { my ($cn, $not_before, $not_after, $subjectkeyinfo) = @_; my $serial = pack("CC", 0, 128 + int(rand(128))); $serial .= pack("C", int(rand(256))) for (1, 2, 3, 4, 5, 6, 7); my $certversion = BSASN1::asn1_pack($BSASN1::CONT | $BSASN1::CONS | 0, BSASN1::asn1_integer(2)); my $certserial = BSASN1::asn1_pack($BSASN1::INTEGER, $serial); my $sigalgo = BSASN1::asn1_sequence($BSASN1::oid_sha256withrsaencryption, BSASN1::asn1_null()); my $cnattr = BSASN1::asn1_sequence($BSASN1::oid_common_name, BSASN1::asn1_pack($BSASN1::UTF8STRING, $cn)); my $issuer = BSASN1::asn1_sequence(BSASN1::asn1_set($cnattr)); my $validity = BSASN1::asn1_sequence(BSASN1::asn1_utctime($not_before), BSASN1::asn1_utctime($not_after)); my $critical = BSASN1::asn1_boolean(1); my $basic_constraints = BSASN1::asn1_sequence($BSASN1::oid_basic_constraints, $critical, BSASN1::asn1_octet_string(BSASN1::asn1_sequence())); my $key_usage = BSASN1::asn1_sequence($BSASN1::oid_key_usage, $critical, BSASN1::asn1_octet_string(BSASN1::asn1_pack($BSASN1::BIT_STRING, pack("CC", 5, 160)))); my $ext_key_usage = BSASN1::asn1_sequence($BSASN1::oid_ext_key_usage, BSASN1::asn1_octet_string(BSASN1::asn1_sequence($BSASN1::oid_code_signing))); my $extensions = BSASN1::asn1_pack($BSASN1::CONT | $BSASN1::CONS | 3, BSASN1::asn1_sequence($basic_constraints, $key_usage, $ext_key_usage)); my $tbscert = BSASN1::asn1_sequence($certversion, $certserial, $sigalgo, $issuer, $validity, $issuer, $subjectkeyinfo, $extensions); return $tbscert; } sub mkcert { my ($cn, $not_before, $not_after, $subjectkeyinfo) = @_; my 
$tbscert = mktbscert($cn, $not_before, $not_after, $subjectkeyinfo); my $sigalgo = BSASN1::asn1_sequence($BSASN1::oid_sha256withrsaencryption, BSASN1::asn1_null()); my $signature = sign($tbscert); my $cert = BSASN1::asn1_sequence($tbscert, $sigalgo, BSASN1::asn1_pack($BSASN1::BIT_STRING, pack("C", 0), $signature)); return BSASN1::der2pem($cert, 'CERTIFICATE'); } # return the to-be-signed part of a certificate sub gettbscert { my ($cert) = @_; $cert = BSASN1::pem2der($cert, 'CERTIFICATE'); (undef, $cert, undef) = BSASN1::asn1_unpack($cert, $BSASN1::CONS | $BSASN1::SEQUENCE); (undef, $cert, undef) = BSASN1::asn1_unpack($cert, $BSASN1::CONS | $BSASN1::SEQUENCE); return BSASN1::asn1_pack($BSASN1::CONS | $BSASN1::SEQUENCE, $cert); } # remove the serial number from a tbs certificate. We need to do this because the # serial is random and we want to compare two certs. sub removecertserial { my ($tbscert) = @_; (undef, $tbscert, undef) = BSASN1::asn1_unpack($tbscert, $BSASN1::CONS | $BSASN1::SEQUENCE); my $tail = $tbscert; (undef, undef, $tail) = BSASN1::asn1_unpack($tail); # the version my $l = length($tail); (undef, undef, $tail) = BSASN1::asn1_unpack($tail, $BSASN1::INTEGER); # the serial substr($tbscert, length($tbscert) - $l, $l - length($tail), ''); return BSASN1::asn1_pack($BSASN1::CONS | $BSASN1::SEQUENCE, $tbscert); } # get pubkey sub getsubjectkeyinfo { my ($tbscert) = @_; (undef, $tbscert, undef) = BSASN1::asn1_unpack($tbscert, $BSASN1::CONS | $BSASN1::SEQUENCE); (undef, undef, $tbscert) = BSASN1::asn1_unpack($tbscert) for 1..6; (undef, $tbscert, undef) = BSASN1::asn1_unpack($tbscert, $BSASN1::CONS | $BSASN1::SEQUENCE); return BSASN1::asn1_pack($BSASN1::CONS | $BSASN1::SEQUENCE, $tbscert); } sub signedmultipartentry { my ($name, $d, $keyid, $extrakeyid) = @_; my $sig = MIME::Base64::encode_base64(sign(canonical_json($d)), ''); my @sigs = ({ 'keyid' => $keyid, 'method' => 'rsapkcs1v15', 'sig' => $sig }); push @sigs, { 'keyid' => $extrakeyid, 'method' => 'rsapkcs1v15', 'sig' => $sig } if $extrakeyid; # hack: signed must be first $d = { 'AAA_signed' => $d, 'signatures' => \@sigs }; $d = canonical_json($d); $d =~ s/AAA_signed/signed/; return { 'headers' => [ "Content-Disposition: form-data; name=\"files\"; filename=\"$name\"", 'Content-Type: application/octet-stream' ], 'data' => $d }; } # parse arguments my $pubkeyfile; my $dest_creds; my $justadd; while (@ARGV) { if ($ARGV[0] eq '-p') { (undef, $pubkeyfile) = splice(@ARGV, 0, 2); next; } if ($ARGV[0] eq '--dest-creds') { (undef, $dest_creds) = splice(@ARGV, 0, 2); next; } if ($ARGV[0] eq '-P' || $ARGV[0] eq '--project' || $ARGV[0] eq '-u') { push @signargs, splice(@ARGV, 0, 2); next; } if ($ARGV[0] eq '-h') { splice(@ARGV, 0, 2); # always sha256 next; } if ($ARGV[0] eq '--just-add') { shift @ARGV; $justadd = 1; next; } last; } die("Usage: bs_notar -p pubkeyfile registryserver notaryserver gun tags...\n") unless @ARGV >= 3; my ($registryserver, $notaryserver, $gun, @tags) = @ARGV; die("Need a pubkey file\n") unless defined $pubkeyfile; # calculate registry repo from notary gun my $repo = $gun; $repo =~ s/^[^\/]+\///; sub authenticator { my ($cred_header, $param, $wwwauthenticate) = @_; return undef if !$wwwauthenticate; @$cred_header = (); my $auth; my %auth = BSHTTP::parseauthenticate($wwwauthenticate); if ($auth{'basic'} && $dest_creds) { $auth = 'Basic '.MIME::Base64::encode_base64($dest_creds, ''); } elsif ($auth{'bearer'}) { my $bearer = $auth{'bearer'}; my $realm = ($bearer->{'realm'}
|| [])->[0]; return undef unless $realm && $realm =~ /^https?:\/\//i; my @args = BSRPC::args($bearer, 'service', 'scope'); print "requesting bearer auth from $realm [@args]\n"; my $bparam = { 'uri' => $realm }; push @{$bparam->{'headers'}}, 'Authorization: Basic '.MIME::Base64::encode_base64($dest_creds, '') if $dest_creds; my $reply; eval { $reply = BSRPC::rpc($bparam, \&JSON::XS::decode_json, @args); }; warn($@) if $@; return undef unless $reply && $reply->{'token'}; $auth = "Bearer ".$reply->{'token'}; } push @$cred_header, "Authorization: $auth" if defined $auth; return $auth; } my @registry_cred_header; my @notary_cred_header; sub registry_authenticator { return authenticator(\@registry_cred_header, @_); } sub notary_authenticator { return authenticator(\@notary_cred_header, @_); } # # collect stuff to sign # my $manifests = {}; for my $tag (@tags) { my $param = { 'headers' => [ 'Accept: application/vnd.docker.distribution.manifest.v2+json', @registry_cred_header ], 'uri' => "$registryserver/v2/$repo/manifests/$tag", 'authenticator' => \®istry_authenticator, 'timeout' => $registry_timeout, }; my $manifest_json = BSRPC::rpc($param, undef); my $manifest = JSON::XS::decode_json($manifest_json); die("bad manifest for $repo:$tag\n") unless $manifest->{'schemaVersion'} == 2; $manifests->{$tag} = { 'hashes' => { 'sha256' => MIME::Base64::encode_base64(Digest::SHA::sha256($manifest_json), '') }, 'length' => length($manifest_json), }; } # # generate key material # my $gpgpubkey = BSPGP::unarmor(readstr($pubkeyfile)); my $pubkey_data = BSPGP::pk2keydata($gpgpubkey) || {}; die("need an rsa pubkey for container signing\n") unless ($pubkey_data->{'algo'} || '') eq 'rsa'; my $pubkey_times = BSPGP::pk2times($gpgpubkey) || {}; # generate pub key and cert from pgp key data my $pub_bin = keydata2asn1($pubkey_data); my $cert; my $root_key; my $targets_key; my $timestamp_key; my $snapshot_key; my $root_version = 1; my $targets_version = 1; my $dodelete; # new key, hopeless, need to delete old data my $dorootupdate; # same key with different cert # # reuse data from old root entry if we can # if (!$dodelete) { eval { my $param = { 'uri' => "$notaryserver/v2/$gun/_trust/tuf/root.json", 'headers' => [ @notary_cred_header ], 'timeout' => $notary_timeout, 'authenticator' => \¬ary_authenticator, }; my $oldroot = BSRPC::rpc($param, \&JSON::XS::decode_json); $root_version = 1 + $oldroot->{'signed'}->{'version'}; my $oldroot_root_id = $oldroot->{'signed'}->{'roles'}->{'root'}->{'keyids'}->[0]; my $oldroot_targets_id = $oldroot->{'signed'}->{'roles'}->{'targets'}->{'keyids'}->[0]; my $oldroot_timestamp_id = $oldroot->{'signed'}->{'roles'}->{'timestamp'}->{'keyids'}->[0]; my $oldroot_snapshot_id = $oldroot->{'signed'}->{'roles'}->{'snapshot'}->{'keyids'}->[0]; my $oldroot_root_key = $oldroot->{'signed'}->{'keys'}->{$oldroot_root_id}; die("oldroot is not of type rsa-x509\n") if $oldroot_root_key->{'keytype'} ne 'rsa-x509'; my $oldroot_root_cert = MIME::Base64::decode_base64($oldroot_root_key->{'keyval'}->{'public'}); my $oldroot_root_tbscert = gettbscert($oldroot_root_cert); my $new_tbscert = mktbscert($gun, $pubkey_times->{'selfsig_create'}, $pubkey_times->{'key_expire'}, $pub_bin); if (removecertserial($oldroot_root_tbscert) eq removecertserial($new_tbscert)) { # same cert (possibly with different serial). reuse old cert. 
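# Summary of the three possible outcomes of the old-root comparison above
# (see also the status messages printed after this eval):
#   - old and new tbs cert equal after stripping the serial -> reuse the old
#     cert and all four role keys (root, targets, timestamp, snapshot) as-is
#   - same subject public key but a different cert body (e.g. a new expiry)
#     -> $dorootupdate: the root cert is re-created and re-signed, only the
#     timestamp and snapshot keys are kept
#   - a different public key -> $dodelete: the old trust data is wiped and
#     everything is signed from scratch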
$cert = $oldroot_root_cert; $root_key = $oldroot_root_key; $targets_key = $oldroot->{'signed'}->{'keys'}->{$oldroot_targets_id}; $timestamp_key = $oldroot->{'signed'}->{'keys'}->{$oldroot_timestamp_id}; $snapshot_key = $oldroot->{'signed'}->{'keys'}->{$oldroot_snapshot_id}; } elsif (getsubjectkeyinfo($oldroot_root_tbscert) eq getsubjectkeyinfo($new_tbscert)) { # different cert but same pubkey, e.g. different expiration time $dorootupdate = $oldroot_root_id; $timestamp_key = $oldroot->{'signed'}->{'keys'}->{$oldroot_timestamp_id}; $snapshot_key = $oldroot->{'signed'}->{'keys'}->{$oldroot_snapshot_id}; } }; warn($@) if $@; } $dodelete = 1 unless $cert || $dorootupdate; if ($dodelete) { print "overwriting old key and cert...\n"; } elsif ($dorootupdate) { print "updating old key and cert...\n"; } else { print "reusing old key and cert...\n"; } # # setup needed keys # if (!$cert) { $cert = mkcert($gun, $pubkey_times->{'selfsig_create'}, $pubkey_times->{'key_expire'}, $pub_bin); } if (!$root_key) { $root_key = { 'keytype' => 'rsa-x509', 'keyval' => { 'private' => undef, 'public' => MIME::Base64::encode_base64($cert, '')}, }; } if (!$targets_key) { $targets_key = { 'keytype' => 'rsa', 'keyval' => { 'private' => undef, 'public' => MIME::Base64::encode_base64($pub_bin, '') }, }; } if (!$timestamp_key) { my $param = { 'uri' => "$notaryserver/v2/$gun/_trust/tuf/timestamp.key", 'headers' => [ @notary_cred_header ], 'timeout' => $notary_timeout, 'authenticator' => \¬ary_authenticator, }; $timestamp_key = BSRPC::rpc($param, \&JSON::XS::decode_json); } if (!$snapshot_key) { my $param = { 'uri' => "$notaryserver/v2/$gun/_trust/tuf/snapshot.key", 'headers' => [ @notary_cred_header ], 'timeout' => $notary_timeout, 'authenticator' => \¬ary_authenticator, }; $snapshot_key = BSRPC::rpc($param, \&JSON::XS::decode_json); } my $root_key_id = Digest::SHA::sha256_hex(canonical_json($root_key)); my $targets_key_id = Digest::SHA::sha256_hex(canonical_json($targets_key)); my $timestamp_key_id = Digest::SHA::sha256_hex(canonical_json($timestamp_key)); my $snapshot_key_id = Digest::SHA::sha256_hex(canonical_json($snapshot_key)); # # setup root # my $keys = {}; $keys->{$root_key_id} = $root_key; $keys->{$targets_key_id} = $targets_key; $keys->{$timestamp_key_id} = $timestamp_key; $keys->{$snapshot_key_id} = $snapshot_key; my $roles = {}; $roles->{'root'} = { 'keyids' => [ $root_key_id ], 'threshold' => 1 }; $roles->{'snapshot'} = { 'keyids' => [ $snapshot_key_id ], 'threshold' => 1 }; $roles->{'targets'} = { 'keyids' => [ $targets_key_id ], 'threshold' => 1 }; $roles->{'timestamp'} = { 'keyids' => [ $timestamp_key_id ], 'threshold' => 1 }; my $root = { '_type' => 'Root', 'consistent_snapshot' => $JSON::XS::false, 'expires' => rfc3339time($pubkey_times->{'key_expire'}), 'keys' => $keys, 'roles' => $roles, 'version' => $root_version, }; # # setup targets # my $oldtargets; eval { my $param = { 'uri' => "$notaryserver/v2/$gun/_trust/tuf/targets.json", 'headers' => [ @notary_cred_header ], 'timeout' => $notary_timeout, 'authenticator' => \¬ary_authenticator, }; $oldtargets = BSRPC::rpc($param, \&JSON::XS::decode_json); $targets_version = 1 + $oldtargets->{'signed'}->{'version'}; }; if ($justadd && $oldtargets) { for my $tag (sort keys %{$oldtargets->{'signed'}->{'targets'} || {}}) { next if $manifests->{$tag}; print "taking old tag $tag\n"; $manifests->{$tag} = $oldtargets->{'signed'}->{'targets'}->{$tag}; } } if (!$dodelete && !$dorootupdate && BSUtil::identical($manifests, $oldtargets->{'signed'}->{'targets'})) { print "no 
change...\n"; exit 0; } my $targets = { '_type' => 'Targets', 'delegations' => { 'keys' => {}, 'roles' => []}, 'expires' => rfc3339time(time() + $targets_expire_delta), 'targets' => $manifests, 'version' => $targets_version, }; # # delete old data if necessary # if ($dodelete) { my $param = { 'uri' => "$notaryserver/v2/$gun/_trust/tuf/", 'request' => 'DELETE', 'headers' => [ @notary_cred_header ], 'timeout' => $notary_timeout, 'authenticator' => \¬ary_authenticator, }; BSRPC::rpc($param); } # # sign and send data # my @parts; push @parts, signedmultipartentry('root', $root, $root_key_id, $dorootupdate) if $dodelete || $dorootupdate; push @parts, signedmultipartentry('targets', $targets, $targets_key_id); my $boundary = Digest::SHA::sha256_hex(join('', map {$_->{'data'}} @parts)); my $param = { 'uri' => "$notaryserver/v2/$gun/_trust/tuf/", 'request' => 'POST', 'data' => BSHTTP::makemultipart($boundary, @parts), 'headers' => [ "Content-Type: multipart/form-data; boundary=$boundary", @notary_cred_header ], 'timeout' => $notary_timeout, 'authenticator' => \¬ary_authenticator, }; print BSRPC::rpc($param); open-build-service-2.9.4/src/backend/bs_productconvert000077500000000000000000002023601332555733200231060ustar00rootroot00000000000000#!/usr/bin/perl -w # # Copyright (c) 2008 Klaas Freitag, Novell Inc. # Copyright (c) 2008 Adrian Schroeter, Novell Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program (see the file COPYING); if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA # ################################################################ # # Converter to create Kiwi- and Spec files from product definition # BEGIN { my ($wd) = $0 =~ m-(.*)/- ; $wd ||= '.'; chdir($wd); unshift @INC, "$wd/build"; unshift @INC, "$wd"; } use strict; use Getopt::Std; use Data::Dumper; use File::Basename; use Storable; use XML::Structured ':bytes'; use BSUtil; use BSXML; use BSProductXML; use BSKiwiXML; my $bsdir; my $localarch; my $obsname; my $project; eval{ require BSConfig; $bsdir = "$BSConfig::bsdir" if defined($BSConfig::bsdir); $localarch = "$BSConfig::localarch" if defined($BSConfig::localarch); $obsname = "$BSConfig::obsname" if defined($BSConfig::obsname); }; # read the product xml file use vars qw ($opt_h $opt_d $opt_m @errors %conditionals %repositories %groupRefs %archSets $indir); my %product_requires; sub usage() { print<{type} = "system"; $re->{author} = "The SUSE Team"; $re->{contact} = "build\@opensuse.org"; $re->{specification} = $prodRef->{summary}[0]->{_content}; # FIXME: lang dependent return $re; } sub convertFlags( $ ){ my ($flag)=@_; $flag =~ s/GT/>/sg; $flag =~ s/EQ/=/sg; $flag =~ s/LT/</sg; $flag =~ s/LE/<=/sg; return $flag; } sub convertRelationship( $ ){ my ( $relationship ) = @_; $relationship =~ s/suggests/Suggests/sg; $relationship =~ s/recommends/Recommends/sg; $relationship =~ s/requires/Requires/sg; $relationship =~ s/provides/Provides/sg; $relationship =~ s/conflicts/Conflicts/sg; $relationship =~ s/obsoletes/Obsoletes/sg; return $relationship; } # # The conditionals are a kind of macro which are used all over the product definition. # The conditionals part of the product def is parsed into the global conditionalhash # with the conditional name as key. # sub parseConditionals( $ ) { my ($conditionalRef) = @_; # print Dumper $conditionalRef; return unless( $conditionalRef ); foreach my $condRef (@{$conditionalRef}) { my $name = $condRef->{name}; # print "Parsed conditional $name\n"; # print Dumper $condRef; $conditionals{$name} = $condRef; } } sub parseArchsets( $ ) { my ($archSetsRef ) = @_; foreach my $archSet ( @{$archSetsRef } ) { # print "Parsing Archset $archSet->{name}\n"; # print "XXXX " . Dumper( $archSet ) .
"\n"; if( $archSet->{name} ) { my %h; $h{productarch} = $archSet->{productarch}; my @a; foreach my $cRef ( @{$archSet->{arch}} ) { push @a, $cRef->{_content}; } $h{archList} = \@a; $archSets{$archSet->{name}} = \%h; } } # print Dumper %archSets; } sub getUrl( $$$ ){ my ($product,$arch,$searchstring) = @_; my $resulturl=""; foreach my $url ( @{$product->{'urls'}->{'url'}} ){ if ( "$url->{'name'}" eq "$searchstring" ){ if ( exists $url->{'arch'} && "$url->{'arch'}" eq "$arch" ) { my $u = $url->{'_content'}; $u =~ s/%\{_target_cpu\}/$arch/g; return $u; } elsif (exists $url->{'arch'}) { next; } else { my $u = $url->{'_content'}; $u =~ s/%\{_target_cpu\}/$arch/g; $resulturl = $u; # Continue searching, in case we find an architecture match } } } return $resulturl; } sub createArchitectures( $ ) { my ($archSetList) = @_; my $re = {}; my @archs; my %reqArchs; my %archMatrix; foreach my $requiredArch( @{$archSetList} ) { my $ref = $requiredArch->{ref}; die ( "ERROR: No such archset $ref\n" ) unless $archSets{$ref} ; my @archis = @{ $archSets{$ref}->{archList} }; my $border = @archis; # the amount of entries print "WARN: last arch in archset must be noarch\n" unless( $archis[$border-1] eq "noarch" ); $reqArchs{ $archSets{$ref}->{productarch} } = 1; # will be requiredarch in kiwi for( my $i = 0; $i < $border; $i++ ) { $archMatrix{ $archis[$i] } = { fallback => $archis[$i+1] }; } } foreach my $arch ( sort keys %archMatrix ) { my %h; $h{id} = $arch; if( $archMatrix{$arch}->{name} ) { $h{name} = $archMatrix{$arch}->{name}; } else { $h{name} = "dummy"; # FIXME: should become optional }; $h{fallback} = $archMatrix{$arch}->{fallback} if( $archMatrix{$arch}->{fallback}); push @archs, \%h; } my @reqXml; foreach ( sort keys %reqArchs ) { my %h; $h{ref} = $_; push @reqXml, \%h; } $re->{arch} = \@archs; $re->{requiredarch} = \@reqXml; return $re; } sub createProductOptions($$$) { my( $prodRef, $medium, $archSetList ) = @_; my $mediaStyle = getMediaStyle($prodRef); # General FIXME: this works only with a single product on media. 
my $product = $prodRef->{products}{product}[0]; die( "Handling of multiple products on one medium is currently not specified !\n" ) if $prodRef->{products}{product}[1]; my $re = {}; my %varsH; # we need the "default" arch for # - MEDIUM_NAME # - releasenotesurl my $arch="i586"; my @allarchs; foreach my $ar ( @$archSetList ) { $arch=$archSets{$ar->{ref}}->{productarch} if ($archSets{$ar->{ref}}); push @allarchs, $arch; } $varsH{MEDIUM_NAME} = $product->{'name'}."-".$product->{'version'}."-".join( "-", @allarchs ); if ( defined $medium->{'name'} ) { $varsH{MEDIUM_NAME} = $medium->{'name'}."-".join( "-", @allarchs ); } $varsH{PRODUCT_THEME} = $product->{'buildconfig'}->{'producttheme'}; $varsH{MULTIPLE_MEDIA} = "no"; $varsH{MULTIPLE_MEDIA} = "true" if (defined($medium->{'sourcemedia'}) && $medium->{'sourcemedia'} > 1); $varsH{MULTIPLE_MEDIA} = "true" if (defined($medium->{'debugmedia'}) && $medium->{'debugmedia'} > 1); $varsH{RUN_MEDIA_CHECK} = "true" if (defined($medium->{'run_media_check'})) && $medium->{'run_media_check'} ne "no" && $medium->{'run_media_check'} ne "false"; $varsH{RUN_ISOHYBRID} = "true" if (defined($medium->{'run_hybridiso'})) && $medium->{'run_hybridiso'} eq "true"; $varsH{CREATE_REPOMD} = "true" if (defined($medium->{'create_repomd'})) && $medium->{'create_repomd'} ne "no" && $medium->{'create_repomd'} ne "false"; $varsH{MAKE_LISTINGS} = 'false' if (defined($medium->{'run_make_listings'})) && $medium->{'run_make_listings'} ne "yes" && $medium->{'run_make_listings'} ne "true"; $varsH{SHA1OPT} = "-x"; $varsH{REPO_ONLY} = "true" if (defined($medium->{'repo_only'})) && $medium->{'repo_only'} ne "no" && $medium->{'repo_only'} ne "false"; # explicit removal of additional tree $varsH{DROP_REPOSITORY} = "true" if (defined($medium->{'drop_repo'})) && $medium->{'drop_repo'} ne "no" && $medium->{'drop_repo'} ne "false"; unless ($mediaStyle =~ /^suse-1[12]/ || $mediaStyle =~ /^suse-sle11/) { # remove tree for media style sle12/13.2 by default $varsH{DROP_REPOSITORY} = "true" unless defined($medium->{'drop_repo'}) || $varsH{REPO_ONLY} || $varsH{DROP_REPOSITORY}; foreach my $shortsummary ( @{$product->{'shortsummary'}} ){ $varsH{PRODUCT_SUMMARY} = $shortsummary->{'_content'} if ( !
$shortsummary->{'language'} ); } die("No short summary defined, but required for current media styles") unless $varsH{PRODUCT_SUMMARY}; $varsH{PRODUCT_RELEASE} = $product->{'release'} if defined($product->{'release'}); } # switch to sha256 sums except for old distros $varsH{SHA1OPT} .= " -2" unless $mediaStyle eq "suse-11.1" || $mediaStyle eq "suse-11.2" || $mediaStyle eq "suse-11.3" || $mediaStyle eq "suse-sle11-sp2"; $varsH{VENDOR} = $product->{'vendor'}; $varsH{DISTNAME} = $product->{'name'}; $varsH{VERSION} = $product->{'version'}; $varsH{FLAVOR} = $medium->{'flavor'}; $varsH{PRODUCT_DIR} = "/"; $varsH{PRODUCT_NAME} = '$DISTNAME-$FLAVOR'; $varsH{PRODUCT_VERSION} = '$VERSION'; my @vars; foreach my $opt ( sort keys %varsH ) { push @vars, { name => $opt, _content => $varsH{$opt} }; } $re->{productvar} = \@vars; my %options; if (defined($medium->{'sourcemedia'})) { $options{'SOURCEMEDIUM'} = $medium->{'sourcemedia'}; }; if (defined($medium->{'debugmedia'})) { $options{'DEBUGMEDIUM'} = $medium->{'debugmedia'}; }; $options{'IGNORE_MISSING_REPO_PACKAGES'} = "true" if (defined($medium->{'ignore_missing_packages'}) && $medium->{'ignore_missing_packages'} eq "true"); $options{'IGNORE_MISSING_META_PACKAGES'} = "true" if (defined($medium->{'ignore_missing_meta_packages'}) && $medium->{'ignore_missing_meta_packages'} eq "true"); $options{'PLUGIN_DIR'} = "/usr/share/kiwi/modules/plugins/$mediaStyle"; $options{'INI_DIR'} = "/usr/share/kiwi/modules/plugins/$mediaStyle"; $options{'BETA_VERSION'} = $product->{'buildconfig'}->{'betaversion'} if (defined($product->{'buildconfig'}->{'betaversion'})); $options{'MAIN_PRODUCT'} = $product->{'buildconfig'}->{'mainproduct'} if (defined($product->{'buildconfig'}->{'mainproduct'})); my %info; $info{'VENDOR'} = $product->{'vendor'}; if (defined($medium->{'descriptiondir'})) { $info{'DESCRDIR'} = $medium->{'descriptiondir'}; } else { $info{'DESCRDIR'} = $product->{'installconfig'}->{'descriptiondir'}; } if (defined($medium->{'datadir'})) { $info{'DATADIR'} = $medium->{'datadir'}; } else { $info{'DATADIR'} = $product->{'installconfig'}->{'datadir'}; } if (defined($obsname) && (defined($project)) && ("$project" ne "")){ $info{'REPOID'} = "obsproduct://".$obsname."/".$project."/".$product->{'name'}."/".$product->{'version'}."/".$medium->{'flavor'}."/".join( "-", @allarchs ); } if (defined($medium->{'preselected_patterns'})){ $info{'PATTERNS'} .= join(' ',map( $_->{'name'},@{$medium->{'preselected_patterns'}->[0]->{'pattern'}})); } if ($mediaStyle =~ "^suse-11" || $mediaStyle =~ "^suse-12" || $mediaStyle =~ "^suse-sle11") { $info{'DISTRIBUTION'} = $product->{'installconfig'}->{'distribution'}; $info{'NAME'} = $product->{'name'}; $info{'VERSION'} = $product->{'version'}; $info{'SP_VERSION'} = $product->{'patchlevel'} if (defined($product->{'patchlevel'})); foreach my $summary ( @{$product->{'summary'}} ){ $info{'LABEL'} = $summary->{'_content'} if ( ! $summary->{'language'} ); } foreach my $shortsummary ( @{$product->{'shortsummary'}} ){ $info{'SHORTLABEL'} = $shortsummary->{'_content'} if ( ! $shortsummary->{'language'} ); } $info{'RELNOTESURL'} = getUrl($product,$arch,"releasenotes"); $info{'BASEARCHS'} = ""; foreach my $ar ( @$archSetList ) { $info{'BASEARCHS'} .= "$archSets{$ar->{ref}}->{productarch} " if( $archSets{$ar->{ref}} ); } } else { # for SLE 12 and later my $label=""; foreach my $shortsummary ( @{$product->{'summary'}} ){ $label = $shortsummary->{'_content'} if ( ! 
$shortsummary->{'language'} ); } my $cpe = getCpeId($prodRef, $product); $info{'DISTRO'} = "$cpe,$label"; } $options{'REPO_LOCATION'} = getUrl($product,$arch,"repository"); $info{'LINGUAS'} = ""; foreach my $language ( @{$product->{'linguas'}->{'language'}} ){ $info{'LINGUAS'} .= "$language->{'_content'} "; } # Add REGISTERPRODUCT string - see: bnc#458340 if (defined($medium->{'registration'}) and $medium->{'registration'} eq "false") { $info{'REGISTERPRODUCT'} .= "false" } elsif (defined($medium->{'productdependency'}) || $product->{'productdependency'}) { $info{'REGISTERPRODUCT'} .= "true" if (defined($product->{'register'})); } # Add Product Options my @vars1; foreach my $opt ( sort keys %options ) { push @vars1, { name => $opt, _content => $options{$opt} }; } $re->{productoption} = \@vars1; # Add Product Info my @info; push @info, { name => 'CONTENTSTYLE', _content => '11' }; # Needs to be first ! foreach my $opt ( sort keys %info) { push @info, { name => $opt, _content => $info{$opt} }; } $re->{productinfo} = \@info; return $re; } sub createMetadata( $$$ ) { my( $prodRef, $medium, $archSetList ) = @_; return undef unless( $medium->{'metadata'} ); my $re = {}; # print "> " . Dumper $medium->{metadata}; my @packages; my @files; my $metadata_medium = "0"; # $metadata_medium = "1" if (defined($medium->{'sourcemedia'}) && $medium->{'sourcemedia'} > 1); foreach my $pack ( @{ $medium->{metadata}->{package} } ) { my @onlyarch; my $removearch; if ($pack->{removearch}){ next if containsMyArch( $prodRef, $archSetList, $pack->{removearch} ); $removearch = "$pack->{removearch},src,nosrc"; } else { $removearch = "src,nosrc"; } if (defined($pack->{onlyarch})) { push @onlyarch, $pack->{onlyarch}; } else { foreach my $requiredArch( @{$archSetList} ) { my $ref = $requiredArch->{ref}; die( "ERROR: No such archset $ref\n" ) unless $archSets{$ref}; push @onlyarch, $archSets{$ref}->{productarch} unless grep { $_ eq $archSets{$ref}->{productarch} } @onlyarch; } # In 11.4 and later noarch metapackages need an explicit onlyarch="noarch" in the config because we now fail on missing packages if ((not defined $medium->{'mediastyle'}) || $medium->{'mediastyle'} eq "suse-11.1" || $medium->{'mediastyle'} eq "suse-11.2" || $medium->{'mediastyle'} eq "suse-11.3") { push @onlyarch, "noarch" unless grep { $_ eq "noarch" } @onlyarch; } } my $h = { name => $pack->{name}, medium => $metadata_medium, removearch => $removearch, onlyarch => join(",",@onlyarch) }; $h->{'arch'} = $pack->{'arch'} if defined($pack->{'arch'}); $h->{'addarch'} = $pack->{addarch} if defined($pack->{addarch}); push @packages, $h; } my @a; return { repopackage => \@packages }; # my @files; # foreach my $file ( @{ $medium->{metadata}->{file} } ) { # push @files, { name => $file->{name} }; # } # # push @a, { file => \@files }; CHECK: Needed? # # return \@a; } sub containsMyArch( $$$ ) { my ($prodRef, $archSetList, $archList ) = @_; foreach my $s( split( /\s*,\s*/, $archList ) ){ foreach my $requiredArch( @{$archSetList} ) { my $ref = $requiredArch->{ref}; die( "ERROR: No such archset $ref\n" ) unless $archSets{$ref}; return 1 if ( $s eq $archSets{$ref}->{productarch} ); } } return 0; } sub useToPackages( $$$ ) { my ($prodRef, $medium, $archSetList ) = @_; return unless $medium; my @packages; my %supportstates; if (defined($medium->{use_undecided}) && $medium->{use_undecided} eq "true" ) { # Simply take all packages ?
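# (the "*" name below acts as a wildcard repopackage entry, so a medium
# flagged use_undecided presumably pulls in every package that is left)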
push @packages, { name => "*" }; }; return unless $medium->{use}; my @useStatements = @{$medium->{use} }; # print "Use Required: <$useRequired>, Suggested: <$useSuggested>, Recommended: <$useRecommended>\n"; foreach my $useState ( @useStatements ) { my $useRequired; my $useRecommended; my $useSuggested; # Media default settings $useRequired = $medium->{'use_required'} if ( defined($medium->{'use_required'}) && $medium->{'use_required'} eq "true" ); $useRecommended = $medium->{'use_recommended'} if ( defined($medium->{'use_recommended'}) && $medium->{'use_recommended'} eq "true" ); $useSuggested = $medium->{'use_suggested'} if ( defined($medium->{'use_suggested'}) && $medium->{'use_suggested'} eq "true" ); # can get overridden by "use group" settings $useRequired = $useState->{'use_required'} if ( defined($useState->{'use_required'}) && $useState->{'use_required'} eq "true" ); $useRecommended = $useState->{'use_recommended'} if ( defined($useState->{'use_recommended'}) && $useState->{'use_recommended'} eq "true" ); $useSuggested = $useState->{'use_suggested'} if ( defined($useState->{'use_suggested'}) && $useState->{'use_suggested'} eq "true" ); if( $useState->{group} ) { # print "Handling use of group $useState->{group}\n"; my ($packs, $supports) = groupToPackages( $prodRef, $archSetList, $useState->{group}, $useRequired, $useRecommended, $useSuggested ); push @packages, @{$packs} if $packs; if ($supports && keys %$supports > 0) { %supportstates = (%supportstates, %$supports); }; # there might be additional packages listed in the group. if( $useState->{package} ) { foreach my $addPack ( @{$useState->{package} } ) { # print Dumper( $addPack ) . "\n"; my $relType = $addPack->{relationship}; die( "ERROR: Unknown relation type string for package add!\n" ) unless( $relType eq "requires" || $relType eq "recommends" || $relType eq "suggests" ); if( ( $useRequired && $addPack->{relationship} eq "requires") || ( $useRecommended && $addPack->{relationship} eq "recommends" ) || ( $useSuggested && $addPack->{relationship} eq "suggests" ) ) { my %tmp; $tmp{name} = $addPack->{name}; $tmp{medium} = $addPack->{medium} if (defined($addPack->{medium})); if ($addPack->{removearch}) { next if containsMyArch( $prodRef, $archSetList, $addPack->{removearch} ); $tmp{removearch} = $addPack->{removearch}; } push @packages, \%tmp; # separate list of supportstatus values if defined if (defined($addPack->{supportstatus})) { my %stmp; $stmp{$addPack->{name}} = $addPack->{supportstatus}; %supportstates = %stmp; } } } } } elsif( $useState->{pattern} ) { die( "ERROR: Patterns are not supported for repopackages!\n" ); } } return (\@packages, \%supportstates); } sub groupToPackages( $$$$$ ) { my ($prodRef, $archSetList, $group, $useReq, $useRec, $useSug ) = @_; # generate the list of current architectures out of the archSetList # FIXME: In all product configs I saw so far, there is only one entry # in the archsetlist. # What does it mean if there are more? The following code takes all # and allows all. my @validArchs; foreach my $archHashRef (@$archSetList) { my $archSetRef = $archSets{$archHashRef->{ref}}; push @validArchs, $archSetRef->{productarch}; } my @groups = @{$prodRef->{group} || []}; my $groupRef; # search for the group we should convert here.
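# For orientation, a group as handled below looks roughly like this in the
# product definition (element names taken from the accesses in this sub,
# attribute values hypothetical):
#   <group name="sles_base">
#     <conditional name="only_x86_64"/>
#     <packagelist relationship="requires">
#       <package name="bash" supportstatus="l3"/>
#     </packagelist>
#   </group>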
foreach my $gl( @groups ) { if( $gl->{name} eq $group ) { $groupRef = $gl; last; } } unless( $groupRef ) { die( "ERROR: Group <$group> not found!\n" ); } unless( $groupRef->{packagelist} ) { die( "ERROR: Group <$group> has no package lists!\n" ); } # ok, here we have a valid group reference. # print " * resolving group <$groupRef->{name}>\n"; my @packagelists = @{$groupRef->{packagelist}}; my %conditionTemplate; foreach my $condList( @{$groupRef->{conditional} } ) { # print "Handling group conditional $condList->{name}\n"; my $cond = $conditionals{ $condList->{name} }; die( "ERROR: Unknown condition is used: $condList->{name}\n" ) unless $cond; if( $cond->{platform} ) { my @platforms = @{$cond->{platform}}; # the condition only becomes a template foreach my $p ( @platforms ) { my @condArchs; @condArchs = split( /\s*,\s*/, $p->{arch} ) if( $p->{arch} ); # my $takeIt = 1; # Take all condition tags if no arch-tag is there if( $p->{arch} ) { $takeIt = 0; foreach my $validArch( @validArchs ) { if( grep( /\b$validArch\b/, @condArchs ) ) { $takeIt = 1; last; } } } if( $takeIt ) { %conditionTemplate = (%conditionTemplate, %{$p}); } else { # This condition does not match, so drop it } } } } # Drop this group, if condition(s) exist for it, but none matches for this platform return () if ( @{$groupRef->{conditional}} > 0 && !keys %conditionTemplate ); my $useFlags = { requires => $useReq || 0, recommends => $useRec || 0, suggests => $useSug || 0 }; my @resultList; my %supportStates; foreach my $packList ( @packagelists ) { my $relation = $packList->{relationship} || 'requires'; # print "Relation: $relation\n"; if( $useFlags->{$relation} && $packList->{package} ) { # parse the package in my @packs = @{$packList->{package}}; foreach my $pack ( @packs ) { my %h = %conditionTemplate; my $takeIt = 1; $takeIt = 0 unless $pack->{conditional}; # print Dumper $pack; foreach my $condList( @{$pack->{conditional} } ) { my $name = $condList->{name}; my $cond = $conditionals{$name}; next unless defined $h{$name}; $takeIt = 1; print "Handling package conditional $name\n"; # print Dumper "Conditional: ". $cond . "\n"; if( $cond->{platform} ) { my @platforms = @{$cond->{platform}}; foreach my $p ( @platforms ) { %h= (%h, %{$p}); } } if( $cond->{media} ) { $h{medium} = $cond->{media}->{number}; } } $h{name} = $pack->{name}; push @resultList, \%h; if( $pack->{supportstatus} || $packList->{supportstatus} ) { $supportStates{$pack->{name}} = ($pack->{supportstatus}||$packList->{supportstatus}); } } } } return (\@resultList, \%supportStates); } # # This sub expands the patterns sub expandPackages( $ ) { my ($groupRef) = @_; my $name = $groupRef->{name}; print "Working on group $name\n"; my @patterns = @{$groupRef->{pattern}}; my $pat = @{$groupRef->{pattern}}[0]; $groupRef->{_pattern} = $pat; foreach my $pack ( @{$groupRef->{group}} ) { my $packListRef = $pack->{package}; my $relation = $pack->{relationship}; my @resultPacks; foreach my $packRef ( @${packListRef} ) { # print "Pushing $packRef->{name}\n"; my %packValues; $packValues{name} = $packRef->{name}; if( $groupRef->{platform} ) { # forcerepo?? foreach my $tag ('forcearch', 'addarch', 'onlyarch', 'removearch', 'source', 'script', 'medium' ) { $packValues{$tag} = $groupRef->{platform}->{$tag} if( $groupRef->{platform}->{$tag} ); } } push @resultPacks, \%packValues; } my $keyname = "_" . 
lc $relation; print "Keyname of package list: $keyname\n"; $groupRef->{$keyname} = \@resultPacks; } } # # Creation of the instsource part of the kiwi file # # note that the product spec contains a list of archsets. For each of these archsets and # for each of the media there must be a separate kiwi file. # # 1. parameter: the reference to the product datastructure # 2. parameter: the reference to the current media datastructure # 3. parameter: list of the archs for this kiwi file. # sub createInstsource( $$$ ) { my( $prodRef, $medium, $archSetList ) = @_; my $re = {}; $re->{architectures} = createArchitectures( $archSetList ); $re->{productoptions} = createProductOptions( $prodRef, $medium, $archSetList ); my @r; my $count = 0; foreach my $repo ( @{$prodRef->{repositories}{repository} } ) { my %h; my $kiwipath; next if defined($repo->{build}) && $repo->{build} eq "ignore"; $count = $count + 1; $h{priority} = $count; $h{name} = "repository_".$count; if ($repo->{path} =~ /^obs:\/\/([^\/]*)\/([^\/]*)$/ ) { #old format without obsname $h{local} = "true"; $kiwipath = "obs://$1/$2"; } elsif ($repo->{path} =~ /^obs:\/\/([^\/]*)\/([^\/]*)\/([^\/]*)$/ ) { $h{local} = "true"; $kiwipath = "obs://$2/$3"; } elsif ($repo->{path} =~ /^obsrepositories:/ ) { $h{local} = "true"; $kiwipath = "obsrepositories:/"; } else { die( "ERROR: Non obs:// url as repository: $repo->{path} !\n" ); }; $h{source} = { path => $kiwipath }; push @r, \%h; } $re->{instrepo} = \@r; # metadata, media dependent my $ref = createMetadata( $prodRef, $medium, $archSetList ); if( $ref ) { $re->{metadata} = $ref; } # repopackages my @packages; my ($useToPacks, $supportStates) = useToPackages( $prodRef, $medium, $archSetList ); if( $useToPacks ) { push @packages, { repopackage => $useToPacks }; } # print "Packlist: " . Dumper \@packages; $re->{repopackages} = \@packages; return ($re, \%$supportStates); } sub createRepository { # This is for a dummy entry, it is required by the kiwi DTD, but not used # for installation media. my( $prodRef ) = @_; my @repository; my $source; my $dummydir = "/var/lib/empty"; # Do we ever have a different repo type than "yast" on products ?
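# The dummy entry serializes to something like
#   <repository type="yast2"><source path="/var/lib/empty"/></repository>
# (shape inferred from the hash built below)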
$source->{ 'path' } = $dummydir; push @repository, { 'type' => 'yast2', 'source' => $source }; return \@repository; } sub createFlavorReadme( $$ ){ my($prodRef,$product)=@_; my $readme_file = ""; my %seen; foreach my $flavor ( @{$prodRef->{mediasets}->{media}} ){ next if !defined($flavor->{'flavor'}) || $flavor->{'flavor'} eq '' || $seen{$flavor->{flavor}}; $seen{$flavor->{flavor}} = 1; my $readmedir = "\$RPM_BUILD_ROOT/%{_defaultdocdir}/$product->{releasepkgname}-$flavor->{flavor}"; $readme_file .= "mkdir -p $readmedir\n"; $readme_file .= "cat >$readmedir/README << EOF\n"; $readme_file .= "This package only exists for providing the product flavor \'$flavor->{flavor}\'.\n"; $readme_file .= "\nEOF\n\n"; } return $readme_file; } sub writeMigrationSPECfile( $$$$$ ) { my( $file, $prodRef, $product, $newpatchlevel, $migtarget ) = @_; my $mproduct = Storable::dclone($product); undef $mproduct->{summary}; undef $mproduct->{shortsummary}; if ($migtarget) { $mproduct->{name} = $product->{name}."-".$migtarget."-migration"; } else { $mproduct->{name} = $product->{name}."-SP".$newpatchlevel."-migration"; } # Upgrade section must not be in migration to avoid double notification delete $mproduct->{upgrades}; # Set default values for release package reference $mproduct->{'installconfig'}->{'releasepackage'} = { 'flag' => "EQ" }; $mproduct->{'installconfig'}->{'releasepackage'}->{'name'} = '%{name}'; $mproduct->{'installconfig'}->{'releasepackage'}->{'version'} = '%{version}'; $mproduct->{'installconfig'}->{'releasepackage'}->{'release'} = '%{release}'; my $content="# "; $content.="\n\n"; my $package_name = $mproduct->{name}; $package_name =~ s/\./_/g; $content.="Name: $package_name\n"; my $sum_str = ""; if ($migtarget) { $sum_str = "$product->{name} $migtarget Migration Product"; } else { $sum_str = "$product->{name} Service Pack $newpatchlevel Migration Product"; } $content.="Summary: $sum_str\n"; push @{$mproduct->{summary}}, { '_content' => $sum_str }; push @{$mproduct->{shortsummary}}, { '_content' => $sum_str }; $content.="Version: ".$product->{version}."\n"; $content.="Release: 0\n"; $content.="License: BSD-3-Clause\n"; $content.="Group: System/Fhs\n"; $content.="Provides: product()\n"; $content.="Provides: product(".$mproduct->{name}.") = %{version}-%{release}\n"; $content.="Requires: product(".$product->{name}.") = ".$product->{version}."\n"; $content.="AutoReqProv: on\n"; $content.="BuildRoot: %{_tmppath}/%{name}-%{version}-build\n"; $content.="\n%description\n"; if ($migtarget) { $content.="Product to migrate to $product->{name} $migtarget.\n"; } else { $content.="Product to migrate to $product->{name} Service Pack $newpatchlevel.\n"; } $content.="\n\n"; $content.="\n%prep\n"; $content.="\n%build\n\n"; $content.="\n%install\n"; $content.=createProductFile($prodRef,$mproduct); $content.="\n%clean\n"; $content.="rm -rf %buildroot\n"; $content.="\n%files\n"; $content.="%defattr(644,root,root,755)\n"; $content.="%dir /etc/products.d\n"; $content.="/etc/products.d/*.prod\n"; $content.="\n%changelog\n"; # write out the modified file.
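# writestr() comes from BSUtil; it dies on write errors, and its second
# argument (elsewhere in the backend used as a rename target for atomic
# replacement) is deliberately left undef here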
writestr($file, undef, $content); } sub escapeProvides($) { my( $string ) = @_; # :- must be escaped for createrepo $string =~ s/([^a-zA-Z0-9_%{}\.])/sprintf("%%%02X", ord($1))/ge; return $string; } sub getMediaStyle($) { my( $prodRef ) = @_; my $mediaStyle = "suse-11.1"; # fallback value my $medium = $prodRef->{mediasets}->{media}[0]; $mediaStyle = $medium->{'mediastyle'} if (defined($medium->{'mediastyle'})); return $mediaStyle; } sub writeProductSPECfile( $$$$ ) { my( $file, $infile, $prodRef, $product ) = @_; my $product_flavors=""; # take media style from first media. not nice, but we can only have one product package for all of them. my $medium = $prodRef->{mediasets}->{media}[0]; ### A product may obsolete packages. my $obsoletepackage=""; for my $p ( @{$product->{'installconfig'}->{'obsoletepackage'}} ) { $obsoletepackage .= "\nProvides: weakremover(".$p->{'_content'}.")"; } $obsoletepackage .= "\n"; # My product provides my $productprovides=""; $productprovides.="Provides: %name-%version\n"; $productprovides.="Provides: $prodRef->{'project'}->{'name'}\n" if ((defined($prodRef->{'project'}->{'name'})) && ("$prodRef->{'project'}->{'name'}" ne "")); my $productdependencies=""; $productdependencies=createProductDependencyLines($product->{'productdependency'}) if defined($product->{'productdependency'}); my $mediaStyle = getMediaStyle($prodRef); if ($mediaStyle =~ /^suse-1[12].[123]$/ || $mediaStyle =~ /^suse-sle11/) { $productprovides.="Provides: product()\n"; $productprovides.="Provides: product($product->{'name'}) = %version-%release\n"; } else { # openSUSE 13.2 and SLE 12 case die("product register release is obsolete") if defined($product->{register}->{release}); # was used for OEM products on SLE 10 # Note: was also used for OEM products on SLE 10 in a different way die("product register flavor with invalid content") if defined($product->{register}->{flavor}) && $product->{register}->{flavor} ne 'module' && $product->{register}->{flavor} ne 'extension'; die("product release is not set but required") unless defined($product->{release}) and $product->{release} ne ""; $productprovides.="Provides: product() = $product->{name}\n"; $productprovides.="Provides: product(".$product->{name}.") = $product->{version}-$product->{release}\n"; foreach my $r ( @{$product->{register}->{pool}->{repository}} ) { if ($r->{'url'}) { die("conflicting values url<>project") if defined($r->{'project'}); die("conflicting values url<>name") if defined($r->{'name'}); die("conflicting values url<>medium") if defined($r->{'medium'}); } else { die("missing project in pool repository") unless defined($r->{'project'}); die("missing name in pool repository") unless defined($r->{'name'}); die("missing medium in pool repository") unless defined($r->{'medium'}); } } foreach my $dt ( @{$product->{register}->{updates}->{distrotarget}} ) { $productprovides.="%ifarch $dt->{arch}\n" if $dt->{'arch'}; $productprovides.="Provides: product-register-target() = ".escapeProvides($dt->{'_content'})."\n"; $productprovides.="%endif\n" if $dt->{'arch'}; next; } foreach my $shortsummary ( @{$product->{'shortsummary'}} ){ $productprovides.="Provides: product-label() = ".escapeProvides($shortsummary->{'_content'})."\n" if ( ! 
$shortsummary->{'language'} ); } my $cpe = getCpeId($prodRef, $product); $productprovides.="Provides: product-cpeid() = ".escapeProvides($cpe)."\n" if $cpe; # $productprovides.="Provides: product-type()\n"; # $productprovides.="Provides: product-flags()\n"; foreach my $url ( @{$product->{'urls'}->{'url'}} ){ $productprovides.="Provides: product-url(".escapeProvides($url->{'name'}).") = ".escapeProvides($url->{'_content'})."\n"; } foreach my $repo ( @{$product->{register}->{updates}->{repository}} ) { $productprovides.="%ifarch $repo->{arch}\n" if $repo->{arch}; $productprovides.="Provides: product-updates-repoid() = ".escapeProvides("obsrepository://$obsname/$repo->{project}/$repo->{name}")."\n"; $productprovides.="%endif\n" if $repo->{arch}; } if ($product->{register}->{flavor}) { $productprovides.="Provides: product-register-flavor() = "; $productprovides.=escapeProvides($product->{register}->{flavor})."\n"; } } foreach my $predecessor ( @{$product->{'predecessor'}} ){ $productprovides.="Obsoletes: product:$predecessor\n"; } if (defined($product->{'endoflife'})) { if ($product->{'endoflife'} eq '') { $productprovides.="Provides: product-endoflife()\n"; } else { $productprovides.="Provides: product-endoflife() = ".escapeProvides($product->{'endoflife'})."\n"; } } if (defined($product->{'patchlevel'}) && $product->{'patchlevel'} ne '0') { $productprovides.="Provides: product($product->{'name'}-SP$product->{'patchlevel'}) = %version-%release\n"; $productprovides.="Obsoletes: ".$product->{name}."-SP".$product->{'patchlevel'}."-migration\n"; # release package name $productprovides.="Obsoletes: product:".$product->{name}."-SP".$product->{'patchlevel'}."-migration\n"; # product name } $productprovides.="Requires: product_flavor($product->{name})\n"; $product_flavors.=createSPECfileFlavors($prodRef,$product); # Create product file to be packaged my $zypp_product_file = createProductFile($prodRef,$product); $zypp_product_file.= createFlavorReadme($prodRef,$product); my $os_release_file = createOsReleaseFile($prodRef,$product); my $str; if ("$infile" eq ""){ my ($sec,$min,$hour,$mday,$mon,$year,$wday,$ydat,$isdst)=localtime(); $year += 1900; # write the specfile header $str="# # spec file for package $product->{releasepkgname} (Version $product->{version}) # # Copyright (c) $year $product->{'vendor'}. # # All modifications and additions to the file contributed by third parties # remain the property of their copyright owners, unless otherwise agreed # upon. The license for this file, and modifications and additions to the # file, is the same license as for the pristine package itself (unless the # license for the pristine package is not an Open Source License, in which # case the license is the MIT License). An \"Open Source License\" is a # license that conforms to the Open Source Definition (Version 1.9) # published by the Open Source Initiative. "; my $bugtracker=getUrl($product,"i586","bugtracker"); $str.="\n# Please submit bugfixes or comments via $bugtracker\n#\n\n" if ("$bugtracker" ne ""); $str.="\n\n"; $str.="Name: $product->{releasepkgname}\n"; $str.="%define product $product->{name}\n"; if (defined ($product->{'buildconfig'}->{'betaversion'})){ $str.="%define betaversion $product->{'buildconfig'}->{'betaversion'}\n"; } foreach my $summary ( @{$product->{'summary'}} ){ $str.="Summary: $summary->{_content}\n" if ( ! 
$summary->{'language'} ); } $str.="Version: ".$product->{version}."\n"; $str.="Release: 0\n"; # FIXME: check if this is really handled via BS $str.="License: BSD-3-Clause\n"; $str.="Group: System/Fhs\n"; $str.=$obsoletepackage; $str.=$productprovides; $str.="\n___PRODUCT_DEPENDENCIES___\n"; $str.="AutoReqProv: on\n"; $str.="BuildRoot: %{_tmppath}/%{name}-%{version}-build\n"; $str.="\n%description\n"; for my $description ( @{$product->{'description'} || []} ){ $str.="$description->{_content}\n" if ( ! $description->{'description'} ); } $str.="\n\n"; $str.="___FLAVOR_PACKAGES___\n"; $str.="\n%prep\n"; $str.="\n%build\n\n"; $str.="\n%install\n"; $str.="___CREATE_PRODUCT_FILES___\n"; $str.="\n%clean\n"; $str.="rm -rf %buildroot\n"; $str.="\n%files\n"; $str.="%defattr(644,root,root,755)\n"; $str.="%dir /etc/products.d\n"; $str.="/etc/products.d/*.prod\n"; $str.="\n%changelog\n"; } else { $str = readstr($infile); } # replace all strings $str =~ s/___DISTNAME___/$product->{name}/g; $str =~ s/___BASE_VERSION___/$product->{baseversion}/g; $str =~ s/___VERSION___/$product->{version}/g; if ( defined $product->{buildconfig}->{betaversion} ) { $str =~ s/___BETA_VERSION___/$product->{buildconfig}->{betaversion}/g; } else { $str =~ s/___BETA_VERSION___//g; } if ((not defined $medium->{'mediastyle'}) || $medium->{'mediastyle'} eq "suse-11.1" || $medium->{'mediastyle'} eq "suse-11.2" || $medium->{'mediastyle'} eq "suse-11.3") { # this is the product release, not a package release. should not be used anymore. # this old way is a problem because it can produce leading zeros in product provides $str =~ s/___RELEASE___/0%{?release}/g; } else { # product release is not really used so far, but defined in zypp stack. So let's # support it. But it is so far 0 on all our products $str =~ s/___RELEASE___/$product->{release}/g; } $str =~ s/___PATCH_LEVEL___/$product->{'patchlevel'}/g; $str =~ s/___PACKAGE_NAME___/$product->{releasepkgname}/g; $str =~ s/___PRODUCT_NAME___/$product->{name}/g; $str =~ s/___SUMMARY___/$product->{summary}[0]->{_content}/g; # FIXME: find the non-lang one $str =~ s/___DESCRIPTION___/$product->{description}[0]->{_content}/g; # FIXME: find the non-lang one $str =~ s/___FLAVOR_PACKAGES___/$product_flavors/g; $str =~ s/___CREATE_PRODUCT_FILES___/$zypp_product_file/g; $str =~ s/___PRODUCT_DEPENDENCIES___/$productdependencies/g; $str =~ s/___CREATE_OS_RELEASE_FILE___/$os_release_file/g; $str =~ s/___PRODUCT_PROVIDES___/$productprovides/g; $str =~ s/___OBSOLETE_PACKAGES___/$obsoletepackage/g; # write out the modified file.
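# Worked example of the substitutions above, with hypothetical product data
# name="SLES", version="12", patchlevel="1": a template line such as
#   Provides: product(___PRODUCT_NAME___-SP___PATCH_LEVEL___) = ___VERSION___
# would come out as
#   Provides: product(SLES-SP1) = 12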
writestr($file, undef, $str); } sub createPreSelectPatternDepsOld ( $ ) { my ( $patterns ) = @_; my $pattern_lines=""; foreach my $pattern (@{$patterns->[0]->{'pattern'}}){ $pattern_lines.="Recommends: pattern() = $pattern->{'name'}\n"; } return $pattern_lines; } sub createPreSelectPatternDeps ( $ ) { my ( $patterns ) = @_; my $pattern_lines=""; foreach my $pattern (@{$patterns->[0]->{'pattern'}}){ $pattern_lines.="Provides: defaultpattern($pattern->{'name'})\n"; } return $pattern_lines; } sub createProductDependencyLines ( $ ) { my ( $productDependency ) = @_; my $product_dependencies; foreach my $dependency (@{$productDependency}){ my ($relship, $version, $flavor, $flag); $relship = convertRelationship($dependency->{'relationship'}); $version = $dependency->{'version'} if defined($dependency->{'version'}); #old style if (defined($dependency->{'baseversion'})) { $version = $dependency->{'baseversion'}; $version .= ".".$dependency->{'patchlevel'} if (defined($dependency->{'patchlevel'}) && $dependency->{'patchlevel'} ne '0'); $version .= "-".$dependency->{'release'} if defined($dependency->{'release'}); } $flavor = "-".$dependency->{'flavor'} if defined($dependency->{'flavor'}); $flag = convertFlags($dependency->{'flag'}) if defined($dependency->{'flag'}); if (!$flag && ($version ne "")) { # avoid something like Requires: sles 11 $flag = "="; } $flavor = "" unless defined($flavor); $product_dependencies.="$relship: product(".$dependency->{'name'}."$flavor) $flag $version\n"; } return $product_dependencies; } sub createSPECfileFlavors ( $$ ) { my ( $prodRef,$product ) = @_; my $product_flavors = ''; my %seen; # take media style from first media. not nice, but we can only have one product package for all of them. my $defaultmedium = $prodRef->{mediasets}->{media}[0]; foreach my $flavor ( @{$prodRef->{mediasets}->{media}} ){ next if !defined($flavor->{'flavor'}) || $flavor->{'flavor'} eq '' || $seen{$flavor->{flavor}}; $seen{$flavor->{flavor}} = 1; $product_flavors.="%package $flavor->{flavor}\n"; $product_flavors.="License: BSD-3-Clause\n"; $product_flavors.="Group: System/Fhs\n"; if ((defined($prodRef->{'project'}->{'name'})) && ("$prodRef->{'project'}->{'name'}" ne "")){ # TODO: - split between ":" -> Provides: SUSE \n Provides: SUSE:Factory ... # - add plattform $product_flavors.="Provides: $prodRef->{'project'}->{'name'}\n"; } $product_flavors.="Provides: product_flavor()\n"; $product_flavors.="Provides: flavor($flavor->{flavor})\n"; if (defined($flavor->{'preselected_patterns'})){ if ($flavor->{'mediastyle'} ne "suse-sle12" && $flavor->{'mediastyle'} ne "suse-sle12-sp1" ) { $product_flavors.=createPreSelectPatternDeps($flavor->{'preselected_patterns'}); } else { $product_flavors.=createPreSelectPatternDepsOld($flavor->{'preselected_patterns'}); } } if ((not defined $flavor->{'mediastyle'}) || $flavor->{'mediastyle'} eq "suse-11.1" || $flavor->{'mediastyle'} eq "suse-11.2" || $flavor->{'mediastyle'} eq "suse-11.3") { $product_flavors.="Provides: product_flavor($product->{name}) = %version-%release\n"; } else { # this is the product version and release, not a package release. my $release = $product->{release}; $release ||= "0"; $product_flavors.="Provides: product_flavor($product->{name}) = $product->{version}-$release\n"; } if (defined($flavor->{'productdependency'})){ # old compat code. 
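    # createProductDependencyLines() renders each <productdependency> as a
    # plain RPM dependency on the product provides, e.g. (illustrative names):
    #
    #   Requires: product(sle-sdk) = 12
    #   Recommends: product(sles-docs)
    #
    # The flag is forced to "=" whenever a version is present but no flag
    # was given, so nothing like "Requires: product(sles) 11" can slip out.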
die("productdependency works only with new mediastyles") unless defined($defaultmedium->{'mediastyle'}); if ($defaultmedium->{'mediastyle'} ne "suse-11.1" && $defaultmedium->{'mediastyle'} ne "suse-11.2" && $defaultmedium->{'mediastyle'} ne "suse-11.3" && $defaultmedium->{'mediastyle'} ne "suse-12.1" && $defaultmedium->{'mediastyle'} ne "suse-sle11-sp2" ) { die("productdependency must be defined per product for post 12.1 media styles"); } $product_flavors.=createProductDependencyLines($flavor->{'productdependency'}); } foreach my $summary ( @{$product->{'summary'}} ){ $product_flavors.="Summary: $summary->{_content}\n" if ( ! $summary->{'language'} ); } $product_flavors.="\n"; $product_flavors.="%description $flavor->{flavor}\n"; foreach my $description ( @{$product->{'description'}} ){ $product_flavors.="$description->{_content}\n" if ( ! $description->{'description'} ); } $product_flavors.="\n"; $product_flavors.="%files $flavor->{flavor}\n"; $product_flavors.="%defattr(-,root,root)\n"; $product_flavors.="%doc %{_defaultdocdir}/$product->{releasepkgname}-$flavor->{flavor}\n"; $product_flavors.="\n" } return $product_flavors; } sub createSPECfileInstallSection ( $ ) { my ($product) = @_; my $content=""; my $is_main_product=0; if ( $is_main_product ){ my $greeting = $product->{'name'}." ".$product->{'version'}; foreach my $summary ( @{$product->{'summary'}} ){ $greeting = $summary->{'_content'} if ( ! $summary->{'language'} ); } my $content="mkdir -p %{buildroot}/%{_sysconfdir} echo -e 'Welcome to ".$greeting." %{?betaversion:%{betaversion} }- Kernel \\r (\\l).\n\n' > %{buildroot}/etc/issue echo \"Welcome to ".$greeting." %{?betaversion:%{betaversion} }- Kernel %%r (%%t).\" > %{buildroot}/etc/issue.net echo \"#".$greeting." %{?betaversion:%{betaversion} }(%{_target_cpu})\" > %{buildroot}/etc/SuSE-release echo \"VERSION = %{version}\" >> %{buildroot}/etc/SuSE-release\n"; $content.="PATCHLEVEL = ".$product->{'patchlevel'}."\n" if (defined($product->{'patchlevel'})); $content.="mkdir -p %{buildroot}/%{_sysconfdir} echo \"Have a lot of fun...\" > %{buildroot}/etc/motd # Bug 404141 - /etc/YaST/control.xml should be owned by some package mkdir -p %{buildroot}/etc/YaST2/ install -m 644 /CD1/control.xml %{buildroot}/etc/YaST2/ install -m 644 -D /CD1/EULA.txt %{buildroot}/%{_docdir}/%{name}/%{product}-EULA.txt "; } $content="mkdir -p %{buildroot}/etc/products.d"; return $content; } sub writekwd ( $$$ ) { my ($file, $kwdhash, $prefix) = @_; my %kh = %{$kwdhash}; my $str = ""; foreach my $key( sort keys %kh ) { $str .= $key.": +Kwd:\\n".$prefix.$kh{$key}."\\n-Kwd:\n"; } writestr($file, undef, $str); } sub getCpeId ($$) { my ($prodRef, $product) = @_; my $mediaStyle = getMediaStyle($prodRef); return undef if ($mediaStyle =~ /^suse-1[12].[123]$/ || $mediaStyle =~ /^suse-sle11/); my $tag = "/o"; # operating system # is this an add-on product? 
foreach my $flavor ( @{$prodRef->{mediasets}->{media}} ){ # application add-on $tag = "/a" if (defined($flavor->{'productdependency'})); } my $vendor = $product->{vendor}; # SLE people wants sometime short, sometime long version $vendor = "suse" if $product->{vendor} =~ /^SUSE LINUX/; die("Vendor contains non valid chars") if $vendor =~ /[^\-+=\.,0-9:%{}\@#%A-Z_a-z~\200-\377]/s; die("Vendor contains non valid chars") if $vendor =~ /[\/:\.\000-\037]/; my $cpeid_uri = "cpe:".$tag.":".$vendor.":".$product->{name}; if (defined($product->{baseversion})) { $cpeid_uri.=":$product->{baseversion}"; $cpeid_uri.=":sp$product->{patchlevel}" if (defined($product->{patchlevel}) && $product->{patchlevel} ne "0"); } else { $cpeid_uri.=":".$product->{version}; } return lc($cpeid_uri); } sub createOsReleaseFile ($) { my ($prodRef, $product) = @_; my $bugtracker=getUrl($product,"i586","bugtracker"); my $rfile = "\$RPM_BUILD_ROOT/etc/os-release"; my $os_release_file = "mkdir -p \$RPM_BUILD_ROOT/etc\n"; $os_release_file .= "cat >$rfile << EOF\n"; my $name = $product->{'name'}; # shall we strip ^SUSE_ here or better modify the name in SLE 12? $os_release_file .= "NAME=\"".$name."\"\n"; if (defined($product->{baseversion})) { $os_release_file .= "VERSION=\"".$product->{baseversion}; $os_release_file .= "-SP".$product->{patchlevel} if (defined($product->{patchlevel}) && $product->{patchlevel} ne "0"); } else { $os_release_file .= "VERSION=\"%{version}"; } $os_release_file .= "%{?betaversion: }%{?betaversion}\"\n"; $os_release_file .= "VERSION_ID=\"%{version}%{?betaversion?: }%{?betaversion}\"\n"; $os_release_file .= "PRETTY_NAME=\"".$product->{'summary'}[0]->{_content}."\"\n" if $product->{'summary'}; $os_release_file .= "BUG_REPORT_URL=\"".$bugtracker."\"\n" if $bugtracker; $os_release_file .= "ID=\"".lc($name)."\"\n"; $os_release_file .= "ANSI_COLOR=\"0;32\"\n"; my $cpe = getCpeId($prodRef, $product); $os_release_file .= "CPE_NAME=\"$cpe\"\n" if $cpe; $os_release_file .= "EOF\n"; } sub createProductFile ( $$ ) { my ($prodRef, $product) = @_; my $zypp_product_file = ""; my $zypp_product = Storable::dclone($product); # not wanted there delete $zypp_product->{'releasepkgname'}; my $d; my $pfile = "\$RPM_BUILD_ROOT/etc/products.d/$product->{name}.prod"; if (defined($product->{'endoflife'})&& $product->{'endoflife'} ne "" && !($product->{'endoflife'} =~ /^[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]$/)) { die("400 endoflife not in ISO 8601 format (YYYY-MM-DD)"); } my $mediaStyle = getMediaStyle($prodRef); $zypp_product_file = "mkdir -p \$RPM_BUILD_ROOT/etc/products.d\n"; $zypp_product->{'arch'} = '%{_target_cpu}'; # write product architecture during rpm build $zypp_product->{'schemeversion'} = "0"; my $cpe = getCpeId($prodRef, $product); $zypp_product->{'cpeid'} = $cpe if $cpe; $d->{"target"} = $product->{'register'}->{'target'}; $d->{"release"} = $product->{'register'}->{'release'} if defined($product->{'register'}->{'release'}); # < SLE 12 my $special_arch_update_repo; my $special_arch_update_ncc; if ($mediaStyle =~ /^suse-1[12].[123]$/ || $mediaStyle =~ /^suse-sle11/) { # old style, export used repositories for support tool my @r; foreach my $repo ( @{$prodRef->{repositories}{repository} } ) { next if defined($repo->{product_file}) && $repo->{product_file} eq "ignore"; # do only export when build name is specified push @r, { "path" => $repo->{path} } if $repo->{path} =~ /^obs:\/\/(.*)\/(.*)\/(.*)/ && $1 && $2 && $3 ; }; $d->{"repositories"}{"repository"} = \@r; } else { if ($mediaStyle =~ /^suse-sle12/) { 
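    # Sketch of the registration data expected for suse-sle12 media styles,
    # as a product definition fragment (element names follow the hash
    # structure read above, values illustrative):
    #
    #   <register>
    #     <updates>
    #       <repository project="SUSE:Updates:SLE-SERVER:12" name="standard"/>
    #     </updates>
    #   </register>
    #
    # A plain <target> is rejected and <endoflife> must be present, as the
    # checks here enforce.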
die("Define NCC targets instead of target") if $product->{'register'}->{'target'}; # SLE 12 medias require an end-of-life definition die("400 endoflife not defined") unless defined($product->{'endoflife'}); } # convert them to repoids my @r; foreach my $repo ( @{$product->{register}->{updates}->{repository}} ) { my $obsurl = "obsrepository://$obsname/$repo->{project}/$repo->{name}"; if ($repo->{zypp}) { die("400 zypp lacks alias attribute") unless $repo->{zypp}->{alias}; die("400 zypp lacks name attribute") unless $repo->{zypp}->{name}; } if ($repo->{arch}) { push @r, { "repoid" => "___INTERNAL_MARKER___" } unless $special_arch_update_repo; $special_arch_update_repo.="%ifarch $repo->{arch}\n"; $special_arch_update_repo.=" \n"; $special_arch_update_repo.="%endif\n"; next; } push @r, { "repoid" => $obsurl }; }; $d->{"updates"}{"repository"} = \@r; } foreach my $dt ( @{$product->{register}->{updates}->{distrotarget}} ) { $d->{"target"} = "___INTERNAL_NCC_MARKER___"; $special_arch_update_ncc.="%ifarch $dt->{arch}\n" if $dt->{'arch'}; $special_arch_update_ncc.=" $dt->{'_content'}\n"; $special_arch_update_ncc.="%endif\n" if $dt->{'arch'}; next; } $d->{flavor} = $product->{register}->{flavor} if defined($product->{register}->{flavor}); $zypp_product->{'register'} = $d; # Release package number shall go with the package release $zypp_product->{'release'} = $product->{release}; if ($mediaStyle =~ /^suse-11.[123]$/ || $mediaStyle eq "suse-sle11-sp2") { $zypp_product->{'release'} = "%{release}"; } my $xml = XMLout( $BSProductXML::product, $zypp_product ); die ( "ERROR: Unable to create xml for $product->{name} !" ) unless $xml; # replace arch specific parts if ($special_arch_update_repo) { $xml =~ s/.*___INTERNAL_MARKER___.*\n/$special_arch_update_repo/; } if ($special_arch_update_ncc) { $xml =~ s/.*___INTERNAL_NCC_MARKER___.*\n/$special_arch_update_ncc/; } # add header $xml = "\n$xml"; $zypp_product_file .= "cat >$pfile << EOF\n"; $zypp_product_file .= "$xml\nEOF\n\n"; } # Process the commandline arguments. 
getopts('dhm:'); usage() if $opt_h; my ($infile, $outdir, $_project) = @ARGV; $project = $_project; die( "Please specify input file, output directory (and project name)\n" ) unless $infile; die( "Please specify output directory (and project) name\n" ) unless $outdir; my $d; # global indir ($d, $indir) = fileparse( $infile ); my $prodRef = readProductFile( $infile ); # # Sanity checks # die("product definition contains no products\n") unless $prodRef->{'products'}; die("product definition contains multiple products, this is not yet supported \n") if @{$prodRef->{'products'}->{'product'}} > 1; my $product = $prodRef->{'products'}->{'product'}[0]; die("no product name set\n") unless $product->{'name'}; die("illegal product name: $product->{'name'}\n") if $product->{'name'} =~ /^[_\.]/; die("illegal product name: $product->{'name'}\n") if $product->{'name'} =~ /[\/\000-\037]/; die("ERROR: File name does not match to product name ($infile/$prodRef->{'products'}->{'product'}[0]->{'name'}.product)\n") if not $infile =~ /.*\/$prodRef->{'products'}->{'product'}[0]->{'name'}.product$/; die("no support for multi product definitions\n") if $prodRef->{'products'}->{'product'}[1]; my $mediaStyle = getMediaStyle($prodRef); unless ($mediaStyle =~ /^suse-1[12].[123]$/ || $mediaStyle =~ /^suse-sle11/) { die("Do not set id attribute in product anymore.\n") if $prodRef->{'products'}->{'product'}[0]->{'id'}; } $product->{'releasepkgname'} ||= "$product->{'name'}-release"; # # Calculate version strings # die ("It is not allowed to specify baseversion and version.") if (defined($product->{'baseversion'}) && defined($product->{'version'}) ); die("You must use baseversion instead of version when using a patchlevel definition") if (defined($product->{'version'}) && defined($product->{'patchlevel'})); if (defined($product->{'baseversion'})) { $product->{'version'} = $product->{'baseversion'}; $product->{'version'} .= ".".$product->{'patchlevel'} if (defined($product->{'patchlevel'}) && $product->{'patchlevel'} ne '0'); }; # # Check missing information of release package requirement and set defaults if neeeded. # $product->{'installconfig'}->{'releasepackage'} = { 'flag' => "EQ" } if ( not defined($product->{'installconfig'}->{'releasepackage'}) ); $product->{'installconfig'}->{'releasepackage'}->{'flag'} = 'EQ' if ( not defined($product->{'installconfig'}->{'releasepackage'}->{'flag'}) ); $product->{'installconfig'}->{'releasepackage'}->{'name'} = '%{name}' if ( not defined($product->{'installconfig'}->{'releasepackage'}->{'name'}) ); $product->{'installconfig'}->{'releasepackage'}->{'version'} = '%{version}' if ( not defined($product->{'installconfig'}->{'releasepackage'}->{'version'}) ); $product->{'installconfig'}->{'releasepackage'}->{'release'} = '%{release}' if ( not defined($product->{'installconfig'}->{'releasepackage'}->{'release'}) ); # # Create a kiwi configuration for each distribution flavor # my $productRef = $prodRef->{products}->{product}->[0]; # FIXME: Support multiple products. my $kiwiImage = {}; my $name = sprintf( "OBS__%s___%s", $product->{name}, $product->{version} ); $kiwiImage->{name} = $name; $kiwiImage->{description} = createDescription( $productRef ); # so far for all media types identical. 
Now loop over the media types # to create media type specific versions; parseConditionals( $prodRef->{conditionals}->{conditional} ); parseArchsets( $prodRef->{archsets}{archset} ); # # Create $product-release packages # # handle migration case if (defined($product->{migrationtarget})){ my $newpatchlevel = 0; my $productname = $product->{name}."-".$product->{migrationtarget}."-migration"; # enforce correct upgrade product name for my $upgrade ( @{$product->{upgrades}->{upgrade}||[]} ) { $upgrade->{name} = $productname; $upgrade->{product} = $productname; } my $packagename = $productname; $packagename =~ s/\./_/g; mkdir_p( "$outdir/_product:$packagename" ) || die ("Unable to create migration directory\n"); writeMigrationSPECfile( "$outdir/_product:".$packagename."/".$packagename.".spec", $prodRef, $product, $newpatchlevel, $product->{migrationtarget} ); } elsif (defined($product->{patchlevel})){ my $newpatchlevel = $product->{'patchlevel'} + 1; my $productname = $product->{name}."-SP".$newpatchlevel."-migration"; # enforce correct upgrade product name for my $upgrade ( @{$product->{upgrades}->{upgrade}||[]} ) { $upgrade->{name} = $productname; $upgrade->{product} = $productname; } mkdir_p( "$outdir/_product:$productname" ) || die ("Unable to create migration directory\n"); writeMigrationSPECfile( "$outdir/_product:".$productname."/".$productname.".spec", $prodRef, $product, $newpatchlevel, undef); } my $SPECtemplateFile; if ($infile =~ /(.*\/)(.+)$/) { # not using $product->{releasepkgname} for compatibility reasons # instead only using this inside the specfile $SPECtemplateFile = $1."/".$product->{name}."-release.spec"; }; if ( !$SPECtemplateFile || ! -e $SPECtemplateFile ) { if ($infile =~ /(.*\/)(.+)$/) { $SPECtemplateFile = "$1/release.spec"; }; }; if ( ! $SPECtemplateFile || ! -e $SPECtemplateFile ) { $SPECtemplateFile=""; print "No release template file $SPECtemplateFile exists --> generating SPEC file $product->{name}-release.spec automatically\n"; } mkdir_p( "$outdir/_product:$product->{name}-release" ) || die ("Unable to create $outdir\n"); writeProductSPECfile( "$outdir/_product:$product->{name}-release/$product->{name}-release.spec", $SPECtemplateFile, $prodRef, $product ); my $ChangesFile; if ($infile =~ /(.*\/)(.+)$/) { $ChangesFile = $1."/".$product->{name}."-release.changes"; }; if ( !$ChangesFile || ! 
-e $ChangesFile ) { if ($infile =~ /(.*\/)(.+)$/) { $ChangesFile = "$1/release.changes"; }; }; if ( defined($ChangesFile) && -e $ChangesFile ) { my $fn="$outdir/_product:$product->{name}-release/$product->{name}-release.changes"; system( "cp", $ChangesFile, $fn) && die ("Unable to copy changes file $ChangesFile to $fn"); } # # Create kiwi images # my %generalImage = %{$kiwiImage}; my $media = $prodRef->{mediasets}->{media}; if( $opt_m ) { print "Generating only media set $opt_m, due to commandline switch\n"; } foreach my $medium ( @$media ){ my $type = $medium->{type}; my $flavor = $medium->{flavor}; my $productname; my $releasepkgname; my $name = $medium->{name}; if ($medium->{product}) { $productname = $medium->{product}; $releasepkgname = "$medium->{product}-release"; } else { # use global name as fallback $productname = $prodRef->{'products'}->{'product'}[0]->{'name'}; $releasepkgname = $prodRef->{'products'}->{'product'}[0]->{'releasepkgname'}; } # bug compatibility for 11.3 and before (ignoring product name definition per media) if ((not defined $medium->{'mediastyle'}) || $medium->{'mediastyle'} eq "suse-11.1" || $medium->{'mediastyle'} eq "suse-11.2" || $medium->{'mediastyle'} eq "suse-11.3") { $productname = $prodRef->{'products'}->{'product'}[0]->{'name'}; if ($medium->{'product'}) { print "WARNING: own product attribute \"product\" per media is used. This may create inconsistent medias!\n"; $productname = $medium->{'product'}; } } next if( $opt_m && $name ne $opt_m ); # create one kiwi file each for every of the archsets if (defined($medium->{archsets})) { my %supportstates; my @archSets = @{$medium->{archsets}}; foreach my $arch ( @archSets ) { my @archs; my $supportstates; my $kiwi = Storable::dclone(\%generalImage); if (defined($medium->{'mediastyle'}) && $medium->{'mediastyle'} ne "suse-11.1" && $medium->{'mediastyle'} ne "suse-11.2") { $kiwi->{schemaversion} = "4.1"; # new kiwi requires exact this version my $attrs = {image => "product"}; $attrs->{'firmware'} = $medium->{'firmware'} if $medium->{'firmware'}; $attrs->{'hybrid'} = 'true' if (defined($medium->{'run_hybridiso'})) && $medium->{'run_hybridiso'} eq "true"; $kiwi->{preferences}->{type} = [$attrs]; }else{ # the schemE vs. schemA is intended by kiwi! $kiwi->{schemeversion} = "2.4"; # before openSUSE 11.3, until kiwi 3.74 $kiwi->{preferences}->{type} = [{_content => "product"}]; } $kiwi->{preferences}->{version} = "1.0.0"; # hardcoded to fullfill kiwi scheme, real version is defined elsewhere $kiwi->{preferences}->{packagemanager} = "zypper" ; # hardcoded since no other support exist yet. ($kiwi->{instsource}, $supportstates) = createInstsource ( $prodRef, $medium, $arch->{archset} ); if (keys %$supportstates > 0){ %supportstates = %{$supportstates}; foreach my $key( sort keys %supportstates ) { die("Illegal support key $supportstates{$key} for $key\n") unless grep { /^$supportstates{$key}$/ } ( "l3", "l2", "acc", "unsupported" ); } } $kiwi->{repository} = createRepository ( $prodRef ); my $archStr; my @archsets = @{$arch->{archset}}; my @productarch; foreach my $ar ( @archsets ) { if( $archSets{$ar->{'ref'}} ) { my $architecture = "$archSets{$ar->{'ref'}}->{'productarch'}"; $archStr .= "_" if $archStr; $archStr .= "$architecture"; # enable this architecture in scheduler # FIXME: the scheduler arch may have a different name than the rpm archs ! 
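          # For example (illustrative): productarch "x86_64" schedules
          # x86_64 itself, "local" when a local scheduler is configured,
          # and the i586 compat arch; "ppc" additionally schedules ppc64,
          # since ppc uses ppc64 packages in openSUSE.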
push @archs, { 'arch' => $architecture }; push @productarch, $architecture; if ( defined($localarch) ) { # this is only important on a server which supports a "local" scheduler push @archs, { 'arch' => 'local' }; } push @archs, { 'arch' => 'i586' } if ( $architecture eq "x86_64" ); push @archs, { 'arch' => 'i586' } if ( $architecture eq "ia64" ); push @archs, { 'arch' => 'ppc' } if ( $architecture eq "ppc64" ); push @archs, { 'arch' => 'ppc64' } if ( $architecture eq "ppc" ); # ppc is using ppc64 stuff in openSUSE push @archs, { 'arch' => 's390' } if ( $architecture eq "s390x" ); } } # add implicit the release packages to media if ( $kiwi->{instsource}->{repopackages}[0] && (!defined($medium->{'skip_release_package'}) || $medium->{'skip_release_package'} ne "true") ){ my $addarch = join( ",", @productarch ); push @{$kiwi->{instsource}->{repopackages}[0]->{repopackage}}, { "name" => $releasepkgname, addarch => $addarch }; # add the flavor package push @{$kiwi->{instsource}->{repopackages}[0]->{repopackage}}, { "name" => "$releasepkgname-$flavor", addarch => $addarch }; if (%supportstates) { $supportstates{$releasepkgname} ||= 'l3'; $supportstates{"$releasepkgname-$flavor"} ||= 'l3'; } } my $file = "$productname-$type-$flavor-$archStr"; die("illegal kiwi product: $file\n") if $file =~ /^[_\.]/; die("illegal kiwi product: $file\n") if $file =~ /[\/\000-\037]/; my $pkgName = "_product:$file"; my $kiwiDir = "$outdir/$pkgName/"; my $outFile = "$kiwiDir/$file.kiwi"; my $metaFile= "$kiwiDir/_meta"; my $supportFile= "$kiwiDir/$file.kwd"; mkdir_p( $kiwiDir ) || die ("Unable to create $kiwiDir\n"); writexml( "$outFile$$", $outFile, $kiwi, $BSKiwiXML::kiwidesc ); writekwd( $supportFile, \%supportstates, "support_" ) if keys %supportstates > 0; # Create meta file to have a default bcntsynctag if ( $project ) { my $pkgmeta; $pkgmeta->{'name'} = $pkgName; $pkgmeta->{'project'} = $project; $pkgmeta->{'title'} = "KIWI image build" ; $pkgmeta->{'description'} = "Automatically generate from _product" ; $pkgmeta->{'bcntsynctag'} = "_product:".$productname ; writexml( "$metaFile$$", $metaFile, $pkgmeta, $BSXML::pack ); } } } } # end open-build-service-2.9.4/src/backend/bs_publish000077500000000000000000002610561332555733200215020ustar00rootroot00000000000000#!/usr/bin/perl -w # # Copyright (c) 2006, 2007 Michael Schroeder, Novell Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program (see the file COPYING); if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA # ################################################################ # # The Publisher. Create repositories and push them to our mirrors. 
# BEGIN { my ($wd) = $0 =~ m-(.*)/- ; $wd ||= '.'; unshift @INC, "$wd/build"; unshift @INC, "$wd"; } use Digest; use Digest::MD5 (); use Digest::SHA (); use XML::Structured ':bytes'; use XML::Simple (); use POSIX; use Fcntl qw(:DEFAULT :flock); use Data::Dumper; use Storable (); use MIME::Base64; use File::Temp qw/tempfile/; use BSConfiguration; use BSRPC; use BSUtil; use BSDBIndex; use Build; use BSDB; use BSXML; use BSNotify; use BSVerify; use BSStdRunner; use BSUrlmapper; use BSRepServer::Containerinfo; use strict; my $maxchild; my $maxchild_flavor; $maxchild = $BSConfig::publish_maxchild if defined $BSConfig::publish_maxchild; $maxchild_flavor = $BSConfig::publish_maxchild_flavor if defined $BSConfig::publish_maxchild_flavor; my $reporoot = "$BSConfig::bsdir/build"; my $eventdir = "$BSConfig::bsdir/events"; my $extrepodir = "$BSConfig::bsdir/repos"; my $extrepodir_sync = "$BSConfig::bsdir/repos_sync"; my $uploaddir = "$BSConfig::bsdir/upload"; my $rundir = $BSConfig::rundir || $BSConfig::rundir || "$BSConfig::bsdir/run"; my $extrepodb = "$BSConfig::bsdir/db/published"; my $myeventdir = "$eventdir/publish"; my @binsufs = qw{rpm udeb deb pkg.tar.gz pkg.tar.xz}; my $binsufsre = join('|', map {"\Q$_\E"} @binsufs); my @binsufsrsync = map {"--include=*.$_"} @binsufs; my $testmode; =head1 qsystem - secure execution of system calls with output redirection Examples: qsystem('stdout', $tempfile, $decomp, $in); qsystem('chdir', $extrep, 'stdout', 'Packages.new', 'dpkg-scanpackages', '-m', '.', '/dev/null') =cut sub qsystem { my @args = @_; my $pid; local (*RH, *WH); if ($args[0] eq 'echo') { pipe(RH, WH) || die("pipe: $!\n"); } if (!($pid = xfork())) { if ($args[0] eq 'echo') { close WH; open(STDIN, "<&RH"); close RH; splice(@args, 0, 2); } open(STDOUT, ">/dev/null"); if ($args[0] eq 'chdir') { chdir($args[1]) || die("chdir $args[1]: $!\n"); splice(@args, 0, 2); } if ($args[0] eq 'stdout') { open(STDOUT, '>', $args[1]) || die("$args[1]: $!\n"); splice(@args, 0, 2); } eval { exec(@args); die("$args[0]: $!\n"); }; warn($@) if $@; exit 1; } if ($args[0] eq 'echo') { close RH; print WH $args[1]; close WH; } waitpid($pid, 0) == $pid || die("waitpid $pid: $!\n"); return $?; } sub fillpkgdescription { my ($pkg, $extrep, $repoinfo, $name) = @_; my $binaryorigins = $repoinfo->{'binaryorigins'} || {}; my $hit; for my $p (sort keys %$binaryorigins) { next if $p =~ /src\.rpm$/; next unless $p =~ /\/\Q$name\E/; my ($pa, $pn) = split('/', $p, 2); if ($pn =~ /^\Q$name\E-([^-]+-[^-]+)\.[^\.]+\.rpm$/) { $hit = $p; last; } if ($pn =~ /^\Q$name\E_([^_]+)_[^_]+\.u?deb$/) { $hit = $p; last; } } return unless $hit; eval { my $data = Build::query("$extrep/$hit", 'description' => 1); $pkg->{'description'} = str2utf8($data->{'description'}); $pkg->{'summary'} = str2utf8($data->{'summary'}) if defined $data->{'summary'}; }; } ############################################################################################ my @db_sync; my $db_oldsync_read; my $db_sync_append; sub db_pickup { if (-s "$extrepodb.sync") { my $oldsync = BSUtil::retrieve("$extrepodb.sync"); unshift @db_sync, @{$oldsync || []}; } } sub db_open { my ($name) = @_; return undef unless $extrepodb; if (!$db_oldsync_read) { db_pickup(); $db_oldsync_read = 1; } return {'name' => $name, 'index' => "$name/"}; } sub db_updateindex_rel { my ($db, $rem, $add) = @_; push @db_sync, $db->{'name'}, $rem, $add; } sub db_store { my ($db, $k, $v) = @_; push @db_sync, $db->{'name'}, $k, $v; } sub db_sync { return undef unless $extrepodb; db_open('') unless 
$db_oldsync_read; return unless @db_sync; my $data = Storable::nfreeze(\@db_sync); my $ops = @db_sync; for (@db_sync) { $ops += @$_ if $_ && ref($_) eq 'ARRAY'; } my $timeout = $ops / 30; $timeout = 60 if $timeout < 60; my $param = { 'uri' => "$BSConfig::srcserver/search/published", 'request' => 'POST', 'maxredirects' => 3, 'timeout' => $timeout, 'headers' => [ 'Content-Type: application/octet-stream' ], 'data' => $data, }; print " syncing database ($ops ops)\n"; eval { BSRPC::rpc($param, undef, 'cmd=updatedb'); }; if ($@) { warn($@); mkdir_p($1) if $extrepodb =~ /^(.*)\//; if ($db_sync_append) { local *F; BSUtil::lockopen(\*F, '>>', "$extrepodb.sync"); db_pickup(); BSUtil::store("$extrepodb.sync.new$$", "$extrepodb.sync", \@db_sync); close F; @db_sync = (); } else { BSUtil::store("$extrepodb.sync.new", "$extrepodb.sync", \@db_sync); } } else { @db_sync = (); unlink("$extrepodb.sync") unless $db_sync_append; } } ############################################################################################ sub updatebinaryindex { my ($db, $keyrem, $keyadd) = @_; my $index = $db->{'index'}; $index =~ s/\/$//; my @add; for my $key (@{$keyadd || []}) { my $n; if ($key =~ /(?:^|\/)([^\/]+)-[^-]+-[^-]+\.[a-zA-Z][^\/\.\-]*\.rpm$/) { $n = $1; } elsif ($key =~ /(?:^|\/)([^\/]+)_([^\/]*)_[^\/]*\.u?deb$/) { $n = $1; } elsif ($key =~ /(?:^|\/)([^\/]+)-[^-]+-[^-]+-[a-zA-Z][^\/\.\-]*\.pkg\.tar\..z$/) { $n = $1; } else { next; } push @add, ["$index/name", $n, $key]; } my @rem; for my $key (@{$keyrem || []}) { my $n; if ($key =~ /(?:^|\/)([^\/]+)-[^-]+-[^-]+\.[a-zA-Z][^\/\.\-]*\.rpm$/) { $n = $1; } elsif ($key =~ /(?:^|\/)([^\/]+)_([^\/]*)_[^\/]*\.u?deb$/) { $n = $1; } elsif ($key =~ /(?:^|\/)([^\/]+)-[^-]+-[^-]+-[a-zA-Z][^\/\.\-]*\.pkg\.tar\..z$/) { $n = $1; } else { next; } push @rem, ["$index/name", $n, $key]; } db_updateindex_rel($db, \@rem, \@add); } ########################################################################## sub getpatterns { my ($projid) = @_; my $dir; eval { $dir = BSRPC::rpc("$BSConfig::srcserver/source/$projid/_pattern", $BSXML::dir); }; if ($@) { warn($@); return []; } my @ret; my @args; push @args, "rev=$dir->{'srcmd5'}" if $dir->{'srcmd5'} && $dir->{'srcmd5'} ne 'pattern'; for my $entry (@{$dir->{'entry'} || []}) { my $pat; eval { $pat = BSRPC::rpc("$BSConfig::srcserver/source/$projid/_pattern/$entry->{'name'}", undef, @args); # only patterns we can parse, please BSUtil::fromxml($pat, $BSXML::pattern); }; if ($@) { warn(" pattern $entry->{'name'}: $@"); next; } push @ret, {'name' => $entry->{'name'}, 'md5' => $entry->{'md5'}, 'data' => $pat}; } print " fetched ".@ret." 
patterns\n"; return \@ret; } ########################################################################## sub addsizechecksum { my ($filename, $d, $sum) = @_; local *F; open(F, '<', $filename) || return; $d->{'size'} = -s F; my %known = ( 'sha' => 'SHA-1', 'sha1' => 'SHA-1', 'sha256' => 'SHA-256', ); if ($known{$sum}) { my $ctx = Digest->new($known{$sum}); $ctx->addfile(\*F); $d->{'checksum'} = {'type' => $sum, '_content' => $ctx->hexdigest()}; } close F; } sub create_appdata_files { my ($dir, $appdatas) = @_; $appdatas = Storable::dclone($appdatas); print " creating appdata files\n"; my %ids; my %written; mkdir_p("$dir/app-icons"); for my $app (@{$appdatas->{'application'} || []}, @{$appdatas->{'component'} || []}) { for my $icon (@{$app->{'icon'} || []}) { my $iconname = ($icon->{'name'} || [])->[0]; my $filecontent = ($icon->{'filecontent'} || [])->[0]; next unless $iconname && $icon->{'filecontent'}; next if $iconname =~ /\//s; my %files; for my $filecontent (@{$icon->{'filecontent'}}) { if (ref($filecontent)) { next unless $filecontent->{'content'}; $files{$filecontent->{'file'} || $iconname} ||= $filecontent->{'content'}; } else { $files{$iconname} ||= $filecontent if $filecontent; } } next unless %files; my $fn; for my $size (qw(32 48 64 24)) { my @c = grep {/${size}x$size/} sort(keys %files); my @ch = grep {/\/hicolor\//} @c; @c = @ch if @ch; $fn = $c[0]; last if $fn; } $fn ||= (sort(keys %files))[0]; if ($iconname !~ /\./) { next unless $fn =~ /(\.[^\.\/]+)$/; $iconname .= $1; } if (!$written{$iconname}) { writestr("$dir/app-icons/$iconname", undef, decode_base64($files{$fn})); $written{$iconname} = 1; } $icon = { 'type' => 'cached', 'content' => $iconname}; } } unlink("$dir/app-icons.tar"); if (%written) { qsystem('chdir', "$dir/app-icons", 'tar', 'cf', '../app-icons.tar', '.') && die(" app-icons tar failed: $?\n"); BSUtil::cleandir("$dir/app-icons"); } rmdir("$dir/app-icons"); $appdatas->{'version'} ||= '0.6'; my $rootname = @{$appdatas->{'application'} || []} ? 'applications' : 'components'; my $appdatasxml = XML::Simple::XMLout($appdatas, 'RootName' => $rootname, XMLDecl => ''); Encode::_utf8_off($appdatasxml); writestr("$dir/appdata.xml", undef, $appdatasxml); } sub merge_package_appdata { my ($appdatas, $bin, $appdataxml) = @_; my $appdata; eval { $appdata = XML::Simple::XMLin($appdataxml, 'ForceArray' => 1, 'KeepRoot' => 1); }; warn("$bin: $@") if $@; return $appdatas unless $appdata; if ($appdata->{'components'} || $appdata->{'applications'}) { # appstream data as it ought to be $appdata = $appdata->{'components'} || $appdata->{'applications'}; $appdata = $appdata->[0] if ref($appdata) eq 'ARRAY'; # XML::Simple is weird } elsif ($appdata->{'component'} || $appdata->{'application'}) { # bad: just the appdata itself. no version info. assume 0.8 if we have components $appdata->{'version'} ||= '0.8' if $appdata->{'component'}; } else { return $appdatas; # huh? 
} # do some basic checking return $appdatas unless $appdata && ref($appdata) eq 'HASH'; return $appdatas unless $appdata->{'component'} || $appdata->{'application'}; return $appdatas if $appdata->{'component'} && ref($appdata->{'component'}) ne 'ARRAY'; return $appdatas if $appdata->{'application'} && ref($appdata->{'application'}) ne 'ARRAY'; # merge the applications/components if ($appdatas) { if ($appdata->{'version'}) { my $v1 = $appdata->{'version'}; my $v2 = $appdatas->{'version'} || ''; $v1 =~ s/(\d+)/substr("00000000$1", -9)/ge; $v2 =~ s/(\d+)/substr("00000000$1", -9)/ge; $appdatas->{'version'} = $appdata->{'version'} if $v1 gt $v2; } if ($appdata->{'component'} || $appdatas->{'component'}) { $appdatas->{'component'} = delete $appdatas->{'application'} if $appdatas->{'application'}; push @{$appdatas->{'component'}}, @{$appdata->{'component'} || $appdata->{'application'} || []}; } else { push @{$appdatas->{'application'}}, @{$appdata->{'application'} || []}; } } else { $appdatas = $appdata; } $appdatas->{'origin'} = 'appdata' if $appdatas->{'version'} && $appdatas->{'version'} >= 0.8; return $appdatas; } sub createrepo_rpmmd { my ($extrep, $projid, $repoid, $data, $options) = @_; my %options = map {$_ => 1} @{$options || []}; my @repotags = @{$data->{'repotags'} || []}; print " running createrepo\n"; my $createrepo_bin = $BSConfig::createrepo ? $BSConfig::createrepo : 'createrepo'; my $modifyrepo_bin = $BSConfig::modifyrepo ? $BSConfig::modifyrepo : 'modifyrepo'; # cleanup files unlink("$extrep/repodata/repomd.xml.asc"); unlink("$extrep/repodata/repomd.xml.key"); unlink("$extrep/repodata/latest-feed.xml"); unlink("$extrep/repodata/index.html"); my @oldrepodata = ls("$extrep/repodata"); qsystem('rm', '-rf', "$extrep/repodata/repoview") if -d "$extrep/repodata/repoview"; qsystem('rm', '-rf', "$extrep/repodata/.olddata") if -d "$extrep/repodata/.olddata"; qsystem('rm', '-f', "$extrep/repodata/patterns*"); # create generic rpm-md meta data # --update requires a newer createrepo version, tested with version 0.4.10 my @createrepoargs; push @createrepoargs, '--changelog-limit', '20'; push @createrepoargs, map { ('--repo', $_ ) } @repotags; push @createrepoargs, '--content', 'debug' if $data->{'dbgsplit'}; my @legacyargs; if ($options{'legacy'}) { push @legacyargs, '--simple-md-filenames', '--checksum=sha'; } else { # the default in newer createrepos push @legacyargs, '--unique-md-filenames', '--checksum=sha256'; } my @updateargs; # createrepo 0.9.9 defaults to creating the sqlite database. # We do disable it since it is time and space consuming. # doing this via @updateargs for the case that an old createrepo is installed which does not support # this switch. 
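  # With that, the full command assembled for an existing repository looks
  # roughly like (illustrative path):
  #
  #   createrepo -q -c <extrep>/repocache --no-database --update \
  #       --changelog-limit 20 --unique-md-filenames --checksum=sha256 <extrep>
  #
  # and if it fails it is retried once without the @updateargs options.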
push @updateargs, '--no-database'; if (-f "$extrep/repodata/repomd.xml") { push @updateargs, '--update'; } if (qsystem($createrepo_bin, '-q', '-c', "$extrep/repocache", @updateargs, @createrepoargs, @legacyargs, $extrep)) { die(" createrepo failed: $?\n") unless @updateargs; print(" createrepo failed: $?\n"); print " re-running without extra options\n"; qsystem($createrepo_bin, '-q', '-c', "$extrep/repocache", @createrepoargs, @legacyargs, $extrep) && die(" createrepo failed again: $?\n"); } unlink("$extrep/repodata/$_") for grep {/updateinfo\.xml/} @oldrepodata; if (@{$data->{'updateinfos'} || []}) { print " adding updateinfo.xml to repodata\n"; # strip supportstatus and patchinforef from updateinfos my $updateinfos = Storable::dclone($data->{'updateinfos'}); for my $up (@$updateinfos) { delete $up->{'patchinforef'}; for my $cl (@{($up->{'pkglist'} || {})->{'collection'} || []}) { delete $_->{'supportstatus'} for @{$cl->{'package'} || []}; } } writexml("$extrep/repodata/updateinfo.xml", undef, {'update' => $updateinfos}, $BSXML::updateinfo); qsystem($modifyrepo_bin, "$extrep/repodata/updateinfo.xml", "$extrep/repodata", @legacyargs) && die(" modifyrepo failed: $?\n"); unlink("$extrep/repodata/updateinfo.xml"); } unlink("$extrep/repodata/$_") for grep {/appdata\.xml/ || /app-icons/} @oldrepodata; if (%{$data->{'appdatas'} || {}}) { create_appdata_files("$extrep/repodata", $data->{'appdatas'}); if (-e "$extrep/repodata/appdata.xml") { print " adding appdata.xml to repodata\n"; qsystem($modifyrepo_bin, "$extrep/repodata/appdata.xml", "$extrep/repodata", @legacyargs) && die(" modifyrepo failed: $?\n"); unlink("$extrep/repodata/appdata.xml"); } if (-e "$extrep/repodata/app-icons.tar") { print " adding app-icons.tar to repodata\n"; qsystem($modifyrepo_bin, "$extrep/repodata/app-icons.tar", "$extrep/repodata", @legacyargs) && die(" modifyrepo failed: $?\n"); unlink("$extrep/repodata/app-icons.tar"); } } unlink("$extrep/repodata/$_") for grep {/(?:deltainfo|prestodelta)\.xml/} @oldrepodata; if (%{$data->{'deltainfos'} || {}} && ($options{'deltainfo'} || $options{'prestodelta'})) { print " adding deltainfo.xml to repodata\n" if $options{'deltainfo'}; print " adding prestodelta.xml to repodata\n" if $options{'prestodelta'}; # things are a bit complex, as we have to merge the deltas, and we also have to add the checksum my %mergeddeltas; for my $d (values(%{$data->{'deltainfos'}})) { addsizechecksum("$extrep/$d->{'delta'}->[0]->{'filename'}", $d->{'delta'}->[0], $options{'legacy'} ? 
'sha' : 'sha256'); my $mkey = "$d->{'arch'}\0$d->{'name'}\0$d->{'epoch'}\0$d->{'version'}\0$d->{'release'}\0"; if ($mergeddeltas{$mkey}) { push @{$mergeddeltas{$mkey}->{'delta'}}, $d->{'delta'}->[0]; } else { $mergeddeltas{$mkey} = $d; } } # got all, now write my @mergeddeltas = map {$mergeddeltas{$_}} sort keys %mergeddeltas; if ($options{'deltainfo'}) { writexml("$extrep/repodata/deltainfo.xml", undef, {'newpackage' => \@mergeddeltas}, $BSXML::deltainfo); qsystem($modifyrepo_bin, "$extrep/repodata/deltainfo.xml", "$extrep/repodata", @legacyargs) && die(" modifyrepo failed: $?\n"); unlink("$extrep/repodata/deltainfo.xml"); } if ($options{'prestodelta'}) { writexml("$extrep/repodata/prestodelta.xml", undef, {'newpackage' => \@mergeddeltas}, $BSXML::prestodelta); qsystem($modifyrepo_bin, "$extrep/repodata/prestodelta.xml", "$extrep/repodata", @legacyargs) && die(" modifyrepo failed: $?\n"); unlink("$extrep/repodata/prestodelta.xml"); } } if (-d "$extrep/repocache") { my $now = time; for (map { "$extrep/repocache/$_" } ls("$extrep/repocache")) { my @s = stat($_); unlink($_) if @s && $s[9] < $now - 7*86400; } } my $title = $data->{'repoinfo'}->{'title'}; my $downloadurl = BSUrlmapper::get_downloadurl("$projid/$repoid"); if (-x "/usr/bin/repoview") { my @downloadurlarg; @downloadurlarg = ("-u$downloadurl") if $downloadurl; print " running repoview\n"; qsystem('repoview', '-f', @downloadurlarg, "-t$title", $extrep) && print(" repoview failed: $?\n"); } if ($BSConfig::createrepo_rpmmd_hook) { $BSConfig::createrepo_rpmmd_hook->($projid, $repoid, $extrep, \%options, $data); } if ($options{'rsyncable'}) { if (-x '/usr/bin/rezip_repo_rsyncable') { print " re-compressing metadata with --rsyncable\n"; unlink("$extrep/repodata/repomd.xml.asc"); qsystem('/usr/bin/rezip_repo_rsyncable', $extrep) && print(" rezip_repo_rsyncable failed: $?\n"); } else { print " /usr/bin/rezip_repo_rsyncable not installed, ignoring the rsyncable option\n"; } } if ($BSConfig::sign && -e "$extrep/repodata/repomd.xml") { my @signargs; push @signargs, '--project', $projid if $BSConfig::sign_project; push @signargs, @{$data->{'signargs'} || []}; qsystem($BSConfig::sign, @signargs, '-d', "$extrep/repodata/repomd.xml") && die(" sign failed: $?\n"); writestr("$extrep/repodata/repomd.xml.key", undef, $data->{'pubkey'}) if $data->{'pubkey'}; } if ($downloadurl) { local *FILE; open(FILE, '>', "$extrep/$projid.repo$$") || die("$extrep/$projid.repo$$: $!\n"); my $projidHeader = $data->{'dbgsplit'} ? 
"$projid$data->{'dbgsplit'}" : $projid; $projidHeader =~ s/:/_/g; print FILE "[$projidHeader]\n"; print FILE "name=$title\n"; print FILE "type=rpm-md\n"; print FILE "baseurl=$downloadurl\n"; if ($BSConfig::sign) { print FILE "gpgcheck=1\n"; if (-e "$extrep/repodata/repomd.xml.key") { print FILE "gpgkey=${downloadurl}repodata/repomd.xml.key\n"; } else { die("neither a project key is available nor gpg_standard_key is set\n") unless defined($BSConfig::gpg_standard_key); print FILE "gpgkey=$BSConfig::gpg_standard_key\n"; } } print FILE "enabled=1\n"; close(FILE) || die("close: $!\n"); rename("$extrep/$projid.repo$$", "$extrep/$projid.repo") || die("rename $extrep/$projid.repo$$ $extrep/$projid.repo: $!\n"); } } sub deleterepo_rpmmd { my ($extrep, $projid) = @_; qsystem('rm', '-rf', "$extrep/repodata") if -d "$extrep/repodata"; unlink("$extrep/$projid.repo"); } sub createrepo_virtbuilder { my ($extrep, $projid, $repoid, $data) = @_; # cleanup unlink("$extrep/index.key"); unlink("$extrep/index.asc"); # Sign the index if ($BSConfig::sign && -e "$extrep/index") { my @signargs; print "Signing the index for $projid/$repoid\n"; push @signargs, '--project', $projid if $BSConfig::sign_project; push @signargs, @{$data->{'signargs'} || []}; print "Running command: $BSConfig::sign @signargs -c $extrep/index\n"; qsystem($BSConfig::sign, @signargs, '-c', "$extrep/index") && die(" sign failed: $?\n"); writestr("$extrep/index.key", undef, $data->{'pubkey'}) if $data->{'pubkey'}; } } sub createrepo_hdlist2 { my ($extrep, $projid, $repoid, $data, $options) = @_; print " running hdlist2\n"; # create generic rpm-md meta data for my $arch (ls($extrep)) { next if $arch =~ /^\./; my $r = "$extrep/$arch"; next unless -d $r; if (qsystem('genhdlist2', '--allow-empty-media', $r)) { print(" genhdlist2 failed: $?\n"); } } # signing is done only via rpm packages to my information } sub deleterepo_hdlist2 { my ($extrep, $projid) = @_; for my $arch (ls($extrep)) { next if $arch =~ /^\./; my $r = "$extrep/$arch"; next unless -d $r; qsystem('rm', '-rf', "$r/media_info") if -d "$r/media_info"; } } sub createrepo_susetags { my ($extrep, $projid, $repoid, $data, $options) = @_; mkdir_p("$extrep/media.1"); mkdir_p("$extrep/descr"); my @lt = localtime(time()); $lt[4] += 1; $lt[5] += 1900; my $str = sprintf("Open Build Service\n%04d%02d%02d%02d%02d%02d\n1\n", @lt[5,4,3,2,1,0]); writestr("$extrep/media.1/.media", "$extrep/media.1/media", $str); writestr("$extrep/media.1/.directory.yast", "$extrep/media.1/directory.yast", "media\n"); $str = <<"EOL"; PRODUCT Open Build Service $projid $repoid VERSION 1.0-0 LABEL $data->{'repoinfo'}->{'title'} VENDOR Open Build Service ARCH.x86_64 x86_64 i686 i586 i486 i386 noarch ARCH.k1om k1om noarch ARCH.ppc64p7 ppc64p7 noarch ARCH.ppc64 ppc64 ppc noarch ARCH.ppc64le ppc64le noarch ARCH.ppc ppc noarch ARCH.riscv64 riscv64 noarch ARCH.sh4 sh4 noarch ARCH.m68k m68k noarch ARCH.aarch64 aarch64 aarch64_ilp32 noarch ARCH.aarch64_ilp32 aarch64_ilp32 noarch ARCH.armv4l arm armv4l noarch ARCH.armv5l arm armel armv4l armv5l armv5tel noarch ARCH.armv6l arm armel armv4l armv5l armv5tel armv6l armv6vl armv6hl noarch ARCH.armv7l arm armel armv4l armv5l armv5tel armv6l armv6vl armv7l armv7hl noarch ARCH.i686 i686 i586 i486 i386 noarch ARCH.i586 i586 i486 i386 noarch DEFAULTBASE i586 DESCRDIR descr DATADIR . 
EOL writestr("$extrep/.content", "$extrep/content", $str); print " running create_package_descr\n"; qsystem('chdir', $extrep, 'create_package_descr', '-o', 'descr', '-x', '/dev/null') && print " create_package_descr failed: $?\n"; unlink("$extrep/descr/directory.yast"); my @d = map {"$_\n"} sort(ls("$extrep/descr")); writestr("$extrep/descr/.directory.yast", "$extrep/descr/directory.yast", join('', @d)); } sub deleterepo_susetags { my ($extrep) = @_; unlink("$extrep/directory.yast"); unlink("$extrep/content"); unlink("$extrep/media.1/media"); unlink("$extrep/media.1/directory.yast"); rmdir("$extrep/media.1"); qsystem('rm', '-rf', "$extrep/descr") if -d "$extrep/descr"; } sub compress_and_rename { my ($tmpfile, $file) =@_; if (-s $tmpfile) { unlink($file); link($tmpfile, $file); qsystem('gzip', '-9', '-n', '-f', $tmpfile) && print " gzip $tmpfile failed: $?\n"; unlink($tmpfile); unlink("$file.gz"); rename("$tmpfile.gz", "$file.gz"); } else { unlink($tmpfile); unlink($file); unlink("$file.gz"); } } sub createrepo_debian { my ($extrep, $projid, $repoid, $data, $options) = @_; print " running dpkg-scanpackages\n"; if (qsystem('chdir', $extrep, 'stdout', 'Packages.new', 'dpkg-scanpackages', '-m', '.', '/dev/null')) { die(" dpkg-scanpackages failed: $?\n"); } compress_and_rename("$extrep/Packages.new", "$extrep/Packages"); print " running dpkg-scansources\n"; if (qsystem('chdir', $extrep, 'stdout', 'Sources.new', 'dpkg-scansources', '.', '/dev/null')) { die(" dpkg-scansources failed: $?\n"); } compress_and_rename("$extrep/Sources.new", "$extrep/Sources"); createrelease_debian($extrep, $projid, $repoid, $data, $options); my $udebs = "$extrep/debian-installer"; mkdir_p($udebs) unless -d $udebs; if (qsystem('chdir', $udebs, 'stdout', 'Packages.new', 'dpkg-scanpackages', '-t', 'udeb', '-m', '..', '/dev/null')) { die(" dpkg-scanpackages for udebs failed: $?\n"); } compress_and_rename("$udebs/Packages.new", "$udebs/Packages"); if ( -e "$udebs/Packages") { createrelease_debian($udebs, $projid, $repoid, $data, $options); } else { rmdir($udebs); } } sub createrelease_debian { my ($extrep, $projid, $repoid, $data, $options) = @_; my $obsname = $BSConfig::obsname || 'build.opensuse.org'; my $date = POSIX::ctime(time()); $date =~ s/\n//m; my @debarchs = map {Build::Deb::basearch($_)} @{$data->{'repoinfo'}->{'arch'} || []}; my $archs = join(' ', @debarchs); # The Release file enables users to use Pinning. See also: # http://www.debian.org/doc/manuals/repository-howto/repository-howto#release # apt_preferences(5) # # Note: # There is no Version because this is not part of a Debian release (yet). # The Component line is missing to not accidently associate the packages with # a Debian licensing component. 
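  # The Release file written below comes out like (illustrative values for
  # a project "home:user" with repository "Deb_8.0"):
  #
  #   Archive: Deb_8.0
  #   Codename: Deb_8.0
  #   Origin: obs://build.opensuse.org/home:user/Deb_8.0
  #   Label: home:user
  #   Architectures: amd64 i386
  #   Date: Thu Jun  7 10:00:00 2018
  #   Description: home:user Debian repository
  #   MD5Sum:
  #    <md5> <size> Packages
  #    ...
  #
  # which lets apt users pin the repository via its Origin field, e.g. in
  # apt_preferences(5) (illustrative):
  #
  #   Package: *
  #   Pin: release o=obs://build.opensuse.org/home:user/Deb_8.0
  #   Pin-Priority: 700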
my $str = <<"EOL"; Archive: $repoid Codename: $repoid Origin: obs://$obsname/$projid/$repoid Label: $projid Architectures: $archs Date: $date Description: $data->{'repoinfo'}->{'title'} MD5Sum: EOL open(OUT, '>', "$extrep/Release") || die("$extrep/Release: $!\n"); print OUT $str; close(OUT) || die("close: $!\n"); # append checksums my $sha1sums = "SHA1:\n"; my $sha256sums = "SHA256:\n"; open(OUT, '>>', "$extrep/Release") || die("$extrep/Release: $!\n"); for my $f ( "Packages", "Packages.gz", "Sources", "Sources.gz" ) { my @s = stat("$extrep/$f"); next unless @s; my $fdata = readstr("$extrep/$f"); my $md5 = Digest::MD5::md5_hex($fdata); my $size = $s[7]; print OUT " $md5 $size $f\n"; my $sha1 = Digest::SHA::sha1_hex($fdata); $sha1sums .= " $sha1 $size $f\n"; my $sha256 = Digest::SHA::sha256_hex($fdata); $sha256sums .= " $sha256 $size $f\n"; } print OUT $sha1sums; print OUT $sha256sums; close(OUT) || die("close: $!\n"); unlink("$extrep/Release.gpg"); unlink("$extrep/Release.key"); # re-sign changed Release file if ($BSConfig::sign && -e "$extrep/Release") { my @signargs; push @signargs, '--project', $projid if $BSConfig::sign_project; push @signargs, @{$data->{'signargs'} || []}; qsystem($BSConfig::sign, @signargs, '-d', "$extrep/Release") && die(" sign failed: $?\n"); rename("$extrep/Release.asc","$extrep/Release.gpg"); } if ($BSConfig::sign) { writestr("$extrep/Release.key", undef, $data->{'pubkey'}) if $data->{'pubkey'}; } } sub deleterepo_debian { my ($extrep) = @_; unlink("$extrep/Packages"); unlink("$extrep/Packages.gz"); unlink("$extrep/Sources"); unlink("$extrep/Sources.gz"); unlink("$extrep/Release"); unlink("$extrep/Release.gpg"); unlink("$extrep/Release.key"); if (-d "$extrep/debian-installer") { BSUtil::cleandir("$extrep/debian-installer"); rmdir("$extrep/debian-installer"); } } ########################################################################## sub createrepo_arch { my ($extrep, $projid, $repoid, $data, $options) = @_; deleterepo_arch($extrep); my $rname = $projid; $rname .= "_$repoid" if $repoid ne 'standard'; $rname =~ s/:/_/g; for my $arch (ls($extrep)) { next unless -d "$extrep/$arch"; print " running bs_mkarchrepo $arch\n"; qsystem("$INC[0]/bs_mkarchrepo", $rname, "$extrep/$arch") && die(" repo creation failed: $?\n"); if (-e "$extrep/$arch/$rname.db.tar.gz") { link("$extrep/$arch/$rname.db.tar.gz", "$extrep/$arch/$rname.db"); } if (-e "$extrep/$arch/$rname.files.tar.gz") { link("$extrep/$arch/$rname.files.tar.gz", "$extrep/$arch/$rname.files"); } if ($BSConfig::sign) { my @signargs; push @signargs, '--project', $projid if $BSConfig::sign_project; push @signargs, @{$data->{'signargs'} || []}; if (-e "$extrep/$arch/$rname.db.tar.gz") { qsystem($BSConfig::sign, @signargs, '-D', "$extrep/$arch/$rname.db.tar.gz") && die(" sign failed: $?\n"); link("$extrep/$arch/$rname.db.tar.gz.sig", "$extrep/$arch/$rname.db.sig"); } if (-e "$extrep/$arch/$rname.files.tar.gz") { qsystem($BSConfig::sign, @signargs, '-D', "$extrep/$arch/$rname.files.tar.gz") && die(" sign failed: $?\n"); link("$extrep/$arch/$rname.files.tar.gz.sig", "$extrep/$arch/$rname.files.sig"); } writestr("$extrep/$arch/$rname.key", undef, $data->{'pubkey'}) if $data->{'pubkey'}; } } } sub deleterepo_arch { my ($extrep) = @_; for my $arch (ls($extrep)) { next unless -d "$extrep/$arch"; next if $arch eq 'repodata' || $arch eq 'repocache' || $arch eq 'media.1' || $arch eq 'descr'; for (grep {/\.(?:\?db|db\.tar\.gz|files|files\.tar\.gz|key)(?:\.sig)?$/} ls("$extrep/$arch")) { unlink("$extrep/$arch/$_"); } } } 
########################################################################## sub createrepo_staticlinks { my ($extrep, $projid, $repoid, $data, $options) = @_; my $versioned = grep {$_ eq 'versioned'} @{$options || []}; for my $arch ('.', ls($extrep)) { next unless -d "$extrep/$arch"; for (ls("$extrep/$arch")) { my $link; if (/^(.*)-Build\d\d\d\d(-Media\d?(\.license)?)$/s) { $link = "$1$2"; # no support for versioned links } else { next unless -f "$extrep/$arch/$_"; } if (/^(.*)-([^-]*)-[^-]*\.rpm$/s) { $link = "$1.rpm"; $link = "$1-$2.rpm" if $versioned; } elsif (/^(.*)_([^_]*)-[^_]*\.(u?deb)$/s) { $link = "$1.$3"; $link = "${1}_$2.$3" if $versioned; } elsif (/^(.*)-([^-]*)-([^-]*)-([^-]*)\.(AppImage?(\.zsync)?)$/s) { # name version "release.glibcX.Y" arch suffix $link = "$1-latest-$4.$5"; } elsif (/^(.*)_([^_]*)_([^_]*)-Build[^_]*\.snap$/s) { $link = "${1}_${3}.snap"; $link = "${1}_${3}_$2.snap" if $versioned; } elsif (/^(.*)-Build\d\d\d\d(-Media\d)(\.iso?(\.sha256)?)$/s) { # product builds $link = "$1$2$3"; # no support for versioned links } elsif (/^(.*)-(\d+\.\d+\.\d+)?(-\w+)?(\.(?:libvirt|virtualbox))?-Build\d+\..*(\.(raw.install.raw.xz|raw.xz|tar.xz|box|json|install.iso|tbz|tgz|vmx|vmdk|vhdx|vdi|vhdfixed.xz|iso|qcow2|qcow2.xz|ova)?(?:\.sha256)?)$/s) { # kiwi appliance my $profile = $3 || ""; my $box_type = $4 || ""; $link = "$1$profile$box_type$5"; $link = "$1-$2$profile$box_type$5" if $versioned; } next unless $link; unlink("$extrep/$arch/.$link"); # drop left over symlink($_, "$extrep/$arch/.$link"); rename("$extrep/$arch/.$link", "$extrep/$arch/$link"); # atomar update } } } sub deleterepo_staticlinks { my ($extrep) = @_; for my $arch ('.', ls($extrep)) { next unless -d "$extrep/$arch"; for (ls("$extrep/$arch")) { next unless -l "$extrep/$arch/$_"; next if /\.(?:db|files)(?:\.sig)?$/; unlink("$extrep/$arch/$_"); } } } ########################################################################## sub createpatterns_rpmmd { my ($extrep, $projid, $repoid, $data, $options) = @_; deletepatterns_rpmmd($extrep); my $patterns = $data->{'patterns'}; return unless @{$patterns || []}; my $modifyrepo_bin = $BSConfig::modifyrepo ? 
$BSConfig::modifyrepo : 'modifyrepo'; # create patterns data structure my @pats; for my $pattern (@$patterns) { push @pats, BSUtil::fromxml($pattern->{'data'}, $BSXML::pattern); } print " adding patterns to repodata\n"; my $pats = {'pattern' => \@pats, 'count' => scalar(@pats)}; writexml("$extrep/repodata/patterns.xml", undef, $pats, $BSXML::patterns); my @legacyargs; my %options = map {$_ => 1} @{$options || []}; if ($options{'legacy'}) { push @legacyargs, '--simple-md-filenames', '--checksum=sha'; } else { # the default in newer createrepos push @legacyargs, '--unique-md-filenames', '--checksum=sha256'; } qsystem($modifyrepo_bin, "$extrep/repodata/patterns.xml", "$extrep/repodata", @legacyargs) && print(" modifyrepo failed: $?\n"); unlink("$extrep/repodata/patterns.xml"); # for my $pattern (@{$patterns || []}) { # my $pname = "patterns.$pattern->{'name'}"; # $pname =~ s/\.xml$//; # print " adding pattern $pattern->{'name'} to repodata\n"; # writestr("$extrep/repodata/$pname.xml", undef, $pattern->{'data'}); # qsystem('modifyrepo', "$extrep/repodata/$pname.xml", "$extrep/repodata", @legacyargs) && print(" modifyrepo failed: $?\n"); # unlink("$extrep/repodata/$pname.xml"); # } # re-sign changed repomd.xml file if ($BSConfig::sign && -e "$extrep/repodata/repomd.xml") { my @signargs; push @signargs, '--project', $projid if $BSConfig::sign_project; push @signargs, @{$data->{'signargs'} || []}; qsystem($BSConfig::sign, @signargs, '-d', "$extrep/repodata/repomd.xml") && die(" sign failed: $?\n"); } } sub deletepatterns_rpmmd { my ($extrep) = @_; for my $pat (ls("$extrep/repodata")) { next unless $pat =~ /^patterns/; unlink("$extrep/repodata/$pat"); } } sub createpatterns_comps { my ($extrep, $projid, $repoid, $data, $options) = @_; deletepatterns_comps($extrep); my $patterns = $data->{'patterns'}; return unless @{$patterns || []}; my $modifyrepo_bin = $BSConfig::modifyrepo ? 
$BSConfig::modifyrepo : 'modifyrepo'; # create comps data structure my @grps; for my $pattern (@$patterns) { my $pat = BSUtil::fromxml($pattern->{'data'}, $BSXML::pattern); my $grp = { 'id' => $pattern->{'name'} }; for (@{$pat->{'summary'}}) { my $el = { '_content' => $_->{'_content'} }; $el->{'xml:lang'} = $_->{lang} if $_->{'lang'}; push @{$grp->{'name'}}, $el; } for (@{$pat->{'description'}}) { my $el = { '_content' => $_->{'_content'} }; $el->{'xml:lang'} = $_->{'lang'} if $_->{'lang'}; push @{$grp->{'description'}}, $el; } for (@{$pat->{'rpm:requires'}->{'rpm:entry'}}) { push @{$grp->{'packagelist'}->{'packagereq'} }, { '_content' => $_->{'name'}, 'type' => 'mandatory' }; } for (@{$pat->{'rpm:recommends'}->{'rpm:entry'}}) { push @{$grp->{'packagelist'}->{'packagereq'}}, { '_content' => $_->{'name'}, 'type' => 'default' }; } for (@{$pat->{'rpm:suggests'}->{'rpm:entry'}}) { push @{$grp->{'packagelist'}->{'packagereq'}}, { '_content' => $_->{'name'}, 'type' => 'optional' }; } push @grps, $grp; } print " adding comps to repodata\n"; my $comps = {'group' => \@grps}; writexml("$extrep/repodata/group.xml", undef, $comps, $BSXML::comps); my @legacyargs; my %options = map {$_ => 1} @{$options || []}; if ($options{'legacy'}) { push @legacyargs, '--simple-md-filenames', '--checksum=sha'; } else { # the default in newer createrepos push @legacyargs, '--unique-md-filenames', '--checksum=sha256'; } qsystem($modifyrepo_bin, "$extrep/repodata/group.xml", "$extrep/repodata", @legacyargs) && print(" modifyrepo failed: $?\n"); unlink("$extrep/repodata/group.xml"); # re-sign changed repomd.xml file if ($BSConfig::sign && -e "$extrep/repodata/repomd.xml") { my @signargs; push @signargs, '--project', $projid if $BSConfig::sign_project; push @signargs, @{$data->{'signargs'} || []}; qsystem($BSConfig::sign, @signargs, '-d', "$extrep/repodata/repomd.xml") && die(" sign failed: $?\n"); } } sub deletepatterns_comps { my ($extrep) = @_; for my $pat (ls("$extrep/repodata")) { next unless $pat =~ /group.xml/; unlink("$extrep/repodata/$pat"); } } sub createpatterns_ymp { my ($extrep, $projid, $repoid, $data, $options) = @_; deletepatterns_ymp($extrep, $projid, $repoid); my $patterns = $data->{'patterns'}; return unless @{$patterns || []}; my $prp_ext = "$projid/$repoid"; $prp_ext =~ s/:/:\//g; my $patterndb = db_open('pattern'); # get title/description data for all involved projects my $repoinfo = $data->{'repoinfo'}; my %nprojpack; my @nprojids = map {$_->{'project'}} @{$repoinfo->{'prpsearchpath'} || []}; if (@nprojids) { my @args = map {"project=$_"} @nprojids; my $nprojpack = BSRPC::rpc("$BSConfig::srcserver/getprojpack", $BSXML::projpack, 'nopackages', @args); %nprojpack = map {$_->{'name'} => $_} @{$nprojpack->{'project'} || []}; } for my $pattern (@$patterns) { my $ympname = $pattern->{'name'}; $ympname =~ s/\.xml$//; $ympname .= ".ymp"; my $pat = BSUtil::fromxml($pattern->{'data'}, $BSXML::pattern); next if !exists $pat->{'uservisible'}; print " writing ymp for pattern $pat->{'name'}\n"; my $ymp = {}; $ymp->{'xmlns:os'} = 'http://opensuse.org/Standards/One_Click_Install'; $ymp->{'xmlns'} = 'http://opensuse.org/Standards/One_Click_Install'; my $group = {}; $group->{'name'} = $pat->{'name'}; if ($pat->{'summary'}) { $group->{'summary'} = $pat->{'summary'}->[0]->{'_content'}; } if ($pat->{'description'}) { $group->{'description'} = $pat->{'description'}->[0]->{'_content'}; } my @repos; my @sprp = @{$repoinfo->{'prpsearchpath'} || []}; while (@sprp) { my $sprp = shift @sprp; my $sprojid = $sprp->{'project'}; my 
$srepoid = $sprp->{'repository'}; my $r = {}; $r->{'recommended'} = @sprp || !@repos ? 'true' : 'false'; $r->{'name'} = $sprojid; if ($nprojpack{$sprojid}) { $r->{'summary'} = $nprojpack{$sprojid}->{'title'}; $r->{'description'} = $nprojpack{$sprojid}->{'description'}; } my $url = BSUrlmapper::get_downloadurl("$sprojid/$srepoid"); next unless defined $url; $r->{'url'} = $url; push @repos, $r; } $group->{'repositories'} = {'repository' => \@repos }; my @software; for my $entry (@{$pat->{'rpm:requires'}->{'rpm:entry'} || []}) { next if $entry->{'kind'} && $entry->{'kind'} ne 'package'; push @software, {'name' => $entry->{'name'}, 'summary' => "The $entry->{'name'} package", 'description' => "The $entry->{'name'} package."}; fillpkgdescription($software[-1], $extrep, $repoinfo, $entry->{'name'}); } for my $entry (@{$pat->{'rpm:recommends'}->{'rpm:entry'} || []}) { next if $entry->{'kind'} && $entry->{'kind'} ne 'package'; push @software, {'name' => $entry->{'name'}, 'summary' => "The $entry->{'name'} package", 'description' => "The $entry->{'name'} package."}; fillpkgdescription($software[-1], $extrep, $repoinfo, $entry->{'name'}); } for my $entry (@{$pat->{'rpm:suggests'}->{'rpm:entry'} || []}) { next if $entry->{'kind'} && $entry->{'kind'} ne 'package'; push @software, {'recommended' => 'false', 'name' => $entry->{'name'}, 'summary' => "The $entry->{'name'} package", 'description' => "The $entry->{'name'} package."}; fillpkgdescription($software[-1], $extrep, $repoinfo, $entry->{'name'}); } $group->{'software'} = { 'item' => \@software }; $ymp->{'group'} = [ $group ]; writexml("$extrep/.$ympname", "$extrep/$ympname", $ymp, $BSXML::ymp); # write database entry my $ympidx = {'type' => 'ymp'}; $ympidx->{'name'} = $pat->{'name'} if defined $pat->{'name'}; $ympidx->{'summary'} = $pat->{'summary'}->[0]->{'_content'} if $pat->{'summary'};; $ympidx->{'description'} = $pat->{'description'}->[0]->{'_content'} if $pat->{'description'}; $ympidx->{'path'} = $repoinfo->{'prpsearchpath'} if $repoinfo->{'prpsearchpath'}; db_store($patterndb, "$prp_ext/$ympname", $ympidx) if $patterndb; } } sub deletepatterns_ymp { my ($extrep, $projid, $repoid) = @_; my $prp_ext = "$projid/$repoid"; $prp_ext =~ s/:/:\//g; my $patterndb = db_open('pattern'); for my $ympname (ls($extrep)) { next unless $ympname =~ /\.ymp$/; db_store($patterndb, "$prp_ext/$ympname", undef) if $patterndb; unlink("$extrep/$ympname"); } } ########################################################################## sub sync_to_stage { my ($prp, $extdir, $dbgsplit, $isdelete) = @_; my @stageservers; if ($BSConfig::stageserver) { if (ref($BSConfig::stageserver)) { my @s = @{$BSConfig::stageserver}; while (@s) { my ($k, $v) = splice(@s, 0, 2); if ($prp =~ /^$k/) { $v = [ $v ] unless ref $v; @stageservers = @$v; last; } } } else { push @stageservers, $BSConfig::stageserver; } } # sync the parent directory for deletes my $extdirx = $extdir; $extdirx =~ s/\/[^\/]*$// if $isdelete; for my $stageserver (@stageservers) { if ($stageserver =~ /^rsync:\/\//) { print " running rsync to $stageserver at ".localtime(time)."\n"; # rsync with a timeout of 1 hour # sync first just the binaries without deletion of the old ones, afterwards the rest(esp. 
meta data) and cleanup
      qsystem('echo', "$extdirx\0", 'rsync', '-ar0', '--fuzzy', @binsufsrsync, '--include=*/', '--exclude=*', '--timeout', '7200', '--files-from=-', $extrepodir, $stageserver) && die(" rsync failed at ".localtime(time).": $?\n");
      qsystem('echo', "$extdirx\0", 'rsync', '-ar0', '--delete-after', '--exclude=repocache', '--delete-excluded', '--timeout', '7200', '--files-from=-', $extrepodir, $stageserver) && die(" rsync failed at ".localtime(time).": $?\n");
    }
    if ($stageserver =~ /^script:(\/.*)$/) {
      print " running sync script $1 at ".localtime(time)."\n";
      if ($isdelete) {
        qsystem($1, $prp) && die(" sync script failed at ".localtime(time).": $?\n");
      } else {
        qsystem($1, $prp, $extdirx) && die(" sync script failed at ".localtime(time).": $?\n");
      }
    }
  }
  # push a 'done' trigger so that the other mirrors sync this repo
  mkdir_p($extrepodir_sync);
  my $filename = $prp;
  $filename =~ s/\//_/g;
  $filename .= $dbgsplit if $dbgsplit;
  writestr("$extrepodir_sync/.$$:$filename", "$extrepodir_sync/$filename", "$extdir\0");
  if ($BSConfig::stageserver_sync && $BSConfig::stageserver_sync =~ /^rsync:\/\//) {
    print " running trigger rsync to $BSConfig::stageserver_sync at ".localtime(time)."\n";
    # small sync, timeout of 2 minutes
    qsystem('rsync', '-a', '--timeout', '120', "$extrepodir_sync/$filename", $BSConfig::stageserver_sync."/".$filename) && warn(" trigger rsync failed at ".localtime(time).": $?\n");
  }
}
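# ---------------------------------------------------------------------------
# Editor's sketch (all values invented, not shipped configuration): the
# $BSConfig::stageserver structure consumed by sync_to_stage() above is either
# a single rsync url or a flat list of prefix/target pairs; the first entry
# whose prefix matches "$project/$repository" wins, and a target may be one
# destination or an array of destinations handled in order:
#
#   our $stageserver = [
#     'openSUSE:Factory/' => 'rsync://stage.example.com/repos',
#     ''                  => [ 'rsync://mirror.example.com/repos',
#                              'script:/usr/lib/obs/stage-sync' ],
#   ];
#
# 'rsync://...' targets are pushed with rsync; 'script:/...' targets run the
# given script with the prp (and, unless deleting, the directory) as arguments.
# ---------------------------------------------------------------------------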
"$prp$dbgsplit" : $prp, undef) if $repoinfodb; if ($BSConfig::markfileorigins) { for my $f (sort @db_deleted) { my $req = { 'uri' => "$BSConfig::markfileorigins/$prp_ext/$f", 'request' => 'HEAD', 'maxredirects' => 3, 'timeout' => 10, 'ignorestatus' => 1, }; eval { BSRPC::rpc($req, undef, 'cmd=deleted'); }; print " $f: $@" if $@; } } # delete ymps so they get removed from the database deletepatterns_ymp($extrep, $projid, $repoid); # delete everything else qsystem('rm', '-rf', $extrep); if ($extrep =~ /\Q$extrepodir\E\/(.+)$/) { my $extdir = $1; sync_to_stage($prp, $extdir, $dbgsplit, 1); rmdir("$extrepodir/$extdir"); } # also delete the split debug repo deleterepo($projid, $repoid, $repoinfo->{'splitdebug'}) if $repoinfo->{'splitdebug'} && !$dbgsplit; if (!$dbgsplit && $BSConfig::packtrack && (($repoinfo->{'projectkind'} || '') eq 'maintenance_release' || grep {$prp =~ /^$_/} @$BSConfig::packtrack)) { my $packtrack = {}; print " sending binary release tracking notification\n"; BSNotify::notify('PACKTRACK', { project => $projid , 'repo' => $repoid }, Storable::nfreeze([ map { $packtrack->{$_} } sort keys %$packtrack ])); } # delete repoinfo unlink("$reporoot/$prp/:repoinfo") unless $dbgsplit; rmdir("$reporoot/$prp"); } sub publish { my ($projid, $repoid, $dbgsplit, $dbgpacktrack) = @_; my $prp = "$projid/$repoid"; BSUtil::printlog("publishing $prp"); # get info from source server about this project/repository # we specify "withsrcmd5" so that we get the patternmd5. It still # works with "nopackages". my $projpack = BSRPC::rpc("$BSConfig::srcserver/getprojpack", $BSXML::projpack, 'withrepos', 'expandedrepos', 'withsrcmd5', 'nopackages', "project=$projid", "repository=$repoid"); if (!$projpack->{'project'}) { # project is gone deleterepo($projid, $repoid); return; } my $proj = $projpack->{'project'}->[0]; die("no such project $projid\n") unless $proj && $proj->{'name'} eq $projid; if (!$proj->{'repository'}) { # repository is gone deleterepo($projid, $repoid); return; } my $repo = $proj->{'repository'}->[0]; die("no such repository $repoid\n") unless $repo && $repo->{'name'} eq $repoid; # this is the already expanded path as we used 'expandedrepos' above my $prpsearchpath = $repo->{'path'}; # we need the config for repotype/patterntype my $config = BSRPC::rpc("$BSConfig::srcserver/getconfig", undef, "project=$projid", "repository=$repoid"); $config = Build::read_config('noarch', [ split("\n", $config) ]); if (!@{$config->{'repotype'} || []}) { # guess repotype from binarytype my $binarytype = $config->{'binarytype'} || ''; my $repotype; $repotype = 'rpm-md' if $binarytype eq 'rpm'; $repotype = 'debian' if $binarytype eq 'deb'; $repotype = 'arch' if $binarytype eq 'arch'; $repotype ||= 'rpm-md'; $config->{'repotype'} = [ $repotype ]; } my %repotype; for (@{$config->{'repotype'} || []}) { if (/^(.*?):(.*)$/) { $repotype{$1} = [ split(':', $2) ]; } else { $repotype{$_} = []; } } $dbgsplit ||= '' if $repotype{'splitdebug'} && $repotype{'splitdebug'}->[0]; my $archorder; $archorder = $repotype{'archorder'} if $repotype{'archorder'}; # is there a special subdirectory for binary packages configured? 
  # is there a special subdirectory for binary packages configured?
  my $subdir = '';
  if ($repotype{'packagesubdir'} && $repotype{'packagesubdir'}->[0]) {
    $subdir = $repotype{'packagesubdir'}->[0];
    BSVerify::verify_filename($subdir);
  }
  my $extrep = BSUrlmapper::get_extrep($prp);
  return unless $extrep;
  my $prp_ext = $prp;
  $prp_ext =~ s/:/:\//g;
  $extrep .= $dbgsplit if $dbgsplit;
  $prp_ext .= $dbgsplit if $dbgsplit;
  # get us the lock
  local *F;
  open(F, '>', "$reporoot/$prp/.finishedlock") || die("$reporoot/$prp/.finishedlock: $!\n");
  if (!flock(F, LOCK_EX | LOCK_NB)) {
    print " waiting for lock...\n";
    flock(F, LOCK_EX) || die("flock: $!\n");
    print " got the lock...\n";
  }
  # we now know that $reporoot/$prp/*/:repo will not change.
  # Build repo by mixing all architectures.
  my @archs = @{$repo->{'arch'} || []};
  my %bins;
  my %bins_id;
  my $binaryorigins = {};
  my @updateinfos;
  my $updateinfos_state;
  my $appdatas;
  my $appdatas_state;
  my %appdatas_seen;
  my %deltas; # XXX remove hack
  my %deltainfos;
  my $deltainfos_state;
  my %kiwireport; # store collected report (under the original name)
  my %kiwimedium; # maps published name to original name
  my $kiwiindex = ''; # store collected index parts
  my $notary_uploads = {};
  if ($archorder) {
    my %archorder = map {$_ => 1} @$archorder;
    my %archs = map {$_ => 1} @archs;
    # last one wins in code below
    @archs = ((grep {!$archorder{$_}} @archs), (grep {$archs{$_}} reverse(@$archorder)));
  }
  # drop the entire repo if the source :repo's have disappeared altogether. We need to find a way
  # to specify this explicitly instead of checking all :repo's.
  my $found_repo;
  for my $arch (@archs) {
    my $r = "$reporoot/$prp/$arch/:repo";
    $found_repo = 1 if -e $r;
  }
  if (!defined($found_repo)) {
    deleterepo($projid, $repoid);
    return;
  }
  if ($BSConfig::publisher_compile_content_hook && $BSConfig::publisher_compile_content_hook->{$prp}) {
    my $hook = $BSConfig::publisher_compile_content_hook->{$prp};
    $hook = [ $hook ] unless ref $hook;
    print " calling publish compile hook @$hook\n";
    qsystem(@$hook, $prp) && warn(" @$hook failed: $?\n");
    my $r = "${reporoot}.add/$prp";
    for my $rbin (sort(ls($r))) {
      my $p = "iso/$rbin";
      my @s = stat("$r/$rbin");
      $bins{$p} = "$r/$rbin";
      $bins_id{$p} = "$s[9]/$s[7]/$s[1]";
    }
  }
  for my $arch (@archs) {
    my $r = "$reporoot/$prp/$arch/:repo";
    my $repoinfo = {};
    if (-s "${r}info") {
      $repoinfo = BSUtil::retrieve("${r}info") || {};
    }
    $repoinfo->{'binaryorigins'} ||= {};
    for my $rbin (sort(ls($r))) {
      my $bin = $rbin;
      if ($bin =~ /:updateinfo.xml$/) {
        # collect updateinfo data
        my $updateinfoxml = readstr("$r/$bin", 1) || '';
        $updateinfos_state .= Digest::MD5::md5_hex($updateinfoxml);
        my $updateinfo = readxml("$r/$bin", $BSXML::updateinfo, 1) || {};
        push @updateinfos, @{$updateinfo->{'update'} || []};
      }
      if ($bin =~ /[-.]appdata.xml$/) {
        # collect application data
        my $appdataxml = readstr("$r/$bin", 1) || '';
        my $appdatamd5 = Digest::MD5::md5_hex($appdataxml);
        next if $appdatas_seen{$appdatamd5};
        $appdatas_seen{$appdatamd5} = 1;
        $appdatas_state .= $appdatamd5;
        $appdatas = merge_package_appdata($appdatas, "$arch/:repo/$bin", $appdataxml);
      }
      if ($bin =~ /^(.*\.rpm)::(.*\.drpm)$/) {
        # special drpm handling: only take it if we took the corresponding rpm
        if ($bin =~ /^(.+-[^-]+-[^-]+\.([a-zA-Z][^\/\.\-]*)\.rpm)::(.*\.drpm)$/) {
          if ($bins{$subdir ? "$subdir/$2/$1" : "$2/$1"} eq "$r/$1") {
            # ok, took it. 
also take delta $bin = $3; push @{$deltas{"$r/$1"}}, "$r/$rbin"; } } } $bin =~ s/^.*?:://; # strip package name for now #next unless $bin =~ /\.(?:$binsufsre)$/; my $p; if ($bin =~ /^.+-[^-]+-[^-]+\.([a-zA-Z][^\/\.\-]*)\.d?rpm$/) { $p = "$1/$bin"; $p = $1 eq 'src' || $1 eq 'nosrc' ? "SRPMS/$bin" : "RPMS/$bin" if $repotype{'resarchhack'}; } elsif ($bin =~ /^.+_[^_]+_([^_\.]+)\.u?deb$/) { $p = "$1/$bin"; } elsif ($bin =~ /\.(?:AppImage|AppImage.zsync|snap|exe)?$/) { $p = "$bin"; } elsif ($bin =~ /\.d?rpm$/) { # legacy format my $q = Build::query("$r/$rbin", 'evra' => 1); next unless $q; $p = "$q->{'arch'}/$q->{'name'}-$q->{'version'}-$q->{'release'}.$q->{'arch'}.rpm"; } elsif ($bin =~ /\.deb$/) { # legacy format XXX no udeb handling my $q = Build::query("$r/$rbin", 'evra' => 1); $p = "$q->{'arch'}/$q->{'name'}_$q->{'version'}"; $p .= "-$q->{'release'}" if defined $q->{'release'}; $p .= "_$q->{'arch'}.deb"; } elsif ($bin =~ /\.(?:pkg\.tar\.gz|pkg\.tar\.xz)(?:\.sig)?$/) { # special arch linux handling $p = "$arch/$bin"; $p = "i686/$bin" if $arch eq 'i586'; # HACK } elsif ($bin =~ /\.(?:$binsufsre)$/) { # our default my $q = Build::query("$r/$rbin", 'evra' => 1); next unless $q && defined($q->{'arch'}); $p = "$q->{'arch'}/$bin"; } else { if ($bin =~ /\.iso(?:\.sha256)?$/) { $p = "iso/$bin"; $kiwimedium{$p} = $1 if $bin =~ /(.+)\.iso$/; } elsif ($bin =~ /ONIE\.bin(?:\.sha256)?$/) { $p = "onie/$bin"; $kiwimedium{$p} = $1 if $bin =~ /(.+)ONIE\.bin$/; } elsif ($bin =~ /\.raw(?:\.install)?(?:\.(?:gz|bz2|xz))?(?:\.sha256)?$/) { $p = "$bin"; } elsif ($bin =~ /(.*-Build\d.*)\.(?:tbz|tgz|tar|tar\.gz|tar\.bz2|tar\.xz)(\.sha256)?$/) { # kiwi case $kiwimedium{$bin} = $1 if !$2 && -e "$r/$1.packages"; if ($BSConfig::publish_containers && !$2 && -e "$r/$1.containerinfo") { upload_container($r, "$1.containerinfo", $bin, $projid, $repoid, $arch, $notary_uploads); next; } $p = "$bin"; } elsif ($bin =~ /(.*)\.tar(?:\.(?:gz|bz2|xz))?(\.sha256)?$/) { # Dockerfile case $kiwimedium{$bin} = $1 if !$2 && -e "$r/$1.packages"; if ($BSConfig::publish_containers && !$2 && -e "$r/$1.containerinfo") { upload_container($r, "$1.containerinfo", $bin, $projid, $repoid, $arch, $notary_uploads); next; } $p = "$bin" unless $bin =~ /-(?:appstream|desktopfiles|polkitactions|mimetypes)\.tar/; } elsif ($bin =~ /\.(?:tgz|zip)?(?:\.sha256)?$/) { # esp. for Go builds $p = "$bin"; } elsif ($bin =~ /\.squashfs$/) { $p = "$bin"; # for simpleimage builds } elsif ($bin =~ /\.diff\.(?:gz)(?:\.sha256)?$/) { $p = "$bin"; } elsif ($bin =~ /\.dsc(?:\.sha256)?$/) { $p = "$bin"; } elsif ($bin =~ /\.(?:box|json|ovf|qcow2|qcow2\.xz|vdi|vhdfixed.xz|vmx|vmdk|vhdx)(?:\.sha256)?$/) { $p = "$bin"; } elsif ($bin =~ /\.packages$/) { $p = "$bin"; } elsif ($bin =~ /^(.*)\.report$/) { # collect kiwi reports $kiwireport{$1} = readxml("$r/$rbin", $BSXML::report, 1); next; } elsif ($bin =~ /^(.*)\.index$/) { # collect virt-builder index parts $kiwiindex .= readstr("$r/$bin", 1) . 
"\n"; next; } elsif (-d "$r/$rbin") { $p = "repo/$bin"; if ($repotype{'slepool'}) { # HACK: do fancy sle repo renaming my $name = $repotype{'slepool'}->[0] || 'product'; $p = $bin; if ($name eq 'nobuildid') { $p = "repo/$bin"; $p =~ s/-Build[\d\.]+-/-/; } elsif ($bin =~ /.*-Media1(\.license|)$/) { $p = "$name$1"; } elsif ($bin =~ /-Media3$/) { $p = "${name}_debug"; } elsif ($bin =~ /-Media2$/) { my $rbin3 = $rbin; $rbin3 =~ s/2$/3/; if (-d "$r/$rbin3") { $p = "${name}_source"; # 3 media available, 2 is source } else { $p = "${name}_debug"; # source is on media 1, 2 is debug } } $p = $bin if $kiwimedium{$p}; # what??? } $kiwimedium{$p} = $bin; } else { next; } } next unless defined $p; $p = "$subdir/$p" if $subdir; # next if $bins{$p}; # first arch wins my @s = stat("$reporoot/$prp/$arch/:repo/$rbin"); next unless @s; if ($bins{$p}) { if (!$archorder) { # keep old file (FIXME: should do this different) my @s2 = stat("$extrep/$p"); next if !@s2 || "$s[9]/$s[7]/$s[1]" ne "$s2[9]/$s2[7]/$s2[1]"; } # replace already taken binary. kill taken deltas again for my $d (@{$deltas{"$r/$rbin"} || []}) { for my $dp (grep {$bins{$_} eq $d} keys %bins) { delete $bins{$dp}; delete $bins_id{$dp}; delete $binaryorigins->{$dp}; delete $deltainfos{$dp}; } } } $bins{$p} = "$r/$rbin"; $bins_id{$p} = "$s[9]/$s[7]/$s[1]"; $binaryorigins->{$p} = $repoinfo->{'binaryorigins'}->{$rbin} if defined $repoinfo->{'binaryorigins'}->{$rbin}; if ($rbin =~ /^(.*)\.drpm$/) { # we took a delta rpm. collect desq if possible my $dseq = "$r/$1.dseq"; if (-s $dseq) { my %dseq; for (split("\n", readstr($dseq, 1) || '')) { $dseq{$1} = $2 if /^(.*?): (.*)$/s; } my @needed = qw{Name Epoch Version Release Arch OldName OldEpoch OldVersion OldRelease OldArch Seq}; if (!grep {!exists($dseq{$_})} @needed) { # got all required fields. convert to correct data my $dinfo = {'name' => $dseq{'Name'}, 'epoch' => $dseq{'Epoch'} || 0, 'version' => $dseq{'Version'}, 'release' => $dseq{'Release'}, 'arch' => $dseq{'Arch'}}; $dinfo->{'delta'} = [ {'oldepoch' => $dseq{'OldEpoch'} || 0, 'oldversion' => $dseq{'OldVersion'}, 'oldrelease' => $dseq{'OldRelease'}, 'filename' => $p, 'sequence' => $dseq{'Seq'}} ]; $deltainfos{$p} = $dinfo; } } } } } # calculate deltainfos_state if (%deltainfos) { $deltainfos_state = ''; for my $p (sort keys %deltainfos) { my @s = stat($bins{$p}); my $id = "$s[9]/$s[7]/$s[1]"; if ($bins{$p} =~ /^(.*)\.drpm$/) { @s = stat("$1.dseq"); $id .= "/$s[9]/$s[7]/$s[1]"; } $deltainfos_state .= Digest::MD5::md5_hex($id); } } # do debug filtering if requested if (defined($dbgsplit)) { if ($dbgsplit) { for my $p (keys %bins) { next if $p =~ /-debug(?:info|source)-.*rpm$/; delete $bins{$p}; delete $deltainfos{$p}; } } else { for my $p (keys %bins) { next unless $p =~ /-debug(?:info|source)-.*rpm$/; delete $bins{$p}; delete $deltainfos{$p}; } } } # now update external repository my $changed = 0; my @db_deleted; # for published db update my @db_changed; # for published db update my @changed; # all changed files for hooks. my %bins_done; @archs = sort(ls($extrep)); if ($subdir) { @archs = map {$_ eq $subdir ? 
sort(map {"$subdir/$_"} ls("$extrep/$subdir")) : $_} @archs; } for my $arch (@archs) { next if $arch =~ /^\./; next if $arch eq 'repodata' || $arch eq 'repocache' || $arch eq 'media.1' || $arch eq 'descr'; next if $arch =~ /\.repo$/; next if $arch eq 'Packages' || $arch eq 'Packages.gz' || $arch eq 'Sources' || $arch eq 'Sources.gz' || $arch eq 'Release' || $arch eq 'Release.gz' || $arch eq 'Release.key'; my $r = "$extrep/$arch"; if (-f $r) { $r = $extrep; my $bin = $arch; my $p = $arch; my @s = lstat("$r/$bin"); if (!exists($bins{$p})) { print " - $p\n"; unlink("$r/$bin") || die("unlink $r/$bin: $!\n"); push @db_deleted, $p if $p =~ /\.(?:$binsufsre)$/; $changed = 1; next; } if ("$s[9]/$s[7]/$s[1]" ne $bins_id{$p}) { unlink("$r/$bin") || die("unlink $r/$bin: $!\n"); link($bins{$p}, "$r/$bin") || die("link $bins{$p} $r/$bin: $!\n"); push @db_changed, $p if $p =~ /\.(?:$binsufsre)$/; push @changed, $p; $changed = 1; } $bins_done{$p} = 1; next; } if ($bins{$arch}) { my @s = lstat($r); die("$r: $!\n") unless @s; next if "$s[9]/$s[7]/$s[1]" eq $bins_id{$arch}; if (-d _) { if (! -l $bins{$arch} && -d _) { my $info1 = BSUtil::treeinfo($bins{$arch}); my $info2 = BSUtil::treeinfo($r); if (join(',', @$info1) eq join(',', @$info2)) { $bins_done{$arch} = 1; next; } print " ! $arch\n"; BSUtil::cleandir($r); rmdir($r) || die("rmdir $r: $!\n"); } if (! -l $bins{$arch} && -d _) { BSUtil::linktree($bins{$arch}, $r); } else { link($bins{$arch}, $r) || die("link $bins{$arch} $r: $!\n"); } push @db_changed, $arch if $arch =~ /\.(?:$binsufsre)$/; push @changed, $arch; $changed = 1; $bins_done{$arch} = 1; next; } } next unless -d $r; for my $bin (sort(ls($r))) { my $p = "$arch/$bin"; my @s = lstat("$r/$bin"); die("$r/$bin: $!\n") unless @s; if (!exists($bins{$p})) { print " - $p\n"; if (-d _) { BSUtil::cleandir("$r/$bin"); rmdir("$r/$bin") || die("rmdir $r/$bin: $!\n"); } else { unlink("$r/$bin") || die("unlink $r/$bin: $!\n"); } push @db_deleted, $p if $p =~ /\.(?:$binsufsre)$/; $changed = 1; next; } if ("$s[9]/$s[7]/$s[1]" ne $bins_id{$p}) { # changed, link over if (-d _) { if (! -l $bins{$p} && -d _) { # both are directories, compare info # should MIX instead? my $info1 = BSUtil::treeinfo($bins{$p}); my $info2 = BSUtil::treeinfo("$r/$bin"); if (join(',', @$info1) eq join(',', @$info2)) { $bins_done{$p} = 1; next; } } print " ! $p\n"; BSUtil::cleandir("$r/$bin"); rmdir("$r/$bin") || die("rmdir $r/$bin: $!\n"); } else { print " ! $p\n"; unlink("$r/$bin") || die("unlink $r/$bin: $!\n"); } if (! -l $bins{$p} && -d _) { BSUtil::linktree($bins{$p}, "$r/$bin"); } else { link($bins{$p}, "$r/$bin") || die("link $bins{$p} $r/$bin: $!\n"); } push @db_changed, $p if $p =~ /\.(?:$binsufsre)$/; push @changed, $p; $changed = 1; } $bins_done{$p} = 1; } } for my $p (sort keys %bins) { next if $bins_done{$p}; # a new one my ($arch, $bin); if ($p =~ /^(.*)\/([^\/]*)$/s) { ($arch, $bin) = ($1, $2); } else { ($arch, $bin) = ('.', $p); } my $r = "$extrep/$arch"; mkdir_p($r) unless -d $r; print " + $p\n"; if (! 
-l $bins{$p} && -d _) { BSUtil::linktree($bins{$p}, "$r/$bin"); } else { link($bins{$p}, "$r/$bin") || die("link $bins{$p} $r/$bin: $!\n"); } push @db_changed, $p if $p =~ /\.(?:$binsufsre)$/; push @changed, $p; $changed = 1; } # Write the kiwi index file if we got it if ($kiwiindex) { my $oldkiwiindex = readstr("$extrep/index", 1) || ''; if ($oldkiwiindex ne $kiwiindex) { writestr("$extrep/index", undef, $kiwiindex); push @changed, "$extrep/index"; $changed = 1; } } close F; # release repository lock my $title = $proj->{'title'} || $projid; $title .= " ($repoid)"; $title =~ s/\n/ /sg; my $state; $state = $proj->{'patternmd5'} || ''; $state .= "\0".join(',', @{$config->{'repotype'} || []}) if %bins; $state .= "\0".($proj->{'title'} || '') if %bins; $state .= "\0".join(',', @{$config->{'patterntype'} || []}) if $proj->{'patternmd5'}; $state .= "\0".join('/', map {"$_->{'project'}/$_->{'repository'}"} @{$prpsearchpath || []}) if $proj->{'patternmd5'}; $state .= "\0".$updateinfos_state if $updateinfos_state; $state .= "\0".$appdatas_state if $appdatas_state; $state .= "\0".$deltainfos_state if $deltainfos_state; $state = Digest::MD5::md5_hex($state) if $state ne ''; # get us the old repoinfo, so we can compare the state my $repoinfo = {}; my $packtrackcache; if (-s "$reporoot/$prp/:repoinfo") { $repoinfo = BSUtil::retrieve("$reporoot/$prp/:repoinfo") || {}; $packtrackcache = $repoinfo->{'trackercache'} if $repoinfo->{'trackercache'} && ($repoinfo->{'trackercacheversion'} || '') eq '1'; } if (($repoinfo->{'state'} || '') ne $state) { $changed = 1; } if (($repoinfo->{'splitdebug'} || '') ne (($repotype{'splitdebug'} || [])->[0] || '')) { deleterepo($projid, $repoid, $repoinfo->{'splitdebug'}) if $repoinfo->{'splitdebug'}; $changed = 1; } $changed = 1 if %$notary_uploads; # FIXME if (!$changed && !$dbgsplit) { print " nothing changed\n"; return; } mkdir_p($extrep) unless -d $extrep; # get sign key my $signargs = []; my $signkey = BSRPC::rpc("$BSConfig::srcserver/getsignkey", undef, "project=$projid", "withpubkey=1", "autoextend=1", "withalgo=1"); my $pubkey; my $algo; if ($signkey) { ($signkey, $pubkey) = split("\n", $signkey, 2); $algo = $1 if $signkey && $signkey =~ s/^(\S+)://; mkdir_p("$uploaddir"); writestr("$uploaddir/publisher.$$", undef, $signkey); $signargs = [ '-P', "$uploaddir/publisher.$$" ]; push @$signargs, '-h', 'sha256' if $algo && $algo eq 'rsa'; undef $pubkey unless $pubkey && length($pubkey) > 2; # not a valid pubkey } if (!$pubkey) { if ($BSConfig::sign_project && $BSConfig::sign) { local *S; open(S, '-|', $BSConfig::sign, '--project', $projid, '-p') || die("$BSConfig::sign: $!\n");; $pubkey = ''; 1 while sysread(S, $pubkey, 4096, length($pubkey)); if (!close(S)) { print "sign -p failed: $?\n"; $pubkey = undef; } } elsif ($BSConfig::keyfile) { if (-e $BSConfig::keyfile) { $pubkey = readstr($BSConfig::keyfile); } else { print "WARNING: configured sign key $BSConfig::keyfile does not exist\n"; } } } # do notary uploads if (%$notary_uploads) { upload_to_notary($projid, $notary_uploads, $signargs, $pubkey); } # get all patterns my $patterns = []; if ($proj->{'patternmd5'}) { $patterns = getpatterns($projid); } # collect packtrack data (if needed) my $packtrack; if ($BSConfig::packtrack && (($proj->{'kind'} || '') eq 'maintenance_release' || grep {$prp =~ /^$_/} @$BSConfig::packtrack)) { $packtrack = $dbgpacktrack || {}; my %cache = @{$packtrackcache || []}; for my $bin (sort keys %bins) { if ($bin =~ /\.(?:$binsufsre)$/) { my $res; my @s = stat("$extrep/$bin"); next unless @s; my 
$c = $cache{"$bin/$s[9]/$s[7]/$s[1]"}; if ($c) { my @d = qw{arch name epoch version release disturl buildtime}; $res = {}; for (@$c) { my $dd = shift @d; $res->{$dd} = $_ if defined $_; } } else { eval { $res = Build::query("$extrep/$bin", 'evra' => 1, 'buildtime' => 1, 'disturl' => 1); }; next unless $res; } my $pt = { 'project' => $projid, 'repository' => $repoid, }; $pt->{'arch'} = $1 if $bins{$bin} =~ /^\Q$reporoot\E\/\Q$prp\E\/([^\/]+)\//; $pt->{'package'} = $binaryorigins->{$bin} if $binaryorigins->{$bin}; for (qw{name epoch version release disturl buildtime}) { $pt->{$_} = $res->{$_} if defined $res->{$_}; } $pt->{'binaryarch'} = $res->{'arch'} if defined $res->{'arch'}; $pt->{'id'} = "$bin/$s[9]/$s[7]/$s[1]"; $packtrack->{$bin} = $pt; } elsif ($kiwimedium{$bin} && $kiwireport{$kiwimedium{$bin}}) { my $medium = $bin; $medium =~ s/.*\///; # basename for my $kb (@{$kiwireport{$kiwimedium{$bin}}->{'binary'} || []}) { my $pt = { %$kb }; delete $pt->{'_content'}; $pt->{'medium'} = $medium; my $fn = ''; $fn .= "/".(defined($pt->{$_}) ? $pt->{$_} : '') for qw{binaryarch name epoch version release}; $packtrack->{"$medium$fn"} = $pt; } } } # argh, find patchinforef and put it in update if (@updateinfos) { for my $up (@updateinfos) { for my $cl (@{($up->{'pkglist'} || {})->{'collection'} || []}) { for my $pkg (@{$cl->{'package'} || []}) { my $pn = ($subdir ? "$subdir/" : '') . "$pkg->{'arch'}/$pkg->{'filename'}"; if ($packtrack->{$pn}) { $packtrack->{$pn}->{'patchinforef'} = $up->{'patchinforef'} if $up->{'patchinforef'}; $packtrack->{$pn}->{'updateinfoid'} = $up->{'id'}; $packtrack->{$pn}->{'updateinfoversion'} = $up->{'version'}; # XXX: do this in hook? my $supportstatus = $pkg->{'supportstatus'}; # workaround for broken code11 imports if ($supportstatus) { $supportstatus =~ s/^support_//; $packtrack->{$pn}->{'supportstatus'} = $supportstatus; } } } } } } } undef $packtrackcache; # create and store the new repoinfo $repoinfo = { 'prpsearchpath' => $prpsearchpath, 'binaryorigins' => $binaryorigins, 'title' => $title, 'state' => $state, }; $repoinfo->{'projectkind'} = $proj->{'kind'} if $proj->{'kind'}; $repoinfo->{'arch'} = $repo->{'arch'} if $repo->{'arch'}; $repoinfo->{'splitdebug'} = $repotype{'splitdebug'}->[0] if defined $dbgsplit; $repoinfo->{'subdir'} = $subdir if $subdir; $repoinfo->{'base'} = $repo->{'base'} if $repo->{'base'}; # store repoinfo on disk if (!$dbgsplit) { if ($state ne '') { BSUtil::store("$reporoot/$prp/.:repoinfo", "$reporoot/$prp/:repoinfo", $repoinfo); } else { unlink("$reporoot/$prp/:repoinfo"); } } # do debug filtering if requested if (defined($dbgsplit)) { for (keys %$binaryorigins) { delete $binaryorigins->{$_} unless $bins{$_}; } if (@updateinfos) { my $dbgsplittype = ($repotype{'splitdebug'}->[1]) || 'mainupdateinfo'; @updateinfos = () if $dbgsplit; if ($dbgsplittype ne 'mainupdateinfo' && !$dbgsplit) { for my $up (@updateinfos) { for my $cl (@{($up->{'pkglist'} || {})->{'collection'} || []}) { next unless $cl->{'package'}; my $haveremovedpkg; for my $pkg (@{$cl->{'package'}}) { next unless $pkg->{'filename'} =~ /-debug(?:info|source)-.*rpm$/; $pkg = undef; $haveremovedpkg = 1; } next unless $haveremovedpkg; $cl->{'package'} = [ grep {defined($_)} @{$cl->{'package'}} ]; } } } } } # store repoinfo in published database my $repoinfodb = db_open('repoinfo'); db_store($repoinfodb, $dbgsplit ? "$prp$dbgsplit" : $prp, $state ne '' ? 
$repoinfo : undef) if $repoinfodb; # put in published database my $binarydb = db_open('binary'); updatebinaryindex($binarydb, [ map {"$prp_ext/$_"} @db_deleted ], [ map {"$prp_ext/$_"} @db_changed ]) if $binarydb; # mark file origins so we can gather per package statistics if ($BSConfig::markfileorigins) { print " marking file origins\n"; for my $f (sort @db_changed) { my $origin = $binaryorigins->{$f}; $origin = "?" unless defined $origin; my $req = { 'uri' => "$BSConfig::markfileorigins/$prp_ext/$f", 'request' => 'HEAD', 'maxredirects' => 3, 'timeout' => 10, 'ignorestatus' => 1, }; eval { BSRPC::rpc($req, undef, 'cmd=setpackage', "package=$origin"); }; print " $f: $@" if $@; } for my $f (sort @db_deleted) { my $req = { 'uri' => "$BSConfig::markfileorigins/$prp_ext/$f", 'request' => 'HEAD', 'maxredirects' => 3, 'timeout' => 10, 'ignorestatus' => 1, }; eval { BSRPC::rpc($req, undef, 'cmd=deleted'); }; print " $f: $@" if $@; } } # create repositories and patterns my %patterntype; for (@{$config->{'patterntype'} || []}) { if (/^(.*?):(.*)$/) { $patterntype{$1} = [ split(':', $2) ]; } else { $patterntype{$_} = []; } } if ($repotype{'rpm-md-legacy'}) { $repotype{'rpm-md'} = $repotype{'rpm-md-legacy'}; unshift @{$repotype{'rpm-md'}}, 'legacy'; delete $repotype{'rpm-md-legacy'}; } if ($BSConfig::publishprogram && $BSConfig::publishprogram->{$prp}) { local *PPLOCK; open(PPLOCK, '>', "$reporoot/$prp/.pplock") || die("$reporoot/$prp/.pplock: $!\n"); flock(PPLOCK, LOCK_EX) || die("flock: $!\n"); if (xfork()) { close PPLOCK; return; } if (system($BSConfig::publishprogram->{$prp}, $prp, $extrep)) { die(" $BSConfig::publishprogram->{$prp} failed: $?\n"); } goto publishprog_done; } my $xrepoid = $repoid; $xrepoid .= $dbgsplit if $dbgsplit; my @repotags = @{$repotype{'repotag'} || []}; # de-escape (mostly done for ':' s/%([a-fA-F0-9]{2})/chr(hex($1))/ge for @repotags; if (grep {$_ eq '-obsrepository'} @repotags) { @repotags = grep {$_ ne '-obsrepository'} @repotags; } else { my $obsname = $BSConfig::obsname || 'build.opensuse.org'; push @repotags, "obsrepository://$obsname/$projid/$repoid"; } my $data = { 'subdir' => $subdir, 'signargs' => $signargs, 'pubkey' => $pubkey, 'repoinfo' => $repoinfo, 'updateinfos' => \@updateinfos, 'deltainfos' => \%deltainfos, 'appdatas' => $appdatas, 'patterns' => $patterns, 'dbgsplit' => $dbgsplit, 'packtrack' => $packtrack, 'repotags' => \@repotags, }; if ($repotype{'rpm-md'}) { createrepo_rpmmd($extrep, $projid, $xrepoid, $data, $repotype{'rpm-md'}); } else { deleterepo_rpmmd($extrep, $projid, $xrepoid, $data); } if ($repotype{'suse'}) { createrepo_susetags($extrep, $projid, $xrepoid, $data, $repotype{'suse'}); } else { deleterepo_susetags($extrep, $projid, $xrepoid, $data); } # Mandriva format: if ($repotype{'hdlist2'}) { createrepo_hdlist2($extrep, $projid, $xrepoid, $data, $repotype{'hdlist2'}); } else { deleterepo_hdlist2($extrep, $projid, $xrepoid, $data); } if ($repotype{'debian'}) { createrepo_debian($extrep, $projid, $xrepoid, $data, $repotype{'debian'}); } else { deleterepo_debian($extrep, $projid, $xrepoid, $data); } if ($repotype{'arch'}) { createrepo_arch($extrep, $projid, $xrepoid, $data, $repotype{'arch'}); } else { deleterepo_arch($extrep, $projid, $xrepoid, $data); } if ($repotype{'staticlinks'}) { createrepo_staticlinks($extrep, $projid, $xrepoid, $data, $repotype{'staticlinks'}); } else { deleterepo_staticlinks($extrep, $projid, $xrepoid, $data); } if ($patterntype{'ymp'}) { createpatterns_ymp($extrep, $projid, $xrepoid, $data, $patterntype{'ymp'}); } 
else { deletepatterns_ymp($extrep, $projid, $xrepoid, $data); } if ($patterntype{'rpm-md'}) { createpatterns_rpmmd($extrep, $projid, $xrepoid, $data, $patterntype{'rpm-md'}); } else { deletepatterns_rpmmd($extrep, $projid, $xrepoid, $data); } if ($patterntype{'comps'}) { createpatterns_comps($extrep, $projid, $xrepoid, $data, $patterntype{'comps'}); } else { deletepatterns_comps($extrep, $projid, $xrepoid, $data); } # virt-builder repository if (-e "$extrep/index") { createrepo_virtbuilder($extrep, $projid, $xrepoid, $data); } publishprog_done: unlink("$uploaddir/publisher.$$") if $signkey; # post process step: create directory listing for poor YaST if ($repotype{'suse'}) { unlink("$extrep/directory.yast"); my @d = sort(ls($extrep)); for (@d) { $_ .= '/' if -d "$extrep/$_"; $_ .= "\n"; } writestr("$extrep/.directory.yast", "$extrep/directory.yast", join('', @d)); } # push the repo (unless there's a redirect) # FIXME: use different mechanism to disable sync if (!($BSConfig::publishredirect && $BSConfig::publishredirect->{$prp})) { if ($extrep =~ /\Q$extrepodir\E\/(.+)$/) { sync_to_stage($prp, $1, $dbgsplit); } } # support for regex usage in $BSConfig::unpublishedhook my $unpublish_prp = $prp; if ($BSConfig::unpublishedhook_use_regex || $BSConfig::unpublishedhook_use_regex) { for my $key (sort {$b cmp $a} keys %{$BSConfig::unpublishedhook}) { if ($prp =~ /^$key/) { $unpublish_prp = $key; last; } } } if ($BSConfig::unpublishedhook && $BSConfig::unpublishedhook->{$unpublish_prp}) { my $hook = $BSConfig::unpublishedhook->{$unpublish_prp}; $hook = [ $hook ] unless ref $hook; print " calling unpublished hook @$hook\n"; qsystem(@$hook, $prp, $extrep, @db_deleted) && warn(" @$hook failed: $?\n"); } # support for regex usage in $BSConfig::publishedhook my $publish_prp = $prp; if ($BSConfig::publishedhook_use_regex || $BSConfig::publishedhook_use_regex) { for my $key (sort {$b cmp $a} keys %{$BSConfig::publishedhook}) { if ($prp =~ /^$key/) { $publish_prp = $key; last; } } } if ($BSConfig::publishedhook && $BSConfig::publishedhook->{$publish_prp}) { my $hook = $BSConfig::publishedhook->{$publish_prp}; $hook = [ $hook ] unless ref $hook; print " calling published hook @$hook\n"; qsystem(@$hook, $prp, $extrep, @changed) && die(" @$hook failed: $?\n"); } BSNotify::notify('REPO_PUBLISHED', { project => $projid , 'repo' => $repoid }); # all done. till next time... if ($BSConfig::publishprogram && $BSConfig::publishprogram->{$prp}) { exit(0); } # recurse for dbgsplit publish($projid, $repoid, $repoinfo->{'splitdebug'}, $packtrack) if $repoinfo->{'splitdebug'} && !$dbgsplit; if ($packtrack && !$dbgsplit) { # update the packtrack cache (and remove the 'id' entry) my @newcache; for my $pt (values %$packtrack) { my $id = delete $pt->{'id'}; push @newcache, $id, [ map {$pt->{$_ eq 'arch' ? 
'binaryarch' : $_}} qw{arch name epoch version release disturl buildtime} ] if $id; } $repoinfo = BSUtil::retrieve("$reporoot/$prp/:repoinfo", 1) || {}; if (%$repoinfo) { $repoinfo->{'trackercache'} = \@newcache; $repoinfo->{'trackercacheversion'} = '1'; BSUtil::store("$reporoot/$prp/.:repoinfo", "$reporoot/$prp/:repoinfo", $repoinfo); @newcache = (); # free mem undef $repoinfo; # free mem } # send notification print " sending binary release tracking notification\n"; BSNotify::notify('PACKTRACK', { project => $projid , 'repo' => $repoid }, Storable::nfreeze([ map { $packtrack->{$_} } sort keys %$packtrack ])); } } sub clear_repoinfo_state { my ($prp) = @_; if (-s "$reporoot/$prp/:repoinfo") { my $repoinfo = BSUtil::retrieve("$reporoot/$prp/:repoinfo", 1) || {}; if ($repoinfo->{'state'}) { delete $repoinfo->{'state'}; BSUtil::store("$reporoot/$prp/.:repoinfo$$", "$reporoot/$prp/:repoinfo", $repoinfo); } } } # check if background publish is still running sub check_publish_prg_running { my ($prp) = @_; return 0 unless $BSConfig::publishprogram && $BSConfig::publishprogram->{$prp}; local *PPLOCK; if (open(PPLOCK, '<', "$reporoot/$prp/.pplock")) { if (flock(PPLOCK, LOCK_EX | LOCK_NB)) { close PPLOCK; return 1; } close PPLOCK; } return 0; } my %publish_retry; my %publish_lasttime; my %publish_duration; sub writepublishtimeevent { my ($projid, $repoid, $now, $duration, $evn) = @_; my $ev = { 'type' => 'publishtime', 'project' => $projid, 'repository' => $repoid, 'job' => "$now $duration", }; $ev->{'job'} .= " $evn" if defined $evn; my $evname = "publishtime::${projid}::$repoid"; $evname = "publishtime::".Digest::MD5::md5_hex($evname) if length($evname) > 200; writexml("$myeventdir/.$evname$$", "$myeventdir/$evname", $ev, $BSXML::event); BSUtil::ping("$myeventdir/.ping"); } sub clean_publish_retry { my $now = time(); for (sort keys %publish_retry) { delete($publish_retry{$_}) if $publish_retry{$_} < $now; } } sub set_publish_retry { my ($req, $due) = @_; $publish_retry{$req->{'event'}} = $due; my $ev = $req->{'ev'}; writepublishtimeevent($ev->{'project'}, $ev->{'repository'}, 0, $due, $req->{'event'}) if $req->{'forked'}; } sub notify_state { my ($projid, $repoid, $state) = @_; BSNotify::notify('REPO_PUBLISH_STATE', { 'project' => $projid, 'repo' => $repoid, 'state' => $state }); } sub syncdb { die if @db_sync; db_pickup(); db_sync(); } sub publishevent { my ($req, $projid, $repoid) = @_; if ($req->{'forked'}) { my $prepend = "$$: "; $prepend = "$req->{'flavor'}.$prepend" if $req->{'flavor'}; binmode STDOUT, "via(BSStdRunner::prepend)"; print $prepend; } $db_sync_append = 1 if $req->{'forked'}; syncdb() if $req->{'syncdb'}; my $prp = "$projid/$repoid"; my $starttime = time(); if (check_publish_prg_running($prp)) { set_publish_retry($req, $starttime + 60); die("$prp: external publish program still running\n"); } my $penalty_multiplier = defined($BSConfig::publish_penalty_multiplier) ? 
$BSConfig::publish_penalty_multiplier : 1; if ($penalty_multiplier && $publish_lasttime{$prp} && $publish_duration{$prp} > 300 && $publish_lasttime{$prp} + $publish_duration{$prp} * $penalty_multiplier> $starttime) { set_publish_retry($req, $starttime + 60 * 5); die("$prp: not yet\n"); } notify_state($projid, $repoid, 'publishing'); eval { publish($projid, $repoid); }; if ($@) { warn("publish failed for $projid/$repoid : $@"); # delete state from repoinfo so that we will always re-publish clear_repoinfo_state($prp); set_publish_retry($req, time() + 60); db_sync(); BSUtil::printlog("publish failed for $prp") if $req->{'forked'}; return 0; } my $now = time(); $publish_lasttime{$prp} = $now; $publish_duration{$prp} = $now - $starttime; writepublishtimeevent($projid, $repoid, $now, $now - $starttime) if $req->{'forked'}; notify_state($projid, $repoid, 'published'); db_sync(); BSUtil::printlog("publish done for $prp") if $req->{'forked'}; return 1; } sub publishtimeevent { my ($req, $projid, $repoid, $job) = @_; my $prp = "$projid/$repoid"; my ($lasttime, $duration, $evn) = split(' ', $job, 3); if ($lasttime) { $publish_lasttime{$prp} = $lasttime; $publish_duration{$prp} = $duration; } elsif (defined($evn)) { $publish_retry{$evn} = $duration; } return 1; } sub configurationevent { my ($req) = @_; print "updating configuration\n"; BSConfiguration::update_from_configuration(); return 1; } sub lsevents { clean_publish_retry() if %publish_retry; my @events = BSStdRunner::lsevents(@_); # put publishtime and highprio events to the front my @publishtimeevents = grep {/^publishtime/} @events; @events = grep {!/^publishtime/} @events if @publishtimeevents; my @highprioevents = grep {/^_/} @events; @events = grep {!/^[_]/} @events if @highprioevents; return (@publishtimeevents, @highprioevents, @events); } sub getevent { my ($req) = @_; my $evname = $req->{'event'}; if ($publish_retry{$evname}) { return (undef, 1) if $publish_retry{$evname} > time(); delete $publish_retry{$evname}; } $req = BSStdRunner::getevent($req); return undef unless $req; # publishtime and configuration events are nofork events my $evtype = $req->{'ev'}->{'type'} || ''; if ($evtype eq 'publishtime') { return ($req, 1, 1) if ($req->{'ev'}->{'job'} || '') =~ /^0 /; return ($req, undef, 1); } return ($req, undef, 1) if $evtype eq 'configuration'; # also don't fork if the database is not in sync if (@db_sync) { print "not forking because of unsynced database entries\n"; return ($req, undef, 1); } # serialize if our children have unsynced data if (!@db_sync && $maxchild && $maxchild > 1 && $extrepodb && -s "$extrepodb.sync") { print "not forking because of unsynced database entries (syncdb)\n"; $req->{'syncdb'} = 1; return ($req, undef, 2); } return $req; } sub runsingleevent { my ($conf, $eventfile) = @_; my $ev = readxml($eventfile, $BSXML::event); $conf->{'eventdir'} = '.'; ($conf->{'eventdir'}, $eventfile) = ($1, $2) if $eventfile =~ /^(.*)\/([^\/]*)$/; my $req = {'conf' => $conf, 'event' => $eventfile, 'ev' => $ev}; my $r = BSStdRunner::dispatch($req); exit($r ? 
0 : 1);
}

sub call_run {
  my ($conf) = @_;
  db_sync(); # try to sync old events
  runsingleevent($conf, $ARGV[1]) if @ARGV > 1 && $ARGV[0] eq '--event';
  BSRunner::run($conf);
}

sub fc_rescan {
  my ($conf, $fc) = @_;
  unlink($fc);
}

my $dispatches = [
  'configuration' => \&configurationevent,
  'publish $project $repository' => \&publishevent,
  'publishtime $project $repository $job' => \&publishtimeevent,
];

my $conf = {
  'runname' => 'bs_publish',
  'eventdir' => $myeventdir,
  'dispatches' => $dispatches,
  'lsevents' => \&lsevents,
  'getevent' => \&getevent,
  'run' => \&call_run,
  'filechecks' => { "$rundir/bs_publish.rescan" => \&fc_rescan },
  'inprogress' => 1,
  'maxchild' => $maxchild,
  'maxchild_flavor' => $maxchild_flavor,
};
$conf->{'getflavor'} = $BSConfig::publish_getflavor if $BSConfig::publish_getflavor;

BSStdRunner::run('publisher', \@ARGV, $conf);

=head2 upload_container

Check whether the build result is a valid container and upload it to the
configured registries.

=cut

sub upload_container {
  my ($dir, $containerinfo, $file, $projid, $repoid, $arch, $notary_uploads) = @_;
  # jump out if no config
  return unless $BSConfig::publish_containers;
  # get registry names for this project
  my @registries;
  my @s = @{$BSConfig::publish_containers};
  while (@s) {
    my ($k, $v) = splice(@s, 0, 2);
    if ($projid =~ /^$k/) {
      $v = [ $v ] unless ref $v;
      @registries = @$v;
      last;
    }
  }
  # convert registry names to configs
  for my $registry (splice @registries) {
    my $config = get_registry_config($registry);
    push @registries, $config if $config;
  }
  return if !@registries;
  # check if containerinfo is valid and get info for container
  BSUtil::printlog("Checking containerinfo for $dir/$containerinfo");
  my $info = BSRepServer::Containerinfo::readcontainerinfo($dir, $containerinfo);
  if (!$info) {
    BSUtil::printlog("No valid containerinfo found");
    return;
  }
  if (!@{$info->{tags} || []}) {
    BSUtil::printlog("container is not tagged, skipping upload");
    return;
  }
  my $src = "$dir/$file";
  my $tempfile = decompress_container($src);
  eval {
    for my $config (@registries) {
      upload_to_registry($config, $info, $tempfile, $projid, $repoid, $arch, $notary_uploads);
    }
  };
  my $error = $@;
  BSUtil::printlog("Deleting $tempfile");
  unlink($tempfile);
  die($error) if $error;
}
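# ---------------------------------------------------------------------------
# Editor's sketch (all values invented): the configuration shape expected by
# upload_container() above and get_registry_config() below. Registry names
# are resolved per project-name prefix; the first matching prefix wins:
#
#   our $container_registries = {
#     'example' => {
#       server   => 'https://registry.example.com',
#       user     => 'obspublisher',
#       password => 'secret',           # server/user/password are mandatory
#       # notary => 'https://notary.example.com',  # enables notary signing
#     },
#   };
#   our $publish_containers = [
#     'MyProj:Containers' => [ 'example' ],  # array of registry names ...
#     ''                  => 'example',      # ... or a single name
#   ];
# ---------------------------------------------------------------------------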
=head2 upload_to_registry - upload decompressed file

 Parameters:
  config   - validated config for this registry
  info     - content of the containerinfo file
  tempfile - path to the decompressed container tar file

 Returns:
  nothing

=cut

sub upload_to_registry {
  my ($config, $info, $tempfile, $projid, $repoid, $arch, $notary_uploads) = @_;
  # TODO: should be more general to implement own upload
  # prepare url for skopeo
  my $name = lc($info->{name});
  my $delimiter = $config->{repository_delimiter} || '/';
  my $repository_base = $config->{repository_base} || '/';
  $projid =~ s/:/$delimiter/g;
  $repoid =~ s/:/$delimiter/g;
  my $tag = $info->{tags}->[0];
  my $uploader = $config->{uploader} || 'skopeo';
  my $reponame = lc("$repository_base$projid/$repoid/$arch/$tag");
  $reponame =~ s/^\///;
  my $repotag = 'latest';
  if ($reponame =~ /:([^\/]+)$/) {
    $repotag = $1;
    $reponame =~ s/:[^\/]+$//;
  }
  my $registryserver = $config->{server};
  $registryserver =~ s/^https?:\/\///;
  if ($uploader eq 'skopeo') {
    my $dst = "docker://$registryserver/$reponame:$repotag";
    ####
    # FIXME: Find solution to remove credentials from the cli options
    # HINT: https://github.com/projectatomic/skopeo/issues/434 must be solved first
    ####
    my @cmd = ("skopeo", "copy", "--dest-creds", "$config->{user}:$config->{password}", "docker-archive:$tempfile", $dst);
    BSUtil::printlog("Uploading: '$cmd[0] $cmd[1] $cmd[2] XXX:XXX $cmd[4] $cmd[5]'");
    my $result = qsystem(@cmd);
    die "Error while uploading\n" if $result;
  }
  if ($config->{'notary'}) {
    my $gun;
    if ($config->{'notary_gunprefix'}) {
      $gun = "$config->{'notary_gunprefix'}";
    } else {
      $gun = $registryserver;
    }
    print "adding notary upload for $gun/$reponame: $repotag\n";
    $notary_uploads->{"$gun/$reponame"}->{$repotag} = $config;
  }
}

sub upload_to_notary {
  my ($projid, $notary_uploads, $signargs, $pubkey) = @_;
  unlink("$uploaddir/publisher.$$.notarypubkey");
  writestr("$uploaddir/publisher.$$.notarypubkey", undef, $pubkey);
  my @signargs;
  push @signargs, '--project', $projid if $BSConfig::sign_project;
  push @signargs, @{$signargs || []};
  for my $gun (sort keys %$notary_uploads) {
    my @tags = sort keys %{$notary_uploads->{$gun}};
    next unless @tags;
    my $config = $notary_uploads->{$gun}->{$tags[0]};
    my @cmd = ("$INC[0]/bs_notar", @signargs, "-p", "$uploaddir/publisher.$$.notarypubkey", $config->{'server'}, $config->{'notary'}, $gun, @tags);
    BSUtil::printlog("Uploading to notary: @cmd\n");
    splice(@cmd, 1, 0, "--dest-creds", "$config->{user}:$config->{password}");
    my $result = qsystem(@cmd);
    die "Error while uploading to notary\n" if $result;
  }
  unlink("$uploaddir/publisher.$$.notarypubkey");
}

=head2 get_registry_config - check for a valid registry config and set default values

 Parameters:
  registry - identifier of the registry to look up in $BSConfig::container_registries

 Returns:
  config - valid config for the registry

=cut

sub get_registry_config {
  my ($registry) = @_;
  # avoid "used only once" warnings
  my $cr = $BSConfig::container_registries->{$registry} || $BSConfig::container_registries->{$registry};
  if (ref($cr) ne 'HASH') {
    BSUtil::printlog("No or invalid config found for container registry: '$registry'");
    return;
  }
  # check if minimal config items are set
  if (!$cr->{server} || !$cr->{user} || !$cr->{password}) {
    BSUtil::printlog("No valid config found for container registry: "."$cr->{server}/$cr->{user}/$cr->{password} (server/user/password)");
    return;
  }
  return $cr;
}

=head2 decompress_container - decompress or copy container into a temporary file

Returns the path to the temporary file.

=cut

sub decompress_container {
  my ($in) = @_;
  my %ext2decomp = (
    'tbz' => 'bzcat',
    'tgz' => 'zcat',
    'bz2' => 'bzcat',
    'xz'  => 'xzcat',
    'gz'  => 'zcat',
  );
  my $decomp;
  $decomp = $ext2decomp{$1} if $in =~ /\.([^\.]+)$/;
  $decomp ||= 'cat';
  my ($fh, $tempfile) = tempfile();
  BSUtil::printlog("Decompressing: '$decomp $in > $tempfile'");
  qsystem('stdout', $tempfile, $decomp, $in);
  return $tempfile;
}
open-build-service-2.9.4/src/backend/bs_repserver000077500000000000000000004360361332555733200220510ustar00rootroot00000000000000#!/usr/bin/perl -w
#
# Copyright (c) 2006, 2007 Michael Schroeder, Novell Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# # You should have received a copy of the GNU General Public License # along with this program (see the file COPYING); if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA # ################################################################ # # The Repository Server # BEGIN { my ($wd) = $0 =~ m-(.*)/- ; $wd ||= '.'; # FIXME: currently the bs_srcserver makes assumptions on being in a # properly set up working dir, e.g. with subdirs 'worker' and # 'build'. Either that is cleaned up or this stays in, for the sake # of startproc and others being able to start a bs_srcserver without # knowing that it has to be started in the right directory.... chdir "$wd"; unshift @INC, "build"; unshift @INC, "."; } use POSIX; use Fcntl qw(:DEFAULT :flock); BEGIN { Fcntl->import(':seek') unless defined &SEEK_SET; } use XML::Structured ':bytes'; use Storable (); use Data::Dumper; use Digest::MD5 (); use List::Util; use Symbol; use BSConfiguration; use BSRPC ':https'; use BSServer; use BSUtil; use BSHTTP; use BSFileDB; use BSXML; use BSVerify; use BSHandoff; use Build; use BSWatcher ':https'; use BSStdServer; use BSXPath; use BSXPathKeys; use BSDB; use BSDBIndex; use BSNotify; use BSUrlmapper; use BSSolv; use BSRepServer; use BSRepServer::BuildInfo; use BSRepServer::Containerinfo; use BSDispatcher::Constraints; use BSCando; use Build; # configure Build module for buildinfo queries $Build::Rpm::unfilteredprereqs = 1 if defined $Build::Rpm::unfilteredprereqs; $Build::Rpm::conflictdeps = 1 if defined $Build::Rpm::conflictdeps; $Build::Kiwi::repoextras = 1 if defined $Build::Kiwi::repoextras; use strict; my $port = 5252; #'RR' $port = $1 if $BSConfig::reposerver =~ /:(\d+)$/; my $proxy; $proxy = $BSConfig::proxy if defined($BSConfig::proxy); BSUtil::set_fdatasync_before_rename() unless $BSConfig::disable_data_sync || $BSConfig::disable_data_sync; my $historylay = [qw{versrel bcnt srcmd5 rev time duration}]; my $reporoot = "$BSConfig::bsdir/build"; my $workersdir = "$BSConfig::bsdir/workers"; my $jobsdir = "$BSConfig::bsdir/jobs"; my $eventdir = "$BSConfig::bsdir/events"; my $infodir = "$BSConfig::bsdir/info"; my $uploaddir = "$BSConfig::bsdir/upload"; my $rundir = $BSConfig::rundir || "$BSConfig::bsdir/run"; my $extrepodir = "$BSConfig::bsdir/repos"; my $extrepodb = "$BSConfig::bsdir/db/published"; my $ajaxsocket = "$rundir/bs_repserver.ajax"; my @binsufs = qw{rpm deb pkg.tar.gz pkg.tar.xz}; my $binsufsre = join('|', map {"\Q$_\E"} @binsufs); # XXX read jobs instead? ### TODO: (fs) move to BSUtil sub jobname { my ($prp, $packid) = @_; my $job = "$prp/$packid"; $job =~ s/\//::/g; $job = ':'.Digest::MD5::md5_hex($prp).'::'.(length($packid) > 160 ? 
':'.Digest::MD5::md5_hex($packid) : $packid) if length($job) > 200; return $job; } sub fetchdodbinary { my ($pool, $repo, $p, $arch, $maxredirects, $handoff) = @_; my $reponame = $repo->name(); die("$reponame is no dod repo\n") unless $repo->dodurl(); my $path = $pool->pkg2path($p); die("$path has an unsupported suffix\n") unless $path =~ /\.($binsufsre)$/; my $suf = $1; my $pkgname = $pool->pkg2name($p); BSVerify::verify_filename($pkgname); BSVerify::verify_simple($pkgname); my $localname = "$reporoot/$reponame/$arch/:full/$pkgname.$suf"; return $localname if -e $localname; # we really need to download, handoff to ajax if not already done BSHandoff::handoff(@$handoff) if $handoff && !$BSStdServer::isajax; my $url = $repo->dodurl(); $url .= '/' unless $url =~ /\/$/; $url .= $pool->pkg2path($p); my $tmp = "$reporoot/$reponame/$arch/:full/.dod.$$.$pkgname.$suf"; #print "fetching: $url\n"; my $param = {'uri' => $url, 'filename' => $tmp, 'receiver' => \&BSHTTP::file_receiver, 'proxy' => $proxy}; $param->{'maxredirects'} = $maxredirects if defined $maxredirects; my $r; eval { $r = BSWatcher::rpc($param); }; if ($@) { $@ =~ s/(\d* *)/$1$url: /; die($@); } return unless defined $r; my $checksum; $checksum = $pool->pkg2checksum($p) if defined &BSSolv::pool::pkg2checksum; eval { # verify the checksum if we know it die("checksum error for $tmp, expected $checksum\n") if $checksum && !$pool->verifypkgchecksum($p, $tmp); # also make sure that the evra matches what we want my $q = Build::query($tmp, 'evra' => 1); my $data = $pool->pkg2data($p); $data->{'release'} = '__undef__' unless defined $data->{'release'}; $q->{'release'} = '__undef__' unless defined $q->{'release'}; die("downloaded package is not the one we want\n") if $data->{'name'} ne $q->{'name'} || ($data->{'arch'} || '') ne ($q->{'arch'} || '') || ($data->{'epoch'} || 0) != ($q->{'epoch'} || 0) || $data->{'version'} ne $q->{'version'} || $data->{'release'} ne $q->{'release'}; BSVerify::verify_nevraquery($q); # just in case... 
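    # (Editor's note on the checks above: they guard the download-on-demand
    # path. A checksum mismatch, or a name/epoch/version/release/arch that
    # differs from the pool's metadata, means the mirror served some other
    # file, so the download is discarded. Callers such as getbinaries() below
    # invoke this as
    #   my $path = fetchdodbinary($pool, $repo, $p, $arch, 3, \@handoff);
    # and treat an undef return as "request handed off to the AJAX server".)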
}; if ($@) { unlink($tmp); die($@); } rename($tmp, $localname) || die("rename $tmp $localname: $!\n"); return $localname; } sub readpackstatus { my ($prpa) = @_; my $psf = readstr("$reporoot/$prpa/:packstatus.finished", 1); my $ps = BSUtil::retrieve("$reporoot/$prpa/:packstatus", 1); if (!$ps) { # backward compat: try old xml format return undef unless -e "$reporoot/$prpa/:packstatus"; $ps = readxml("$reporoot/$prpa/:packstatus", $BSXML::packstatuslist, 1); return undef unless $ps; my %packstatus; my %packerror; for (@{$ps->{'packstatus'} || []}) { $packstatus{$_->{'name'}} = $_->{'status'}; $packerror{$_->{'name'}} = $_->{'error'} if $_->{'error'}; } $ps = {'packstatus' => \%packstatus, 'packerror' => \%packerror}; } if ($psf) { for (split("\n", $psf)) { my ($code, $packid) = split(' ', $_, 2); if ($code eq 'scheduled') { my ($job, $details); ($packid, $job, $details) = split('/', $packid, 3); next unless ($ps->{'packstatus'}->{$packid} || '') eq 'scheduled'; $ps->{'packerror'}->{$packid} = $details; } else { next unless ($ps->{'packstatus'}->{$packid} || '') eq 'scheduled'; $ps->{'packstatus'}->{$packid} = 'finished'; $ps->{'packerror'}->{$packid} = $code; } } } return $ps; } sub getbinaryversions { my ($cgi, $projid, $repoid, $arch) = @_; my $prp = "$projid/$repoid"; my @bins; if (defined $cgi->{'binaries'}) { @bins = split(',', $cgi->{'binaries'}); } else { die unless $cgi->{'view'} && $cgi->{'view'} eq 'binaryversions'; @bins = @{$cgi->{'binary'} || []}; } my $serial; $serial = BSWatcher::serialize("$reporoot/$projid/$repoid/$arch") if $BSStdServer::isajax; return if $BSStdServer::isajax && !defined $serial; my $pool = BSSolv::pool->new(); my $repo = BSRepServer::addrepo_scan($pool, $prp, $arch); my %names = $repo ? $repo->pkgnames() : (); @bins = sort keys %names if !@bins && !defined $cgi->{'binaries'}; my @res; my $needscan; my $dodurl = $repo->dodurl(); my $metacache; for my $bin (@bins) { my $p = $names{$bin}; if (!$p) { push @res, {'name' => $bin, 'error' => 'not available'}; next; } my $path = "$reporoot/".$pool->pkg2fullpath($p, $arch); my $sizek = $pool->pkg2sizek($p); my $hdrmd5 = $pool->pkg2pkgid($p); if ($dodurl && $hdrmd5 eq 'd0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0') { my @handoff; if (defined $cgi->{'binaries'}) { @handoff = ('/getbinaryversions', undef, "project=$projid", "repository=$repoid", "arch=$arch", "binaries=$cgi->{'binaries'}"); } else { @handoff = ("/build/$projid/$repoid/$arch/_repository", undef, 'view=binaryversions', map {"binary=$_"} @{$cgi->{'binary'} || []}); } $path = fetchdodbinary($pool, $repo, $p, $arch, 3, \@handoff); return unless defined $path; # TODO: move it out of the loop otherwise the same files might be queried multiple times my @s = stat($path); $sizek = ($s[7] + 1023) >> 10; $hdrmd5 = Build::queryhdrmd5($path); $needscan = 1; } if ($bin =~ /^container:/ && $path =~ /(\.tar(?:\..+)?)$/) { my @s = stat($path); next unless @s; $sizek = ($s[7] + 1023) >> 10; push @res, {'name' => "$bin$1", 'hdrmd5' => $hdrmd5, 'sizek' => $sizek}; next; } if ($path !~ /\.($binsufsre)$/) { push @res, {'name' => $bin, 'error' => 'unknown suffix'}; next; } my $r = {'name' => "$bin.$1", 'hdrmd5' => $hdrmd5, 'sizek' => $sizek}; push @res, $r; next if $cgi->{'nometa'}; next unless $path =~ s/\.(?:$binsufsre)$//; if (!$metacache) { $metacache = BSUtil::retrieve("$reporoot/$projid/$repoid/$arch/:full.metacache", 1) || {}; # we currently don't bother with :full.metacache.merge. 
this is not a problem, as the # cache is not authoritative } my @s = stat("$path.meta"); if (@s && $path =~ /([^\/]*$)/) { my $mc = $metacache->{$1}; if ($mc && $mc->[0] eq "$s[9]/$s[7]/$s[1]") { $r->{'metamd5'} = $mc->[1]; next; } } local *F; if (!open(F, '<', "$path.meta")) { next unless open(F, '<', "$path-MD5SUMS.meta"); } my $ctx = Digest::MD5->new; $ctx->addfile(*F); $r->{'metamd5'} = $ctx->hexdigest(); close F; } undef $repo; undef $pool; BSWatcher::serialize_end($serial) if defined $serial; forwardevent($cgi, 'scanrepo', $projid, undef, $repoid, $arch) if $needscan; return ({ 'binary' => \@res }, $BSXML::binaryversionlist); } sub getpackagebinaryversionlist { my ($cgi, $projid, $repoid, $arch, $packids) = @_; my $prp = "$projid/$repoid"; my @res; my $code; if ($cgi->{'withcode'}) { my $ps = readpackstatus("$projid/$repoid/$arch"); $code = ($ps || {})->{'packstatus'} || {}; } my $gbininfo = BSRepServer::read_gbininfo("$reporoot/$prp/$arch", 1); my %packids = map {$_ => 1} @{$packids || []}; if ($code) { $gbininfo->{$_} ||= {} for keys %$code; } for my $packid (sort keys %$gbininfo) { next if %packids && !$packids{$packid}; next if $packid eq '_volatile' && !$packids; my $bininfo = $gbininfo->{$packid}; filtersources_bininfo($bininfo) if $bininfo->{'.nosourceaccess'}; my @pres; for (sort keys %$bininfo) { my $bin = $bininfo->{$_}; next unless exists $bin->{'filename'}; my $r = { 'name' => $bin->{'filename'} }; $r->{'hdrmd5'} = $bin->{'hdrmd5'} if $bin->{'hdrmd5'}; $r->{'leadsigmd5'} = $bin->{'leadsigmd5'} if $bin->{'leadsigmd5'}; my $size = (split('/', $bin->{'id'}))[1]; $r->{'sizek'} = ($size + 512) >> 10; push @pres, $r; } # add nouseforbuild marker for the scheduler push @pres, { 'name' => '.nouseforbuild' } if $code && $bininfo->{'.nouseforbuild'}; push @res, {'package' => $packid, 'binary' => \@pres}; $res[-1]->{'code'} = $code->{$packid} || 'unknown' if $code; } return ({ 'binaryversionlist' => \@res }, $BSXML::packagebinaryversionlist); } sub getgbininfo { my ($cgi, $projid, $repoid, $arch) = @_; my $prp = "$projid/$repoid"; die("getgbininfo: package filtering is not supported\n") if $cgi->{'package'}; my $gbininfo = BSRepServer::read_gbininfo("$reporoot/$prp/$arch", 1); if ($cgi->{'withcode'}) { my $ps = readpackstatus("$projid/$repoid/$arch"); my $code = ($ps || {})->{'packstatus'} || {}; $gbininfo->{$_}->{'.code'} = $code->{$_} for keys %$code; } delete $_->{'.bininfo'} for values %$gbininfo; return (BSUtil::tostorable($gbininfo), 'Content-Type: application/octet-stream'); } sub getpackagelist_build { my ($cgi, $projid, $repoid, $arch) = @_; die "must specify view\n" unless $cgi->{'view'}; $cgi->{'withcode'} = 1 if $cgi->{'view'} eq 'binaryversionscode' || $cgi->{'view'} eq 'gbininfocode'; return getgbininfo($cgi, $projid, $repoid, $arch) if $cgi->{'view'} eq 'gbininfo' || $cgi->{'view'} eq 'gbininfocode'; die("unknown view '$cgi->{'view'}'\n") unless $cgi->{'view'} eq 'binaryversions' || $cgi->{'view'} eq 'binaryversionscode'; return getpackagebinaryversionlist($cgi, $projid, $repoid, $arch, $cgi->{'package'}); } # the worker thinks that out packagebinaryversionlist contains bogus entries sub badpackagebinaryversionlist { my ($cgi, $projid, $repoid, $arch, $packids) = @_; my $dir = "$reporoot/$projid/$repoid/$arch"; my $gbininfo = BSRepServer::read_gbininfo($dir); if ($gbininfo) { $packids = [ sort keys %$gbininfo ] unless $packids; for my $packid (@$packids) { unlink("$dir/$packid/.bininfo"); } unlink("$dir/:bininfo"); unlink("$dir/:bininfo.merge"); forwardevent($cgi, 
'scanprjbinaries', $projid, $packids->[0], $repoid, $arch); } return $BSStdServer::return_ok; } sub getbinaries { my ($cgi, $projid, $repoid, $arch) = @_; my $prp = "$projid/$repoid"; my @bins = split(',', $cgi->{'binaries'} || ''); my $serial; $serial = BSWatcher::serialize("$reporoot/$projid/$repoid/$arch") if $BSStdServer::isajax; return if $BSStdServer::isajax && !defined $serial; my $pool = BSSolv::pool->new(); my $repo = BSRepServer::addrepo_scan($pool, $prp, $arch); my %names = $repo ? $repo->pkgnames() : (); my @send; my $needscan; my $dodurl = $repo->dodurl(); for my $bin (@bins) { my $p = $names{$bin}; if (!$p) { push @send, {'name' => $bin, 'error' => 'not available'}; next; } my $path = "$reporoot/".$pool->pkg2fullpath($p, $arch); if ($dodurl && $pool->pkg2pkgid($p) eq 'd0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0') { my @handoff = ('/getbinaries', undef, "project=$projid", "repository=$repoid", "arch=$arch", "binaries=$cgi->{'binaries'}"); $path = fetchdodbinary($pool, $repo, $p, $arch, 3, \@handoff); return unless defined $path; $needscan = 1; } if ($bin =~ /^container:/ && $path =~ /(\.tar(?:\..+)?)$/) { push @send, {'name' => "$bin$1", 'filename' => $path} unless $cgi->{'metaonly'}; next; } if ($path !~ /\.($binsufsre)$/) { push @send, {'name' => $bin, 'error' => 'unknown suffix'}; next; } push @send, {'name' => "$bin.$1", 'filename' => $path} unless $cgi->{'metaonly'}; next if $cgi->{'nometa'}; next unless $path =~ s/\.(?:$binsufsre)$//; if (-e "$path.meta" || ! -e "$path-MD5SUMS.meta") { push @send, {'name' => "$bin.meta", 'filename' => "$path.meta"}; } else { push @send, {'name' => "$bin.meta", 'filename' => "$path-MD5SUMS.meta"}; } } undef $repo; undef $pool; BSWatcher::serialize_end($serial) if defined $serial; forwardevent($cgi, 'scanrepo', $projid, undef, $repoid, $arch) if $needscan; BSWatcher::reply_cpio(\@send); return undef; } # TODO: move into Build::Rpm sub getrpmheaders { my ($path, $withhdrmd5) = @_; my $hdrmd5; local *F; open(F, '<', $path) || die("$path: $!\n"); my $buf = ''; my $l; while (length($buf) < 96 + 16) { $l = sysread(F, $buf, 4096, length($buf)); die("$path: read error\n") unless $l; } die("$path: not a rpm\n") unless unpack('N', $buf) == 0xedabeedb && unpack('@78n', $buf) == 5; my ($headmagic, $cnt, $cntdata) = unpack('@96N@104NN', $buf); die("$path: not a rpm (bad sig header)\n") unless $headmagic == 0x8eade801 && $cnt < 16384 && $cntdata < 1048576; my $hlen = 96 + 16 + $cnt * 16 + $cntdata; $hlen = ($hlen + 7) & ~7; while (length($buf) < $hlen + 16) { $l = sysread(F, $buf, 4096, length($buf)); die("$path: read error\n") unless $l; } if ($withhdrmd5) { my $idxarea = substr($buf, 96 + 16, $cnt * 16); die("$path: no md5 signature header\n") unless $idxarea =~ /\A(?:.{16})*\000\000\003\354\000\000\000\007(....)\000\000\000\020/s; my $md5off = unpack('N', $1); die("$path: bad md5 offset\n") unless $md5off; $md5off += 96 + 16 + $cnt * 16; $hdrmd5 = unpack("\@${md5off}H32", $buf); } ($headmagic, $cnt, $cntdata) = unpack('N@8NN', substr($buf, $hlen)); die("$path: not a rpm (bad header)\n") unless $headmagic == 0x8eade801 && $cnt < 1048576 && $cntdata < 33554432; my $hlen2 = $hlen + 16 + $cnt * 16 + $cntdata; while (length($buf) < $hlen2) { $l = sysread(F, $buf, 4096, length($buf)); die("$path: read error\n") unless $l; } close F; return (substr($buf, 0, 96), substr($buf, 96, $hlen - 96), substr($buf, $hlen, $hlen2 - $hlen), $hdrmd5); } sub getavailable { my ($projid, $repoid, $arch, $available, $available_pattern, $available_product) = @_; my $pool = 
BSSolv::pool->new(); my $dir = "$reporoot/$projid/$repoid/$arch/:full"; my $repo; if (-s "$dir.solv") { eval {$repo = $pool->repofromfile("$projid/$repoid", "$dir.solv");}; } if ($repo) { $pool->createwhatprovides(); my @pkgs = $repo->pkgnames(); while (@pkgs) { my ($name, $p) = splice(@pkgs, 0, 2); my $arch; if (defined(&BSSolv::pool::pkg2arch)) { $arch = $pool->pkg2arch($p); } else { my $d = $pool->pkg2data($p); $arch = $d->{'arch'}; } $arch ||= 'noarch'; $available->{$name}->{$arch} = 1; } for my $p ($pool->whatprovides('pattern()')) { my $d = $pool->pkg2data($p); my $name; my $visible; for my $prv (@{$d->{'provides'} || []}) { $visible = 1 if $prv =~ /^pattern-visible\(\)/; next unless $prv =~ /^pattern\(\) = ([^\.].*)/; $name ||= $1; } $available_pattern->{$name}->{'noarch'} = 1 if $visible && defined $name; } for my $p ($pool->whatprovides('product()')) { my $d = $pool->pkg2data($p); my $name; for my $prv (@{$d->{'provides'} || []}) { next unless $prv =~ /^product\(\) = ([^\.].*)/; $name ||= $1; } $available_product->{$name}->{'noarch'} = 1 if defined $name; } } } sub processavailable { my ($available) = @_; my %archlist; my @res; for my $bin (sort keys %$available) { my $archlist = join(',', sort keys %{$available->{$bin}}); $archlist{$archlist}->{$bin} = 1; } for my $archlist (sort keys %archlist) { my @archs = split(',', $archlist); push @res, {'arch' => \@archs, 'name' => [ sort keys %{$archlist{$archlist}} ]}; } return \@res; } sub mapannotationurls { my ($p) = @_; my $annotation = BSUtil::fromxml($p->{'annotation'}, $BSXML::binannotation, 1); return unless $annotation && $annotation->{'repo'}; for my $r (@{$annotation->{'repo'}}) { my $url = $r->{'url'}; next unless $url; my $urlprp; if ($url =~ /^obs:\/{1,3}([^\/]+)\/([^\/]+)\/?$/) { $urlprp = "$1/$2"; } else { $urlprp = BSUrlmapper::urlmapper($url); } ($r->{'project'}, $r->{'repository'}) = split('/', $urlprp, 2) if $urlprp; } $p->{'annotation'} = BSUtil::toxml($annotation, $BSXML::binannotation); } sub getbinarylist_repository { my ($cgi, $projid, $repoid, $arch) = @_; my $prp = "$projid/$repoid"; my $view = $cgi->{'view'} || ''; if (($view eq 'cache' || $view eq 'cpio' || $view eq 'solvstate') && !$BSStdServer::isajax && !$cgi->{'noajax'}) { my @args = BSRPC::args($cgi, 'view', 'binary'); BSHandoff::handoff("/build/$projid/$repoid/$arch/_repository", undef, @args); } if ($view eq 'solv') { my $fd = gensym; if (!open($fd, '<', "$reporoot/$prp/$arch/:full.solv")) { my $pool = BSSolv::pool->new(); my $repo = BSRepServer::addrepo_scan($pool, $prp, $arch); if ($repo) { $repo->tofile("$reporoot/$prp/$arch/:full.solv.$$"); if (!open($fd, '<', "$reporoot/$prp/$arch/:full.solv.$$")) { undef $fd; } unlink("$reporoot/$prp/$arch/:full.solv.$$"); } else { undef $fd; } undef $repo; undef $pool; } die("no solv file available") unless defined $fd; BSWatcher::reply_file($fd); return undef; } if ($view eq 'solvstate') { my $repostate = readxml("$reporoot/$prp/$arch/:repostate", $BSXML::repositorystate, 1) || {}; my @files; push @files, { 'name' => 'repositorystate', 'data' => XMLout($BSXML::repositorystate, $repostate), }; my $fd = gensym; if (open($fd, '<', "$reporoot/$prp/$arch/:full.solv")) { push @files, { 'name' => 'repositorysolv', 'filename' => $fd }; } elsif (-d "$reporoot/$prp/$arch") { my $pool = BSSolv::pool->new(); my $repo = BSRepServer::addrepo_scan($pool, $prp, $arch); if ($repo) { $repo->tofile("$reporoot/$prp/$arch/:full.solv.$$"); if (open($fd, '<', "$reporoot/$prp/$arch/:full.solv.$$")) { push @files, { 'name' => 
'repositorysolv', 'filename' => $fd }; } unlink("$reporoot/$prp/$arch/:full.solv.$$"); } undef $repo; undef $pool; } BSWatcher::reply_cpio(\@files); return undef; } if ($view eq 'cache') { my $repostate = readxml("$reporoot/$prp/$arch/:repostate", $BSXML::repositorystate, 1) || {}; my @files; push @files, { 'name' => 'repositorystate', 'data' => XMLout($BSXML::repositorystate, $repostate), }; my $fd = gensym; if (-s "$reporoot/$prp/$arch/:full.solv") { my @s = stat(_); my $id64 = pack("a64", "$s[9]/$s[7]/$s[1]"); if (open($fd, '<', "$reporoot/$prp/$arch/:full.xcache")) { my $id; if (sysread($fd, $id, 64) == 64 && $id eq $id64) { push @files, { 'name' => 'repositorycache', 'filename' => $fd }; BSWatcher::reply_cpio(\@files); return undef; } unlink("$reporoot/$prp/$arch/:full.xcache"); } my $pool = BSSolv::pool->new(); my $repo = BSRepServer::addrepo_scan($pool, $prp, $arch); if ($repo) { my %data = $repo->pkgnames(); for my $p (values %data) { $p = $pool->pkg2data($p); mapannotationurls($p) if $p->{'annotation'}; } if (keys(%data) < 100 && $s[7] < 10000) { # small repo, feed from memory push @files, { 'name' => 'repositorycache', 'data' => BSUtil::tostorable(\%data) }; } else { # cache result my $tmpname = "$reporoot/$prp/$arch/:full.xcache.$$"; open($fd, '+>', $tmpname) || die("$tmpname: $!\n"); # Storable uses PerlIO_write, so we have to use print instead of syswrite here print $fd $id64; Storable::nstore_fd(\%data, $fd) || die("nstore_fd $tmpname: $!\n"); $fd->flush(); BSUtil::do_fdatasync(fileno($fd)) if $BSUtil::fdatasync_before_rename; rename($tmpname, "$reporoot/$prp/$arch/:full.xcache"); sysseek($fd, 64, Fcntl::SEEK_SET); push @files, { 'name' => 'repositorycache', 'filename' => $fd }; } } undef $repo; undef $pool; } elsif (-s "$reporoot/$prp/$arch/:full.cache") { # compatibility code, to be removed... if (open($fd, '<', "$reporoot/$prp/$arch/:full.cache")) { push @files, { 'name' => 'repositorycache', 'filename' => $fd }; } } BSWatcher::reply_cpio(\@files); return undef; } if ($view eq 'cpioheaders') { my $pool = BSSolv::pool->new(); my $repo = BSRepServer::addrepo_scan($pool, $prp, $arch); my %names = $repo ? $repo->pkgnames() : (); my @bins = $cgi->{'binary'} ? @{$cgi->{'binary'}} : sort keys %names; my @files; for my $bin (@bins) { my $p = $names{$bin}; if (!$p) { push @files, {'name' => $bin, 'error' => 'not available'}; next; } my $path = "$reporoot/".$pool->pkg2fullpath($p, $arch); if ($path !~ /\.rpm$/) { push @files, {'name' => $bin, 'error' => 'not an rpm'}; next; } my ($lead, $sighdr, $hdr, $hdrmd5); eval { ($lead, $sighdr, $hdr, $hdrmd5) = getrpmheaders($path, 1); }; if ($hdr) { push @files, {'name' => "$bin-$hdrmd5", 'data' => "$lead$sighdr$hdr"}; } else { my $err = $@; chomp $err; push @files, {'name' => $bin, 'error' => $err || 'bad rpm'}; } } BSWatcher::reply_cpio(\@files); return undef; } if ($view eq 'cpio') { my $serial; $serial = BSWatcher::serialize("$reporoot/$projid/$repoid/$arch") if $BSStdServer::isajax; return if $BSStdServer::isajax && !defined $serial; my @files; my $pool = BSSolv::pool->new(); my $repo = BSRepServer::addrepo_scan($pool, $prp, $arch); my %names = $repo ? $repo->pkgnames() : (); my @bins = $cgi->{'binary'} ? 
@{$cgi->{'binary'}} : sort keys %names; my $dodurl = $repo->dodurl(); my $needscan; for my $bin (@bins) { my $p = $names{$bin}; if (!$p) { push @files, {'name' => $bin, 'error' => 'not available'}; next; } my $path = "$reporoot/".$pool->pkg2fullpath($p, $arch); if ($dodurl && $pool->pkg2pkgid($p) eq 'd0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0') { my @handoff = ("/build/$projid/$repoid/$arch/_repository", undef, "view=$view", map {"binary=$_"} @{$cgi->{'binary'} || []}); $path = fetchdodbinary($pool, $repo, $p, $arch, 3, \@handoff); return unless defined $path; $needscan = 1; } my $n = $bin; if ($n =~ /^container:/) { $n .= $1 if $path =~ /(\.tar(?:\..+)?)$/; } elsif ($path =~ /\.($binsufsre)$/) { $n .= ".$1"; } if ($BSStdServer::isajax) { push @files, {'name' => $n, 'filename' => $path}; next; } my $fd = gensym; if (!open($fd, '<', $path)) { push @files, {'name' => $bin, 'error' => 'not available'}; } else { push @files, {'name' => $n, 'filename' => $fd}; } } undef $repo; undef $pool; BSWatcher::serialize_end($serial) if defined $serial; forwardevent($cgi, 'scanrepo', $projid, undef, $repoid, $arch) if $needscan; BSWatcher::reply_cpio(\@files); return undef; } if ($view eq 'binaryversions') { return getbinaryversions($cgi, $projid, $repoid, $arch); } if ($view eq 'availablebinaries') { my (%available, %available_pattern, %available_product); getavailable($projid, $repoid, $arch, \%available, \%available_pattern, \%available_product); my %res; $res{'packages'} = processavailable(\%available) if %available; $res{'patterns'} = processavailable(\%available_pattern) if %available_pattern; $res{'products'} = processavailable(\%available_product) if %available_product; return (\%res, $BSXML::availablebinaries); } die("unsupported view '$view'\n") if $view && $view ne 'names'; my $serial; $serial = BSWatcher::serialize("$reporoot/$projid/$repoid/$arch") if $BSStdServer::isajax; return if $BSStdServer::isajax && !defined $serial; my $pool = BSSolv::pool->new(); my $repo = BSRepServer::addrepo_scan($pool, $prp, $arch); my %names = $repo ? $repo->pkgnames() : (); my @bins = $cgi->{'binary'} ? @{$cgi->{'binary'}} : sort keys %names; my @res; my $needscan; my $dodurl = $repo->dodurl(); my @handoff = ("/build/$projid/$repoid/$arch/_repository", undef, BSRPC::args($cgi, 'view', 'binary')); for my $bin (@bins) { my $p = $names{$bin}; if (!$p) { push @res, {'filename' => $bin, 'size' => 0}; next; } my $path = $pool->pkg2path($p); my $n = $bin; if ($bin =~ /^container:/ && $path =~ /(\.tar(?:\..+)?)$/) { $n .= $1; } else { $n .= ".$1" if $path =~ /\.($binsufsre)$/; } my $r = {'filename' => $view eq 'names' ? 
$n : $path };
    my $id = $pool->pkg2bsid($p);
    if ($id && $bin !~ /^container:/) {
      if ($id eq 'dod') {
        $r->{'mtime'} = '';
        $r->{'size'} = '';
        if ($dodurl && $cgi->{'binary'}) {
          # this is used in the interconnect, so we need to fetch the dod binary
          $path = fetchdodbinary($pool, $repo, $p, $arch, 3, \@handoff);
          return unless defined $path;
          my @s = stat($path);
          ($r->{'mtime'}, $r->{'size'}) = ($s[9], $s[7]) if @s;
          $needscan = 1;
        }
      } else {
        my @s = split('/', $id, 3);
        $r->{'mtime'} = $s[0];
        $r->{'size'} = $s[1];
      }
    } else {
      my @s = stat("$reporoot/$prp/$arch/:full/$path");
      ($r->{'mtime'}, $r->{'size'}) = ($s[9], $s[7]) if @s;
    }
    push @res, $r;
  }
  undef $repo;
  undef $pool;
  BSWatcher::serialize_end($serial) if defined $serial;
  forwardevent($cgi, 'scanrepo', $projid, undef, $repoid, $arch) if $needscan;
  return ({'binary' => \@res}, $BSXML::binarylist);
}

sub filtersources {
  my (@bins) = @_;
  my $debian = grep {/\.(?:dsc|sdeb)$/} @bins;
  for my $bin (splice @bins) {
    next if $bin =~ /\.(?:no)?src\.rpm$/;
    next if $bin =~ /-debug(?:info|source).*\.rpm$/;
    next if $debian && ($bin !~ /\.deb$/) && ($bin !~ /[-.]appdata\.xml$/);
    push @bins, $bin;
  }
  return @bins;
}

sub filtersources_bininfo {
  my ($bininfo) = @_;
  return unless $bininfo->{'.nosourceaccess'};
  # strip sources and debug packages, mirroring filtersources()
  for my $bin (keys %$bininfo) {
    delete $bininfo->{$bin} if $bin =~ /\.(?:no)?src\.rpm$/;
    delete $bininfo->{$bin} if $bin =~ /-debug(?:info|source).*\.rpm$/;
  }
}

sub getbinarylist {
  my ($cgi, $projid, $repoid, $arch, $packid) = @_;
  return getbinarylist_repository($cgi, $projid, $repoid, $arch) if $packid eq '_repository';
  my $prp = "$projid/$repoid";
  my $view = $cgi->{'view'} || '';
  if ($view eq 'cpio' && !$BSStdServer::isajax && !$cgi->{'noajax'}) {
    my @args = BSRPC::args($cgi, 'view', 'binary', 'nosource');
    BSHandoff::handoff("/build/$projid/$repoid/$arch/$packid", undef, @args);
  }
  my %binaries = map {$_ => 1} @{$cgi->{'binary'} || []};
  if ($view eq 'cpio') {
    my @files;
    my @bins = grep {$_ ne 'logfile' && $_ ne 'status' && $_ ne 'reason' && $_ ne 'history' && !/^\./} ls("$reporoot/$prp/$arch/$packid");
    @bins = grep {!/^::import::/} @bins if $cgi->{'noimport'};
    @bins = filtersources(@bins) if $cgi->{'nosource'} || -e "$reporoot/$prp/$arch/$packid/.nosourceaccess";
    for (sort @bins) {
      next if %binaries && !$binaries{$_};
      if ($BSStdServer::isajax || @files > 1000) {
        # do not waste file descriptors
        push @files, {'name' => $_, 'filename' => "$reporoot/$prp/$arch/$packid/$_"};
        next;
      }
      my $fd = gensym;
      next unless open($fd, '<', "$reporoot/$prp/$arch/$packid/$_");
      push @files, {'name' => $_, 'filename' => $fd};
    }
    BSWatcher::reply_cpio(\@files);
    return undef;
  }
  if ($view eq 'cpioheaders') {
    my @files;
    my @bins = grep {/\.rpm$/ && !/^\./} ls("$reporoot/$prp/$arch/$packid");
    @bins = grep {!/^::import::/} @bins if $cgi->{'noimport'};
    @bins = filtersources(@bins) if $cgi->{'nosource'} || -e "$reporoot/$prp/$arch/$packid/.nosourceaccess";
    for my $bin (sort @bins) {
      next if %binaries && !$binaries{$bin};
      my ($lead, $sighdr, $hdr, $hdrmd5);
      eval { ($lead, $sighdr, $hdr, $hdrmd5) = getrpmheaders("$reporoot/$prp/$arch/$packid/$bin", 1); };
      if ($hdr) {
        push @files, {'name' => "$bin-$hdrmd5", 'data' => "$lead$sighdr$hdr"};
      } else {
        my $err = $@;
        chomp $err;
        push @files, {'name' => $bin, 'error' => $err || 'bad rpm'};
      }
    }
    BSWatcher::reply_cpio(\@files);
    return undef;
  }
  if ($view eq 'cpioheaderchksums') {
    my %chksum;
    local *CS;
    if (open(CS, '<', "$reporoot/$prp/$arch/$packid/.checksums")) {
      while (<CS>) {
        chomp;
        $chksum{$1} = $_ if /^(.{32}) /;
      }
      close CS;
    }
    my @files;
    my @bins = grep {$_ ne 'logfile' && $_ ne 'status' && $_ ne 'reason' && $_ ne 'history' && !/^\./} ls("$reporoot/$prp/$arch/$packid");
    @bins = grep {!/^::import::/} @bins if $cgi->{'noimport'};
    @bins = filtersources(@bins) if $cgi->{'nosource'} || -e "$reporoot/$prp/$arch/$packid/.nosourceaccess";
    for my $bin (sort @bins) {
      next if %binaries && !$binaries{$bin};
      if ($bin =~ /\.rpm$/) {
        my @s = stat "$reporoot/$prp/$arch/$packid/$bin";
        die("$reporoot/$prp/$arch/$packid/$bin: $!\n") unless @s;
        my ($lead, $sighdr, $hdr) = getrpmheaders("$reporoot/$prp/$arch/$packid/$bin");
        my $leadsigmd5 = Digest::MD5::md5_hex("$lead$sighdr");
        die("$bin not in checksum file\n") unless $chksum{$leadsigmd5};
        push @files, {'name' => "$bin", 'mtime' => $s[9], 'data' => "$lead$sighdr${hdr}chk:$chksum{$leadsigmd5} size:$s[7]\n"};
        next;
      }
      my $fd = gensym;
      next unless open($fd, '<', "$reporoot/$prp/$arch/$packid/$bin");
      push @files, {'name' => $bin, 'filename' => $fd};
    }
    BSWatcher::reply_cpio(\@files);
    return undef;
  }
  if ($view eq 'binaryversions') {
    my $bininfo = BSRepServer::read_bininfo("$reporoot/$prp/$arch/$packid");
    filtersources_bininfo($bininfo) if $cgi->{'nosource'} || $bininfo->{'.nosourceaccess'};
    my @res;
    for (sort keys %$bininfo) {
      my $bin = $bininfo->{$_};
      next if %binaries && !$binaries{$bin->{'filename'}};
      next if $cgi->{'noimport'} && $bin->{'filename'} =~ /^::import::/;
      my $r = { 'name' => $bin->{'filename'} };
      $r->{'hdrmd5'} = $bin->{'hdrmd5'} if $bin->{'hdrmd5'};
      $r->{'leadsigmd5'} = $bin->{'leadsigmd5'} if $bin->{'leadsigmd5'};
      my $size = (split('/', $bin->{'id'}))[1];
      $r->{'sizek'} = ($size + 512) >> 10;
      push @res, $r;
    }
    return ({ 'binary' => \@res }, $BSXML::binaryversionlist);
  }
  die("unsupported view '$view'\n") if $view;
  my @res;
  my @bins = grep {$_ ne 'logfile' && $_ ne 'status' && $_ ne 'reason' && $_ ne 'history' && !/^\./} ls("$reporoot/$prp/$arch/$packid");
  @bins = grep {!/^::import::/} @bins if $cgi->{'noimport'};
  @bins = filtersources(@bins) if $cgi->{'nosource'} || -e "$reporoot/$prp/$arch/$packid/.nosourceaccess";
  my %md5sums;
  if ($cgi->{'withmd5'}) {
    if (-s "$reporoot/$prp/$arch/$packid/.checksums") {
      my %chksum;
      local *CS;
      if (open(CS, '<', "$reporoot/$prp/$arch/$packid/.checksums")) {
        while (<CS>) {
          $chksum{$1} = $2 if /^(.{32}) .*md5:(.{32})/;
        }
        close CS;
      }
      if (%chksum) {
        my $bininfo = BSRepServer::read_bininfo("$reporoot/$prp/$arch/$packid");
        for my $fn (sort keys %{$bininfo || {}}) {
          $md5sums{"$fn-".($bininfo->{$fn}->{'id'} || '')} = $chksum{$bininfo->{$fn}->{'leadsigmd5'} || ''};
        }
      }
    }
  }
  for (sort @bins) {
    next if %binaries && !$binaries{$_};
    my @s = stat("$reporoot/$prp/$arch/$packid/$_");
    next unless @s;
    next if -d _;
    my $r = {'filename' => $_, 'size' => $s[7], 'mtime' => $s[9]};
    if ($cgi->{'withmd5'}) {
      $r->{'md5'} = $md5sums{"$_-$s[9]/$s[7]/$s[1]"};
      if (!$r->{'md5'}) {
        my $ctx = Digest::MD5->new;
        local *F;
        if (open(F, '<', "$reporoot/$prp/$arch/$packid/$_")) {
          $ctx->addfile(*F);
          close F;
        }
        $r->{'md5'} = $ctx->hexdigest();
      }
    }
    push @res, $r;
  }
  return ({'binary' => \@res}, $BSXML::binarylist);
}

sub getbuildhistory {
  my ($cgi, $projid, $repoid, $arch, $packid) = @_;
  my @history = BSFileDB::fdb_getall_reverse("$reporoot/$projid/$repoid/$arch/$packid/history", $historylay, $cgi->{'limit'} || 100);
  @history = reverse @history;
  return ({'entry' => \@history}, $BSXML::buildhist);
}

sub getbuildreason {
  my ($cgi, $projid, $repoid, $arch, $packid) = @_;
  my $reason = readxml("$reporoot/$projid/$repoid/$arch/$packid/reason", $BSXML::buildreason, 1);
  $reason ||= {'explain' => 'no reason known'};
  return
($reason, $BSXML::buildreason); } sub getbuildstatus { my ($cgi, $projid, $repoid, $arch, $packid) = @_; my $res = {'package' => $packid}; my $ps = readpackstatus("$projid/$repoid/$arch"); if ($ps) { $ps = { 'status' => $ps->{'packstatus'}->{$packid}, 'error' => $ps->{'packerror'}->{$packid}, }; undef $ps unless $ps->{'status'}; } if ($ps && $ps->{'status'} ne 'failed' && $ps->{'status'} ne 'done' && $ps->{'status'} ne 'scheduled') { $res->{'code'} = $ps->{'status'}; $res->{'details'} = $ps->{'error'} if exists $ps->{'error'}; } else { my $status = readxml("$reporoot/$projid/$repoid/$arch/$packid/status", $BSXML::buildstatus, 1); if (!$status->{'code'}) { $res->{'code'} = $status->{'status'} || 'unknown'; $res->{'details'} = $status->{'error'} if $status->{'error'}; } else { $res->{'code'} = $status->{'code'}; $res->{'details'} = $status->{'details'} if $status->{'details'}; } if ($status->{'job'}) { my $jobstatus = readxml("$jobsdir/$arch/$status->{'job'}:status", $BSXML::jobstatus, 1); if ($jobstatus) { delete $res->{'details'}; $res->{'code'} = $jobstatus->{'code'}; $res->{'details'} = $jobstatus->{'details'} if $jobstatus->{'details'}; if ($jobstatus->{'code'} eq 'building' && $jobstatus->{'workerid'}) { $res->{'details'} = "building on $jobstatus->{'workerid'}"; } } } } return ($res, $BSXML::buildstatus); } sub getjobstatus { my ($cgi, $projid, $repoid, $arch, $packid) = @_; my $status = readxml("$reporoot/$projid/$repoid/$arch/$packid/status", $BSXML::buildstatus, 1); # not even scheduled return ({}, $BSXML::jobstatus) unless $status && $status->{'status'} eq 'scheduled'; my $jobstatus = readxml("$jobsdir/$arch/$status->{'job'}:status", $BSXML::jobstatus, 1); # not yet building return ({}, $BSXML::jobstatus) unless $jobstatus; # find last successful build my $history = BSFileDB::fdb_getlast("$reporoot/$projid/$repoid/$arch/$packid/history", $historylay); my $lastduration; $lastduration = $history->{'duration'} if $history; $jobstatus->{'lastduration'} = $lastduration if $lastduration; return ($jobstatus, $BSXML::jobstatus); } sub getlogfile { my ($cgi, $projid, $repoid, $arch, $packid) = @_; die("unknown view '$cgi->{'view'}'\n") if $cgi->{'view'} && $cgi->{'view'} ne 'entry'; if ($cgi->{'handoff'} && !$BSStdServer::isajax) { my @args = BSRPC::args($cgi, 'nostream', 'start', 'end', 'view'); BSHandoff::handoff("/build/$projid/$repoid/$arch/$packid/_log", undef, @args); } my $status = readxml("$reporoot/$projid/$repoid/$arch/$packid/status", $BSXML::buildstatus, 1); my $jobstatus; if ($status && $status->{'status'} eq 'scheduled') { $jobstatus = readxml("$jobsdir/$arch/$status->{'job'}:status", $BSXML::jobstatus, 1); } if (!$cgi->{'last'} && $jobstatus && $jobstatus->{'code'} && $jobstatus->{'code'} eq 'building' && $jobstatus->{'uri'}) { my @args = BSRPC::args($cgi, 'nostream', 'start', 'end', 'view'); if (!$BSStdServer::isajax && !$cgi->{'view'}) { BSHandoff::handoff("/build/$projid/$repoid/$arch/$packid/_log", undef, @args); } my $param = { 'uri' => "$jobstatus->{'uri'}/logfile", 'joinable' => 1, 'receiver' => \&BSServer::reply_receiver, }; eval { BSWatcher::rpc($param, undef, @args); }; return undef unless $@; my $err = $@; die($err) if $param->{'reply_receiver_called'} || $BSStdServer::isajax; $jobstatus = readxml("$jobsdir/$arch/$status->{'job'}:status", $BSXML::jobstatus, 1); die($err) if $jobstatus && $jobstatus->{'code'} && $jobstatus->{'code'} eq 'building' && $jobstatus->{'uri'}; # no longer building, use local logfile } my $logfile = 
"$reporoot/$projid/$repoid/$arch/$packid/logfile"; if ($jobstatus && $jobstatus->{'code'} && ($jobstatus->{'code'} eq 'finished' || $jobstatus->{'code'} eq 'signing')) { $logfile = "$jobsdir/$arch/$status->{'job'}:dir/logfile"; } my @s = stat($logfile); die("404 package '$packid' has no logfile\n") unless @s; if ($cgi->{'view'} && $cgi->{'view'} eq 'entry') { my $entry = {'name' => '_log', 'size' => $s[7], 'mtime' => $s[9]}; return ({'entry' => [ $entry ]}, $BSXML::dir); } my $start = $cgi->{'start'} || 0; my $end = $cgi->{'end'}; $start = $s[7] + $start if $start < 0; $start = 0 if $start < 0; die("start out of range: $start\n") if $start > $s[7]; $end = $s[7] if !defined($end) || $end > $s[7]; $end = $start if defined($end) && $end < $start; my $len = $end - $start; my $fd = gensym; open($fd, '<', $logfile) || die("$logfile: $!\n"); defined(sysseek($fd, $start, Fcntl::SEEK_SET)) || die("sysseek: $!\n"); BSWatcher::reply_file($fd, 'Content-Type: text/plain', "Content-Length: $len"); close $fd unless $BSStdServer::isajax; return undef; } sub getbinary_info { my ($cgi, $projid, $repoid, $arch, $path) = @_; my @s = stat($path); die("404 $path: $!\n") unless @s; my $res = Build::query($path, 'evra' => 1, 'description' => 1) || {}; if (!%$res && $path =~ /\/updateinfo\.xml$/) { my $updateinfos = readxml($path, $BSXML::updateinfo, 1); if ($updateinfos && @{$updateinfos->{'update'} || []} == 1) { my $updateinfo = $updateinfos->{'update'}->[0]; $res->{'name'} = $updateinfo->{'id'}; $res->{'version'} = $updateinfo->{'version'}; $res->{'summary'} = $updateinfo->{'title'}; $res->{'description'} = $updateinfo->{'description'}; my $collection = ($updateinfo->{'pkglist'} || {})->{'collection'} || []; if (@$collection) { # only look at first collection for my $package (@{$collection->[0]->{'package'} || []}) { my $nevr = $package->{'name'}; $nevr .= ".$package->{'arch'}" if $package->{'arch'}; if ($package->{'version'}) { $nevr .= " = "; $nevr .= "$package->{'epoch'}:" if $package->{'epoch'}; $nevr .= "$package->{'version'}" if $package->{'version'}; $nevr .= "-$package->{'release'}" if defined $package->{'release'}; } push @{$res->{'provides'}}, $nevr; } } } } delete $res->{'hdrmd5'}; $res->{'mtime'} = $s[9]; $res->{'size'} = $s[7]; $res->{'filename'} = $path; $res->{'filename'} =~ s/.*\///; if ($cgi->{'view'} && $cgi->{'view'} eq 'fileinfo_ext') { my $projpack; my $config; if (BSServer::have_content()) { my $projpackxml = BSServer::read_data(10000000); $projpack = BSUtil::fromxml($projpackxml, $BSXML::projpack, 1); $config = ''; } if (!$projpack) { my @args = ("project=$projid", "repository=$repoid", "arch=$arch"); push @args, "partition=$BSConfig::partition" if $BSConfig::partition; $projpack = BSRPC::rpc("$BSConfig::srcserver/getprojpack", $BSXML::projpack, 'withrepos', 'expandedrepos', 'withremotemap', 'nopackages', @args); } die("404 no such project/repository\n") unless $projpack->{'project'}; my $proj = $projpack->{'project'}->[0]; die("404 no such project\n") unless $proj && $proj->{'name'} eq $projid; my $repo = $proj->{'repository'}->[0]; die("404 no such repository\n") unless $repo && $repo->{'name'} eq $repoid; my $bconf; $config = $proj->{'config'} if defined $config; # sent with the content if ($config) { $bconf = Build::read_config($arch, [split("\n", $config)]); $bconf->{'binarytype'} ||= 'UNDEFINED'; } else { $bconf = BSRepServer::getconfig($projid, $repoid, $arch); } my %remotemap = map {$_->{'project'} => $_} @{$projpack->{'remotemap'} || []}; my @prp = map 
{"$_->{'project'}/$_->{'repository'}"} @{$repo->{'path'} || []}; my $pool = BSSolv::pool->new(); $pool->settype('deb') if $bconf->{'binarytype'} eq 'deb'; $pool->settype('arch') if $bconf->{'binarytype'} eq 'arch'; for my $prp (@prp) { my ($rprojid, $rrepoid) = split('/', $prp, 2); my $r; if ($remotemap{$rprojid}) { $r = BSRepServer::addrepo_remote($pool, $prp, $arch, $remotemap{$rprojid}); } else { $r = BSRepServer::addrepo_scan($pool, $prp, $arch); } die("repository $prp not available\n") unless $r; } $pool->createwhatprovides(); my %keep = map {$_ => 1} qw{name epoch version release arch}; for my $prov (@{$res->{'provides'}}) { my $n = {'dep' => $prov}; push @{$res->{'provides_ext'}}, $n; for my $p ($pool->whatrequires($prov)) { my $rd = $pool->pkg2data($p); delete $rd->{$_} for grep {!$keep{$_}} keys %$rd; ($rd->{'project'}, $rd->{'repository'}) = split('/', $pool->pkg2reponame($p), 2); push @{$n->{'requiredby'}}, $rd; } } for my $req (@{$res->{'requires'}}) { my $n = {'dep' => $req}; push @{$res->{'requires_ext'}}, $n; for my $p ($pool->whatprovides($req)) { my $rd = $pool->pkg2data($p); delete $rd->{$_} for grep {!$keep{$_}} keys %$rd; ($rd->{'project'}, $rd->{'repository'}) = split('/', $pool->pkg2reponame($p), 2); push @{$n->{'providedby'}}, $rd; } } } data2utf8xml($res); return ($res, $BSXML::fileinfo); } sub getbinary_repository { my ($cgi, $projid, $repoid, $arch, $bin) = @_; if ($bin eq '_buildconfig') { my $cfg = BSRPC::rpc("$BSConfig::srcserver/getconfig", undef, "project=$projid", "repository=$repoid"); return ($cfg, 'Content-Type: text/plain'); } my $serial; $serial = BSWatcher::serialize("$reporoot/$projid/$repoid/$arch") if $BSStdServer::isajax; return if $BSStdServer::isajax && !defined $serial; my $view = $cgi->{'view'} || ''; my $path = "$reporoot/$projid/$repoid/$arch/:full/$bin"; my $needscan; if (! -f $path) { # return by name my $pool = BSSolv::pool->new(); my $repo = BSRepServer::addrepo_scan($pool, "$projid/$repoid", $arch); my $dodurl = $repo->dodurl(); my %rnames = $repo ? $repo->pkgnames() : (); my $p = $rnames{$bin}; if (!$p && $dodurl && $bin =~ /^(.*)\.($binsufsre)$/ && $rnames{$1}) { # check for future dod package path $p = $rnames{$1}; my $suf = $2; undef $p unless $pool->pkg2pkgid($p) eq 'd0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0' && $pool->pkg2path($p) =~ /\.\Q$suf\E$/; } die("404 no such binary '$bin'\n") unless $p; $path = "$reporoot/".$pool->pkg2fullpath($p, $arch); if ($dodurl && $pool->pkg2pkgid($p) eq 'd0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0') { my @handoff = ("/build/$projid/$repoid/$arch/_repository/$bin", undef, $view ? 
("view=$view") : ()); $path = fetchdodbinary($pool, $repo, $p, $arch, 3, \@handoff); return unless defined $path; $needscan = 1; } undef $repo; undef $pool; die("404 $bin: $!\n") unless -f $path; } BSWatcher::serialize_end($serial) if defined $serial; forwardevent($cgi, 'scanrepo', $projid, undef, $repoid, $arch) if $needscan; return getbinary_info($cgi, $projid, $repoid, $arch, $path) if $view eq 'fileinfo' || $view eq 'fileinfo_ext'; die("unknown view '$view'\n") if $view; my $type = 'application/octet-stream'; $type = 'application/x-rpm' if $path=~ /\.rpm$/; $type = 'application/x-debian-package' if $path=~ /\.deb$/; BSWatcher::reply_file($path, "Content-Type: $type"); return undef; } sub getbinary { my ($cgi, $projid, $repoid, $arch, $packid, $bin) = @_; return getbinary_repository($cgi, $projid, $repoid, $arch, $bin) if $packid eq '_repository'; # small preinstallimage hack, see getpreinstallimageinfos() function if ($bin =~ /^_preinstallimage\.([0-9a-f]{32})$/) { my $path = "$reporoot/$projid/$repoid/$arch/$packid/.preinstallimage.$1"; if (-s $path) { BSServer::reply_file($path, 'Content-Type: application/octet-stream'); return undef; } } my $path = "$reporoot/$projid/$repoid/$arch/$packid/$bin"; if (-e "$reporoot/$projid/$repoid/$arch/$packid/.nosourceaccess") { my @bins = ls("$reporoot/$projid/$repoid/$arch/$packid"); @bins = filtersources(@bins); die("404 $bin: No such file or directory\n") unless grep {$_ eq $bin} @bins; } die("404 $bin: $!\n") unless -f $path; my $view = $cgi->{'view'} || ''; return getbinary_info($cgi, $projid, $repoid, $arch, $path) if $view eq 'fileinfo' || $view eq 'fileinfo_ext'; die("unknown view '$view'\n") if $view; my $type = 'application/octet-stream'; $type = 'application/x-rpm' if $path=~ /\.rpm$/; $type = 'application/x-debian-package' if $path=~ /\.deb$/; BSServer::reply_file($path, "Content-Type: $type"); return undef; } sub isolder { my ($old, $new) = @_; return 0 if $old !~ /\.rpm$/; return 0 unless -e $old; my %qold = Build::Rpm::rpmq($old, qw{VERSION RELEASE EPOCH}); return 0 unless %qold; my %qnew = Build::Rpm::rpmq($new, qw{VERSION RELEASE EPOCH}); return 0 unless %qnew; my $vold = $qold{'VERSION'}->[0]; $vold .= "-$qold{'RELEASE'}->[0]" if $qold{'RELEASE'}; $vold = "$qold{'EPOCH'}->[0]:$vold" if $qold{'EPOCH'}; my $vnew = $qnew{'VERSION'}->[0]; $vnew .= "-$qnew{'RELEASE'}->[0]" if $qnew{'RELEASE'}; $vnew = "$qnew{'EPOCH'}->[0]:$vnew" if $qnew{'EPOCH'}; my $r = Build::Rpm::verscmp($vold, $vnew); # print "isolder $vold $vnew: $r\n"; return $r > 0 ? 
1 : 0;
}

sub putbinary {
  my ($cgi, $projid, $repoid, $arch, $bin) = @_;
  die("file name must end in .deb, .rpm, or .cpio\n") unless $bin =~ /\.(?:$binsufsre|cpio)$/;
  mkdir_p($uploaddir);
  my $tdir = "$reporoot/$projid/$repoid/$arch/:full";
  if ($bin =~ /\.cpio$/) {
    my $fdir = "$uploaddir/$$.dir";
    if (-d $fdir) {
      unlink("$fdir/$_") for ls($fdir);
      rmdir($fdir);
    }
    mkdir_p($fdir);
    # the accept pattern must interpolate $binsufsre; a single-quoted
    # '$binsufsre' would be passed through literally and never match
    my $uploaded = BSServer::read_cpio($fdir, 'accept' => "^.+\\.(?:$binsufsre|iso|meta)\$");
    die("upload error\n") unless $uploaded;
    if ($cgi->{'wipe'}) {
      for (ls($tdir)) {
        unlink("$tdir/$_") || die("unlink $tdir/$_: $!\n");
      }
    }
    my %upfiles = map {$_->{'name'} => 1} @$uploaded;
    mkdir_p($tdir);
    for my $file (@$uploaded) {
      my $fn = $file->{'name'};
      next if $cgi->{'ignoreolder'} && isolder("$tdir/$fn", "$fdir/$fn");
      rename("$fdir/$fn", "$tdir/$fn") || die("rename $fdir/$fn $tdir/$fn: $!\n");
      $fn =~ s/\.(?:$binsufsre|meta)$//;
      unlink("$tdir/$fn.meta") unless $upfiles{"$fn.meta"};
    }
    unlink("$fdir/$_") for ls($fdir);
    rmdir($fdir);
  } else {
    my $fn = "$uploaddir/$$";
    my $tn = "$tdir/$bin";
    die("upload failed\n") unless BSServer::read_file($fn);
    if ($cgi->{'wipe'}) {
      for (ls($tdir)) {
        unlink("$tdir/$_") || die("unlink $tdir/$_: $!\n");
      }
    }
    if ($cgi->{'ignoreolder'} && isolder($tn, $fn)) {
      unlink($fn);
      return $BSStdServer::return_ok;
    }
    mkdir_p($tdir);
    rename($fn, $tn) || die("rename $fn $tn: $!\n");
    if ($tn =~ s/\.(?:$binsufsre)$//) {
      unlink("$tn.meta");
    }
  }
  dirty($projid, $repoid, $arch);
  if (-d "$eventdir/$arch") {
    my $ev = { type => 'scanrepo', 'project' => $projid, 'repository' => $repoid };
    my $evname = "scanrepo:${projid}::$repoid";
    $evname = "scanrepo:::".Digest::MD5::md5_hex($evname) if length($evname) > 200;
    writexml("$eventdir/$arch/.$evname.$$", "$eventdir/$arch/$evname", $ev, $BSXML::event);
    BSUtil::ping("$eventdir/$arch/.ping");
  }
  return $BSStdServer::return_ok;
}

sub delbinary {
  my ($cgi, $projid, $repoid, $arch, $bin) = @_;
  my $tdir = "$reporoot/$projid/$repoid/$arch/:full";
  unlink("$tdir/$bin") || die("404 $projid/$repoid/$arch/$bin: $!\n");
  if ($bin =~ s/\.(?:$binsufsre)$//) {
    unlink("$tdir/$bin.meta");
  }
  dirty($projid, $repoid, $arch);
  if (-d "$eventdir/$arch") {
    my $ev = { type => 'scanrepo', 'project' => $projid, 'repository' => $repoid };
    my $evname = "scanrepo:${projid}::$repoid";
    $evname = "scanrepo:::".Digest::MD5::md5_hex($evname) if length($evname) > 200;
    writexml("$eventdir/$arch/.$evname.$$", "$eventdir/$arch/$evname", $ev, $BSXML::event);
    BSUtil::ping("$eventdir/$arch/.ping");
  }
  return $BSStdServer::return_ok;
}

sub updateworkerdata {
  my ($idlename, $state, $worker) = @_;
  mkdir_p("$workersdir/$state");
  for my $oldstate (qw{building away down dead idle}) {
    next if $state eq $oldstate;
    rename("$workersdir/$oldstate/$idlename", "$workersdir/$state/$idlename") unless $worker;
    unlink("$workersdir/$oldstate/$idlename");
  }
  writexml("$workersdir/$state/.$idlename", "$workersdir/$state/$idlename", $worker, $BSXML::worker) if $worker;
}

sub workerstate {
  my ($cgi, $harch, $peerport, $state) = @_;
  my $peerip = BSServer::getpeerdata();
  die("cannot get your ip address\n") unless $peerip;
  my $workerid = defined($cgi->{'workerid'}) ? $cgi->{'workerid'} : "$peerip:$peerport";
  my $workerskel;
  if (BSServer::have_content()) {
    my $workerskelxml = BSServer::read_data(10000000);
    $workerskel = BSUtil::fromxml($workerskelxml, $BSXML::worker);
    for (qw{job arch}) {
      delete $workerskel->{$_};
    }
    $workerskel->{'hardware'}->{'nativeonly'} = undef if $workerskel->{'hardware'} && exists($workerskel->{'hardware'}->{'nativeonly'});
  }
  my $idlename = "$harch:$workerid";
  $idlename =~ s/\//_/g;
  if ($state eq 'building') {
    updateworkerdata($idlename, 'away');
  } elsif ($state eq 'exit') {
    updateworkerdata($idlename, 'down');
  } elsif ($state eq 'idle') {
    if (-e "$workersdir/building/$idlename") {
      # worker must have crashed, discard old job...
      my $worker = readxml("$workersdir/building/$idlename", $BSXML::worker, 1);
      if ($worker && $worker->{'arch'} && $worker->{'job'} && $worker->{'reposerver'}) {
        # masterdispatched, forward to correct repo server
        eval {
          BSRPC::rpc({ 'uri' => "$worker->{'reposerver'}/jobs/$worker->{'arch'}/$worker->{'job'}", 'request' => 'POST', 'timeout' => 10, }, undef, "cmd=idleworker", "workerid=$workerid");
        };
        warn($@) if $@;
      } elsif ($worker && $worker->{'arch'} && $worker->{'job'}) {
        local *F;
        my $js = BSUtil::lockopenxml(\*F, '<', "$jobsdir/$worker->{'arch'}/$worker->{'job'}:status", $BSXML::jobstatus, 1);
        if ($js) {
          # be extra careful here not to terminate jobs that run on different workers
          if ($js->{'code'} eq 'building' && (!defined($js->{'workerid'}) || $js->{'workerid'} eq $workerid)) {
            print "restarting build of job $worker->{'arch'}/$worker->{'job'}\n";
            unlink("$jobsdir/$worker->{'arch'}/$worker->{'job'}:status");
          }
          close F;
        }
      }
      unlink("$workersdir/building/$idlename");
    }
    my $worker = {
      'hostarch' => $harch,
      'ip' => $peerip,
      'port' => $peerport,
      'workerid' => $workerid,
    };
    $worker = { %$workerskel, %$worker } if $workerskel;
    $worker->{'tellnojob'} = $cgi->{'tellnojob'} if $cgi->{'tellnojob'};
    # make sure that we can connect to the client
    if ($BSConfig::checkclientconnectivity) {
      my $param = {
        'uri' => "http://$peerip:$peerport/status",
        'async' => 1,
        'timeout' => 1,
        'sender' => sub {},
      };
      eval {
        my $ret = BSRPC::rpc($param);
        close($ret->{'socket'});
      };
      if ($@) {
        warn($@);
        updateworkerdata($idlename, 'down', $worker);
        die("cannot reach you!\n");
      }
    }
    if (-d "$workersdir/disable") {
      my @dis = ls("$workersdir/disable");
      for (@dis) {
        next unless $workerid =~ /^$_/;
        print "worker ip $peerip id $workerid is disabled\n";
        updateworkerdata($idlename, 'down', $worker);
        return $BSStdServer::return_ok;
      }
    }
    updateworkerdata($idlename, 'idle', $worker);
  } else {
    die("unknown state: $state\n");
  }
  return $BSStdServer::return_ok;
}

sub workerdispatched {
  my ($cgi, $arch, $job, $jobid) = @_;
  my $peerip = BSServer::getpeerdata();
  my $peerport = $cgi->{'port'};
  my $jobstatus = {
    'code' => 'building',
    'uri' => "http://$peerip:$peerport",
    'starttime' => time(),
    'hostarch' => $cgi->{'hostarch'},
    'jobid' => $jobid,
  };
  $jobstatus->{'workerid'} = $cgi->{'workerid'} if defined $cgi->{'workerid'};
  die("404 no such job\n") unless -e "$jobsdir/$arch/$job";
  if (!BSUtil::lockcreatexml(\*F, "$jobsdir/$arch/.reposerver.$$", "$jobsdir/$arch/$job:status", $jobstatus, $BSXML::jobstatus)) {
    die("job lock failed\n");
  }
  # make sure this is the correct job
  my $infoxml = readstr("$jobsdir/$arch/$job", 1);
  if (!$infoxml || Digest::MD5::md5_hex($infoxml) ne $jobid) {
    unlink("$jobsdir/$arch/$job:status");
    die("wrong job\n");
  }
  close F;
  return $BSStdServer::return_ok;
}

sub getpreinstallimageinfos {
  my ($cgi, $prpas) = @_;
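  # Each preinstall image carries a 512 byte 'bitstring' derived from the
  # binaries it contains; the worker posts the bitstring of the packages it
  # wants preinstalled. An image qualifies if every bit set in the image is
  # also set in the request. Sketch of the test used below, illustration
  # only with hypothetical variable names:
  #
  #   my $usable = ($img_bits & $match_bits) eq $img_bits;  # bitwise AND on strings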
my @infos; my $match = $cgi->{'match'}; if ($match) { if ($match eq 'body') { $match = BSServer::read_data(512, 1); } else { die("match must be 512 byte in hex\n") unless $match =~ /^[0-9a-f]{1024}$/s; $match = pack('H*', $match); } die("bad match\n") unless length($match) == 512; } my $imagescnt = 0; for my $prpa (@$prpas) { my $images = BSRepServer::getpreinstallimages($prpa); next unless $images; $imagescnt += @$images; for my $img (@$images) { # the "&" below is not a numeric/logic "and", but a bitstring operation next if defined($match) && ($img->{'bitstring'} & $match) ne $img->{'bitstring'}; $img->{'prpa'} = $prpa; $img->{'path'} = "$img->{'package'}/_preinstallimage.$img->{'hdrmd5'}"; next unless -s "$reporoot/$prpa/$img->{'package'}/.preinstallimage.$img->{'hdrmd5'}"; delete $img->{'bins'}; # currently not needed push @infos, $img; } } print "- sending data for ".@infos." of $imagescnt images\n"; # send answer as perl storable my $answer = BSUtil::tostorable(\@infos); return ($answer, 'Content-Type: application/octet-stream'); } sub dirty { my ($projid, $repoid, $arch) = @_; die("dirty: need project id\n") unless defined $projid; die("dirty: need arch\n") unless defined $arch; my @repos; if (defined($repoid)) { @repos=($repoid); } else { @repos = ls("$reporoot/$projid"); } for my $r (@repos) { BSUtil::touch("$reporoot/$projid/$r/$arch/:schedulerstate.dirty") if -d "$reporoot/$projid/$r/$arch"; } } sub getschedulerstate { my ($projid, $repoid, $arch) = @_; local *F; my $schedulerstate = readstr("$reporoot/$projid/$repoid/$arch/:schedulerstate", 1) || 'unknown'; chomp $schedulerstate; my $details; ($schedulerstate, $details) = split(' ', $schedulerstate, 2); if ($schedulerstate eq 'finished' && !$details) { return 'finished' if -e "$eventdir/publish/${projid}::$repoid"; return 'publishing' if -e "$eventdir/publish/${projid}::${repoid}::inprogress"; return 'unpublished' if (readstr("$reporoot/$projid/$repoid/$arch/:repodone", 1) || '') =~ /^disabled/; return 'published'; } return ($schedulerstate, $details); } sub workerstatus { my ($cgi) = @_; my %workerstates = ('idle' => []); if (!$cgi->{'daemonsonly'}) { for my $workerstate (qw{idle down dead away}) { my @w; for my $w (ls("$workersdir/$workerstate")) { my $worker = readxml("$workersdir/$workerstate/$w", $BSXML::worker, 1); next unless $worker; push @w, {'hostarch' => $worker->{'hostarch'}, 'uri' => "http://$worker->{'ip'}:$worker->{'port'}", 'workerid' => $worker->{'workerid'}}; } next unless @w; @w = sort {$a->{'workerid'} cmp $b->{'workerid'} || $a->{'uri'} cmp $b->{'uri'} || $a cmp $b} @w; if ($workerstate ne 'idle') { delete $_->{'uri'} for @w; } $workerstates{$workerstate} = \@w; } } my @building; my @waiting; my @blocked; my @buildaverage; my @a; @a = ls($jobsdir) unless $cgi->{'daemonsonly'}; for my $a (@a) { next unless -d "$jobsdir/$a"; my @d = grep {!/^\./ && !/:(?:dir|new|cross)$/} ls("$jobsdir/$a"); @d = sort @d; my %d = map {$_ => 1} @d; for my $d (grep {/:status$/} @d) { delete $d{$d}; $d =~ s/:status$//; next unless $d{$d}; # no buildinfo my $s = readxml("$jobsdir/$a/$d:status", $BSXML::jobstatus, 1); print "bad job, no status: $d\n" unless $s; next unless $s; my $jn = $d; $jn =~ s/-[0-9a-f]{32}$//s; my ($projid, $repoid, $packid) = split('::', $jn); my $info; if (defined($packid)) { # get info from job name like in the dispatcher $info = {'project' => $projid, 'repository' => $repoid, 'package' => $packid, 'arch' => $a}; } else { $info = readxml("$jobsdir/$a/$d", $BSXML::buildinfo, 1); } print "bad job, no info: 
$d\n" unless $info; next unless $info; if ($s->{'code'} ne 'building') { delete $d{$d}; next; } push @building, {'workerid' => $s->{'workerid'}, 'uri' => $s->{'uri'}, 'hostarch' => $s->{'hostarch'}, 'project' => $info->{'project'}, 'repository' => $info->{'repository'}, 'package' => $info->{'package'}, 'arch' => $info->{'arch'}, 'starttime' => $s->{'starttime'}}; delete $d{$d}; } if (!$BSConfig::masterdispatcher || $BSConfig::masterdispatcher eq $BSConfig::reposerver) { push @waiting, {'arch' => $a, 'jobs' => scalar(keys %d)}; } my $si = readxml("$infodir/schedulerinfo.$a", $BSXML::schedulerinfo, 1); if ($si && defined($si->{'notready'})) { push @blocked, {'arch' => $a, 'jobs' => $si->{'notready'}}; } if ($si && defined($si->{'buildavg'})) { push @buildaverage, {'arch' => $a, 'buildavg' => $si->{'buildavg'}}; } } @building = sort {$a->{'workerid'} cmp $b->{'workerid'} || $a->{'uri'} cmp $b->{'uri'} || $a cmp $b} @building; @waiting = sort {$a->{'arch'} cmp $b->{'arch'} || $a cmp $b} @waiting; @blocked = sort {$a->{'arch'} cmp $b->{'arch'} || $a cmp $b} @blocked; @buildaverage = sort {$a->{'arch'} cmp $b->{'arch'} || $a cmp $b} @buildaverage; my %types = map {$_ => 1} @{$cgi->{'type'} || []}; # FIXME: must be able to return multiple partitions my @partitions; my @daemons; my @daemonarchs = grep {s/^bs_sched\.(.*)\.lock$/$1/} sort(ls($rundir)); push @daemonarchs, 'repserver'; push @daemonarchs, 'dispatcher' if -e "$rundir/bs_dispatch.lock"; push @daemonarchs, 'publisher' if -e "$rundir/bs_publish.lock"; push @daemonarchs, 'signer' if -e "$rundir/bs_signer.lock"; push @daemonarchs, 'warden' if -e "$rundir/bs_warden.lock"; push @daemonarchs, 'dodup' if -e "$rundir/bs_dodup.lock"; @daemonarchs = (@{$cgi->{'arch'}}) if $cgi->{'arch'}; for my $arch (@daemonarchs) { local *F; my $daemondata = {'state' => 'dead'}; my $lock; my $state = 'running'; if ($arch eq 'dispatcher') { $lock = "$rundir/bs_dispatch.lock"; $daemondata->{'type'} = 'dispatcher'; } elsif ($arch eq 'publisher') { $lock = "$rundir/bs_publish.lock"; $daemondata->{'type'} = 'publisher'; } elsif ($arch eq 'signer') { $lock = "$rundir/bs_signer.lock"; $daemondata->{'type'} = 'signer'; } elsif ($arch eq 'warden') { $lock = "$rundir/bs_warden.lock"; $daemondata->{'type'} = 'warden'; } elsif ($arch eq 'dodup') { $lock = "$rundir/bs_dodup.lock"; $daemondata->{'type'} = 'dodup'; } elsif ($arch eq 'repserver') { my $req = $BSServer::request; $daemondata->{'type'} = 'repserver'; $daemondata->{'starttime'} = $req->{'server'}->{'starttime'} if $req && $req->{'server'}; if ($req && $req->{'conf'} && $req->{'conf'}->{'handoffpath'}) { $lock = "$req->{'conf'}->{'handoffpath'}.lock"; } $daemondata->{'state'} = 'running' unless $lock; } else { # scheduler $lock = "$rundir/bs_sched.$arch.lock"; $daemondata->{'type'} = 'scheduler'; $daemondata->{'arch'} = $arch; my $si = readxml("$infodir/schedulerinfo.$arch", $BSXML::schedulerinfo, 1); $daemondata->{'queue'} = $si->{'queue'} if $si && $si->{'queue'}; $state = 'booting' if defined($si->{'booting'}); } next if %types && !$types{$daemondata->{'type'}}; if ($lock && open(F, '<', $lock)) { if (!flock(F, LOCK_EX | LOCK_NB)) { my @s = stat(F); $daemondata->{'state'} = $state; $daemondata->{'starttime'} ||= $s[9] if @s; } close F; } push @daemons, $daemondata; } my $partition = {}; $partition->{'name'} = $BSConfig::partition if $BSConfig::partition; $partition->{'daemon'} = \@daemons if @daemons; push @partitions, $partition; my $ret = {'partition' => \@partitions}; if (!$cgi->{'daemonsonly'}) { 
    $ret->{'clients'} = @building + @{$workerstates{'idle'}};
    $ret->{'building'} = \@building;
    $ret->{'waiting'} = \@waiting;
    $ret->{'blocked'} = \@blocked;
    $ret->{'buildavg'} = \@buildaverage;
    $ret->{$_} = $workerstates{$_} for keys %workerstates;
  }
  return ($ret, $BSXML::workerstatus);
}

sub sendbadhostevent {
  my ($info, $idlename, $job) = @_;
  my $ev = {
    'type' => 'badhost',
    'project' => $info->{'project'},
    'package' => $info->{'package'},
    'repository' => $info->{'repository'},
    'arch' => $info->{'arch'},
    'worker' => $idlename,
  };
  $ev->{'job'} = $job if $job;
  my $evname = "badhost::$info->{'project'}::$info->{'package'}::$info->{'arch'}::$idlename";
  $evname = "badhost:::".Digest::MD5::md5_hex($evname) if length($evname) > 200;
  mkdir_p("$eventdir/dispatch");
  writexml("$eventdir/dispatch/.$evname.$$", "$eventdir/dispatch/$evname", $ev, $BSXML::event);
}

sub receivekiwitree_scan {
  my ($buildinfo) = @_;
  print "receivekiwitree_scan start\n";
  my %res;
  my %prpas;
  for my $dep (@{$buildinfo->{'bdep'} || []}) {
    next unless defined $dep->{'package'};
    my $repoarch = $dep->{'repoarch'} || $buildinfo->{'arch'};
    next if $repoarch eq 'src';
    $prpas{"$dep->{'project'}/$dep->{'repository'}/$repoarch"}->{$dep->{'package'}} = 1;
  }
  for my $prpa (sort keys %prpas) {
    my $gbininfo = BSRepServer::read_gbininfo("$reporoot/$prpa") || {};
    for my $packid (sort keys %{$prpas{$prpa}}) {
      my $bininfo = $gbininfo->{$packid} || BSRepServer::read_bininfo("$reporoot/$prpa/$packid");
      next unless $bininfo;
      filtersources_bininfo($bininfo) if $bininfo->{'.nosourceaccess'};
      for my $bin (values %$bininfo) {
        $res{$bin->{'leadsigmd5'}} = "$prpa/$packid/$bin->{'filename'}" if $bin->{'leadsigmd5'};
      }
    }
  }
  print "receivekiwitree_scan end\n";
  return \%res;
}

sub receivekiwitree {
  my ($info, $js, $dir) = @_;
  print "receivekiwitree start\n";
  local *F;
  open(F, '<', "$dir/.kiwitree") || die("$dir/.kiwitree: $!\n");
  unlink("$dir/.kiwitree");
  my %todo;
  my %done;
  my $leads;
  my @tosign;
  my $nlinked = 0;
  while (1) {
    my $line = <F>;
    last unless defined $line;
    chomp $line;
    die("bad line: '$line'\n") unless $line =~ /^([fdl]) ([^ ]+)(?: ([^ ]+))?$/;
    my ($type, $file, $extra) = ($1, $2, $3);
    $file =~ s/%([a-fA-F0-9]{2})/chr(hex($1))/ge;
    die("bad file '$file' (contains \\0)\n") if $file =~ /\0/s;
    die("already processed: $file\n") if $done{$file};
    die("bad file '$file'\n") if "/$file/" =~ /\/\.{0,2}\//s;
    if ($file =~ /^(.*)\//s) {
      die("file without directory\n") unless $done{$1} && $done{$1} eq 'd';
    }
    if ($type eq 'd') {
      mkdir("$dir/$file") || die("mkdir $dir/$file: $!\n");
    } elsif ($type eq 'l') {
      $extra =~ s/%([a-fA-F0-9]{2})/chr(hex($1))/ge;
      die("bad symlink: $extra (contains \\0)\n") if $extra =~ /\0/s;
      die("bad symlink\n") if "/$extra/" =~ /\/\.?\//;
      if ("/$extra/" =~ /^((?:\/\.\.)+)\/(.*?)$/s) {
        my ($head, $tail) = ($1, $2);
        die("bad upref in symlink\n") if "/$tail/" =~ /\/\.\.\//;
        die("bad upref in symlink\n") if ($head =~ y!/!!)
> ($file =~ y!/!!); } else { die("bad upref in symlink\n") if "/$extra/" =~ /\/\.\.\//; } symlink($extra, "$dir/$file") || die("symlink $extra $dir/$file: $!\n"); } else { my $found; if ($extra) { die("extra is not a md5 sum\n") unless $extra =~ /^[0-9a-f]{32}$/s; $leads ||= receivekiwitree_scan($info); if ($leads->{$extra} && link("$reporoot/$leads->{$extra}", "$dir/$file")) { # make sure it's really the correct file my $leadsigmd5; eval { Build::queryhdrmd5("$dir/$file", \$leadsigmd5); }; if ($@ || !$leadsigmd5 || $leadsigmd5 ne $extra) { unlink("$dir/$file"); } else { $nlinked++; $found = 1; } } } elsif ($file =~ /\.(?:asc|key)$/s) { push @tosign, $file; } $todo{$file} = 1 unless $found; } $done{$file} = $type; } print "receivekiwitree: linked $nlinked files\n"; if (%todo) { print "receivekiwitree: fetching ".(keys %todo)." files\n"; my $param = { 'uri' => "$js->{'uri'}/kiwitree", 'request' => 'POST', 'formurlencode' => 1, 'directory' => $dir, 'timeout' => 600, 'acceptsubdirs' => 1, 'accept' => sub {$todo{$_[1]}}, 'receiver' => \&BSHTTP::cpio_receiver, }; my $res = BSRPC::rpc($param, undef, map {"file=$_"} sort keys %todo); die("kiwitree rpc failed\n") unless $res; for (@$res) { delete $todo{$_->{'name'}}; } my @missing = sort keys %todo; die("could not fetch: @missing\n") if @missing; } return \@tosign; } sub notify_jobresult { my ($info, $jobstatus, $prpa) = @_; # create notification info my %ninfo; for (qw{project package repository arch rev srcmd5 verifymd5 readytime reason versrel bcnt release}) { $ninfo{$_} = $info->{$_} if defined $info->{$_}; } $ninfo{'starttime'} = $jobstatus->{'starttime'}; $ninfo{'endtime'} = $jobstatus->{'endtime'}; $ninfo{'workerid'} = $jobstatus->{'workerid'}; $ninfo{'previouslyfailed'} = 1 if -e "$reporoot/$prpa/:logfiles.fail/$info->{'package'}"; if ($jobstatus->{'result'} eq 'unchanged') { BSNotify::notify('BUILD_UNCHANGED', \%ninfo); } elsif ($jobstatus->{'result'} eq 'succeeded') { BSNotify::notify('BUILD_SUCCESS', \%ninfo); } else { BSNotify::notify('BUILD_FAIL', \%ninfo); } } sub putjob { my ($cgi, $arch, $job, $jobid) = @_; local *F; die("no such job\n") unless -e "$jobsdir/$arch/$job"; die("job is not building\n") unless -e "$jobsdir/$arch/$job:status"; my $oldjobstatus = BSUtil::lockopenxml(\*F, '<', "$jobsdir/$arch/$job:status", $BSXML::jobstatus); die("different jobid\n") if $oldjobstatus->{'jobid'} ne $jobid; die("job is not building\n") if $oldjobstatus->{'code'} ne 'building'; die("job is building on a different worker: $cgi->{'workerid'} -- $oldjobstatus->{'workerid'}\n") if $cgi->{'workerid'} && $oldjobstatus->{'workerid'} && $cgi->{'workerid'} ne $oldjobstatus->{'workerid'}; if (defined($BSConfig::putjob_verify_peerip) && $BSConfig::putjob_verify_peerip) { my $peerip = BSServer::getpeerdata(); my $uri = $oldjobstatus->{'uri'}; $uri =~ s/.*\///s; $uri =~ s/:[\d]+$//s; die("job was dispatched to a different peer\n") unless $peerip eq $uri; } my $infoxml = readstr("$jobsdir/$arch/$job"); my $infoxmlmd5 = Digest::MD5::md5_hex($infoxml); die("job info does not match\n") if $infoxmlmd5 ne $jobid; my $info = readxml("$jobsdir/$arch/$job", $BSXML::buildinfo); my $projid = $info->{'project'} || $info->{'path'}->[0]->{'project'}; my $repoid = $info->{'repository'} || $info->{'path'}->[0]->{'repository'}; my $now = time(); my $idlename = "$oldjobstatus->{'hostarch'}:$oldjobstatus->{'workerid'}"; $idlename =~ s/\//_/g; if (!($BSConfig::masterdispatcher && $BSConfig::masterdispatcher ne $BSConfig::reposerver)) { print "oops, we are not building 
($idlename)?\n" unless -e "$workersdir/building/$idlename"; unlink("$workersdir/building/$idlename"); } if ($cgi->{'code'} && $cgi->{'code'} eq 'badhost') { # turned out that this host couldn't build the job # rebuild on some other sendbadhostevent($info, $idlename, $job); unlink("$jobsdir/$arch/$job:status"); close(F); return $BSStdServer::return_ok; } # check if worker time is "good enough" if ($cgi->{'now'} && ($cgi->{'now'} > $now + 3600 || $cgi->{'now'} < $now - 3600)) { sendbadhostevent($info, $idlename); unlink("$jobsdir/$arch/$job:status"); close(F); die("time mismatch\n"); } # now release lock and fetch everything close F; my $dir = "$jobsdir/$arch/$job:dir"; my $tmpdir = "$jobsdir/$arch/.putjob.$$"; if (-e $tmpdir) { BSUtil::cleandir($tmpdir); rmdir($tmpdir); unlink($tmpdir); die("$tmpdir: can't remove\n") if -e $tmpdir; } mkdir_p($tmpdir); my $uploaded = BSServer::read_cpio($tmpdir); # make sure the meta file is well-formed if (-f "$tmpdir/meta") { local *F; eval { open (F, '<', "$tmpdir/meta") || die("$tmpdir/meta: $!\n"); die("empty meta file\n") unless -s F; while () { chomp; die("bad meta line: $_\n") unless /^[0-9a-f]{32} .+/s; } }; if ($@) { my $err = $@; unlink("$jobsdir/$arch/$job:status"); sendbadhostevent($info, $idlename); BSUtil::cleandir($tmpdir); rmdir($tmpdir); die($err); } } # now get the lock again my $jobstatus; my $kiwitree_tosign; eval { $kiwitree_tosign = receivekiwitree($info, $oldjobstatus, $tmpdir) if $cgi->{'kiwitree'}; die("no such job\n") unless -e "$jobsdir/$arch/$job"; die("job is not building\n") unless -e "$jobsdir/$arch/$job:status"; $jobstatus = BSUtil::lockopenxml(\*F, '<', "$jobsdir/$arch/$job:status", $BSXML::jobstatus); die("different jobid\n") if $jobstatus->{'jobid'} ne $jobid; die("job is not building\n") if $jobstatus->{'code'} ne 'building'; die("job is building on a different worker\n") if $jobstatus->{'workerid'} ne $oldjobstatus->{'workerid'} || $jobstatus->{'starttime'} ne $oldjobstatus->{'starttime'}; die("job contains an illegal file\n") if grep {$_->{'name'} =~ /\.obsbinlnk$/s} @$uploaded; if (!@$uploaded && -e $dir) { # local image building hack rmdir($tmpdir); } else { if (-e $dir) { BSUtil::cleandir($dir); rmdir($dir); } rename($tmpdir, $dir) || die("rename $tmpdir $dir: $!\n"); } }; if ($@) { my $err = $@; BSUtil::cleandir($tmpdir); rmdir($tmpdir); die($err); } $jobstatus->{'code'} = 'finished'; $jobstatus->{'endtime'} = $now; $jobstatus->{'result'} = 'failed'; # upload is empty for local image building if (!@$uploaded) { $jobstatus->{'result'} = $cgi->{'code'} || 'succeeded'; } # usual build should have uploaded content. 
for my $file (@$uploaded) { next if $file->{'name'} eq 'meta' || $file->{'name'} eq 'logfile'; $jobstatus->{'result'} = 'succeeded'; last; } $jobstatus->{'result'} = 'unchanged' if $cgi->{'code'} && $cgi->{'code'} eq 'unchanged'; notify_jobresult($info, $jobstatus, "$projid/$repoid/$arch"); my $bininfo = {}; # create obsbinlnk file for kiwi docker results if (grep {$_->{'name'} =~ /\.containerinfo$/} @$uploaded) { for my $file (@$uploaded) { my $prefix = $file->{'name'}; next unless $prefix =~ s/\.containerinfo$//; my $obsbinlink = BSRepServer::Containerinfo::containerinfo2obsbinlnk($dir, "$prefix.containerinfo", $info->{'package'}); next unless $obsbinlink; BSUtil::store("$dir/$prefix.obsbinlnk", undef, $obsbinlink); my @s = stat("$dir/$prefix.obsbinlnk"); next unless @s; my $data = { %$obsbinlink, 'filename' => "$prefix.obsbinlnk", 'id' => "$s[9]/$s[7]/$s[1]" }; delete $data->{'path'}; $bininfo->{$data->{'filename'}} = $data; } } # calculate binary info to speed up scheduler for my $file (@$uploaded) { my @s = stat("$dir/$file->{'name'}"); next unless @s; my $id = "$s[9]/$s[7]/$s[1]"; my $data; if ($file->{'name'} !~ /\.(?:$binsufsre)$/) { if ($file->{'name'} =~ /^.*[-.]appdata.xml$/) { # used in product building, store md5sum local *F; open(F, '<', "$dir/$file->{'name'}"); @s = stat(F); next unless @s; $id = "$s[9]/$s[7]/$s[1]"; my $ctx = Digest::MD5->new; $ctx->addfile(*F); close F; $data = {'md5sum' => $ctx->hexdigest(), 'filename' => $file->{'name'}, 'id' => $id }; $bininfo->{$file->{'name'}} = $data; } next; } eval { my $leadsigmd5; die("has no hdrmd5\n") unless Build::queryhdrmd5("$dir/$file->{'name'}", \$leadsigmd5); $data = Build::query("$dir/$file->{'name'}", 'evra' => 1); die("query failed\n") unless $data; BSVerify::verify_nevraquery($data); $data->{'leadsigmd5'} = $leadsigmd5 if $leadsigmd5; }; $data->{'filename'} = $file->{'name'}; $data->{'id'} = $id; if ($@) { BSUtil::appendstr("$dir/logfile", "$file->{'name'}: $@"); unlink("$dir/$file->{'name'}"); $uploaded = [ grep {$_->{'name'} ne $file->{'name'}} @$uploaded ]; $jobstatus->{'result'} = 'failed'; next; } $bininfo->{$file->{'name'}} = $data; } $bininfo->{'.bininfo'} = {}; BSUtil::store("$dir/.bininfo", undef, $bininfo); # write build stats for dispatcher my @l = ($projid, $repoid, $arch, $info->{'package'}, $jobstatus->{'starttime'}, $jobstatus->{'endtime'}, $jobstatus->{'result'}, $jobstatus->{'workerid'}, $jobstatus->{'hostarch'}); s/([\000-\037%|=\177-\237])/sprintf("%%%02X", ord($1))/ge for @l; BSUtil::appendstr("$jobsdir/finished", join('|', @l)."\n"); my $ev = {'type' => 'built', 'arch' => $arch, 'job' => $job}; if ($BSConfig::sign && (@{$kiwitree_tosign || []} || grep {$_->{'name'} =~ /\.(?:d?rpm|sha256|iso|pkg\.tar\.gz|pkg\.tar.xz|AppImage|deb)$/} @$uploaded)) { # write jobstatus and free lock if (@{$kiwitree_tosign || []}) { my $c = ''; $c .= BSRPC::urlencode($_)."\n" for @$kiwitree_tosign; writestr("$dir/.kiwitree_tosign", undef, $c); } else { unlink("$dir/.kiwitree_tosign"); } $jobstatus->{'code'} = 'signing'; writexml("$jobsdir/$arch/.$job:status", "$jobsdir/$arch/$job:status", $jobstatus, $BSXML::jobstatus); close F; mkdir_p("$eventdir/signer"); writexml("$eventdir/signer/.finished:$arch:$job$$", "$eventdir/signer/finished:$arch:$job", $ev, $BSXML::event); BSUtil::ping("$eventdir/signer/.ping"); } else { # write jobstatus and free lock $jobstatus->{'code'} = 'finished'; writexml("$jobsdir/$arch/.$job:status", "$jobsdir/$arch/$job:status", $jobstatus, $BSXML::jobstatus); close F; dirty($projid, $repoid, 
$arch); mkdir_p("$eventdir/$arch"); writexml("$eventdir/$arch/.finished:$job$$", "$eventdir/$arch/finished:$job", $ev, $BSXML::event); BSUtil::ping("$eventdir/$arch/.ping"); } return $BSStdServer::return_ok; } sub getjobdata { my ($cgi, $arch, $job, $jobid) = @_; local *F; die("no such job\n") unless -e "$jobsdir/$arch/$job"; die("job is not building\n") unless -e "$jobsdir/$arch/$job:status"; my $jobstatus = BSUtil::lockopenxml(\*F, '<', "$jobsdir/$arch/$job:status", $BSXML::jobstatus); die("different jobid\n") if $jobstatus->{'jobid'} ne $jobid; die("job is not building\n") if $jobstatus->{'code'} ne 'building'; my $dir = "$jobsdir/$arch/$job:dir"; die("job has no jobdata\n") unless -d $dir; my @send; for my $file (grep {!/^\./} ls($dir)) { next unless -f "$dir/$file"; push @send, {'name' => "$file", 'filename' => "$dir/$file"}; } close F; # XXX: too early? BSServer::reply_cpio(\@send); return undef; } sub moveproject { my ($cgi, $projid) = @_; my $oprojid = $cgi->{'oproject'}; return $BSStdServer::return_ok if $oprojid eq $projid; return $BSStdServer::return_ok unless -d "$reporoot/$oprojid"; # FIXME: this is only save when scheduler are stopped. let them doing this ... rename("$reporoot/$oprojid", "$reporoot/$projid"); return $BSStdServer::return_ok; } sub copybuild { my ($cgi, $projid, $repoid, $arch, $packid) = @_; my $oprojid = defined($cgi->{'oproject'}) ? $cgi->{'oproject'} : $projid; my $orepoid = defined($cgi->{'orepository'}) ? $cgi->{'orepository'} : $repoid; my $opackid = defined($cgi->{'opackage'}) ? $cgi->{'opackage'} : $packid; return $BSStdServer::return_ok if $oprojid eq $projid && $orepoid eq $repoid && $opackid eq $packid; return $BSStdServer::return_ok unless -d "$reporoot/$oprojid/$orepoid/$arch/$opackid"; my $job = "copy-".Digest::MD5::md5_hex("$$/$projid/$repoid/$arch/$packid".time()); local *F; my $jobstatus = { 'code' => 'finished', }; mkdir_p("$jobsdir/$arch") unless -d "$jobsdir/$arch"; if (!BSUtil::lockcreatexml(\*F, "$jobsdir/$arch/.$job:status", "$jobsdir/$arch/$job:status", $jobstatus, $BSXML::jobstatus)) { die("job lock failed\n"); } my $dir = "$jobsdir/$arch/$job:dir"; my $ogdst = "$reporoot/$oprojid/$orepoid/$arch"; my $odir = "$ogdst/$opackid"; mkdir_p($dir); my %delayed_linking; my $needsign; my %renamed; for my $bin (grep {$_ ne 'status' && $_ ne 'reason' && $_ ne 'history' && $_ ne 'meta' && !/^\./} sort(ls($odir))) { if ($bin eq "updateinfo.xml" && $cgi->{'setupdateinfoid'}) { my $updateinfo = readxml("$odir/$bin", $BSXML::updateinfo); for (@{$updateinfo->{'update'} || []}) { $_->{'id'} = $cgi->{'setupdateinfoid'}; $_->{'issued'} = { 'date' => time() } if $_->{'issued'}; } writexml("$dir/$bin", undef, $updateinfo, $BSXML::updateinfo); } else { next if $bin =~ /^::import::/; # can't copy those yet $needsign = 1 if $bin =~ /\.(?:d?rpm|sha256|iso)$/; my $nbin = $bin; my $setrelease = $cgi->{'setrelease'}; # directories are stripped of the build/release number by default if (!defined($setrelease)) { $setrelease = '' if -d "$odir/$bin"; $setrelease = '' if $bin =~ /^(.*)\.report$/ && -d "$odir/$1"; # need to keep report in sync with dir } if (defined($setrelease)) { $setrelease =~ s/^-?/-/; # "-" will drop the release tag $setrelease =~ s/-?$//; # drop leading "-", it depends on the format $nbin =~ s/-([^-]+)(-Media(?:\d?)(?:\..*?)?)$/$setrelease$2/; # kiwi product builds $nbin =~ s/-([^-.]+).([^.]*.rpm)$/$setrelease.$2/; # rpms } $renamed{$bin} = $nbin if $bin ne $nbin; if (-d "$odir/$bin") { $delayed_linking{"$dir/$nbin"} = "$odir/$bin"; } elsif ($bin 
=~ /\.containerinfo$/) { # update file path in containerinfo my $containerinfo = readstr("$odir/$bin"); my $from = $bin; my $to = $nbin; $from =~ s/\.containerinfo$//; $to =~ s/\.containerinfo$//; # the hacky way to change json $containerinfo =~ s/(\"file\": [^\n]*)\Q$from\E/$1$to/s; writestr("$dir/$nbin", undef, $containerinfo); } elsif ($bin =~ /\.obsbinlnk$/) { my $obsbinlnk = BSUtil::retrieve("$odir/$bin"); my $from = $bin; my $to = $nbin; $from =~ s/\.obsbinlnk$//; $to =~ s/\.obsbinlnk$//; $obsbinlnk->{'path'} =~ s/.*\///; $obsbinlnk->{'path'} =~ s/\Q$from\E/$to/; $obsbinlnk->{'path'} = "../$packid/$obsbinlnk->{'path'}"; BSUtil::store("$dir/$nbin", undef, $obsbinlnk); } else { # patch in new file name if we renamed files if (%renamed && $bin =~ /\.sha256$/ && (((-s "$odir/$bin") || 0) <= 65536)) { my $shafile = readstr("$odir/$bin"); if ($shafile =~ /-----BEGIN PGP SIGNED MESSAGE-----\n/s) { # de-pgp $shafile =~ s/.*-----BEGIN PGP SIGNED MESSAGE-----//s; $shafile =~ s/.*?\n\n//s; $shafile =~ s/-----BEGIN PGP SIGNATURE-----.*//s; } my $writeit; for (sort keys %renamed) { $writeit = 1 if $shafile =~ s/([ \/])\Q$_\E\n/$1$renamed{$_}\n/g; } if ($writeit) { writestr("$dir/$nbin", undef, $shafile); next; } } link("$odir/$bin", "$dir/$nbin") || die("link $odir/$bin $dir/$nbin: $!\n"); } } } link("$odir/.meta.success", "$dir/.meta.success") if -e "$odir/.meta.success"; link("$ogdst/:meta/$opackid", "$dir/meta") if -e "$ogdst/:meta/$opackid"; link("$ogdst/:logfiles.success/$opackid", "$dir/.logfile.success"); link("$ogdst/:logfiles.fail/$opackid", "$dir/.logfile.fail"); BSUtil::touch("$dir/.preinstallimage") if -e "$odir/.preinstallimage"; # we run the linking of directory trees in background, since it can take a long time # for simple files it happened already if (%delayed_linking) { my $pid = xfork(); return $BSStdServer::return_ok if $pid; for (sort(keys %delayed_linking)) { BSUtil::linktree($delayed_linking{$_}, $_); } } # and emit signals to signer or scheduler my $info = { 'project' => $projid, 'repository' => $repoid, 'package' => $packid, 'arch' => $arch, 'job' => $job, 'file' => '_aggregate', # HACK: makes signer remove old signatures }; writexml("$jobsdir/$arch/.$job", "$jobsdir/$arch/$job", $info, $BSXML::buildinfo); my $ev = {'type' => 'uploadbuild', 'arch' => $arch, 'job' => $job}; if ($BSConfig::sign && $cgi->{'resign'} && $needsign) { $jobstatus->{'code'} = 'signing'; writexml("$jobsdir/$arch/.$job:status", "$jobsdir/$arch/$job:status", $jobstatus, $BSXML::jobstatus); $arch = 'signer'; } close F; dirty($projid, $repoid, $arch) if $arch ne 'signer'; mkdir_p("$eventdir/$arch"); writexml("$eventdir/$arch/.copybuild:$job$$", "$eventdir/$arch/copybuild:$job", $ev, $BSXML::event); BSUtil::ping("$eventdir/$arch/.ping"); return $BSStdServer::return_ok; } sub uploadbuild { my ($cgi, $projid, $repoid, $arch, $packid) = @_; my $job = "upload-".Digest::MD5::md5_hex("$$/$projid/$repoid/$arch/$packid".time()); local *F; my $jobstatus = { 'code' => 'finished', }; mkdir_p("$jobsdir/$arch") unless -d "$jobsdir/$arch"; if (!BSUtil::lockcreatexml(\*F, "$jobsdir/$arch/.$job:status", "$jobsdir/$arch/$job:status", $jobstatus, $BSXML::jobstatus)) { die("job lock failed\n"); } my $dir = "$jobsdir/$arch/$job:dir"; mkdir_p($dir); my $uploaded = BSServer::read_cpio($dir); if (!$uploaded || !@$uploaded) { rmdir($dir); unlink("$jobsdir/$arch/$job:status"); close F; die("upload failed\n"); } my $info = { 'project' => $projid, 'repository' => $repoid, 'package' => $packid, 'arch' => $arch, 'job' => $job, }; 
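# Hand the freshly created upload job over to the scheduler: write the
# buildinfo stub, mark the prp dirty and drop an 'uploadbuild' event into
# the architecture's event directory, then wake the scheduler via its
# .ping file.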
writexml("$jobsdir/$arch/.$job", "$jobsdir/$arch/$job", $info, $BSXML::buildinfo); dirty($projid, $repoid, $arch); mkdir_p("$eventdir/$arch"); my $ev = {'type' => 'uploadbuild', 'job' => $job}; writexml("$eventdir/$arch/.uploadbuild:$job$$", "$eventdir/$arch/uploadbuild:$job", $ev, $BSXML::event); BSUtil::ping("$eventdir/$arch/.ping"); return $BSStdServer::return_ok; } sub forwardevent { my ($cgi, $type, $projid, $packid, $repoid, $arch) = @_; my $ev = { type => $type }; $ev->{'project'} = $projid unless $type eq 'configuration'; my $job; my $worker; if ($type eq 'badhost') { $repoid = $cgi->{'repository'} if exists $cgi->{'repository'}; $arch = $cgi->{'arch'} if exists $cgi->{'arch'}; $worker = $cgi->{'worker'} if exists $cgi->{'worker'}; $job = $cgi->{'job'} if exists $cgi->{'job'}; } $job = $cgi->{'job'} if exists($cgi->{'job'}) && $type eq 'suspendproject' || $type eq 'resumeproject'; # hack: mis-use job to transfer wipe target if ($type eq 'wipe' && $cgi->{'wipe'}) { $job = join(',', @{$cgi->{'wipe'}}); } my $evname = "$type:$projid"; $ev->{'package'} = $packid if defined $packid; $evname .= "::$packid" if defined $packid; $ev->{'repository'} = $repoid if defined $repoid; $evname .= "::$repoid" if defined $repoid; $ev->{'arch'} = $arch if defined $arch; $evname .= "::$arch" if defined $arch; $ev->{'worker'} = $worker if defined $worker; $evname .= "::$worker" if defined $worker; $ev->{'job'} = $job if defined $job; $evname .= "::$job" if defined $job; $evname = "${type}:::".Digest::MD5::md5_hex($evname) if length($evname) > 200; $arch = 'dispatch' if $type eq 'badhost'; $arch = 'publish' if $type eq 'publish'; mkdir_p("$eventdir/$arch") if $arch; if ($arch) { dirty($projid, $repoid, $arch) unless !defined($repoid) || $arch eq 'dispatch' || $arch eq 'publish'; writexml("$eventdir/$arch/.$evname.$$", "$eventdir/$arch/$evname", $ev, $BSXML::event); BSUtil::ping("$eventdir/$arch/.ping"); } else { BSConfiguration::check_configuration_once(); my @archs = @{$BSConfig::schedulerarchs || []}; if (!$BSConfig::schedulerarchs) { # unconfigured, fallback to all existing directories for my $a (ls($eventdir)) { next if $a =~ /^\./; next if $a eq 'publish' || $a eq 'repository' || $a eq 'watch' || $a eq 'signer' || $a eq 'dispatch' || $a eq 'service' || $a eq 'deltastore'; push @archs, $a if -d "$eventdir/$a"; } } for my $a (@archs) { eval { mkdir_p("$eventdir/$a"); dirty($projid, $repoid, $a) if defined $repoid; writexml("$eventdir/$a/.$evname.$$", "$eventdir/$a/$evname", $ev, $BSXML::event); BSUtil::ping("$eventdir/$a/.ping"); }; warn($@) if $@; } } return $BSStdServer::return_ok; } # done -> failed|succeeded # scheduled -> scheduled|dispatching|building|finished|signing + packerror # if codefilter is set, packages with a not-matching code will not get fixed sub fixpackstatus { my ($prpa, $ps, $buildingjobs, $codefilter) = @_; return unless $ps && $ps->{'packstatus'}; my $packstatus = $ps->{'packstatus'}; $buildingjobs ||= {}; my ($prp, $arch) = $prpa =~ /(.*)\/([^\/]*)$/; my $num = keys %$packstatus; my $logfiles_fail; my $needjob = 1; if ($codefilter) { $needjob = 0 if $codefilter->{'dontmapscheduled'}; my %cf = %$codefilter; delete $cf{$_} for qw{unresolvable succeeded failed}; $needjob = 0 unless %cf; } for my $packid (keys %$packstatus) { $packstatus->{$packid} ||= 'unknown'; # For old :packstatus files (before 2.0) if ($packstatus->{$packid} eq 'expansion error') { $packstatus->{$packid} = 'unresolvable'; } elsif ($packstatus->{$packid} eq 'done') { next if $codefilter && 
!$codefilter->{'failed'} && !$codefilter->{'succeeded'}; if ($num > 10) { $logfiles_fail ||= { map {$_ => 1} ls("$reporoot/$prpa/:logfiles.fail") }; $packstatus->{$packid} = $logfiles_fail->{$packid} ? 'failed' : 'succeeded'; } else { if (-e "$reporoot/$prpa/:logfiles.fail/$packid") { $packstatus->{$packid} = 'failed'; } else { $packstatus->{$packid} = 'succeeded'; } } } elsif ($packstatus->{$packid} eq 'scheduled') { next unless $needjob; if (!$buildingjobs->{$arch}) { my $ba = {}; for (grep {s/\:status$//} ls("$jobsdir/$arch")) { if (/^(.*)-[0-9a-f]{32}$/s) { $ba->{$1} = $_; } else { $ba->{$_} = $_; } } $buildingjobs->{$arch} = $ba; } my $job = jobname($prp, $packid); $job = $buildingjobs->{$arch}->{$job}; if ($job) { my $js = readxml("$jobsdir/$arch/$job:status", $BSXML::jobstatus, 1); if ($js) { $packstatus->{$packid} = $js->{'code'}; $ps->{'packerror'}->{$packid} = $js->{'details'} if $js->{'details'}; $ps->{'packerror'}->{$packid} = "building on $js->{'workerid'}" if $js->{'code'} eq 'building'; } } } } } sub getresult { my ($cgi, $prpas) = @_; if ($cgi->{'oldstate'} && $BSStdServer::isajax) { for my $prpa (@$prpas) { BSWatcher::addfilewatcher("$reporoot/$prpa/:packstatus"); } } my $r = []; my $state = ''; my %packfilter = map {$_ => 1} @{$cgi->{'package'} || []}; my %code = map {$_ => 1} @{$cgi->{'code'} || []}; my %buildingjobs; my %lastpublished; for my $prpa (@$prpas) { my %sum; my ($projid, $repoid, $arch) = split('/', $prpa, 3); $state .= "$prpa\0\0"; my $ps = readpackstatus($prpa); $ps ||= {'packstatus' => {}, 'packerror' => {}}; if (%packfilter) { for (keys %{$ps->{'packstatus'} || {}}) { delete $ps->{'packstatus'}->{$_} unless $packfilter{$_}; } for (keys %packfilter) { $ps->{'packstatus'}->{$_} ||= 'unknown'; } } my ($schedulerstate, $schedulerdetails) = getschedulerstate($projid, $repoid, $arch); my $sl = {'project' => $projid, 'repository' => $repoid, 'arch' => $arch, 'code' => $schedulerstate, 'state' => $schedulerstate }; $sl->{'details'} = $schedulerdetails if defined $schedulerdetails; $sl->{'dirty'} = 'true' if -e "$reporoot/$prpa/:schedulerstate.dirty"; $sl->{'dirty'} = 'true' if $schedulerstate eq 'scheduling'; # flag already removed, but new state not yet written $state .= "$schedulerstate\0\0"; fixpackstatus($prpa, $ps, \%buildingjobs, %code ?
\%code : undef) unless $cgi->{'lastbuild'}; for my $packid (sort(keys %{$ps->{'packstatus'} || {}})) { my $code = $ps->{'packstatus'}->{$packid}; if ($cgi->{'lastbuild'}) { if (-e "$reporoot/$prpa/:logfiles.fail/$packid") { $code = 'failed'; } elsif (-e "$reporoot/$prpa/:logfiles.success/$packid") { $code = 'succeeded'; } else { $code = 'unknown'; } } next if %code && !$code{$code}; $state .= "$packid\0$code\0"; if ($cgi->{'summary'}) { $sum{$code} = ($sum{$code} || 0) + 1; } else { my $s = {'package' => $packid, 'code' => $code}; $s->{'details'} = $ps->{'packerror'}->{$packid} if !$cgi->{'lastbuild'} && $ps->{'packerror'}->{$packid}; if ($cgi->{'withversrel'} && -e "$reporoot/$prpa/:logfiles.success/$packid") { my $history = BSFileDB::fdb_getlast("$reporoot/$prpa/$packid/history", $historylay) || {}; $s->{'versrel'} = $history->{'versrel'} if $history->{'versrel'}; } push @{$sl->{'status'}}, $s; } if ($cgi->{'withbinarylist'}) { my @b; for (sort(ls("$reporoot/$prpa/$packid"))) { next if $_ eq 'logfile' || $_ eq 'status' || $_ eq 'reason' || $_ eq 'history' || /^\./; my @s = stat("$reporoot/$prpa/$packid/$_"); next unless @s; next if -d _; push @b, {'filename' => $_, 'mtime' => $s[9], 'size' => $s[7]}; } my $bl = {'package' => $packid, 'binary' => \@b}; push @{$sl->{'binarylist'}}, $bl; } } if ($cgi->{'summary'}) { my @order = ('succeeded', 'failed', 'unresolvable', 'broken', 'scheduled'); my %order = map {$_ => 1} @order; my @sum = grep {exists $sum{$_}} @order; push @sum, grep {!$order{$_}} sort keys %sum; $sl->{'summary'} = {'statuscount' => [ map {{'code' => $_, 'count' => $sum{$_}}} @sum ] }; } if ($cgi->{'withstats'}) { my $stats = {}; my @s = stat("$reporoot/$prpa/:packstatus"); $stats->{'lastchecked'} = $s[9] if @s; @s = stat("$reporoot/$prpa/:repoinfo"); $stats->{'lastfinished'} = $s[9] if @s; # not really true for image builds... my $prp = $prpa; $prp =~ s/\/[^\/]+$//; if (!exists($lastpublished{$prp})) { $lastpublished{$prp} = undef; my @s = stat("$reporoot/$prp/:repoinfo"); my $ri = BSUtil::retrieve("$reporoot/$prp/:repoinfo", 1); if ($ri && $ri->{'state'}) { $lastpublished{$prp} = $s[9]; } } $stats->{'lastpublished'} = $lastpublished{$prp} if $lastpublished{$prp}; $sl->{'stats'} = $stats; } push @$r, $sl; } $state = Digest::MD5::md5_hex($state); if ($cgi->{'oldstate'} && $state eq $cgi->{'oldstate'}) { return if $BSStdServer::isajax; # watcher will call us back... my @args = map {"prpa=$_"} @{$prpas || []}; push @args, BSRPC::args($cgi, 'oldstate', 'package', 'code', 'withbinarylist'); BSHandoff::handoff('/_result', undef, @args); } return ({'result' => $r, 'state' => $state}, $BSXML::resultlist); } # special call that completely wipes the published area from a prp sub wipepublishedlocked { my ($projid, $repoid) = @_; my $prp = "$projid/$repoid"; return unless -d "$reporoot/$prp"; local *F; BSUtil::lockopen(\*F, '>', "$reporoot/$prp/.finishedlock"); for my $arch (sort(ls("$reporoot/$prp"))) { my $r = "$reporoot/$prp/$arch/:repo"; next unless -d $r; unlink("${r}info"); BSUtil::cleandir($r); rmdir($r); } close F; forwardevent({}, 'publish', $projid, undef, $repoid, undef); } # call that deletes packages from publishing stage and triggers a scanrepo and # publish event for the prp. 
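# unpublish() removes the binaries of the given packages (or, with an
# empty package list, the complete :repo tree) from the published area of
# every listed architecture, updates the 'binaryorigins' map in :repoinfo,
# queues a per-arch 'recheck' event and finally forwards a publish event
# for the whole repository. Everything runs under the per-prp
# .finishedlock so it cannot race with the publisher. It is reached
# through the _command dispatcher, e.g. (hypothetical project and package
# names):
#   POST /_command?cmd=unpublish&prpa=home:user/standard/x86_64&package=foo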
sub unpublish { my ($projid, $repoid, $prparchs, $packids) = @_; my $prp = "$projid/$repoid"; my %packids = map {$_ => 1} @{$packids}; local *F; BSUtil::lockopen(\*F, '>', "$reporoot/$prp/.finishedlock"); for my $arch (@{$prparchs}) { my $rpath = "$reporoot/$prp/$arch/:repo"; if (%packids) { # just wipe some packages, need the repoinfo my $repoinfo = BSUtil::retrieve("$reporoot/$prp/$arch/:repoinfo"); my $binaryorigins = $repoinfo->{'binaryorigins'} || {}; my $dirty; for my $bin (sort keys %$binaryorigins) { next unless $packids{$binaryorigins->{$bin}}; if (-d "$rpath/$bin") { BSUtil::cleandir("$rpath/$bin"); rmdir("$rpath/$bin"); } else { unlink("$rpath/$bin"); } delete $binaryorigins->{$bin}; $dirty = 1; } BSUtil::store("${rpath}info.new", "${rpath}info", $repoinfo) if $dirty; } else { # wipe all packages unlink("${rpath}info"); BSUtil::cleandir($rpath); rmdir($rpath); } if (-d "$eventdir/$arch") { my $ev = { type => 'recheck', 'project' => $projid, 'repository' => $repoid }; my $evname = "recheck:${projid}::$repoid"; $evname = "recheck:::".Digest::MD5::md5_hex($evname) if length($evname) > 200; writexml("$eventdir/$arch/.$evname.$$", "$eventdir/$arch/$evname", $ev, $BSXML::event); BSUtil::ping("$eventdir/$arch/.ping"); } } close F; forwardevent({}, 'publish', $projid, undef, $repoid, undef); } sub docommand { my ($cgi, $cmd, $prpas) = @_; my %code = map {$_ => 1} @{$cgi->{'code'} || []}; my %buildingjobs; my %wipepublishedlockeddone; if ($cmd eq 'unpublish') { die("code filter not supported for unpublish\n") if $cgi->{'code'}; my %prparchs; for my $prpa (@$prpas) { my ($projid, $repoid, $arch) = split('/', $prpa); push @{$prparchs{"$projid/$repoid"}}, $arch; } for my $prp (sort keys %prparchs) { my ($projid, $repoid) = split('/', $prp); unpublish($projid, $repoid, $prparchs{$prp}, $cgi->{'package'} || []); } return $BSStdServer::return_ok; } if ($cmd eq 'availablebinaries') { my (%available, %available_pattern, %available_product); for my $prpa (@$prpas) { my ($projid, $repoid, $arch) = split('/', $prpa); getavailable($projid, $repoid, $arch, \%available, \%available_pattern, \%available_product); } my %res; $res{'packages'} = processavailable(\%available) if %available; $res{'patterns'} = processavailable(\%available_pattern) if %available_pattern; $res{'products'} = processavailable(\%available_product) if %available_product; return (\%res, $BSXML::availablebinaries); } for my $prpa (@$prpas) { my ($projid, $repoid, $arch) = split('/', $prpa); my @packids = @{$cgi->{'package'} || []}; my $allpacks; if (@packids && $packids[0] eq '*') { shift @packids; $allpacks = 1; } if (%code) { my $ps = readpackstatus($prpa); fixpackstatus($prpa, $ps, \%buildingjobs); @packids = grep {$code{$ps->{'packstatus'}->{$_} || 'unknown'}} @packids; } if ($cmd eq 'rebuild') { if (@packids) { dirty($projid, $repoid, $arch); for my $packid (@packids) { unlink("$reporoot/$projid/$repoid/$arch/:meta/$packid"); my $ev = { type => 'rebuild', 'project' => $projid, 'package' => $packid }; my $evname = "rebuild:${projid}::$packid"; $evname = "rebuild:::".Digest::MD5::md5_hex($evname) if length($evname) > 200; if (-d "$eventdir/$arch") { writexml("$eventdir/$arch/.$evname.$$", "$eventdir/$arch/$evname", $ev, $BSXML::event); } } BSUtil::ping("$eventdir/$arch/.ping") if -d "$eventdir/$arch"; } } elsif ($cmd eq 'killbuild' || $cmd eq 'abortbuild') { for my $packid (@packids) { eval { abortbuild($cgi, $projid, $repoid, $arch, $packid); }; warn("$@") if $@; } } elsif ($cmd eq 'restartbuild') { for my $packid (@packids) { 
eval { restartbuild($cgi, $projid, $repoid, $arch, $packid); }; warn("$@") if $@; } } elsif ($cmd eq 'wipepublishedlocked') { my $prp = "$projid/$repoid"; wipepublishedlocked($projid, $repoid) unless $wipepublishedlockeddone{$prp}; $wipepublishedlockeddone{$prp} = 1; } elsif ($cmd eq 'wipe') { undef $allpacks; if ($allpacks) { forwardevent($cgi, 'wipe', $projid, undef, $repoid, $arch); } else { for my $packid (@packids) { forwardevent($cgi, 'wipe', $projid, $packid, $repoid, $arch); } } } elsif ($cmd eq 'force_publish') { forwardevent($cgi, 'force_publish', $projid, undef, $repoid, $arch); } } return $BSStdServer::return_ok; } # special lastfailures mode: return the last success and the first failure # after the success if there was a failure. If the package never succeeded, # return the first failure. sub getlastfailures { my ($cgi, $projid, $repoid, $arch) = @_; my $prpa = "$projid/$repoid/$arch"; # update our little database my $db; $db = BSUtil::retrieve("$reporoot/$prpa/:lastfailures", 1) || {}; my $changed; local *F; return ({jobhist => []}, $BSXML::jobhistlist) unless open(F, '<', "$reporoot/$prpa/:jobhistory"); if ($db->{'offset'} && $db->{'lastline'} && seek(F, $db->{'offset'}, 0)) { if (Digest::MD5::md5_hex(<F> || '') ne $db->{'lastline'}) { seek(F, 0, 0) || die("could not rewind\n"); $db = {}; } } else { $db = {}; } $db->{'failure'} ||= {}; $db->{'success'} ||= {}; my $failure = $db->{'failure'}; my $success = $db->{'success'}; my $ll; my $llo; while (<F>) { next if chop($_) ne "\n"; $ll = $_; $llo = tell(F) - length($_) - 1; my $r = BSFileDB::decode_line($_, $BSXML::jobhistlay); my $n = $r->{'package'}; if ($r->{'code'} eq 'succeeded' || $r->{'code'} eq 'unchanged') { $success->{$n} = $r; delete $failure->{$n}; } elsif (!$failure->{$n}) { $failure->{$n} = $r; } } if (defined($ll)) { $db->{'lastline'} = Digest::MD5::md5_hex("$ll\n"); $db->{'offset'} = $llo; BSUtil::store("$reporoot/$prpa/.:lastfailures$$", "$reporoot/$prpa/:lastfailures", $db); } my %packid = map {$_ => 1} @{$cgi->{'package'}}; %packid = %{ { %$failure, %$success} } unless %packid; my @hist; for my $packid (sort keys %packid) { push @hist, $success->{$packid} if $success->{$packid}; push @hist, $failure->{$packid} if $failure->{$packid}; } my $ret = {jobhist => \@hist}; return ($ret, $BSXML::jobhistlist); } sub getlastfailures_old { my ($cgi, $projid, $repoid, $arch) = @_; my $filter; # report last success/unchanged and all fails for each package my %success; if ($cgi->{'package'}) { my %packid = map {$_ => 1} @{$cgi->{'package'}}; $filter = sub { return 0 unless $packid{$_[0]->{'package'}}; return 1 unless $_[0]->{'code'} eq 'succeeded' || $_[0]->{'code'} eq 'unchanged'; delete $packid{$_[0]->{'package'}}; return %packid ?
1 : -1; }; } else { $filter = sub { return 0 if $success{$_[0]->{'package'}}; $success{$_[0]->{'package'}} = 1 if $_[0]->{'code'} eq 'succeeded' || $_[0]->{'code'} eq 'unchanged'; return 1; }; } my @hist = BSFileDB::fdb_getall_reverse("$reporoot/$projid/$repoid/$arch/:jobhistory", $BSXML::jobhistlay, undef, $filter); @hist = reverse @hist; my $ret = {jobhist => \@hist}; return ($ret, $BSXML::jobhistlist); } sub getjobhistory { my ($cgi, $projid, $repoid, $arch) = @_; my $filter; if ($cgi->{'code'} && @{$cgi->{'code'}} == 1 && $cgi->{'code'}->[0] eq 'lastfailures') { return getlastfailures($cgi, $projid, $repoid, $arch); } if ($cgi->{'package'} && $cgi->{'code'}) { my %packid = map {$_ => 1} @{$cgi->{'package'}}; my %code = map {$_ => 1} @{$cgi->{'code'}}; $filter = sub {$packid{$_[0]->{'package'}} && $code{$_[0]->{'code'}}}; } elsif ($cgi->{'package'}) { my %packid = map {$_ => 1} @{$cgi->{'package'}}; $filter = sub {$packid{$_[0]->{'package'}}}; } elsif ($cgi->{'code'}) { my %code = map {$_ => 1} @{$cgi->{'code'}}; $filter = sub {$code{$_[0]->{'code'}}}; } my @hist = BSFileDB::fdb_getall_reverse("$reporoot/$projid/$repoid/$arch/:jobhistory", $BSXML::jobhistlay, $cgi->{'limit'} || 100, $filter); @hist = reverse @hist; my $ret = {jobhist => \@hist}; return ($ret, $BSXML::jobhistlist); } $Build::Kiwi::urlmapper = \&BSUrlmapper::urlmapper; sub getbuildinfo { my ($cgi, $projid, $repoid, $arch, $packid) = @_; my $binfo = BSRepServer::BuildInfo::buildinfo($projid, $repoid, $arch, $packid, internal => $cgi->{'internal'}, add => $cgi->{'add'}, debug => $cgi->{'debug'}, ); return ($binfo, $BSXML::buildinfo); } sub getbuildinfo_post { my ($cgi, $projid, $repoid, $arch, $packid) = @_; my $fn = $cgi->{'_fn'}; my $depfile = $cgi->{'_depfile'}; if (!$fn) { mkdir_p($uploaddir); $fn = "$uploaddir/$$"; die("upload failed\n") unless BSServer::read_file($fn); } local *F; open(F, '<', $fn) || die("$fn: $!\n"); my $magic; sysread(F, $magic, 6); if ($magic eq "070701" && !$cgi->{'_fn'}) { # have cpio archive, extract recipe and depfile, recurse unlink($fn); sysseek(F, 0, 0); my $dir = "$uploaddir/$$.dir"; mkdir_p($dir); my $uploaded = BSHTTP::cpio_receiver(BSHTTP::fd2req(\*F), {'directory' => $dir}); close(F); # should we check if the cpio archive contains <= 2 files? 
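# The archive is expected to carry the build recipe plus the optional
# 'deps', '_service' and 'buildenv' members; whatever does not match one
# of those three names is taken to be the recipe. We then recurse with
# '_fn' pointing at the extracted recipe so this cpio branch is skipped
# on the second pass. (This handler backs
# 'POST:/build/$project/$repository/$arch/$package_repository/_buildinfo'
# in the dispatch table below.)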
$depfile = (grep { $_->{'name'} eq 'deps' } @$uploaded)[0]; $depfile = "$dir/$depfile->{'name'}" if $depfile; my $servicefile = (grep { $_->{'name'} eq '_service' } @$uploaded)[0]; $servicefile = "$dir/$servicefile->{'name'}" if $servicefile; my $bifile = (grep { $_->{'name'} eq 'buildenv' } @$uploaded)[0]; $bifile = "$dir/$bifile->{'name'}" if $bifile; $fn = (grep { $_->{'name'} ne "deps" && $_->{'name'} ne 'buildenv' && $_->{'name'} ne '_service'} @$uploaded)[0]; die("no build recipe file found\n") unless $fn; my @r; eval { @r = getbuildinfo_post({ %$cgi, '_fn' => "$dir/$fn->{'name'}", '_depfile' => $depfile, '_buildenvfile' => $bifile, '_servicefile' => $servicefile, '_buildtype' => Build::recipe2buildtype($fn->{'name'})}, $projid, $repoid, $arch, $packid); }; unlink("$dir/$_") for ls($dir); rmdir($dir) if -d $dir; die("$@\n") if $@; return @r; } close(F); undef $packid if $packid eq '_repository'; my $bconf = BSRepServer::getconfig($projid, $repoid, $arch); $bconf->{'type'} = $cgi->{'_buildtype'} if $cgi->{'_buildtype'}; if (defined($packid)) { $bconf->{'obspackage'} = $packid; if ($packid =~ /^(.*):([^:]+)$/) { $bconf->{'obspackage'} = $1; $bconf->{'buildflavor'} = $2; } } my $d = Build::parse_typed($bconf, $fn, $bconf->{'type'}); unlink($fn); die("unknown repository type $bconf->{'type'}\n") unless $d; die("could not parse build description ($bconf->{'type'}): $d->{'error'}\n") if $d->{'error'}; die("could not parse name in build description ($bconf->{'type'})\n") unless defined $d->{'name'}; # build info from parsed data my $info = { 'repository' => $repoid }; $info->{'name'} = $d->{'name'}; $info->{'dep'} = $d->{'deps'}; $info->{'subpacks'} = $d->{'subpacks'} if $d->{'subpacks'}; if ($d->{'prereqs'}) { my %deps = map {$_ => 1} (@{$d->{'deps'} || []}, @{$d->{'subpacks'} || []}); my @prereqs = grep {!$deps{$_} && !/^%/} @{$d->{'prereqs'}}; $info->{'prereq'} = \@prereqs if @prereqs; } $info->{'path'} = $d->{'path'} if $d->{'path'}; $info->{'containerpath'} = $d->{'containerpath'} if $d->{'containerpath'}; if ($bconf->{'type'} eq 'kiwi') { $info->{'imagetype'} = $d->{'imagetype'}; $info->{'imagearch'} = $d->{'exclarch'} if $d->{'exclarch'}; } my $pdata = {'buildtype' => $bconf->{'type'}, 'info' => [ $info ]}; $pdata->{'buildenv'} = readxml($cgi->{'_buildenvfile'}, $BSXML::buildinfo) if $cgi->{'_buildenvfile'}; if ($cgi->{'_servicefile'}) { my $services = readxml($cgi->{'_servicefile'}, $BSXML::services); for my $service (@{$services->{'service'} || []}) { next unless $service->{'mode'} && $service->{'mode'} eq 'buildtime'; my $pkgname = "obs-service-$service->{'name'}"; # debian does not allow _ in package name $pkgname =~ s/_/-/g if $bconf->{'binarytype'} eq 'deb'; push @{$info->{'dep'}}, $pkgname; } } $pdata->{'ldepfile'} = $depfile if defined $depfile; my $binfo = BSRepServer::BuildInfo::buildinfo($projid, $repoid, $arch, $packid, pdata => $pdata, internal => $cgi->{'internal'}, add => $cgi->{'add'}, debug => $cgi->{'debug'}, ); return ($binfo, $BSXML::buildinfo); } sub getbuilddepinfo { my ($cgi, $projid, $repoid, $arch) = @_; my $builddepinfo_in; if (BSServer::have_content()) { my $content = BSServer::read_data(10000000); $builddepinfo_in = BSUtil::fromxml($content, $BSXML::builddepinfo); } my %packids = map {$_ => 1} @{$cgi->{'package'} || []}; my $view = $cgi->{'view'} || ''; my $depends = BSUtil::retrieve("$reporoot/$projid/$repoid/$arch/:depends", 1) || {}; my $subpacks = $depends->{'subpacks'} || {}; my $pkgdeps = $depends->{'pkgdeps'} || {}; my $pkg2src = $depends->{'pkg2src'} || {}; if
($builddepinfo_in) { for my $in (@{$builddepinfo_in->{'package'} || []}) { my $packid = $in->{'name'}; next unless $packid; $pkg2src->{$packid} = $in->{'source'} if $in->{'source'}; $subpacks->{$packid} = $in->{'subpkg'} if $in->{'subpkg'}; delete $pkgdeps->{$packid}; $pkgdeps->{$packid} = $in->{'pkgdep'} if $in->{'pkgdep'}; } } my %subpack2pack; if ($view eq 'order') { # order like the scheduler does my @cycles; my @packs = sort keys %$pkg2src; @packs = sort keys %packids if %packids; @packs = BSSolv::depsort($pkgdeps, $pkg2src, \@cycles, @packs) if @packs > 1; my @res = map { { 'name' => $_ } } @packs; my $res = { 'package' => \@res, }; $res->{'cycle'} = [map {{'package' => $_}} @cycles] if @cycles; return ($res, $BSXML::builddepinfo); } if ($view eq 'pkgnames' || $view eq 'revpkgnames') { for my $packid (sort keys %$pkg2src) { my $n = $pkg2src->{$packid} || $packid; if ($subpacks->{$n} && @{$subpacks->{$n}}) { push @{$subpack2pack{$_}}, $packid for @{$subpacks->{$n}}; } else { push @{$subpack2pack{$n}}, $packid; } } if ($view eq 'revpkgnames') { my %rdeps; for my $packid (sort keys %$pkg2src) { my $deps = $pkgdeps->{$packid} || []; $deps = [ map {@{$subpack2pack{$_} || []}} @$deps ]; for (@$deps) { push @{$rdeps{$_}}, $packid; } } $pkgdeps = \%rdeps; } } my @res; for my $packid (sort keys %$pkg2src) { next if %packids && !$packids{$packid}; my $n = $pkg2src->{$packid}; my @sp = sort @{$subpacks->{$n} || []}; push @sp, $n unless @sp; if ($n ne $sp[0] && (grep {$_ eq $n} @sp)) { @sp = grep {$_ ne $n} @sp; unshift @sp, $n; } my $deps = $pkgdeps->{$packid} || []; $deps = [ map {@{$subpack2pack{$_} || []}} @$deps ] if $view eq 'pkgnames'; $deps = [ sort(BSUtil::unify(@$deps)) ] if $view eq 'pkgnames' || $view eq 'revpkgnames'; push @res, {'name' => $packid, 'source' => $n, 'pkgdep' => $deps, 'subpkg' => \@sp, }; } my @cycles = map {{'package' => $_}} @{$depends->{'cycles'} || []}; my $res = { 'package' => \@res, }; $res->{'cycle'} = \@cycles if @cycles; return ($res, $BSXML::builddepinfo); } ### FIXME: read status instead! 
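# findjob() scans the architecture's job directory for a "<job>:status"
# entry, also accepting job names with a 32-digit hex suffix (presumably
# the source md5 of the package), and returns the matching job name, or
# undef if the package is not currently building.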
sub findjob { my ($projid, $repoid, $arch, $packid) = @_; my $prp = "$projid/$repoid"; my $job = jobname($prp, $packid); my @jobdatadirs = grep {$_ eq "$job:status" || /^\Q$job\E-[0-9a-f]{32}:status$/} ls("$jobsdir/$arch"); return undef unless @jobdatadirs; $job = $jobdatadirs[0]; $job =~ s/:status$//; return $job; } sub restartbuild { my ($cgi, $projid, $repoid, $arch, $packid) = @_; my $job = findjob($projid, $repoid, $arch, $packid); die("not building\n") unless $job; local *F; my $js = BSUtil::lockopenxml(\*F, '<', "$jobsdir/$arch/$job:status", $BSXML::jobstatus); die("not building\n") if $js->{'code'} ne 'building'; my $req = { 'uri' => "$js->{'uri'}/discard", 'timeout' => 30, }; eval { BSRPC::rpc($req, undef, "jobid=$js->{'jobid'}"); }; warn($@) if $@; unlink("$jobsdir/$arch/$job:status"); close F; return $BSStdServer::return_ok; } sub abortbuild { my ($cgi, $projid, $repoid, $arch, $packid) = @_; my $job = findjob($projid, $repoid, $arch, $packid); die("not building\n") unless $job; local *F; my $js = BSUtil::lockopenxml(\*F, '<', "$jobsdir/$arch/$job:status", $BSXML::jobstatus); die("not building\n") if $js->{'code'} ne 'building'; my $req = { 'uri' => "$js->{'uri'}/kill", 'timeout' => 30, }; BSRPC::rpc($req, undef, "jobid=$js->{'jobid'}"); return $BSStdServer::return_ok; } # # OBSOLETE: qemu shall be installed into the target system # FIXME3.0: remove this # if there is a qemu dir in the OBS backend install dir, workers load qemu from the OBS backend server # this is similar to the rest of the build script code # if that does not exist either, workers copy qemu from the qemu installed locally on the worker # sub getqemuinterpreters { my @send; for my $file (grep {!/^\./} ls('qemu')) { next unless -f "qemu/$file"; push @send, {'name' => $file, 'filename' => "qemu/$file"}; } return @send; } sub getcode { my ($cgi, $dir) = @_; my @send; push @send, getqemuinterpreters() if $dir eq 'build'; for my $file (grep {!/^\./} ls($dir)) { if (($file eq 'Build' || $file eq 'emulator') && -d "$dir/$file") { push @send, {'name' => $file, 'mode' => 0x41ed, 'data' => ''}; for my $file2 (grep {!/^\./} ls("$dir/$file")) { push @send, {'name' => "$file/$file2", 'filename' => "$dir/$file/$file2"}; } } next unless -f "$dir/$file"; push @send, {'name' => "$file", 'filename' => "$dir/$file"}; } die("$dir is empty\n") unless @send; $_->{'follow'} = 1 for @send; # follow all symlinks BSServer::reply_cpio(\@send); return undef; } sub getbuildcode { my ($cgi) = @_; return getcode($cgi, 'build'); } sub getworkercode { my ($cgi) = @_; return getcode($cgi, 'worker'); } sub postrepo { my ($cgi, $projid, $repoid, $arch) = @_; my @args = ("project=$projid", "repository=$repoid", "arch=$arch"); push @args, "partition=$BSConfig::partition" if $BSConfig::partition; # FIXME: add remote support my $projpack = BSRPC::rpc("$BSConfig::srcserver/getprojpack", $BSXML::projpack, 'withrepos', 'expandedrepos', @args); my $proj = $projpack->{'project'}->[0]; die("no such project\n") unless $proj && $proj->{'name'} eq $projid; my $repo = $proj->{'repository'}->[0]; die("no such repository\n") unless $repo && $repo->{'name'} eq $repoid; my @prp = map {"$_->{'project'}/$_->{'repository'}"} @{$repo->{'path'} || []}; my $pool = BSSolv::pool->new(); for my $prp (@prp) { BSRepServer::addrepo_scan($pool, $prp, $arch); } $pool->createwhatprovides(); my %data; for my $p ($pool->consideredpackages()) { my $d = $pool->pkg2data($p); $data{$d->{'name'}} = $d; } undef $pool; my @data; for (sort keys %data) { push @data, $data{$_}; $data[-1]->{'_content'} =
$data[-1]->{'name'}; } my $match = $cgi->{'match'}; $match = "[$match]" unless $match =~ /^[\.\/]?\[/; $match = ".$match" if $match =~ /^\[/; my $v = BSXPath::valuematch(\@data, $match); return {'value' => $v}, $BSXML::collection; } my %prp_to_repoinfo; sub prp_to_repoinfo { my ($prp) = @_; my $repoinfo = $prp_to_repoinfo{$prp}; if (!$repoinfo) { if (-s "$reporoot/$prp/:repoinfo") { $repoinfo = BSUtil::retrieve("$reporoot/$prp/:repoinfo"); for (@{$repoinfo->{'prpsearchpath'} || []}) { next if ref($_); # legacy my ($p, $r) = split('/', $_, 2); $_ = {'project' => $p, 'repository' => $r}; } } else { $repoinfo = {'binaryorigins' => {}}; } $prp_to_repoinfo{$prp} = $repoinfo; } return $repoinfo; } sub binary_key_to_data { my ($db, $key) = @_; my @p = split('/', $key); my $binary = pop(@p); my $name = $binary; my $version = ''; if ($name =~ s/-([^-]+-[^-]+)\.[^\.]+\.rpm$//) { $version = $1; } elsif ($name =~ s/_([^_]+)_[^_]+\.deb$//) { $version = $1; } my $arch = pop(@p); while (@p > 1 && $p[0] =~ /:$/) { splice(@p, 0, 2, "$p[0]$p[1]"); } my $project = shift(@p); while (@p > 1 && $p[0] =~ /:$/) { splice(@p, 0, 2, "$p[0]$p[1]"); } my $repository = shift(@p); my $prp = "$project/$repository"; my $repoinfo = $prp_to_repoinfo{$prp} || prp_to_repoinfo($prp); my $type; $type = 'rpm' if $binary =~ /\.rpm$/; $type = 'deb' if $binary =~ /\.deb$/; my $res = { 'name' => $name, 'version' => $version, 'arch' => $arch, 'type' => $type, 'project' => $project, 'repository' => $repository, 'filename' => $binary, 'filepath' => $key, }; $res->{'path'} = $repoinfo->{'prpsearchpath'} if $repoinfo->{'prpsearchpath'}; $res->{'package'} = $repoinfo->{'binaryorigins'}->{"$arch/$binary"} if defined $repoinfo->{'binaryorigins'}->{"$arch/$binary"}; $res->{'baseproject'} = $res->{'path'}->[-1]->{'project'} if $res->{'path'}; return $res; } sub pattern_key_to_data { my ($db, $key) = @_; my @p = split('/', $key); my $filename = pop(@p); while (@p > 1 && $p[0] =~ /:$/) { splice(@p, 0, 2, "$p[0]$p[1]"); } my $project = shift(@p); while (@p > 1 && $p[0] =~ /:$/) { splice(@p, 0, 2, "$p[0]$p[1]"); } my $repository = shift(@p); my @v = BSDBIndex::getvalues($db, $db->{'table'}, $key); return {} unless @v; my $res = $v[0]; $res->{'baseproject'} = $res->{'path'}->[-1]->{'project'} if $res->{'path'}; $res->{'project'} = $project; $res->{'repository'} = $repository; $res->{'filename'} = $filename; $res->{'filepath'} = $key; return $res; } sub search_published_binary_id { my ($cgi, $match) = @_; my $binarydb = BSDB::opendb($extrepodb, 'binary'); $binarydb->{'allkeyspath'} = 'name'; $binarydb->{'noindex'} = {'arch' => 1, 'project' => 1, 'repository' => 1, 'package' => 1, 'type' => 1, 'path/project' => 1, 'path/repository' => 1}; $binarydb->{'fetch'} = \&binary_key_to_data; $binarydb->{'cheapfetch'} = 1; my $rootnode = BSXPathKeys::node($binarydb, ''); my $data = BSXPath::match($rootnode, $match) || []; # epoch? 
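# Sort the matches newest-first using rpm version comparison, then by
# name and architecture for a stable listing; as the comment above notes,
# the package epoch is not taken into account here.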
@$data = sort {Build::Rpm::verscmp($b->{'version'}, $a->{'version'}) || $a->{'name'} cmp $b->{'name'} || $a->{'arch'} cmp $b->{'arch'}} @$data; delete $_->{'path'} for @$data; my $res = {'binary' => $data}; return ($res, $BSXML::collection); } sub search_published_pattern_id { my ($cgi, $match) = @_; my $patterndb = BSDB::opendb($extrepodb, 'pattern'); $patterndb->{'noindex'} = {'project' => 1, 'repository' => 1}; $patterndb->{'fetch'} = \&pattern_key_to_data; my $rootnode = BSXPathKeys::node($patterndb, ''); my $data = BSXPath::match($rootnode, $match) || []; for (@$data) { delete $_->{'path'}; delete $_->{'description'}; delete $_->{'summary'}; } my $res = {'pattern' => $data}; return ($res, $BSXML::collection); } sub listpublished { my ($dir, $fileok) = @_; my @r; for my $d (ls($dir)) { if ($fileok && -f "$dir/$d") { push @r, $d; next; } next unless -d "$dir/$d"; if ($d =~ /:$/) { my $dd = $d; chop $dd; push @r, map {"$dd:$_"} listpublished("$dir/$d"); } else { push @r, $d; } } return @r; } sub findympbinary { my ($binarydir, $binaryname) = @_; for my $b (ls($binarydir)) { next unless $b =~ /\.(?:$binsufsre)$/; next unless $b =~ /^\Q$binaryname\E/; if ($b =~ /(.+)-[^-]+-[^-]+\.[a-zA-Z][^\.\-]*\.rpm$/) { my $bn = $1; next unless $binaryname =~ /^\Q$bn\E/; } my $data = Build::query("$binarydir/$b", 'evra' => 1); if ($data->{'name'} eq $binaryname || "$data->{'name'}-$data->{'version'}" eq $binaryname) { return "$binarydir/$b"; } } return undef; } sub publisheddir { my ($cgi, $projid, $repoid, $arch) = @_; my @res = (); if (!defined($projid)) { @res = listpublished($extrepodir); if ($BSConfig::publishredirect) { for (keys %{$BSConfig::publishredirect}) { push @res, (split('/', $_, 2))[0]; } @res = BSUtil::unify(@res); } } elsif (!defined($repoid)) { my $prp_ext = $projid; $prp_ext =~ s/:/:\//g; @res = listpublished("$extrepodir/$prp_ext"); if ($BSConfig::publishredirect) { for (keys %{$BSConfig::publishredirect}) { my @p = split('/', $_, 2); push @res, $p[1] if $p[0] eq $projid; } @res = BSUtil::unify(@res); } } elsif (!defined($arch)) { my $extrep = BSUrlmapper::get_extrep("$projid/$repoid"); @res = listpublished($extrep, 1); } else { my $extrep = BSUrlmapper::get_extrep("$projid/$repoid"); return publishedfile($cgi, $projid, $repoid, undef, $arch) if -f "$extrep/$arch"; if ($cgi->{'view'} && $cgi->{'view'} eq 'ymp') { my $binaryname = $arch; my $binary; my @archs = ls($extrep); for my $a (@archs) { next if $a eq 'repodata' || $a eq 'repocache'; next unless -d "$extrep/$a"; $binary = findympbinary("$extrep/$a", $binaryname); last if $binary; } $binary ||= "$extrep/$binaryname"; my $projpack; if (BSServer::have_content()) { my $projpackxml = BSServer::read_data(10000000); $projpack = BSUtil::fromxml($projpackxml, $BSXML::projpack, 1); } return makeymp($projid, $repoid, $binary, $projpack); } @res = ls("$extrep/$arch"); } @res = sort @res; @res = map {{'name' => $_}} @res; return ({'entry' => \@res}, $BSXML::dir); } sub makeymp { my ($projid, $repoid, $binary, $projpackin) = @_; my $binaryname; my $data; if ($binary =~ /(?:^|\/)([^\/]+)-[^-]+-[^-]+\.[a-zA-Z][^\/\.\-]*\.rpm$/) { $binaryname = $1; } elsif ($binary =~ /(?:^|\/)([^\/]+)_([^\/]*)_[^\/]*\.deb$/) { $binaryname = $1; } elsif ($binary =~ /(?:^|\/)([^\/]+)\.(?:rpm|deb)$/) { $binaryname = $1; } else { my $binarydir; ($binarydir, $binaryname) = $binary =~ /^(.*)\/([^\/]*)$/; $binary = findympbinary($binarydir, $binaryname) || $binary; } $data = Build::query($binary, 'description' => 1); #die("no such binary\n") unless $data; my 
$projpack; if ($projpackin && $projpackin->{'project'}->[0]->{'name'} eq $projid) { $projpack = $projpackin; } else { my @args = ("project=$projid", "repository=$repoid"); $projpack = BSRPC::rpc("$BSConfig::srcserver/getprojpack", $BSXML::projpack, 'withrepos', 'expandedrepos', 'nopackages', @args); } my $proj = $projpack->{'project'}->[0]; die("no such project\n") unless $proj && $proj->{'name'} eq $projid; my $repo = $proj->{'repository'}->[0]; die("no such repository\n") unless $repo && $repo->{'name'} eq $repoid; my @nprojids = grep {$_ ne $projid} map {$_->{'project'}} @{$repo->{'path'} || []}; my %nprojpack; if ($projpackin) { $nprojpack{$_->{'name'}} ||= $_ for @{$projpackin->{'project'} || []}; } @nprojids = grep {!$nprojpack{$_}} @nprojids; if (@nprojids) { my @args = map {"project=$_"} @nprojids; my $nprojpack = BSRPC::rpc("$BSConfig::srcserver/getprojpack", $BSXML::projpack, 'nopackages', @args); $nprojpack{$_->{'name'}} ||= $_ for @{$nprojpack->{'project'} || []}; } my $ymp = {}; $ymp->{'xmlns:os'} = 'http://opensuse.org/Standards/One_Click_Install'; $ymp->{'xmlns'} = 'http://opensuse.org/Standards/One_Click_Install'; my @group; $ymp->{'group'} = \@group; my @repos; my @pa = @{$repo->{'path'} || []}; while (@pa) { my $pa = shift @pa; my $r = {}; $r->{'recommended'} = @pa || !@repos ? 'true' : 'false'; $r->{'name'} = $pa->{'project'}; if ($pa->{'project'} eq $projid) { $r->{'summary'} = $proj->{'title'}; $r->{'description'} = $proj->{'description'}; } elsif ($nprojpack{$pa->{'project'}}) { $r->{'summary'} = $nprojpack{$pa->{'project'}}->{'title'}; $r->{'description'} = $nprojpack{$pa->{'project'}}->{'description'}; } my $url = BSUrlmapper::get_downloadurl("$pa->{'project'}/$pa->{'repository'}"); next unless defined $url; $r->{'url'} = $url; push @repos, $r; } my $pkg = {}; if ($data) { $pkg->{'name'} = str2utf8xml($data->{'name'}); $pkg->{'description'} = str2utf8xml($data->{'description'}); } else { $pkg->{'name'} = str2utf8xml($binaryname); $pkg->{'description'} = "The $pkg->{'name'} package"; } if (defined $data->{'summary'}) { $pkg->{'summary'} = str2utf8xml($data->{'summary'}); } else { $pkg->{'summary'} = "The $pkg->{'name'} package"; } my $inner_group = {}; $inner_group->{'repositories'} = {'repository' => \@repos }; $inner_group->{'software'} = {'item' => [$pkg]}; push @group, $inner_group; return ($ymp, $BSXML::ymp, 'Content-Type: text/x-suse-ymp'); } sub fileinfo { my ($cgi, $filepath, $filename) = @_; my $res = {'filename' => $filename}; my $q = {}; die("filename: $!\n") unless -f $filepath; if ($filename =~ /\.(?:$binsufsre)$/) { $q = Build::query($filepath, 'evra' => 1, 'description' => 1, 'alldeps' => 1); data2utf8xml($q); } elsif ($filename =~ /\.ymp$/) { my $ymp = readxml($filepath, $BSXML::ymp, 1); if ($ymp) { my $g0 = $ymp->{'group'}[0]; $q->{'name'} = $g0->{'name'} if defined $g0->{'name'}; $q->{'summary'} = $g0->{'summary'} if defined $g0->{'summary'}; $q->{'description'} = $g0->{'description'} if defined $g0->{'description'}; $q->{'size'} = $g0->{'size'} if defined $g0->{'size'}; if ($g0->{'repositories'}) { $q->{'recommends'} = [ map {$_->{'name'}} grep {$_->{'recommended'} && $_->{'recommended'} eq 'true'} @{$g0->{'packages'}->{'package'} || []} ]; $q->{'suggests'} = [ map {$_->{'name'}} grep {!($_->{'recommended'} && $_->{'recommended'} eq 'true')} @{$g0->{'packages'}->{'package'} || []} ]; delete $q->{'recommends'} unless @{$q->{'recommends'}}; delete $q->{'suggests'} unless @{$q->{'suggests'}}; } } } my @s = stat($filepath); $q->{'size'} = $s[7] unless 
defined $q->{'size'}; $q->{'mtime'} = $s[9] unless defined $q->{'mtime'}; for (qw{name epoch version size mtime release arch summary description provides requires recommends suggests}) { $res->{$_} = $q->{$_} if defined $q->{$_}; } return ($res, $BSXML::fileinfo); } sub publishedfile { my ($cgi, $projid, $repoid, $arch, $filename, $subfilename) = @_; $filename .= "/$subfilename" if defined $subfilename; my $extrep = BSUrlmapper::get_extrep("$projid/$repoid"); $extrep .= "/$arch" if defined $arch; if (-d "$extrep/$filename") { return publisheddir($cgi, $projid, $repoid, "$arch/$filename"); } if ($cgi->{'view'} && $cgi->{'view'} eq 'ymp') { my $projpack; if (BSServer::have_content()) { my $projpackxml = BSServer::read_data(10000000); $projpack = BSUtil::fromxml($projpackxml, $BSXML::projpack, 1); } return makeymp($projid, $repoid, "$extrep/$filename", $projpack); } die("404 no such file\n") unless -f "$extrep/$filename"; if ($cgi->{'view'} && $cgi->{'view'} eq 'fileinfo') { return fileinfo($cgi, "$extrep/$filename", $filename); } my $type = 'application/octet-stream'; $type = 'application/x-rpm' if $filename =~ /\.rpm$/; $type = 'application/x-debian-package' if $filename =~ /\.deb$/; $type = 'text/xml' if $filename=~ /\.xml$/; BSServer::reply_file("$extrep/$filename", "Content-Type: $type"); return undef; } sub getrelsync { my ($cgi, $projid, $repoid, $arch) = @_; my $prp = "$projid/$repoid"; my $relsyncdata; my $relsync_merge = BSUtil::retrieve("$reporoot/$prp/$arch/:relsync.merge", 1); if ($relsync_merge) { my $relsync = BSUtil::retrieve("$reporoot/$prp/$arch/:relsync", 1) || {}; $relsync = { %$relsync, %$relsync_merge }; $relsyncdata = BSUtil::tostorable($relsync); } else { $relsyncdata = readstr("$reporoot/$prp/$arch/:relsync"); $relsyncdata ||= BSUtil::tostorable({}); } return ($relsyncdata, 'Content-Type: application/octet-stream'); } sub postrelsync { my ($cgi, $projid, $repoid, $arch) = @_; my $prp = "$projid/$repoid"; my $newdata = BSServer::read_data(10000000); my $new = BSUtil::fromstorable($newdata); die("no data\n") unless $new; local *F; BSUtil::lockopen(\*F, '+>>', "$reporoot/$prp/$arch/:relsync.max"); my $relsyncmax; if (-s "$reporoot/$prp/$arch/:relsync.max") { $relsyncmax = BSUtil::retrieve("$reporoot/$prp/$arch/:relsync.max", 2); } $relsyncmax ||= {}; my $changed; for my $packid (keys %$new) { if ($packid =~ /\//) { next if defined($relsyncmax->{$packid}) && $relsyncmax->{$packid} >= $new->{$packid}; $relsyncmax->{$packid} = $new->{$packid}; } else { next unless $new->{$packid} =~ /^(.*)\.([^-]*)$/; next if defined($relsyncmax->{"$packid/$1"}) && $relsyncmax->{"$packid/$1"} >= $2; $relsyncmax->{"$packid/$1"} = $2; } $changed = 1; } BSUtil::store("$reporoot/$prp/$arch/:relsync.max.new", "$reporoot/$prp/$arch/:relsync.max", $relsyncmax) if $changed; close(F); if ($changed) { forwardevent($cgi, 'relsync', $projid, undef, $repoid, $arch); } return $BSStdServer::return_ok; } sub putdispatchprios { my ($cgi) = @_; mkdir_p($uploaddir); die("upload failed\n") unless BSServer::read_file("$uploaddir/dispatchprios.$$"); my $prios = readxml("$uploaddir/dispatchprios.$$", $BSXML::dispatchprios); unlink("$uploaddir/dispatchprios.$$"); mkdir_p($jobsdir); BSUtil::store("$jobsdir/.dispatchprios", "$jobsdir/dispatchprios", $prios); return $BSStdServer::return_ok; } sub getdispatchprios { my $prios = BSUtil::retrieve("$jobsdir/dispatchprios", 1) || {}; return ($prios, $BSXML::dispatchprios); } sub listjobarchs { my ($cgi) = @_; my @res = grep {-d "$jobsdir/$_"} ls ($jobsdir); @res = sort 
@res; @res = map {{'name' => $_}} @res; return ({'entry' => \@res}, $BSXML::dir); } sub listjobs { my ($cgi, $arch) = @_; my @b = grep {!/^\./} ls("$jobsdir/$arch"); @b = grep {!/:cross$/} @b; my %locked = map {$_ => 1} grep {/:status$/} @b; @b = grep {!/:(?:dir|status|new)$/} @b; my @res = map {{'name' => $_}} @b; return ({'entry' => \@res}, $BSXML::dir); } sub addjob { my ($cgi, $arch, $job) = @_; my $infoxml = BSServer::read_data(100000000); # just check xml structure die("job '$job' already exists\n") if -e "$jobsdir/$arch/$job"; my $info = XMLin($BSXML::buildinfo, $infoxml); mkdir_p("$jobsdir/$arch"); writestr("$jobsdir/$arch/.$job.$$", "$jobsdir/$arch/$job", $infoxml); if ($info->{'hostarch'} && $arch ne $info->{'hostarch'}) { mkdir_p("$jobsdir/$info->{'hostarch'}"); BSUtil::touch("$jobsdir/$info->{'hostarch'}/$job:$arch:cross"); } return $BSStdServer::return_ok; } sub getjob { my ($cgi, $arch, $job) = @_; die("404 no such job\n") unless -e "$jobsdir/$arch/$job"; if ($cgi->{'view'}) { die("unknown view '$cgi->{'view'}'\n") unless $cgi->{'view'} eq 'status'; my $js = readxml("$jobsdir/$arch/$job:status", $BSXML::jobstatus, 1); $js ||= {'job' => $job, 'code' => 'scheduled'}; return ($js, $BSXML::jobstatus); } my $info = readxml("$jobsdir/$arch/$job", $BSXML::buildinfo); return ($info, $BSXML::buildinfo); } sub deljob { my ($cgi, $arch, $job) = @_; return $BSStdServer::return_ok unless -e "$jobsdir/$arch/$job"; local *F; if (! -e "$jobsdir/$arch/$job:status") { my $js = {'code' => 'deleting'}; if (BSUtil::lockcreatexml(\*F, "$jobsdir/$arch/.repo.$$", "$jobsdir/$arch/$job:status", $js, $BSXML::jobstatus)) { if (-d "$jobsdir/$arch/$job:dir") { BSUtil::cleandir("$jobsdir/$arch/$job:dir"); rmdir("$jobsdir/$arch/$job:dir"); } unlink("$jobsdir/$arch/$job"); unlink("$jobsdir/$arch/$job:status"); close F; return $BSStdServer::return_ok; } } my $js = BSUtil::lockopenxml(\*F, '<', "$jobsdir/$arch/$job:status", $BSXML::jobstatus); if ($js->{'code'} eq 'building') { my $req = { 'uri' => "$js->{'uri'}/discard", 'timeout' => 60, }; eval { BSRPC::rpc($req, undef, "jobid=$js->{'jobid'}"); }; warn("kill $job: $@") if $@; } if (-d "$jobsdir/$arch/$job:dir") { BSUtil::cleandir("$jobsdir/$arch/$job:dir"); rmdir("$jobsdir/$arch/$job:dir"); } unlink("$jobsdir/$arch/$job"); unlink("$jobsdir/$arch/$job:status"); close F; return $BSStdServer::return_ok; } sub postmdload { my ($cgi) = @_; my $newdata = BSServer::read_data(200000000); my $newmdload = BSUtil::fromstorable($newdata); die("no data\n") unless $newmdload; return $BSStdServer::return_ok unless %$newmdload; local *F; BSUtil::lockopen(\*F, '+>>', "$jobsdir/mdload"); my $oldmdload = {}; if (-s "$jobsdir/mdload") { $oldmdload = BSUtil::retrieve("$jobsdir/mdload"); } for (keys %$newmdload) { if (!$oldmdload->{$_} || $oldmdload->{$_}->[0] < $newmdload->{$_}->[0]) { $oldmdload->{$_} = $newmdload->{$_}; } elsif ($newmdload->{$_}->[2] && $oldmdload->{$_}->[2] < $newmdload->{$_}->[2]) { ($oldmdload->{$_}->[2], $oldmdload->{$_}->[3]) = ($newmdload->{$_}->[2], $newmdload->{$_}->[3]); } } my $prunetime = time() - 50 * 86400; for (keys %$oldmdload) { my $l = $oldmdload->{$_}; delete $oldmdload->{$_} if $l->[0] < $prunetime && $l->[2] < $prunetime; } BSUtil::store("$jobsdir/.mdload.$$", "$jobsdir/mdload", $oldmdload); close F; return $BSStdServer::return_ok; } sub idleworkerjob { my ($cgi, $arch, $job) = @_; local *F; my $js = BSUtil::lockopenxml(\*F, '<', "$jobsdir/$arch/$job:status", $BSXML::jobstatus, 1); if ($js) { # be extra careful here not to terminate jobs 
that run on different workers $js->{'code'} = 'different' if $cgi->{'jobid'} && ($js->{'jobid'} || '') ne $cgi->{'jobid'}; if ($js->{'code'} eq 'building' && (!defined($js->{'workerid'}) || $js->{'workerid'} eq $cgi->{'workerid'})) { print "restarting build of job $arch/$job\n"; unlink("$jobsdir/$arch/$job:status"); } close F; } return $BSStdServer::return_ok; } sub setdispatchdetails { my ($cgi, $arch, $job) = @_; my $info = readxml("$jobsdir/$arch/$job", $BSXML::buildinfo, 1); if ($info) { my $ev = { type => 'dispatchdetails', job => $job, details => $cgi->{'details'}}; my $evname = "dispatchdetails:$job"; mkdir_p("$eventdir/$arch"); writexml("$eventdir/$arch/.$evname.$$", "$eventdir/$arch/$evname", $ev, $BSXML::event); BSUtil::ping("$eventdir/$arch/.ping"); } return $BSStdServer::return_ok; } sub failjob { my ($cgi, $arch, $job) = @_; local *F; return unless -e "$jobsdir/$arch/$job"; if (!BSUtil::lockopen(\*F, '+>>', "$jobsdir/$arch/$job:status", 1)) { die("job lock failed!\n"); } if (-s "$jobsdir/$arch/$job:status") { close F; die("job is building!\n"); } my $info = readxml("$jobsdir/$arch/$job", $BSXML::buildinfo, 1); if (!$info) { unlink("$jobsdir/$arch/$job:status"); close F; die("job disappeared!\n"); } my $projid = $info->{'project'} || $info->{'path'}->[0]->{'project'}; my $repoid = $info->{'repository'} || $info->{'path'}->[0]->{'repository'}; my $dir = "$jobsdir/$arch/$job:dir"; mkdir_p($dir); BSUtil::cleandir($dir); writestr("$dir/logfile", undef, $cgi->{'message'}); my $now = time(); my $jobstatus = { code => 'finished', result => 'failed', starttime => $now, endtime => $now, workerid => 'dispatcher', 'hostarch' => '' }; notify_jobresult($info, $jobstatus, "$projid/$repoid/$arch"); my $ev = {'type' => 'built', 'arch' => $arch, 'job' => $job}; writexml("$jobsdir/$arch/.$job:status", "$jobsdir/$arch/$job:status", $jobstatus, $BSXML::jobstatus); close F; dirty($projid, $repoid, $arch); mkdir_p("$eventdir/$arch"); writexml("$eventdir/$arch/.finished:$job$$", "$eventdir/$arch/finished:$job", $ev, $BSXML::event); BSUtil::ping("$eventdir/$arch/.ping"); } sub putconfiguration { my ($cgi) = @_; mkdir_p($uploaddir); my $uploaded = BSServer::read_file("$uploaddir/$$"); die("upload failed\n") unless $uploaded; my $configurationxml = readstr("$uploaddir/$$"); unlink("$uploaddir/$$"); my $oldconfigurationxml = readstr("$BSConfig::bsdir/configuration.xml", 1); if ($configurationxml ne ($oldconfigurationxml || '')) { BSUtil::fromxml($configurationxml, $BSXML::configuration); # test xml syntax writestr("$BSConfig::bsdir/.configuration.xml", "$BSConfig::bsdir/configuration.xml", $configurationxml); } # signal schedulers and publisher forwardevent($cgi, 'configuration', ''); forwardevent($cgi, 'configuration', '', undef, undef, 'publish') if -d "$eventdir/publish"; return $BSStdServer::return_ok; } sub getconfiguration { my $configuration = readxml("$BSConfig::bsdir/configuration.xml", $BSXML::configuration, 1) || {}; return ($configuration, $BSXML::configuration); } sub getajaxstatus { my ($cgi) = @_; BSHandoff::handoff('/ajaxstatus') if !$BSStdServer::isajax; my $r = BSWatcher::getstatus(); return ($r, $BSXML::ajaxstatus); } sub getworkercap { my ($cgi, $workerid) = @_; my $worker_cap; for my $workerstate (qw{idle building away dead down}) { $worker_cap ||= readxml("$workersdir/$workerstate/$workerid", $BSXML::worker, 1); } die("404 unknown worker\n") unless $worker_cap; delete $worker_cap->{$_} for qw{port ip}; return ($worker_cap, $BSXML::worker); } sub checkconstraints { my ($cgi, $projid, 
$repoid, $arch, $packid) = @_; my $constraints; if (BSServer::have_content()) { mkdir_p($uploaddir); my $uploaded = BSServer::read_file("$uploaddir/$$"); die("upload failed\n") unless $uploaded; $constraints = readxml("$uploaddir/$$", $BSXML::constraints); unlink("$uploaddir/$$"); } my $pconf = BSRPC::rpc("$BSConfig::srcserver/getconfig", undef, "project=$projid", "repository=$repoid"); my $bconf = Build::read_config($arch, [ split("\n", $pconf)] ); my @list = map { [ split(' ', $_) ] } @{$bconf->{'constraint'}}; my $prjconfconstraint = BSDispatcher::Constraints::list2struct($BSXML::constraints, \@list); $constraints = $constraints ? BSDispatcher::Constraints::mergeconstraints($prjconfconstraint, $constraints) : $prjconfconstraint; my %harchcando; # can the harch build an arch? for my $harch (keys %BSCando::cando) { for my $arch (@{$BSCando::cando{$harch}}) { if ($arch =~ /^([^:]+):(.+)$/) { $harchcando{"$harch/$1"} = $2; } else { $harchcando{"$harch/$arch"} = ''; } } } my $dispatch_constraints_info = { 'project' => $projid , 'repoid' => $repoid, 'arch' => $arch, 'packid' => $packid, }; my @comp_workers; for my $workerstate (qw{idle building away dead down}) { my @workernames = sort(grep {!/^\./} BSUtil::ls("$workersdir/$workerstate")); for my $workername (@workernames) { my ($harch) = split(':', $workername, 2); next unless exists($harchcando{"$harch/$arch"}); my $worker = readxml("$workersdir/$workerstate/$workername", $BSXML::worker, 1); next if $BSConfig::dispatch_constraint && !$BSConfig::dispatch_constraint->($dispatch_constraints_info, $worker, $constraints); next if $constraints && BSDispatcher::Constraints::oracle($worker, $constraints) <= 0; push @comp_workers, $workername; } } @comp_workers = BSUtil::unify(sort @comp_workers); @comp_workers = map {{'name' => $_}} @comp_workers; return ({'entry' => \@comp_workers}, $BSXML::dir); } sub hello { my ($cgi) = @_; my $part = ""; $part = "partition=\"$BSConfig::partition\" " if $BSConfig::partition; return "<hello name=\"AJAX Server\" $part/>\n" if $BSStdServer::isajax; return "<hello name=\"Repository Server\" $part/>\n"; } my $dispatches = [ '/' => \&hello, '!rw :' => undef, '!- GET:' => undef, '!- HEAD:' => undef, 'POST:/build/$project cmd=move oproject:project' => \&moveproject, 'POST:/build/$project/$repository/$arch/_repository match:' => \&postrepo, '/build/$project/$repository/$arch package* view:?' => \&getpackagelist_build, '/build/$project/$repository/$arch/_builddepinfo package* view:?' => \&getbuilddepinfo, '/build/$project/$repository/$arch/_jobhistory package* code:* limit:num?' => \&getjobhistory, 'POST:/build/$project/$repository/$arch/_relsync' => \&postrelsync, '/build/$project/$repository/$arch/_relsync' => \&getrelsync, 'POST:/build/$project/$repository/$arch/$package cmd=copy oproject:project? opackage:package? orepository:repository? setupdateinfoid:? resign:bool? setrelease:?' => \&copybuild, 'POST:/build/$project/$repository/$arch/$package' => \&uploadbuild, '!worker,rw /build/$project/$repository/$arch/$package:package_repository view:? binary:filename* nometa:bool? noajax:bool? nosource:bool? noimport:bool? withmd5:bool?' => \&getbinarylist, 'POST:/build/$project/$repository/$arch/$package_repository/_buildinfo add:* internal:bool? debug:bool? deps:bool?' => \&getbuildinfo_post, '/build/$project/$repository/$arch/$package/_buildinfo add:* internal:bool? debug:bool? deps:bool?'
=> \&getbuildinfo, '/build/$project/$repository/$arch/$package/_reason' => \&getbuildreason, '/build/$project/$repository/$arch/$package/_status' => \&getbuildstatus, '/build/$project/$repository/$arch/$package/_jobstatus' => \&getjobstatus, '/build/$project/$repository/$arch/$package/_history limit:num?' => \&getbuildhistory, '/build/$project/$repository/$arch/$package/_log nostream:bool? start:intnum? end:num? handoff:bool? last:bool? view:?' => \&getlogfile, '/build/$project/$repository/$arch/$package:package_repository/$filename view:?' => \&getbinary, 'PUT:/build/$project/$repository/$arch/_repository/$filename ignoreolder:bool? wipe:bool?' => \&putbinary, 'DELETE:/build/$project/$repository/$arch/_repository/$filename' => \&delbinary, '/search/published/binary/id $match:' => \&search_published_binary_id, '/search/published/pattern/id $match:' => \&search_published_pattern_id, 'PUT:/build/_dispatchprios' => \&putdispatchprios, '/build/_dispatchprios' => \&getdispatchprios, # src server calls 'POST:/event $type: $project $package? repository? arch? job? worker:job?' => \&forwardevent, # worker capabilities '/worker/$workerid' => \&getworkercap, 'POST:/worker cmd=checkconstraints $project $repository $arch $package' => \&checkconstraints, # worker calls '!worker /worker $arch $port $state: workerid? working:bool? memory:num? disk:num? buildarch:arch* tellnojob:bool?' => \&workerstate, '!worker /getbuildcode' => \&getbuildcode, '!worker /getworkercode' => \&getworkercode, '!worker POST:/putjob $arch $job $jobid $code:? now:num? kiwitree:bool? workerid?' => \&putjob, '!worker POST:/workerdispatched $arch $job $jobid hostarch:arch port workerid?' => \&workerdispatched, '!worker /getbinaries $project $repository $arch binaries: nometa:bool? metaonly:bool? workerid?' => \&getbinaries, '!worker /getbinaryversions $project $repository $arch binaries: nometa:bool? workerid?' => \&getbinaryversions, '!worker /getjobdata $arch $job $jobid workerid?' => \&getjobdata, '!worker /getpackagebinaryversionlist $project $repository $arch $package* withcode:bool? workerid?' => \&getpackagebinaryversionlist, '!worker /badpackagebinaryversionlist $project $repository $arch $package* workerid?' => \&badpackagebinaryversionlist, '!worker /getpreinstallimageinfos $prpa+ match:? workerid?' => \&getpreinstallimageinfos, # published files '/published' => \&publisheddir, '/published/$project' => \&publisheddir, '/published/$project/$repository' => \&publisheddir, '/published/$project/$repository/$arch:filename view:?' => \&publisheddir, '/published/$project/$repository/$arch:filename/$filename view:?' => \&publishedfile, '/published/$project/$repository/$arch:filename/$filename/$subfilename:filename view:?' => \&publishedfile, # jobs '/jobs' => \&listjobarchs, 'POST:/jobs/_mdload' => \&postmdload, '/jobs/$arch' => \&listjobs, 'PUT:/jobs/$arch/$job' => \&addjob, 'POST:/jobs/$arch/$job cmd=idleworker workerid jobid?' => \&idleworkerjob, 'POST:/jobs/$arch/$job cmd=setdispatchdetails details:?' => \&setdispatchdetails, 'POST:/jobs/$arch/$job cmd=fail message:' => \&failjob, 'DELETE:/jobs/$arch/$job' => \&deljob, '/jobs/$arch/$job view:?' => \&getjob, # info '/workerstatus daemonsonly:bool? arch* type:*' => \&workerstatus, # configuration 'PUT:/configuration' => \&putconfiguration, '/configuration' => \&getconfiguration, '/_result $prpa+ oldstate:md5? package* code:* lastbuild:bool? withbinarylist:bool? withstats:bool? summary:bool? withversrel:bool?' 
=> \&getresult, 'POST:/_command $cmd: $prpa+ package* code:*' => \&docommand, '/serverstatus' => \&BSStdServer::serverstatus, '/ajaxstatus' => \&getajaxstatus, ]; my $dispatches_ajax = [ '/' => \&hello, '/ajaxstatus' => \&getajaxstatus, '/build/$project/$repository/$arch/$package/_log nostream:bool? last:bool? start:intnum? end:num? view:?' => \&getlogfile, '/build/$project/$repository/$arch/$package:package_repository view:? binary:filename* nosource:bool?' => \&getbinarylist, '/build/$project/$repository/$arch/$package:package_repository/$filename view:?' => \&getbinary, '/_result $prpa+ oldstate:md5? package* code:* withbinarylist:bool? withstats:bool? summary:bool? withversrel:bool?' => \&getresult, '/getbinaries $project $repository $arch binaries: nometa:bool? metaonly:bool?' => \&getbinaries, '/getbinaryversions $project $repository $arch binaries: nometa:bool?' => \&getbinaryversions, ]; my $conf = { 'port' => $port, 'dispatches' => $dispatches, 'maxchild' => 20, 'maxchild2' => 20, 'slowrequestthr' => 10, }; my $aconf = { 'socketpath' => $ajaxsocket, 'dispatches' => $dispatches_ajax, }; if ($BSConfig::workerreposerver) { my $wport = $port; $wport = $1 if $BSConfig::workerreposerver =~ /:(\d+)$/; $conf->{'port2'} = $wport if $wport != $port; } # create bsdir before root privileges are dropped BSUtil::mkdir_p_chown($BSConfig::bsdir, $BSConfig::bsuser, $BSConfig::bsgroup); BSStdServer::server('bs_repserver', \@ARGV, $conf, $aconf); open-build-service-2.9.4/src/backend/bs_sched000077500000000000000000001147501332555733200211200ustar00rootroot00000000000000#!/usr/bin/perl -w # # Copyright (c) 2006, 2007 Michael Schroeder, Novell Inc. # Copyright (c) 2008 Adrian Schroeter, Novell Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program (see the file COPYING); if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA # ################################################################ # # The Scheduler. One big chunk of code for now. 
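# Rough flow: restore any saved state (or cold-start by fetching all
# project data from the src server), then loop forever: collect events
# from the local event directory and the remote watchers, pick the next
# prp from the lookat queues, and run it through BSSched::Checker
# (setup, preparepool, expandandsort, checkpkgs, publish), persisting
# the per-prp state below the repo root.
#
# Typical invocation (matching the argument parsing below):
#   bs_sched [--testmode|--exit|--stop|--restart] <arch>
# e.g. "bs_sched x86_64" runs the scheduler for the x86_64 architecture.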
# BEGIN { my ($wd) = $0 =~ m-(.*)/- ; $wd ||= '.'; unshift @INC, "$wd/build"; unshift @INC, "$wd"; } use Digest::MD5 (); use Data::Dumper; use Storable (); use XML::Structured ':bytes'; use POSIX; use Fcntl qw(:DEFAULT :flock); use BSConfiguration; use BSRPC ':https'; use BSUtil; use BSFileDB; use BSXML; use BSDBIndex; use BSBuild; use BSVerify; use Build; use BSDB; use BSSolv; use BSCando; use BSSched::RPC; use BSSched::Remote; use BSSched::DoD; use BSSched::ProjPacks; use BSSched::BuildRepo; use BSSched::BuildResult; use BSSched::PublishRepo; use BSSched::EventQueue; use BSSched::EventHandler; use BSSched::EventSource::Directory; use BSSched::EventSource::Retry; use BSSched::EventSource::RemoteWatcher; use BSSched::BuildJob; use BSSched::Access; use BSSched::Lookat; use BSSched::Checker; use BSSched::RepoCache; use strict; my $testprojid; my $testmode; my $asyncmode; my $startupmode; $asyncmode = $BSConfig::sched_asyncmode if $BSConfig::sched_asyncmode; $startupmode = $BSConfig::sched_startupmode if $BSConfig::sched_startupmode; my $maxserverload = 1; $maxserverload = $BSConfig::sched_maxserverload if $BSConfig::sched_maxserverload; my $genmetaalgo = 0; my $bsdir = $BSConfig::bsdir || "/srv/obs"; my @binsufs = qw{rpm deb pkg.tar.gz pkg.tar.xz}; my $binsufsre = join('|', map {"\Q$_\E"} @binsufs); BSUtil::mkdir_p_chown($bsdir, $BSConfig::bsuser, $BSConfig::bsgroup); BSUtil::drop_privs_to($BSConfig::bsuser, $BSConfig::bsgroup); BSUtil::set_fdatasync_before_rename() unless $BSConfig::disable_data_sync; # directories we use my $_reporoot = "$bsdir/build"; my $_jobsdir = "$bsdir/jobs"; my $_eventdir = "$bsdir/events"; my $_dodsdir = "$bsdir/dods"; my $_rundir = $BSConfig::rundir || "$bsdir/run"; my $_infodir = "$bsdir/info"; my $_remotecache = "$BSConfig::bsdir/remotecache"; # parse arguments if (@ARGV && $ARGV[0] eq '--testmode') { $testmode = 1; shift @ARGV; } if (@ARGV && ($ARGV[0] eq '--exit' || $ARGV[0] eq '--stop')) { $testmode = 'exit'; shift @ARGV; } elsif (@ARGV && $ARGV[0] eq '--restart') { $testmode = 'restart'; shift @ARGV; } my $_myarch = $ARGV[0] || 'i586'; if (!$BSCando::knownarch{$_myarch}) { die("Architecture '$_myarch' is unknown, please adapt BSCando.pm\n"); } ########################################################################## sub select_read { my ($timeout, @watchers) = @_; my @retrywatchers = grep {$_->{'retry'}} @watchers; if (@retrywatchers) { my $now = time(); for (splice @retrywatchers) { if ($_->{'retry'} <= $now) { push @retrywatchers, $_; next; } $timeout = $_->{'retry'} - $now if !defined($timeout) || $_->{'retry'} - $now < $timeout; } return @retrywatchers if @retrywatchers; @watchers = grep {!$_->{'retry'}} @watchers; } @watchers = grep {exists $_->{'socket'}} @watchers; while(1) { my $rin = ''; for (@watchers) { vec($rin, fileno($_->{'socket'}), 1) = 1; } my $nfound = select($rin, undef, undef, $timeout); if (!defined($nfound) || $nfound == -1) { next if $!
== POSIX::EINTR; die("select: $!\n"); } return () if !$nfound && defined($timeout); die("select: $!\n") unless $nfound; @watchers = grep {vec($rin, fileno($_->{'socket'}), 1)} @watchers; die unless @watchers; return @watchers; } } sub writeschedulerinfo { my ($gctx) = @_; my $myarch = $gctx->{'arch'}; my $projpacks = $gctx->{'projpacks'}; my $prpunfinished = $gctx->{'prpunfinished'}; my $prpchecktimes = $gctx->{'prpchecktimes'}; # update scheduler stats my $sinfo = {'arch' => $myarch, 'started' => $gctx->{'schedulerstart'}, 'time' => time(), 'slept' => $gctx->{'slept'}}; $sinfo->{'projects'} = keys %$projpacks; $sinfo->{'repositories'} = @{$gctx->{'prps'} || []}; my $unfinishedsum = 0; $unfinishedsum += $_ for values %{$prpunfinished || {}}; $sinfo->{'notready'} = $unfinishedsum; $sinfo->{'queue'} = {}; $sinfo->{'queue'}->{'high'} = @{$gctx->{'lookat_high'}}; $sinfo->{'queue'}->{'med'} = @{$gctx->{'lookat_med'}}; $sinfo->{'queue'}->{'low'} = @{$gctx->{'lookat_low'}}; $sinfo->{'queue'}->{'next'} = keys %{$gctx->{'lookat_next'}}; my $sum = 0; my $sum2 = 0; my $n = keys %$prpchecktimes; for my $prp (sort keys %$prpchecktimes) { my $t = $prpchecktimes->{$prp}; $sum += $t; $sum2 += $t * $t; } $n ||= 1; $sinfo->{'avg'} = $sum / $n; $sinfo->{'variance'} = sqrt(abs(($sum2 - $sum * $sum / $n) / $n)); for my $prp (splice(@{[sort {$prpchecktimes->{$b} <=> $prpchecktimes->{$a}} keys %$prpchecktimes]}, 0, 10)) { my ($projid, $repoid) = split('/', $prp, 2); my $worst = {'project' => $projid, 'repository' => $repoid}; $worst->{'packages'} = keys %{($projpacks->{$projid} || {})->{'package'} || {}}; $worst->{'time'} = $prpchecktimes->{$prp}; push @{$sinfo->{'worst'}}, $worst; } $sinfo->{'buildavg'} = $gctx->{'buildavg'}; my $infodir = $gctx->{'infodir'}; writexml("$infodir/.schedulerinfo.$myarch", "$infodir/schedulerinfo.$myarch", $sinfo, $BSXML::schedulerinfo); } sub updaterelsyncmax { my ($dir, $new, $cleanup) = @_; local *F; BSUtil::lockopen(\*F, '+>>', "$dir/:relsync.max"); my $relsyncmax; if (-s "$dir/:relsync.max") { $relsyncmax = BSUtil::retrieve("$dir/:relsync.max", 2); } $relsyncmax ||= {}; my $changed; for my $tag (keys %$new) { next if defined($relsyncmax->{$tag}) && $relsyncmax->{$tag} >= $new->{$tag}; $relsyncmax->{$tag} = $new->{$tag}; $changed = 1; } if ($cleanup) { for (grep {!$new->{$_}} keys %$relsyncmax) { delete $relsyncmax->{$_}; $changed = 1; } } BSUtil::store("$dir/.:relsync.max", "$dir/:relsync.max", $relsyncmax) if $changed; close(F); return $changed; } sub sendrelsyncupdate { my ($gctx, $prp, $isfinished) = @_; print " updating relsync information\n"; my $myarch = $gctx->{'arch'}; my $gdst = "$gctx->{'reporoot'}/$prp/$myarch"; my ($projid, $repoid) = split('/', $prp, 2); my $projpacks = $gctx->{'projpacks'}; my $packs = ($projpacks->{$projid} || {})->{'package'} || {}; # retrieve new data my $relsync = BSUtil::retrieve("$gdst/:relsync") || {}; # convert to max format my $relsyncmax = {}; for my $packid (sort keys %$relsync) { next unless $relsync->{$packid} =~ /^(.*)\.([^-]*)$/; my $tag = ($packs->{$packid} || {})->{'bcntsynctag'} || $packid; next if defined($relsyncmax->{"$tag/$1"}) && $relsyncmax->{"$tag/$1"} >= $2; $relsyncmax->{"$tag/$1"} = $2; } # merge with relsync.max updaterelsyncmax($gdst, $relsyncmax, $isfinished); # send to other schedulers my $param = { 'uri' => "$BSConfig::srcserver/relsync", 'request' => 'POST', 'timeout' => 600, 'data' => BSUtil::tostorable($relsyncmax), }; eval { BSRPC::rpc($param, undef, "project=$projid", "repository=$repoid", "arch=$myarch"); }; 
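# On success, snapshot what was just sent: hard-link :relsync to a
# temporary name and rename that over :relsync.sent (atomic replace).
# The main loop later compares mtime/size/inode of :relsync and
# :relsync.sent to decide whether another update has to be sent.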
if (!$@) { unlink("$gdst/:relsync$$"); link("$gdst/:relsync", "$gdst/:relsync$$"); rename("$gdst/:relsync$$", "$gdst/:relsync.sent"); } else { warn($@); } } sub mergerelsyncfile { my ($gctx, $prp) = @_; print " merging relsync data\n"; my $myarch = $gctx->{'arch'}; my $reporoot = $gctx->{'reporoot'}; my $relsync_merge = BSUtil::retrieve("$reporoot/$prp/$myarch/:relsync.merge", 2); if ($relsync_merge) { my $relsync; $relsync = BSUtil::retrieve("$reporoot/$prp/$myarch/:relsync", 2) if -e "$reporoot/$prp/$myarch/:relsync"; $relsync = { %{$relsync || {}}, %$relsync_merge }; BSUtil::store("$reporoot/$prp/$myarch/.:relsync", "$reporoot/$prp/$myarch/:relsync", $relsync); } unlink("$reporoot/$prp/$myarch/:relsync.merge"); } sub mergemetacachefile { my ($gctx, $prp) = @_; print " merging metacache data\n"; my $myarch = $gctx->{'arch'}; my $reporoot = $gctx->{'reporoot'}; my $metacache_merge = BSUtil::retrieve("$reporoot/$prp/$myarch/:full.metacache.merge", 2); if ($metacache_merge) { my $metacache; $metacache = BSUtil::retrieve("$reporoot/$prp/$myarch/:full.metacache", 2) if -e "$reporoot/$prp/$myarch/:full.metacache"; $metacache = { %{$metacache || {}}, %$metacache_merge }; delete $metacache->{$_} for grep {!defined($metacache_merge->{$_})} keys %$metacache_merge; if (%$metacache) { BSUtil::store("$reporoot/$prp/$myarch/.:full.metacache", "$reporoot/$prp/$myarch/:full.metacache", $metacache); } else { unlink("$reporoot/$prp/$myarch/:full.metacache"); } } unlink("$reporoot/$prp/$myarch/:full.metacache.merge"); } sub mergebininfofile { my ($gctx, $prp) = @_; my $myarch = $gctx->{'arch'}; my $reporoot = $gctx->{'reporoot'}; BSSched::BuildResult::read_gbininfo("$reporoot/$prp/$myarch"); my $repounchanged = $gctx->{'repounchanged'}; $repounchanged->{$prp} = 2 if $repounchanged->{$prp}; } ########################################################################## ########################################################################## ## ## Scheduler startup code ## $| = 1; $SIG{'PIPE'} = 'IGNORE'; if ($testmode && ($testmode eq 'exit' || $testmode eq 'restart')) { if (!(-e "$_rundir/bs_sched.$_myarch.lock") || BSUtil::lockcheck('>>', "$_rundir/bs_sched.$_myarch.lock")) { die("scheduler is not running for $_myarch.\n") if $testmode eq 'restart'; print("scheduler is not running for $_myarch.\n"); exit(0); } if ($testmode eq 'restart') { print "restarting scheduler for $_myarch...\n"; } else { print "shutting down scheduler for $_myarch...\n"; } my $ev = { 'type' => $testmode eq 'restart' ? 
'restart' : 'exitcomplete', }; my $evname = "$ev->{'type'}::"; my $gctx = {'eventdir' => $_eventdir}; BSSched::EventSource::Directory::sendevent($gctx, $ev, $_myarch, $evname); BSUtil::waituntilgone("$_eventdir/$_myarch/$evname"); if ($testmode eq 'exit') { # scheduler saw the event, wait until the process is gone local *F; BSUtil::lockopen(\*F, '>>', "$_rundir/bs_sched.$_myarch.lock", 1); close F; } exit(0); } print "starting build service scheduler\n"; # get lock mkdir_p($_rundir); if (!$testprojid) { open(RUNLOCK, '>>', "$_rundir/bs_sched.$_myarch.lock") || die("$_rundir/bs_sched.$_myarch.lock: $!\n"); flock(RUNLOCK, LOCK_EX | LOCK_NB) || die("scheduler is already running for $_myarch!\n"); utime undef, undef, "$_rundir/bs_sched.$_myarch.lock"; } # create directories for my $d ("$_eventdir/$_myarch", "$_jobsdir/$_myarch", $_infodir) { next if -d $d; mkdir_p($d) || die("$d: $!\n"); } # setup event mechanism my $_myeventdir = "$_eventdir/$_myarch"; if (!-p "$_myeventdir/.ping") { POSIX::mkfifo("$_myeventdir/.ping", 0666) || die("$_myeventdir/.ping: $!"); chmod(0666, "$_myeventdir/.ping"); } sysopen(PING, "$_myeventdir/.ping", POSIX::O_RDWR) || die("$_myeventdir/.ping: $!"); fcntl(PING, F_SETFL, POSIX::O_NONBLOCK); # create global context my $gctx = { 'arch' => $_myarch, 'reporoot' => $_reporoot, # config 'obsname' => $BSConfig::obsname, 'jobsdir' => $_jobsdir, 'myjobsdir' => "$_jobsdir/$_myarch", 'eventdir' => $_eventdir, 'myeventdir' => $_myeventdir, 'dodsdir' => $_dodsdir, 'rundir' => $_rundir, 'infodir' => $_infodir, 'remotecache' => $_remotecache, 'remoteproxy' => $BSConfig::proxy, 'asyncmode' => $asyncmode, # repository state cache # 'lastscan' last time we scanned # 'meta' meta cache # 'solv' solv data cache (for remote repos) 'repodatas' => BSSched::RepoCache->new($_myarch, $_reporoot), # remote bininfo cache 'remotegbininfos' => {}, 'remotepackstatus' => {}, 'remotepackstatus_cleanup' => {}, # project data 'projpacks' => undef, # data of all local projects 'channeldata' => {}, # global channel data unificator to save memory 'remoteprojs' => {}, # remote project cache 'remotemissing' => {}, # missing remote projects cache 'projsuspended' => {}, # project is suspended for now # lastcheck cache 'lastcheck' => {}, # package check data of last check # maps prp => { packid => checkdata } # postprocessed project data 'projpacks_linked' => {}, # data of all linked sources 'prps' => [], # sorted list of all local prps (project repos) 'prpdeps' => {}, # searchpath plus aggregate deps plus kiwi deps 'rprpdeps' => {}, # reverse prpdeps # maps prp => [ prp, prp ... ], used for sorting 'prpnoleaf' => {}, # is this prp referenced by another prp? 'prpsearchpath' => {}, # which prps to use for building # maps prp => [ prp, prp ... ] 'haveinterrepodep' => {}, # projid => bool: some repos prpdeps contain another repo of the same project # triggers 'prpcheckuseforbuild' => {}, # project/package meta has changed 'prpfinished' => {}, # which prps are finished 'repounchanged' => {}, # which prps are changed: deleted = full tree changed, 1 = unchanged, 2 = just package changes 'prpnotready' => {}, # which packages are not ready in a prp # maps prp => { packid => 1, ... 
} # remote watchers 'watchremote' => {}, # remote_url => { eventdescr => projid } 'watchremote_start' => {}, # remote_url => lasteventno # changed: 1: something "local" changed, :full unchanged, # 2: the :full repo is changed as well 'changed_low' => {}, # something changed, put this in lookup_low 'changed_med' => {}, # something changed, put this in lookup_med 'changed_high' => {}, # something changed, put this in lookup_high 'changed_dirty' => {}, # set the dirty flag for those 'lookat_low' => [], # not so important 'lookat_med' => [], # builds are finished here 'lookat_high' => [], # user interaction, do those ASAP 'lookat_next' => {}, # not so important, next series 'notlow' => 0, 'notmed' => 0, 'delayedfetchprojpacks' => {}, # projpacks fetches we have delayed till prp check time 'nextmed' => {}, 'retryevents' => BSSched::EventSource::Retry->new(), # stats 'buildavg' => 1200, # start not at 0, but with 20min for the average counter 'prpunfinished' => {}, 'prpchecktimes' => {}, 'schedulerstart' => time(), 'slept' => 0, 'prplastcheck' => {}, # XXX: currently not used }; # find max gen_meta algorithm my $maxgenmetaalgo = 0; if (defined(&BSSolv::setgenmetaalgo)) { $maxgenmetaalgo = BSBuild::setgenmetaalgo(-1); my $solvgenmetaalgo = BSSolv::setgenmetaalgo(-1); $maxgenmetaalgo = $solvgenmetaalgo if $solvgenmetaalgo < $maxgenmetaalgo; } # find gen_meta algorithm to use if (defined($BSConfig::genmetaalgo)) { $genmetaalgo = $BSConfig::genmetaalgo; } else { $genmetaalgo = BSBuild::setgenmetaalgo($genmetaalgo); if ($genmetaalgo > $maxgenmetaalgo) { warn("downgraded genmeta algorithm from $genmetaalgo to $maxgenmetaalgo because of old perl-BSSolv\n"); $genmetaalgo = $maxgenmetaalgo; } } die("perl-BSSolv is too old for meta algo $genmetaalgo\n") if $genmetaalgo && !defined(&BSSolv::setgenmetaalgo); # configure gen_meta algorithm BSBuild::setgenmetaalgo($genmetaalgo); BSSolv::setgenmetaalgo($genmetaalgo) if defined(&BSSolv::setgenmetaalgo); $gctx->{'genmetaalgo'} = $genmetaalgo; $gctx->{'maxgenmetaalgo'} = $maxgenmetaalgo; # create rpc context my $rctx = BSSched::RPC->new( 'maxserverload' => $maxserverload, 'wakeupfunction' => \&BSSched::Checker::setchanged, ); $gctx->{'rctx'} = $rctx; $gctx->{'testmode'} = 1 if $testmode; $BSSched::ProjPacks::testprojid = $testprojid if $testprojid; # read old state if present if (!$testprojid && -s "$_rundir/bs_sched.$_myarch.state") { print "reading old state...\n"; my $schedstate = BSUtil::retrieve("$_rundir/bs_sched.$_myarch.state", 2); unlink("$_rundir/bs_sched.$_myarch.state"); if ($schedstate) { # just for testing...
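# Carry over as much as possible from the previous run: the lookat
# queues, the per-prp changed/finished/notready flags and the remote
# watcher start positions, so a restart does not degenerate into a
# full recheck of every repository.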
print " - $_\n" for sort keys %$schedstate; if ($schedstate->{'projpacks'}) { $gctx->{'projpacks'} = $schedstate->{'projpacks'}; if ($schedstate->{'remoteprojs'}) { $gctx->{'remoteprojs'} = $schedstate->{'remoteprojs'}; for (values %{$gctx->{'remoteprojs'}}) { next unless $_->{'sibling'}; $_->{'partition'} ||= $_->{'sibling'}; delete $_->{'sibling'}; } } } else { # get project and package information from src server BSSched::ProjPacks::get_projpacks($gctx, undef); # XXX: async } BSSched::ProjPacks::get_projpacks_postprocess($gctx); my $projpacks = $gctx->{'projpacks'}; my $prps = $gctx->{'prps'}; my %oldprps = map {$_ => 1} @{$schedstate->{'prps'} || []}; my @newprps = grep {!$oldprps{$_}} @$prps; # update lookat arrays $gctx->{'lookat_low'} = $schedstate->{'lookat'} || []; $gctx->{'lookat_med'} = $schedstate->{'lookat_oob'} || []; $gctx->{'lookat_high'} = $schedstate->{'lookat_oobhigh'} || []; # update changed hash my $changed_low = $gctx->{'changed_low'}; my $changed_med = $gctx->{'changed_med'}; my $changed_high = $gctx->{'changed_high'}; for my $prp (@newprps) { $changed_med->{$prp} = 2; $changed_med->{(split('/', $prp, 2))[0]} = 2; } my $oldchanged_low = $schedstate->{'changed_low'} || {}; my $oldchanged_med = $schedstate->{'changed_med'} || {}; my $oldchanged_high = $schedstate->{'changed_high'} || {}; for my $projid (keys %$projpacks) { $changed_low->{$projid} = $oldchanged_low->{$projid} if exists $oldchanged_low->{$projid}; $changed_med->{$projid} = $oldchanged_med->{$projid} if exists $oldchanged_med->{$projid}; $changed_high->{$projid} = $oldchanged_high->{$projid} if exists $oldchanged_high->{$projid}; } for my $prp (@$prps) { $changed_low->{$prp} = $oldchanged_low->{$prp} if exists $oldchanged_low->{$prp}; $changed_med->{$prp} = $oldchanged_med->{$prp} if exists $oldchanged_med->{$prp}; $changed_high->{$prp} = $oldchanged_high->{$prp} if exists $oldchanged_high->{$prp}; } ## update repodata hash #my $oldrepodata = $schedstate->{'repodata'} || {}; #for my $prp (@$prps) { # $repodata{$prp} = $oldrepodata->{$prp} if exists $oldrepodata->{$prp}; #} # update prpfinished hash my $oldprpfinished = $schedstate->{'prpfinished'} || {}; my $prpfinished = $gctx->{'prpfinished'}; for my $prp (@$prps) { $prpfinished->{$prp} = $oldprpfinished->{$prp} if exists $oldprpfinished->{$prp}; } # update prpnotready hash my $oldprpnotready = $schedstate->{'globalnotready'} || {}; my $prpnotready = $gctx->{'prpnotready'}; for my $prp (@$prps) { $prpnotready->{$prp} = $oldprpnotready->{$prp} if %{$oldprpnotready->{$prp} || {}}; } # update repounchanged hash my $oldrepounchanged = $schedstate->{'repounchanged'} || {}; my $repounchanged = $gctx->{'repounchanged'}; for my $prp (@$prps) { $repounchanged->{$prp} = $oldrepounchanged->{$prp} if exists $oldrepounchanged->{$prp}; } # update delayedfetchprojpacks hash my $delayedfetchprojpacks = $gctx->{'delayedfetchprojpacks'}; my $olddelayedfetchprojpacks = $schedstate->{'delayedfetchprojpacks'} || {}; for my $projid (keys %$projpacks) { $delayedfetchprojpacks->{$projid} = $olddelayedfetchprojpacks->{$projid} if $olddelayedfetchprojpacks->{$projid}; } # update projsuspended hash my $oldprojsuspended = $schedstate->{'projsuspended'} || {}; %{$gctx->{'projsuspended'}} = %$oldprojsuspended; # use old start values if ($schedstate->{'watchremote_start'}) { $gctx->{'watchremote_start'} = $schedstate->{'watchremote_start'}; } # start project data fetch for delayed startup projects for my $projid (sort keys %$projpacks) { my $packs = $projpacks->{$projid}->{'package'} 
|| {}; for my $packid (sort keys %$packs) { $delayedfetchprojpacks->{$projid} = [ '/all' ] if ($packs->{$packid}->{'error'} || '') eq 'delayed startup'; } } if ($schedstate->{'fetchprojpacks'} && $schedstate->{'projpacks'}) { my %fetchprojpacks_nodelay = map {$_ => 1} keys %{$schedstate->{'fetchprojpacks'}}; BSSched::ProjPacks::do_fetchprojpacks($gctx, $schedstate->{'fetchprojpacks'}, \%fetchprojpacks_nodelay, {}, {}); } if ($schedstate->{'retryevents'}) { for my $ev (@{$schedstate->{'retryevents'}}) { if ($ev->{'type'} eq 'project' || ($ev->{'type'} eq 'package' && !$ev->{'package'})) { $gctx->{'retryevents'}->addretryevent({'type' => $ev->{'type'}, 'project' => $ev->{'project'}}); } elsif ($ev->{'type'} eq 'package') { $gctx->{'retryevents'}->addretryevent({'type' => 'package', 'project' => $ev->{'project'}, 'package' => $ev->{'package'}}); } } } } } my $infodir = $gctx->{'infodir'}; my $sinfo = readxml("$infodir/schedulerinfo.$_myarch", $BSXML::schedulerinfo, 1) || {}; $sinfo->{'booting'} = undef; writexml("$infodir/.schedulerinfo.$_myarch", "$infodir/schedulerinfo.$_myarch", $sinfo, $BSXML::schedulerinfo); if (!$gctx->{'projpacks'} && $startupmode) { if ($startupmode == 1) { print "cold start, scanning all non-remote projects\n"; } else { print "cold start, initializing all projects\n"; } my $param = { 'uri' => "$BSConfig::srcserver/getprojpack", }; my @args = ('withrepos', 'withconfig', "arch=$_myarch", 'withremotemap=1', 'noremote=1'); push @args, 'withsrcmd5', 'withdeps' if $startupmode == 1; push @args, "partition=$BSConfig::partition" if $BSConfig::partition; my $projpacksin; while (1) { eval { $projpacksin = BSRPC::rpc($param, $BSXML::projpack, @args); }; last unless $@ || !$projpacksin; print $@ if $@; print "retrying in 60 seconds...\n"; sleep(60); } BSSched::ProjPacks::update_projpacks($gctx, $projpacksin); BSSched::ProjPacks::get_projpacks_postprocess($gctx); my $projpacks = $gctx->{'projpacks'}; my $delayedfetchprojpacks = $gctx->{'delayedfetchprojpacks'}; for my $projid (sort keys %$projpacks) { my $packs = $projpacks->{$projid}->{'package'} || {}; next unless %$packs; if ($startupmode == 1) { my @delayed; my $ok; for my $packid (sort keys %$packs) { my $pdata = $packs->{$packid}; if ($pdata->{'error'}) { if ($pdata->{'error'} =~ /noremote option/) { $pdata->{'error'} = 'delayed startup'; push @delayed, $packid; } else { $ok++; } } else { if (grep {$_->{'error'} && $_->{'error'} =~ /noremote option/} @{$pdata->{'info'} || []}) { $pdata->{'error'} = 'delayed startup'; push @delayed, $packid; } else { $ok++; } } } if (!$ok) { $delayedfetchprojpacks->{$projid} = [ '/all' ]; # hack } else { $delayedfetchprojpacks->{$projid} = [ @delayed ]; } } else { $delayedfetchprojpacks->{$projid} = [ '/all' ]; # hack for my $packid (sort keys %$packs) { $packs->{$packid}->{'error'} = 'delayed startup'; } } } @{$gctx->{'lookat_low'}} = sort keys %$projpacks; push @{$gctx->{'lookat_low'}}, @{$gctx->{'prps'}}; my $prpcheckuseforbuild = $gctx->{'prpcheckuseforbuild'}; $prpcheckuseforbuild->{$_} = 1 for @{$gctx->{'prps'}}; } if (!$gctx->{'projpacks'}) { # get project and package information from src server print "cold start, scanning all projects\n"; BSSched::ProjPacks::get_projpacks($gctx, undef); BSSched::ProjPacks::get_projpacks_postprocess($gctx); # look at everything @{$gctx->{'lookat_low'}} = sort keys %{$gctx->{'projpacks'}}; push @{$gctx->{'lookat_low'}}, @{$gctx->{'prps'}}; } # reset booting flag writeschedulerinfo($gctx); # bring dods in sync with projpacks if 
($BSConfig::enable_download_on_demand) { BSSched::DoD::init_doddata($gctx); } else { my $dodsdir = $gctx->{'dodsdir'}; BSUtil::cleandir($dodsdir) if -d $dodsdir; } BSSched::BuildJob::init_ourjobs($gctx); unlink("$_rundir/bs_sched.$_myarch.dead"); # alive and kicking if (@{$gctx->{'lookat_low'}}) { %{$gctx->{'lookat_next'}} = map {$_ => 1} @{$gctx->{'lookat_low'}}; @{$gctx->{'lookat_low'}} = (); } my $gotevent = 1; $gotevent = 0 if $testprojid; my $lastschedinfo = 0; my $initialstartup = 1; my %remotewatchers; # XXX: put in gctx? ## ## Here comes the big loop... ## my $reporoot = $gctx->{'reporoot'}; my $myarch = $gctx->{'arch'}; eval { while(1) { if (%{$gctx->{'changed_low'}} || %{$gctx->{'changed_med'}} || %{$gctx->{'changed_high'}}) { BSSched::Lookat::changed2lookat($gctx); next; } my $watchremote = $gctx->{'watchremote'}; my $watchremote_start = $gctx->{'watchremote_start'}; delete $gctx->{'watchremote_start_copy'}; # delete no longer needed or outdated remotewatchers for my $remoteurl (sort keys %remotewatchers) { delete $remotewatchers{$remoteurl} if $remotewatchers{$remoteurl}->isobsolete($watchremote->{$remoteurl}); } # create missing watchers for my $remoteurl (sort keys %$watchremote) { $remotewatchers{$remoteurl} ||= BSSched::EventSource::RemoteWatcher->new($myarch, $remoteurl, $watchremote->{$remoteurl}, 'start' => $watchremote_start->{$remoteurl}, 'remoteproxy' => $gctx->{'remoteproxy'}, 'obsname' => $gctx->{'obsname'}, ); } # collect events to process my $ev_queue = BSSched::EventQueue->new($gctx, 'initialstartup' => $initialstartup); my $pingwatcher = { 'socket' => \*PING, 'remoteurl' => 'ping', }; # add retry events if ($gctx->{'retryevents'}->count()) { my @due = $gctx->{'retryevents'}->due($gctx); if (@due) { print "retrying ".@due." 
events\n"; $ev_queue->add_events(@due); } } # add events from watchers, also process finished xrpc calls if ($testprojid) { print "ignoring events due to test mode\n"; } else { my @watchers = (values(%remotewatchers), $gctx->{'rctx'}->xrpc_handles()); if (@watchers) { @watchers = select_read(0, $pingwatcher, @watchers); for my $watcher (@watchers) { my $remoteurl = $watcher->{'remoteurl'}; if (!defined($remoteurl)) { $gctx->{'rctx'}->xrpc_resume($watcher); $gotevent = 1; # force loop restart } elsif ($remoteurl eq 'ping') { $gotevent = 1; } elsif ($watcher->{'retry'}) { print "retrying watcher for $remoteurl\n"; delete $remotewatchers{$remoteurl}; $gotevent = 1; # force loop restart } else { $gctx->{'watchremote_start_copy'} ||= { %$watchremote_start }; my @events = $watcher->getevents($watchremote->{$remoteurl}, $watchremote_start); $ev_queue->add_events(@events); delete $remotewatchers{$remoteurl} unless $watcher->{'retry'}; # watcher is finished $gotevent = 1; # force loop restart } } } else { my $dummy; $gotevent = 1 if (sysread(PING, $dummy, 1, 0) || 0) > 0; } } # add events from the event directory if ($gotevent) { $gotevent = 0; # drain ping pipe my $dummy; 1 while (sysread(PING, $dummy, 1024, 0) || 0) > 0; # add events from myeventdir my @events = BSSched::EventSource::Directory::readevents($gctx, $gctx->{'myeventdir'}); $ev_queue->add_events(@events); next unless $ev_queue->events_in_queue(); } # process all collected events if ($ev_queue->events_in_queue()) { die if $testprojid; eval { $ev_queue->process_events(); }; if ($@) { warn($@); BSSched::EventHandler::event_exit($ev_queue, {'type' => 'emergencydump'}); exit(1); } next; } # done with first time event processing $initialstartup = undef; # mark all indirect affected repos dirty if (%{$gctx->{'changed_dirty'}}) { for my $prp (keys %{$gctx->{'changed_dirty'}}) { my $gdst = "$gctx->{'reporoot'}/$prp/$myarch"; next if ! -d $gdst; next if -e "$gdst/:schedulerstate.dirty"; BSUtil::touch("$gdst/:schedulerstate.dirty"); } %{$gctx->{'changed_dirty'}} = (); } my ($lookattype, $prp) = BSSched::Lookat::nextlookat($gctx); # postpone if we got source change RPCs running if (defined($prp)) { my ($projid) = split('/', $prp, 2); if ($gctx->{'rctx'}->xrpc_busy($projid)) { #print "postponed looking at $prp\n"; my $ctx = {'changeprp' => $prp, 'changetype' => $lookattype, 'gctx' => $gctx}; $gctx->{'rctx'}->xrpc_addwakeup($ctx, $projid); next; } } $gctx->{'rctx'}->xrpc_printstats(); if (!defined($prp)) { # nothing to do. good night, sleep tight... 
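# Idle wait: block in select_read() until the ping fifo, a remote
# watcher, a due retry event or a finished async rpc wakes us up;
# the time spent here is accounted as 'slept' in the scheduler info.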
if ($testmode && !$gctx->{'rctx'}->xrpc_busy()) { print "Test mode, all sources and events processed, exiting...\n"; my $ectx = BSSched::EventQueue->new($gctx); BSSched::EventHandler::event_exit($ectx, { 'type' => 'exitcomplete' }); # notreached } BSUtil::printlog("waiting for an event..."); exit 0 if $testprojid; my $sleepstart = time(); my @watchers = (values(%remotewatchers), $gctx->{'retryevents'}->events(), $gctx->{'rctx'}->xrpc_handles()); select_read(undef, $pingwatcher, @watchers); $gctx->{'slept'} += time() - $sleepstart; next; } BSSched::Lookat::lookatprp($gctx, $lookattype, $prp); my ($projid, $repoid) = split('/', $prp, 2); next if $testprojid && $projid ne $testprojid; if (!defined($repoid)) { # project maintenance, check for deleted repositories my $projpacks = $gctx->{'projpacks'}; my %repoids; for my $repo (@{($projpacks->{$projid} || {})->{'repository'} || []}) { $repoids{$repo->{'name'}} = 1 if grep {$_ eq $myarch} @{$repo->{'arch'} || []}; } for my $repoid (ls("$reporoot/$projid")) { next if $repoid eq ':all'; # XXX next if $repoids{$repoid}; my $prp = "$projid/$repoid"; next if -l "$reporoot/$prp"; # XXX my $gdst = "$reporoot/$prp/$myarch"; next unless -d $gdst; # we no longer build this repoid print " - deleting repository $prp\n"; delete $gctx->{'prpfinished'}->{$prp}; delete $gctx->{'prpnotready'}->{$prp}; delete $gctx->{'prpunfinished'}->{$prp}; delete $gctx->{'prpchecktimes'}->{$prp}; $gctx->{'repodatas'}->drop($prp, $myarch); delete $gctx->{'lastcheck'}->{$prp}; delete $gctx->{'prpcheckuseforbuild'}->{$prp}; my $ctx = BSSched::Checker->new($gctx, $prp); $ctx->wipe(); } rmdir("$reporoot/$projid"); # in case this was the last repo next; } # do delayed projpack fetches if ($gctx->{'delayedfetchprojpacks'}->{$projid}) { my $inprogress; my $delayed; while ($delayed = delete($gctx->{'delayedfetchprojpacks'}->{$projid})) { my $async; $async = {'_changeprp' => $prp, '_changetype' => $lookattype} if $gctx->{'asyncmode'}; $inprogress ||= !BSSched::ProjPacks::do_delayedprojpackfetches($gctx, $async, $projid, @$delayed); } next if $inprogress; # async projpack fetch in progress... } my $projpacks = $gctx->{'projpacks'}; if (!$projpacks->{$projid}) { print " - $prp: no longer exists\n" unless $gctx->{'remoteprojs'}->{$projid}; next; } my $ctx = BSSched::Checker->new($gctx, $prp, 'changeprp' => $prp, 'changetype' => $lookattype, 'verbose' => 1); my $gdst = "$reporoot/$prp/$myarch"; # merge bininfo if (-e "$gdst/:bininfo.merge" || ! -e "$gdst/:bininfo") { mergebininfofile($gctx, $prp); } # merge relsync if (-e "$gdst/:relsync.merge") { mergerelsyncfile($gctx, $prp); } # merge metacache if (-e "$gdst/:full.metacache.merge") { mergemetacachefile($gctx, $prp); } my ($state, $details); ($state, $details) = $ctx->setup(); if ($state ne 'scheduling') { if ($state) { $ctx->set_repo_state($state, $details); $gctx->{'prpfinished'}->{$prp} = 1; } $details ||= $state; print " - $prp: $details\n"; next; } print " - $prp\n"; $ctx->set_repo_state('scheduling'); if ($gctx->{'prpcheckuseforbuild'}->{$prp}) { my $packs = $projpacks->{$projid}->{'package'} || {}; my $prpsearchpath = $ctx->{'prpsearchpath'}; # the if statement below is to ease transition to the new full handling # for manually created "base" repos if (!$BSSched::BuildResult::new_full_handling || %$packs || ! 
-d "$gdst/:full" || -e "$gdst/:full.useforbuild") { BSSched::BuildRepo::checkuseforbuild($gctx, $prp, $prpsearchpath, undef); delete $gctx->{'prpcheckuseforbuild'}->{$prp}; } } # Step 2a: check if packages got deleted/excluded $ctx->wipeobsolete(); # Step 2b: set up pool and repositories ($state, $details) = $ctx->preparepool(); if ($state ne 'scheduling') { $ctx->set_repo_state($state, $details); print " $details\n"; print " (delayed)\n" if $ctx->{'havedelayed'}; next; } # setup our special expander my $xp = BSSolv::expander->new($ctx->{'pool'}, $ctx->{'conf'}); $ctx->{'expander'} = $xp; no warnings 'redefine'; local *Build::expand = sub { $_[0] = $xp; goto &BSSolv::expander::expand; }; use warnings 'redefine'; # Step 2c: expand all dependencies, put them in %pdeps hash and sort the packages ($state, $details) = $ctx->expandandsort(); if ($state ne 'scheduling') { $ctx->set_repo_state($state, $details); print " $details\n"; next; } # fetch relsync data $ctx->calcrelsynctrigger(); ($state, $details) = $ctx->checkpkgs(); my $unfinished = $ctx->{'unfinished'}; # notify remote build services of repository changes or block state # changes # we also send it if we finish a prp to give linked aggregates a # chance to work my $repounchanged = $gctx->{'repounchanged'}; if (!$repounchanged->{$prp} || (!%$unfinished && !$gctx->{'prpfinished'}->{$prp})) { BSSched::EventSource::Directory::sendrepochangeevent($gctx, $prp); $repounchanged->{$prp} = 1; } elsif ($repounchanged->{$prp} == 2) { BSSched::EventSource::Directory::sendrepochangeevent($gctx, $prp, 'repoinfo'); $repounchanged->{$prp} = 1; } # free memory Build::forgetdeps($ctx->{'conf'}); $ctx->printstats(); # trigger dod package fetching if ($BSConfig::enable_download_on_demand) { BSSched::DoD::dodfetch($ctx) if $ctx->{'doddownloads'}; } # we always publish kiwi... if ((!%$unfinished && !$ctx->{'havedelayed'}) || $ctx->{'prptype'} eq 'kiwi') { ($state, $details) = $ctx->publish($state, $details); } my $wasfinished = $gctx->{'prpfinished'}->{$prp}; # clean up and free memory if we are finished if (!%$unfinished && !$ctx->{'havedelayed'} && $state eq 'finished') { $gctx->{'prpfinished'}->{$prp} = 1; # write out lastcheck cache and delete it my $lastcheck = $gctx->{'lastcheck'}->{$prp}; if ($lastcheck && %$lastcheck) { BSUtil::store("$gdst/.:lastcheck", "$gdst/:lastcheck", $lastcheck); } else { unlink("$gdst/:lastcheck"); } delete $gctx->{'lastcheck'}->{$prp}; # delete pkg meta cache $gctx->{'repodatas'}->dropmeta($prp, $myarch); if (!$gctx->{'prpnoleaf'}->{$prp}) { # only free repo data if all projects we depend on are finished, too.
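# (a dependency counts as finished here if its prp is flagged finished,
# or if it belongs to a remote project and has no notready packages)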
# (we always have to do the expansion if something changes) my @unfinishedprps; my $remoteprojs = $gctx->{'remoteprojs'}; my $prpfinished = $gctx->{'prpfinished'}; for (@{$gctx->{'prpdeps'}->{$prp}}) { next if $prpfinished->{$_}; # if this is a remote repo, check prpnotready if (!%{$gctx->{'prpnotready'}->{$_} || {}}) { my ($p) = split('/', $_, 2); next if $remoteprojs->{$p}; } push @unfinishedprps, $_; } if (!@unfinishedprps) { print " leaf prp, freeing data\n"; $gctx->{'repodatas'}->drop($prp, $myarch); } else { print " leaf prp, unfinished prps: @unfinishedprps\n"; } } } else { delete $gctx->{'prpfinished'}->{$prp}; unlink("$gdst/:repodone"); } if (!$wasfinished && $gctx->{'haveinterrepodep'}->{$projid}) { # check for inter-repo deps print " checking inter-repo deps\n"; my $proj = $projpacks->{$projid} || {}; for my $arepoid (map {$_->{'name'}} @{$proj->{'repository'} || []}) { next if $arepoid eq $repoid; next unless grep {$_ eq $prp} @{$gctx->{'prpdeps'}->{"$projid/$arepoid"} || []}; my $aprp = "$projid/$arepoid"; print " - $aprp\n"; # make sure the user sees something if ($gctx->{'prpfinished'}->{$aprp}) { BSUtil::touch("$reporoot/$aprp/$myarch/:schedulerstate.dirty") if -d "$reporoot/$aprp/$myarch"; } # add med event to make users happy my $changed = $gctx->{"changed_med"}; $changed->{$aprp} ||= 1; } } $ctx->set_repo_state($state, $details); if (%$unfinished) { $gctx->{'prpunfinished'}->{$prp} = scalar(keys %$unfinished); } else { delete $gctx->{'prpunfinished'}->{$prp}; } $gctx->{'prpchecktimes'}->{$prp} = $ctx->{'prpchecktime'}; # send relsync file if something has been changed my @relsync1 = stat("$gdst/:relsync"); my @relsync2 = stat("$gdst/:relsync.sent"); if (@relsync1 && (!@relsync2 || "$relsync1[9]/$relsync1[7]/$relsync1[1]" ne "$relsync2[9]/$relsync2[7]/$relsync2[1]")) { sendrelsyncupdate($gctx, $prp, %$unfinished ? 0 : 1); } BSSched::Remote::cleanup_remotepackstatus($gctx, $prp) if $gctx->{'remotepackstatus_cleanup'}->{$prp} && !$ctx->{'havedelayed'}; my $now = time(); $gctx->{'prplastcheck'}->{$prp} = $now; if ($ctx->{'prpchecktime'}) { $gctx->{'nextmed'}->{$prp} = $now + 10 * $ctx->{'prpchecktime'}; } else { delete $gctx->{'nextmed'}->{$prp}; } if ($now - $lastschedinfo > 60) { # update scheduler stats writeschedulerinfo($gctx); $lastschedinfo = $now; } BSSched::ProjPacks::get_projpacks_postprocess($gctx) if $gctx->{'get_projpacks_postprocess_needed'}; } }; if ($@) { warn($@); my $ev_queue = BSSched::EventQueue->new($gctx); BSSched::EventHandler::event_exit($ev_queue, {'type' => 'emergencydump'}); exit(1); } exit(0); open-build-service-2.9.4/src/backend/bs_serverstatus000077500000000000000000000076411332555733200226040ustar00rootroot00000000000000#!/usr/bin/perl -w use strict; BEGIN { # sigh, must do it in this way so that Socket::MsgHdr's INIT is called if (@ARGV && -S $ARGV[-1]) { require BSXML; require BSHandoff; } } use Data::Dumper; my $cols; sub users { my $users = join(" ", map {$_->{'ev'}} @{$_[0] || []}); $users = substr($users, 0, $cols - 13)." ..." 
if length($users) > $cols - 9; return " $users"; } if (-t STDOUT) { eval { require Term::ReadKey; ($cols) = Term::ReadKey::GetTerminalSize(\*STDOUT); $SIG{'WINCH'} = sub { ($cols) = Term::ReadKey::GetTerminalSize(\*STDOUT) } if $cols; }; } $cols ||= 80; my $top; my $nolastevents; while (@ARGV) { if ($ARGV[0] eq '--top') { $top = 1; shift @ARGV; } elsif ($ARGV[0] eq '--nolastevents') { $nolastevents = 1; shift @ARGV; } else { last; } } die("Usage: bs_serverstatus [--top] <statusfile>\n") unless $ARGV[0]; $| = 1; print "\033[H\033[J" if $top; my $nl = "\n"; $nl = "\033[K\n" if $top; if (-S $ARGV[0]) { # ajaxstatus case my $param = { 'uri' => '/ajaxstatus', 'handoffpath' => $ARGV[0], }; my %slots; while (1) { my $ajaxstatus = BSHandoff::rpc($param, $BSXML::ajaxstatus); my $now = time(); # find jobs my %jobs; for my $job (@{($ajaxstatus->{'joblist'} || {})->{'job'} || []}) { my $id = $job->{'ev'}; next if $id == $ajaxstatus->{'ev'}; $jobs{$id} = 1; } # clean up old slots for (keys %slots) { delete $slots{$_} unless $jobs{$_}; } # create slots my $lasteventscount = 0; my @slots; for my $job (@{($ajaxstatus->{'joblist'} || {})->{'job'} || []}) { my $id = $job->{'ev'}; next if $id == $ajaxstatus->{'ev'}; if ($nolastevents && $job->{'request'} && $job->{'request'} =~ /^GET \/lastevents\?/) { $lasteventscount++; next; } my $slot = $slots{$id}; if (!defined $slot) { $slot = 0; for (sort {$a <=> $b} values %slots) { last if $_ != $slot; $slot++; } $slots{$id} = $slot; } my $d = $now - $job->{'starttime'}; my $req = $job->{'request'} || '?'; my $str = sprintf("%5d %5d %3d %s", $d, $id, $job->{'fd'}, $req); $slots[$slot] = substr($str, 0, $cols - 1); } if ($nolastevents) { unshift @slots, "suppressed lastevents: $lasteventscount"; } if ($ajaxstatus->{'watcher'}) { push @slots, undef; for my $fw (@{$ajaxstatus->{'watcher'}}) { push @slots, sprintf("%s", $fw->{'filename'}); push @slots, users($fw->{'job'}) if $fw->{'job'}; } } if ($ajaxstatus->{'rpc'}) { push @slots, undef; for my $rpc (@{$ajaxstatus->{'rpc'}}) { my $d = $now - $rpc->{'starttime'}; my $str = sprintf("%5d %5d %3d %s", $d, $rpc->{'ev'}, $rpc->{'fd'}, $rpc->{'uri'}); push @slots, substr($str, 0, $cols - 1); push @slots, users($rpc->{'job'}) if $rpc->{'job'}; } } if ($ajaxstatus->{'serialize'}) { push @slots, undef; for my $re (@{$ajaxstatus->{'serialize'}}) { push @slots, sprintf("%s", $re->{'filename'}); push @slots, users($re->{'job'}) if $re->{'job'}; } } for my $str (@slots) { $str = '' unless defined $str; print "$str$nl"; } last unless $top; print "\033[J"; sleep(1); print "\033[H"; } exit(0); } while(1) { open(STA, '<', $ARGV[0]) || die("$ARGV[0]: $!\n"); my $now = time(); my $sta; my $empty = ''; while ((sysread(STA, $sta, 256) || 0) == 256) { my ($ti, $pid, $group, $extra, $state, $data) = unpack("NNCCnZ244", $sta); if ($state == 0) { $empty .= "-$nl"; next; } my $d = $now - $ti; if ($state == 1) { $state = 'F'; } elsif ($state == 2) { $state = 'R'; } else { $state = '?'; } $state .= $group ? $group : ' '; printf "%s%s %3d %5d %s$nl", $empty, $state, $d, $pid, $data; $empty = ''; } close STA; last unless $top; print "\033[J"; sleep(1); print "\033[H"; } open-build-service-2.9.4/src/backend/bs_service000077500000000000000000000217171332555733200214660ustar00rootroot00000000000000#!/usr/bin/perl -w # # Copyright (c) 2009 Adrian Schroeter, Novell Inc. # Copyright (c) 2006-2009 Michael Schroeder, Novell Inc.
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program (see the file COPYING); if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA # ################################################################ # # Source service process. Processes package and project _service # files. # BEGIN { my ($wd) = $0 =~ m-(.*)/- ; $wd ||= '.'; unshift @INC, "$wd/build"; unshift @INC, "$wd"; } use Digest::MD5 (); use XML::Structured ':bytes'; use Data::Dumper; use POSIX; use Fcntl qw(:DEFAULT :flock); use BSRPC; use BSServer; use BSStdServer; use BSConfiguration; use BSUtil; use BSXML; use BSHTTP; use BSBuild; use strict; no warnings "once"; $BSConfig::bsuser = $BSConfig::bsserviceuser; $BSConfig::bsgroup = $BSConfig::bsservicegroup; BSUtil::set_fdatasync_before_rename() unless $BSConfig::disable_data_sync; my $tempdir = $BSConfig::servicetempdir; my $verbose; my $port = 5152; $port = $1 if $BSConfig::serviceserver =~ /:(\d+)$/; my $servicedir = $BSConfig::servicedir || "/usr/lib/obs/service"; my $rootservicedir = $BSConfig::serviceroot ? "$BSConfig::serviceroot/$servicedir" : $servicedir; my $uploaddir = "$BSConfig::bsdir/upload"; my $noproxy = $BSConfig::noproxy; my $maxchild = $BSConfig::service_maxchild; use warnings; sub usage { my ($ret) = @_; print <{'service'}}) { my $name = $service->{'name'}; BSVerify::verify_filename($name); if (defined($service->{'mode'}) && ($service->{'mode'} eq 'localonly' || $service->{'mode'} eq 'disabled' || $service->{'mode'} eq 'buildtime')) { print "Skip $name\n"; next; } BSUtil::printlog("Run for $name"); my $servicedef = readxml("$rootservicedir/$name.service", $BSXML::servicetype); my @run; if (defined $BSConfig::service_wrapper->{$name} ) { push @run, $BSConfig::service_wrapper->{$name}; } elsif (defined $BSConfig::service_wrapper->{'*'}) { push @run, $BSConfig::service_wrapper->{'*'}; } push @run, "$servicedir/$name"; for my $param (@{$service->{'param'}}) { next if $param->{'name'} eq 'outdir'; next unless $param->{'_content'}; die("$name: service parameter \"$param->{'name'}\" is not defined\n") unless grep {$_->{'name'} eq $param->{'name'}} @{$servicedef->{'parameter'}}; push @run, "--$param->{'name'}"; push @run, $param->{'_content'}; } push @run, "--outdir"; push @run, "$myworkdir/out"; mkdir("$myworkdir/out") || die("mkdir $myworkdir/out: $!\n"); BSUtil::printlog("Running command '@run'"); # call the service my $child_pid = open(SERVICE, '-|'); die "500 Unable to open pipe: $!\n" unless defined($child_pid); if (!
$child_pid) { open(STDERR, ">&STDOUT"); exec(@run); die("$run[0]: $!\n"); } local $SIG{ALRM} = sub { kill 'TERM', $child_pid; die "500 timeout during execution of $name\n"; }; # Wait $BSConfig::service_timeout (by default 7200 sec, i.e. 2 hours) for # the service to finish BSUtil::printlog("Waiting $BSConfig::service_timeout for service($child_pid) to finish\n") if $verbose; alarm($BSConfig::service_timeout); # collect output my $output = ''; while (<SERVICE>) { $output .= $_; } BSUtil::printlog(" $name: $output") if $verbose; if (close SERVICE) { # SUCCESS, move files inside and add prefix BSUtil::printlog('Service succeeded') if $verbose; for my $file (grep {!/^[:\.]/} ls("$myworkdir/out")) { next if -l "$myworkdir/out/$file" || ! -f _; # only plain files for now my $tfile = $file; $tfile =~ s/^_service://s; $tfile = "_service:$name:$tfile"; rename("$myworkdir/out/$file", $tfile) || die("rename $myworkdir/out/$file $tfile: $!\n"); } } else { # FAILURE, create error file BSUtil::printlog("Service failed: $!") if $verbose; $output =~ s/[\r\n\s]+$//s; BSUtil::cleandir('.'); die("500 remote execution error in $name detected\n") if $? >> 8 == 3; BSUtil::writestr('_service_error', undef, "service $name failed:\n$output\n"); $error = 1; } alarm(0); # delete no longer needed outdir rm_rf("$myworkdir/out"); last if $error; } last if $error; } # remove old files (from former service run) rm_rf('.old'); # get all generated files my @send = map {{'name' => $_, 'filename' => "$_"}} grep {/^_service[_:]/} ls('.'); # check for non-files (symlinks or directories) for my $file (@send) { die("Service result contains unreadable file '$file->{'filename'}'\n") unless -f $file->{'filename'}; } # send everything back for real BSServer::reply_cpio(\@send); # clean up rm_rf($myworkdir); return undef; # already replied } sub hello { my ($cgi) = @_; return "<hello name=\"Source Service Server\" />\n"; } sub list_service { my ($cgi) = @_; my @sl; for my $servicefile (grep {/\.service$/} ls($rootservicedir)) { my $service = readxml("$rootservicedir/$servicefile", $BSXML::servicetype, 1); next unless $service && $service->{'name'}; push @sl, $service; } return ({'service' => \@sl}, $BSXML::servicelist); } sub putconfiguration { my ($cgi) = @_; mkdir_p($uploaddir); my $uploaded = BSServer::read_file("$uploaddir/$$"); die("upload failed\n") unless $uploaded; my $configurationxml = readstr("$uploaddir/$$"); unlink("$uploaddir/$$"); my $oldconfigurationxml = readstr("$BSConfig::bsdir/configuration.xml", 1); if ($configurationxml ne ($oldconfigurationxml || '')) { BSUtil::fromxml($configurationxml, $BSXML::configuration); # test xml syntax writestr("$BSConfig::bsdir/.configuration.xml", "$BSConfig::bsdir/configuration.xml", $configurationxml); } return $BSStdServer::return_ok; } BSUtil::mkdir_p_chown($tempdir, $BSConfig::bsuser, $BSConfig::bsgroup); # define server my $dispatches = [ '/' => \&hello, '!rw :' => undef, '!- GET:' => undef, '!- HEAD:' => undef, '/service' => \&list_service, '/serverstatus' => \&BSStdServer::serverstatus, '!- POST:/sourceupdate/$project/$package' => \&run_source_update, # configuration 'PUT:/configuration' => \&putconfiguration, ]; my $conf = { 'port' => $port, 'dispatches' => $dispatches, 'setkeepalive' => 1, 'maxchild' => $maxchild, }; BSStdServer::server('bs_service', \@ARGV, $conf); open-build-service-2.9.4/src/backend/bs_servicedispatch000077500000000000000000000157111332555733200232060ustar00rootroot00000000000000#!/usr/bin/perl BEGIN { my ($wd) = $0 =~ m-(.*)/- ; $wd ||= '.'; unshift @INC, "$wd/build"; unshift @INC, "$wd"; } use XML::Structured
':bytes'; use POSIX; use BSConfiguration; use BSRPC ':https'; use BSUtil; use BSSrcrep; use BSRevision; use BSNotify; use BSStdRunner; use BSVerify; use strict; my $bsdir = $BSConfig::bsdir || "/srv/obs"; my $eventdir = "$BSConfig::bsdir/events"; my $srcrep = "$BSConfig::bsdir/sources"; my $uploaddir = "$srcrep/:upload"; my $rundir = $BSConfig::rundir || "$BSConfig::bsdir/run"; my $maxchild = 4; $maxchild = $BSConfig::servicedispatch_maxchild if defined $BSConfig::servicedispatch_maxchild; my $myeventdir = "$eventdir/servicedispatch"; sub notify_repservers { my ($type, $projid, $packid) = @_; BSRPC::rpc({ 'uri' => "$BSConfig::srcserver/source/$projid/$packid", 'request' => 'POST', 'timeout' => 60, }, undef, 'cmd=notifypackagechange'); } sub addrev_service { my ($rev, $servicemark, $files, $error) = @_; if ($error) { chomp $error; $error ||= 'unknown service error'; } if ($files->{'_service_error'} && !$error) { $error = BSRevision::revreadstr($rev, '_service_error', $files->{'_service_error'}); chomp $error; $error ||= 'unknown service error'; } if (!$error) { eval { BSSrcrep::addmeta_service($rev->{'project'}, $rev->{'package'}, $files, $servicemark, $rev->{'srcmd5'}); }; $error = $@ if $@; } if ($error) { BSSrcrep::addmeta_serviceerror($rev->{'project'}, $rev->{'package'}, $servicemark, $error); $error =~ s/[\r\n]+$//s; $error =~ s/.*[\r\n]//s; $error = str2utf8xml($error) || 'unknown service error'; } my $user = $rev->{'user'}; my $comment = $rev->{'comment'}; my $requestid = $rev->{'requestid'}; $user = '' unless defined $user; $user = 'unknown' if $user eq ''; $user = str2utf8xml($user); $comment = '' unless defined $comment; $comment = str2utf8xml($comment); my $p = { 'project' => $rev->{'project'}, 'package' => $rev->{'package'}, 'rev' => $rev->{'rev'}, 'user' => $user, 'comment' => $comment, 'requestid' => $requestid, }; $p->{'error'} = $error if $error; BSNotify::notify($error ?
'SRCSRV_SERVICE_FAIL' : 'SRCSRV_SERVICE_SUCCESS', $p); notify_repservers('package', $rev->{'project'}, $rev->{'package'}); } sub getrev { my ($projid, $packid, $revid) = @_; my $rev = BSRevision::getrev_local($projid, $packid, $revid); $rev = BSRevision::getrev_deleted_srcmd5($projid, $packid, $revid) if !$rev && $revid && $revid =~ /^[0-9a-f]{32}$/; return $rev; } sub runservice { my ($projid, $packid, $servicemark, $srcmd5, $revid, $linksrcmd5, $projectservicesmd5, $oldsrcmd5) = @_; print "dispatching service $projid/$packid $servicemark $srcmd5\n"; # get revision and file list my $rev; if ($revid) { eval { $rev = getrev($projid, $packid, $revid); }; } if (!$rev || $rev->{'srcmd5'} ne $srcmd5) { $rev = getrev($projid, $packid, $srcmd5); } my $linkinfo = {}; my $files = BSRevision::lsrev($rev, $linkinfo); die("servicemark mismatch\n") unless ($linkinfo->{'xservicemd5'} || '') eq $servicemark; # check if in progress my $serviceerror = BSSrcrep::getserviceerror($projid, $packid, $servicemark); return if $serviceerror ne 'service in progress'; # handle link case my $linkfiles; if ($linksrcmd5) { $linkfiles = $files; my $lrev = getrev($projid, $packid, $linksrcmd5); $files = BSRevision::lsrev($lrev); } # get old files my $oldfiles; if ($oldsrcmd5) { my $oldrev = getrev($projid, $packid, $oldsrcmd5); $oldfiles = BSRevision::lsrev($oldrev); } $oldfiles ||= {}; my @send = map {BSRevision::revcpiofile($rev, $_, $files->{$_})} sort(keys %$files); push @send, BSRevision::revcpiofile({'project' => $projid, 'package' => '_project'}, '_serviceproject', $projectservicesmd5) if $projectservicesmd5; push @send, map {BSRevision::revcpiofile($rev, $_, $oldfiles->{$_})} grep {!$files->{$_}} sort(keys %$oldfiles); my $odir = "$uploaddir/runservice$$"; BSUtil::cleandir($odir) if -d $odir; mkdir_p($odir); my $receive; eval { $receive = BSRPC::rpc({ 'uri' => "$BSConfig::serviceserver/sourceupdate/$projid/$packid", 'request' => 'POST', 'headers' => [ 'Content-Type: application/x-cpio' ], 'chunked' => 1, 'data' => \&BSHTTP::cpio_sender, 'cpiofiles' => \@send, 'directory' => $odir, 'timeout' => $BSConfig::service_timeout, 'withmd5' => 1, 'receiver' => \&BSHTTP::cpio_receiver, }, undef); }; my $error = $@; # and update source repository with the result if ($receive) { # drop all existing service files for (keys %$files) { delete $files->{$_} if /^_service[_:]/; } # add new service files eval { for my $pfile (ls($odir)) { if ($pfile eq '.errors') { my $e = readstr("$odir/.errors"); $e ||= 'empty .errors file'; die($e); } unless ($pfile =~ /^_service[_:]/) { die("service returned a non-_service file: $pfile\n"); } BSVerify::verify_filename($pfile); $files->{$pfile} = BSSrcrep::addfile($projid, $packid, "$odir/$pfile", $pfile); } }; $error = $@ if $@; } else { $error ||= 'error'; die("Transient error for $projid/$packid: $error") if $error =~ /^5/; die("RPC error for $projid/$packid: $error") if $error !~ /^\d/; $error = "service daemon error:\n $error"; } BSUtil::cleandir($odir); rmdir($odir); if ($linkfiles) { # argh, a link! 
put service run result in old filelist if (!$error) { $linkfiles->{$_} = $files->{$_} for grep {/^_service[_:]/} keys %$files; } $files = $linkfiles; } addrev_service($rev, $servicemark, $files, $error); } sub getevent { my ($req, $notdue, $nofork) = BSStdRunner::getevent(@_); if ($req && $req->{'ev'} && $req->{'conf'}->{'limitinprogress'}) { my ($projid, $packid) = ($req->{'ev'}->{'project'}, $req->{'ev'}->{'package'}); if ($projid && $packid) { my @inprogress = grep {/^servicedispatch:\Q$projid\E::\Q$packid\E::.*::inprogress$/} ls($req->{'conf'}->{'eventdir'}); if (@inprogress >= $req->{'conf'}->{'limitinprogress'}) { return (undef, 1); } } } return ($req, $notdue, $nofork); } sub servicedispatchevent { my ($req, @args) = @_; eval { runservice(@args); }; if ($@) { # retry in 10 minutes BSStdRunner::setdue($req, time() + 10 * 60); die($@); } return 1; } my $dispatches = [ 'servicedispatch $project $package $job $srcmd5 $rev? $linksrcmd5? $projectservicesmd5? $oldsrcmd5?' => \&servicedispatchevent, ]; my $conf = { 'eventdir' => $myeventdir, 'dispatches' => $dispatches, 'maxchild' => $maxchild, 'getevent' => \&getevent, 'inprogress' => 1, }; $conf->{'limitinprogress'} = $BSConfig::servicedispatch_limitinprogress if $BSConfig::servicedispatch_limitinprogress; BSStdRunner::run('bs_servicedispatch', \@ARGV, $conf); open-build-service-2.9.4/src/backend/bs_signer000077500000000000000000000473661332555733200213310ustar00rootroot00000000000000#!/usr/bin/perl -w # # Copyright (c) 2009 Michael Schroeder, Novell Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program (see the file COPYING); if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA # ################################################################ # # Sign the built packages # BEGIN { my ($wd) = $0 =~ m-(.*)/- ; $wd ||= '.'; unshift @INC, "$wd/build"; unshift @INC, "$wd"; } use POSIX; use Data::Dumper; use Digest; use Digest::MD5 (); use XML::Structured ':bytes'; use Build; use Storable; use BSConfiguration; use BSRPC; use BSUtil; use BSXML; use BSHTTP; use BSVerify; use BSPGP; use BSStdRunner; use strict; my $bsdir = $BSConfig::bsdir || "/srv/obs"; my $jobsdir = "$BSConfig::bsdir/jobs"; my $eventdir = "$BSConfig::bsdir/events"; my $myeventdir = "$eventdir/signer"; my $uploaddir = "$BSConfig::bsdir/upload"; my $maxchild = 4; my $maxchild_flavor; $maxchild = $BSConfig::signer_maxchild if defined $BSConfig::signer_maxchild; $maxchild_flavor = $BSConfig::signer_maxchild_flavor if defined $BSConfig::signer_maxchild_flavor; my $sign_supports_S; sub check_sign_S { my $pid = BSUtil::xfork(); return unless defined $pid; if (!$pid) { open(STDOUT, ">/dev/null"); open(STDERR, ">&STDOUT"); my @signargs; push @signargs, '--project', 'dummy' if $BSConfig::sign_project; exec($BSConfig::sign, @signargs, '-S', '/dev/null', '-k'); die("$BSConfig::sign: $!\n"); } $sign_supports_S = 1 if waitpid($pid, 0) == $pid && !$?; } sub readblk { my ($fd, $blk, $num, $blksize) = @_; $blksize ||= 2048; sysseek($fd, $blk * $blksize, SEEK_SET) || die("sysseek: $!\n"); $num ||= 1; $num *= $blksize; my $ret = ''; (sysread($fd, $ret, $num) || 0) == $num || die("sysread: $!\n"); return $ret; } sub writeblk { my ($fd, $blk, $cnt) = @_; my $blksize = 2048; sysseek($fd, $blk * $blksize, SEEK_SET) || die("sysseek: $!\n"); (syswrite($fd, $cnt) || 0) == length($cnt) || die("syswrite: $!\n"); } sub readisodir { my ($fd, $dirpos) = @_; my $dirblk = readblk($fd, $dirpos); my $dirlen = unpack('@10V', $dirblk); die("bad directory len\n") if $dirlen & 0x7ff; my $sp_bytes_skip = 0; my @contents; my $entryoff = 0; while ($dirlen) { if ($dirblk eq '' || unpack('C', $dirblk) == 0) { $dirlen -= 0x800; $dirblk = readblk($fd, ++$dirpos) if $dirlen; $entryoff = 0; next; } my ($l, $fpos, $flen, $f, $inter, $nl) = unpack('C@2V@10V@25Cv@32C', $dirblk); die("bad dir entry\n") if $l > length($dirblk); if ($f & 2) { $dirblk = substr($dirblk, $l); $entryoff += $l; next; } die("associated file\n") if $f & 4; die("interleaved file\n") if $inter; die("bad dir entry\n") if !$nl || $nl + 33 > length($dirblk); $nl++ unless $nl & 1; my $e = substr($dirblk, $nl + 33, $l - $nl - 33); if (length($e) >= 7 && substr($e, 0, 2) eq 'SP') { ($sp_bytes_skip) = unpack('@6C', $e); } else { $e = substr($e, $sp_bytes_skip) if $sp_bytes_skip; } my ($ce_len, $ce_blk, $ce_off) = (0, 0, 0); my $fname = ''; my $nmf = 0; while ($e ne '') { if (length($e) <= 2) { last unless $ce_len; $e = readblk($fd, $ce_blk); $e = substr($e, $ce_off, $ce_len); $ce_len = 0; next; } if (substr($e, 0, 2) eq 'CE') { ($ce_blk, $ce_off, $ce_len) = unpack('@4V@12V@20V', $e); } elsif (substr($e, 0, 2) eq 'NM') { my $nml = (unpack('@2C', $e))[0] - 5; $fname = '' unless $nmf & 1; ($nmf) = unpack('@4C', $e); $fname .= substr($e, 5, $nml) if $nml > 0; } $e = substr($e, (unpack('@2C', $e))[0]); } push @contents, [$fname, $fpos, $flen, $dirpos, $entryoff]; $dirblk = substr($dirblk, $l); $entryoff += $l; } return @contents; } sub signisofiles { my ($fd, 
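# overview (sketch, not part of the original code): readblk()/writeblk()
# above address the image in fixed 2048-byte sectors, i.e.
#   byte_offset = $blk * $blksize;     # $blksize defaults to 2048
#   readblk($fd, 16);                  # sector 16 = byte 32768, the
#                                      # ISO-9660 primary volume descriptor
# readisodir() walks one directory extent and follows the Rock Ridge
# 'NM' (alternate name) and 'CE' (continuation area) entries so the real
# file names are recovered instead of the shortened ISO-9660 ones.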
$pubkey, @signargs) = @_; my $signed = 0; my $vol = readblk($fd, 16); die("primary volume descriptor missing\n") if substr($vol, 0, 6) ne "\001CD001"; my ($path_table_size, $path_table_pos) = unpack('@132V@140V', $vol); my $path_table = readblk($fd, $path_table_pos * 2048, $path_table_size, 1); while ($path_table ne '') { my ($l, $dirpos) = unpack('C@2V', $path_table); die("empty dir in path table\n") unless $l; $path_table = substr($path_table, 8 + $l + ($l & 1)); my @c = readisodir($fd, $dirpos); for my $e (@c) { #print "$e->[0] $e->[1] $e->[2] $e->[3] $e->[4]\n"; if ($e->[0] =~ /^(.*)\.asc$/i && $e->[2] == 2048) { my $n = $1; my $signfile = readblk($fd, $e->[1]); next if substr($signfile, 0, 8) ne "sIGnMe!\n"; my $len = hex(substr($signfile, 8, 8)); my $sum = hex(substr($signfile, 16, 8)); my @se = grep {$_->[0] =~ /^\Q$n\E$/i && $_->[2] == $len} @c; die("don't know which file to sign: $e->[0]\n") unless @se == 1; my $sf = readblk($fd, $se[0]->[1], ($len + 0x7ff) >> 11); $sf = substr($sf, 0, $len); die("selected wrong file\n") if $sum != unpack("%32C*", $sf); my $sig = BSUtil::xsystem($sf, $BSConfig::sign, @signargs, '-d'); die("returned signature is empty\n") unless $sig; die("returned signature is too big\n") if length($sig) > 2048; # replace old content writeblk($fd, $e->[1], $sig . ("\0" x (2048 - length($sig)))); my $dirblk = readblk($fd, $e->[3]); # patch in new content len substr($dirblk, $e->[4] + 10, 4) = pack('V', length($sig)); writeblk($fd, $e->[3], $dirblk); $signed++; } if ($e->[0] =~ /\.key$/i && $e->[2] == 8192) { my $signfile = readblk($fd, $e->[1]); next if substr($signfile, 0, 8) ne "sIGnMeP\n"; $pubkey = BSUtil::xsystem(undef, $BSConfig::sign, @signargs, '-p') unless $pubkey; die("pubkey is not available\n") unless $pubkey; die("pubkey is too big\n") if length($pubkey) > 8192; # replace old content writeblk($fd, $e->[1], $pubkey . 
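# signing scheme used by this loop (summary of the surrounding code):
# the image is built with placeholder files whose first 8 bytes are
# "sIGnMe!\n" (wants a detached signature) or "sIGnMeP\n" (wants the
# pubkey).  Each placeholder is overwritten in place and NUL-padded so
# the allocated extent keeps its size, roughly:
#   writeblk($fd, $blk, $data . ("\0" x ($reserved - length($data))));
# where $reserved is 2048 for signatures and 8192 for pubkeys; afterwards
# only the length field of the directory record is patched.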
("\0" x (8192 - length($pubkey)))); my $dirblk = readblk($fd, $e->[3]); # patch in new content len substr($dirblk, $e->[4] + 10, 4) = pack('V', length($pubkey)); writeblk($fd, $e->[3], $dirblk); $signed++; } } } return $signed; } sub retagiso { my ($fd) = @_; my $blk = readblk($fd, 0, 17); die("primary volume descriptor missing\n") if substr($blk, 0x8000, 6) ne "\001CD001"; my $tags = ';'.substr($blk, 0x8373, 0x200); return unless $tags =~ /;(md5sum=[0-9a-fA-F]{32}|sha1sum=[0-9a-fA-F]{40}|sha256sum=[0-9a-fA-F]{64})/; my $sum = $1; my $sumtype = (split('=', $sum, 2))[0]; print "updating $sumtype tag\n"; substr($blk, 0x0000, 0x200) = "\0" x 0x200; substr($blk, 0x8373, 0x200) = ' ' x 0x200; my $numblks = unpack("V", substr($blk, 0x8050, 4)); die("bad block number\n") if $numblks < 17; my $chkmap = { 'md5sum' => 'MD5', 'sha1sum' => 'SHA-1', 'sha256sum' => 'SHA-256' }; my $ctx = Digest->new($chkmap->{$sumtype}); $ctx->add($blk); $numblks -= 17; my $blkno = 16; while ($numblks-- > 0) { my $b = readblk($fd, ++$blkno); $ctx->add($b); } my $newsum = "$sumtype=".$ctx->hexdigest; die unless length($sum) == length($newsum); $tags =~ s/;\Q$sum\E/;$newsum/; substr($blk, 0x8373, 0x200) = substr($tags, 1); writeblk($fd, 16, substr($blk, 0x8000, 0x800)); } sub signiso { my ($file, $pubkey, @signargs) = @_; local *ISO; open(ISO, '+<', $file) || die("$file: $!\n"); my $signed = signisofiles(\*ISO, $pubkey, @signargs); retagiso(\*ISO) if $signed; close(ISO) || die("close $file: $!\n"); } sub rsasign { my ($signfile, $jobstatus, @signargs) = @_; my @opensslsignargs = ('-h', 'sha256'); if ($signfile !~ /\.cpio\.rsasign$/) { BSUtil::xsystem(undef, $BSConfig::sign, @signargs, '-O', @opensslsignargs, $signfile); return; } # cpio case, sign every plain file in the archive my $retrysign; eval { local *CPIOFILE; my @res; open(CPIOFILE, '<', $signfile) || die("open $signfile: $!\n"); my $param = { 'acceptsubdirs' => 1, 'cpiopostfile' => sub { my ($par, $ent) = @_; return unless ($ent->{'mode'} & 0xf000) == 0x8000; # files only $retrysign = 1; my $sig = BSUtil::xsystem($ent->{'data'}, $BSConfig::sign, @signargs, '-O', @opensslsignargs); undef $retrysign; $ent->{'data'} = ''; # free mem push @res, { 'name' => "$ent->{'name'}.sig", 'data' => $sig }; }, }; BSHTTP::cpio_receiver(BSHTTP::fd2req(\*CPIOFILE), $param); close CPIOFILE; $retrysign = 1; open(CPIOFILE, '>', "$signfile.sig") || die("open $signfile.sig: $!\n"); BSHTTP::cpio_sender({ 'cpiofiles' => \@res }, \*CPIOFILE); close(CPIOFILE) || die("close $signfile.sig: $!\n"); }; if ($@) { $jobstatus->{'result'} = 'failed' unless $retrysign; die("openssl sign: $@"); } } sub fixup_sha256_checksum { my ($jobdir, $shafile, $isofile) = @_; return if ((-s "$jobdir/$shafile") || 0) > 65536; my $sha = readstr("$jobdir/$shafile", 1); return unless $sha; return unless $sha =~ /[ \/]\Q$isofile\E\n/s; # ok, needs patching... if ($sha =~ /-----BEGIN PGP SIGNED MESSAGE-----\n/s) { # de-pgp $sha =~ s/.*-----BEGIN PGP SIGNED MESSAGE-----//s; $sha =~ s/.*?\n\n//s; $sha =~ s/-----BEGIN PGP SIGNATURE-----.*//s; } local *F; open(F, '<', "$jobdir/$isofile") || return; my $ctx = Digest->new('SHA-256'); $ctx->addfile(\*F); close F; $sha =~ s/^.{64}( (?:.*\/)?\Q$isofile\E)$/$ctx->hexdigest().$1/em; writestr("$jobdir/$shafile", undef, $sha); } sub signjob { my ($job, $arch) = @_; print "signing $arch/$job\n"; local *F; if (! -e "$jobsdir/$arch/$job") { print "no such job\n"; return undef; } if (! 
-e "$jobsdir/$arch/$job:status") { print "job is not done\n"; return undef; } my $jobstatus = BSUtil::lockopenxml(\*F, '<', "$jobsdir/$arch/$job:status", $BSXML::jobstatus); # finished can be removed here later, but running jobs shall not be lost on code update. if ($jobstatus->{'code'} ne 'finished' && $jobstatus->{'code'} ne 'signing') { print "job is not assigned for signing\n"; close F; return undef; } my $jobdir = "$jobsdir/$arch/$job:dir"; die("jobdir does not exist\n") unless -d $jobdir; my $info = readxml("$jobsdir/$arch/$job", $BSXML::buildinfo); my $projid = $info->{'project'}; my @files = sort(ls($jobdir)); my @signfiles = grep {/\.(?:d?rpm|sha256|iso|pkg\.tar\.gz|pkg\.tar\.xz|rsasign|AppImage)$/} @files; my $needpubkey; if (grep {$_ eq '.kiwitree_tosign'} @files) { for my $f (split("\n", readstr("$jobdir/.kiwitree_tosign"))) { next if $f eq ''; $f =~ s/%([a-fA-F0-9]{2})/chr(hex($1))/ge; die("bad file in kiwitree_tosign: $f\n") if "/$f/" =~ /\/\.{0,2}\//s; if ($f =~ /.\.key$/) { next unless ((-s "$jobdir/$f") || 0) == 8192; $needpubkey = 1; push @signfiles, $f; next; } die("bad file in kiwitree_tosign: $f\n") unless $f =~ /^(.*)\.asc$/s; push @signfiles, $f if -s "$jobdir/$f" && -e "$jobdir/$1"; } } if (@signfiles) { $needpubkey ||= grep {/\.iso$/} @signfiles; my @signargs; push @signargs, '--project', $projid if $BSConfig::sign_project; my $param = { 'uri' => "$BSConfig::srcserver/getsignkey", 'timeout' => 60, }; my @args; push @args, "project=$projid"; push @args, "withpubkey=1" if $needpubkey; push @args, "autoextend=1" if $needpubkey; push @args, "withalgo=1"; my $signkey = BSRPC::rpc($param, undef, @args); my $algo; $algo = $1 if $signkey && $signkey =~ s/^(\S+)://; my $pubkey; if ($signkey) { ($signkey, $pubkey) = split("\n", $signkey, 2) if $needpubkey; undef $pubkey unless $pubkey && length($pubkey) > 2; # not a valid pubkey if ($needpubkey && !$pubkey) { if ($BSConfig::sign_project && $BSConfig::sign) { local *S; open(S, '-|', $BSConfig::sign, '--project', $projid, '-p') || die("$BSConfig::sign: $!\n");; $pubkey = ''; 1 while sysread(S, $pubkey, 4096, length($pubkey)); if (!close(S)) { print "sign -p failed: $?\n"; $pubkey = undef; } } } die("returned pubkey is empty\n") if $needpubkey && length($pubkey || '') <= 2 && length($signkey) > 2; mkdir_p($uploaddir); writestr("$uploaddir/signer.$$", undef, $signkey); push @signargs, '-P', "$uploaddir/signer.$$"; push @signargs, '-h', 'sha256' if $algo && $algo eq 'rsa'; } unlink("$jobdir/.checksums"); my $followupfile; # check for followup files if (!$info->{'followupfile'} && ($info->{'file'} || '') ne '_aggregate') { if (grep {/\.rsasign$/} @signfiles) { $followupfile = (grep {/\.(spec|dsc)$/} @files)[0]; @signfiles = grep {/\.rsasign$/} @signfiles if $followupfile; } if (!$followupfile && grep {/\.followup.spec$/} @files) { $followupfile = (grep {/\.followup.spec$/} @files)[0]; } } push @signargs, '-S', "$jobdir/.checksums" if !$followupfile && $sign_supports_S; eval { for my $signfile (@signfiles) { if ($signfile =~ /\.iso$/) { signiso("$jobdir/$signfile", $pubkey, @signargs); next; } if ($signfile =~ /\.rsasign$/) { rsasign("$jobdir/$signfile", $jobstatus, @signargs) if $followupfile; next; } my $signtime; if ($info->{'file'} eq '_aggregate' && ($signfile =~ /\.d?rpm$/)) { # special aggregate handling: remove old sigs # but get old sig time first eval { my %res = Build::Rpm::rpmq("$jobdir/$signfile", 'SIGTAG_GPG', 'SIGTAG_PGP'); my $sig = $res{'SIGTAG_PGP'} || $res{'SIGTAG_GPG'}; $sig = $sig->[0] if $sig; $signtime = 
BSPGP::pk2signtime($sig) if $sig; }; warn("get signtime: $@") if $@; system('rpm', '--delsign', "$jobdir/$signfile") && warn("delsign $jobdir/$signfile failed: $?\n"); print "using signtime $signtime\n" if $signtime; } my @signmode; @signmode = ('-r', '-T', $signtime) if $signtime; @signmode = ('-r', '-T', 'buildtime') if $signfile =~ /\.drpm$/; @signmode = ('-D') if $signfile =~ /\.pkg\.tar\.(?:gz|xz)$/; @signmode = ('-a') if $signfile =~ /\.AppImage$/; if ($signfile =~ /\.key$/s) { next unless (-s "$jobdir/$signfile") == 8192; my $signfilec = readstr("$jobdir/$signfile"); next if substr($signfilec, 0, 8) ne "sIGnMeP\n"; $pubkey ||= BSUtil::xsystem(undef, $BSConfig::sign, @signargs, '-p'); die("pubkey is not available\n") unless $pubkey; writestr("$jobdir/$signfile.tmp$$", "$jobdir/$signfile", $pubkey); next; } if ($signfile =~ /^(.*\.iso)\.sha256$/) { fixup_sha256_checksum($jobdir, $signfile, $1); } if ($signfile =~ /\.asc$/s) { next unless (-s "$jobdir/$signfile") == 2048; my $signfilec = readstr("$jobdir/$signfile"); next if substr($signfilec, 0, 8) ne "sIGnMe!\n"; @signmode = ('-d'); $signfile =~ s/\.asc$//s; # fallthrough... } if (system($BSConfig::sign, @signargs, @signmode, "$jobdir/$signfile")) { if ($signfile =~ /\.rpm$/) { print("sign failed: $? - checking digest\n"); if (system('rpm', '--checksig', '--nosignature', "$jobdir/$signfile")) { print("rpm checksig failed: $? - restarting job\n"); $jobstatus->{'result'} = 'rebuild'; } } die("sign $jobdir/$signfile failed\n"); } if ($signfile =~ /\.AppImage$/ && -e "$jobdir/$signfile.zsync") { print("regenerating zsync file\n"); # re-generate zsync data if (system('zsyncmake', '-u', $signfile, '-o', "$jobdir/$signfile.zsync.new", "$jobdir/$signfile")) { print("zync file recreation failed: $?\n"); unlink("$jobdir/$signfile.zsync.new"); } else { rename("$jobdir/$signfile.zsync.new", "$jobdir/$signfile.zsync"); } } } }; if ($@) { # signing failed, either retry, rebuild or fail my $error = $@; unlink("$uploaddir/signer.$$") if $signkey; if ($error =~ /Need RSA key for openssl sign/i) { $error = "Need an RSA key for openssl signing, please create a new key\n"; $jobstatus->{'result'} = 'failed'; } if ($jobstatus->{'result'} && $jobstatus->{'result'} eq 'rebuild') { warn("rebuilding: $error\n"); if ($info->{'followupfile'}) { delete $info->{'followupfile'}; writexml("$jobsdir/$arch/.$job", "$jobsdir/$arch/$job", $info, $BSXML::buildinfo); } BSUtil::cleandir($jobdir); rmdir($jobdir); unlink("$jobsdir/$arch/$job:status"); close F; return undef; } if ($jobstatus->{'result'} && $jobstatus->{'result'} eq 'failed') { warn("failed: $error\n"); BSUtil::appendstr("$jobdir/logfile", "\n\n$error"); $jobstatus->{'code'} = 'finished'; writexml("$jobsdir/$arch/.$job:status", "$jobsdir/$arch/$job:status", $jobstatus, $BSXML::jobstatus); close F; return 1; } close(F); die($error); } # all files signed now unlink("$uploaddir/signer.$$") if $signkey; if ($followupfile) { # we need to create a followup job to integrate the signatures $info->{'followupfile'} = $followupfile; writexml("$jobsdir/$arch/.$job", "$jobsdir/$arch/$job", $info, $BSXML::buildinfo); unlink("$jobsdir/$arch/$job:status"); close F; return undef; } # we have changed the file ids, thus we need to re-create # the .bininfo file my $bininfo = {}; my $oldbininfo = BSUtil::retrieve("$jobdir/.bininfo", 1) || {}; for my $file (@files) { my @s = stat("$jobdir/$file"); my $id = "$s[9]/$s[7]/$s[1]"; next unless @s; if ($file !~ /\.(?:rpm|deb)$/) { $bininfo->{$file} = $oldbininfo->{$file} if 
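# .bininfo rebuild: signing rewrote the package files, so the cached
# "$mtime/$size/$inode" ids (stat fields 9/7/1 above) are stale and each
# rpm/deb is queried again.  Shape of one rebuilt entry, roughly:
#   $bininfo->{'example.rpm'} = { 'filename' => 'example.rpm',
#                                 'id' => $id, 'leadsigmd5' => ...,
#                                 %$data };   # evra data from Build::query
# ('example.rpm' is an illustrative name only)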
$oldbininfo->{$file}; next; } my $data = Build::query("$jobdir/$file", 'evra' => 1); die("$jobdir/$file: query failed") unless $data; eval { BSVerify::verify_nevraquery($data); }; die("$jobdir/$file: $@") if $@; my $leadsigmd5; die("$jobdir/$file: queryhdrmd5 failed\n") unless Build::queryhdrmd5("$jobdir/$file", \$leadsigmd5); $data->{'leadsigmd5'} = $leadsigmd5 if $leadsigmd5; $data->{'filename'} = $file; $data->{'id'} = $id; $bininfo->{$file} = $data; } $bininfo->{'.bininfo'} = {}; # mark new version BSUtil::store("$jobdir/.bininfo", undef, $bininfo); } # write finished job status and release lock $jobstatus->{'code'} = 'finished'; writexml("$jobsdir/$arch/.$job:status", "$jobsdir/$arch/$job:status", $jobstatus, $BSXML::jobstatus); close F; unlink("$jobdir/.kiwitree_tosign"); return 1; } sub signevent { my ($req, $job, $arch) = @_; my $res; eval { $res = signjob($job, $arch); }; die("sign failed: $@") if $@; if ($res) { my $ev = $req->{'ev'}; my $type = $ev->{'type'} eq 'built' ? 'finished' : $ev->{'type'}; writexml("$eventdir/$arch/.${type}:$job$$", "$eventdir/$arch/${type}:$job", $ev, $BSXML::event); BSUtil::ping("$eventdir/$arch/.ping"); } return 1; # event is processed } # we currently support two flavors, rsasign and iso sub getflavor { my ($req) = @_; my $ev = $req->{'ev'}; my @dir = ls("$jobsdir/$ev->{'arch'}/$ev->{'job'}:dir"); return undef unless @dir; return 'iso' if grep {/\.iso$/} @dir; return 'rsasign' if grep {/\.rsasign$/} @dir; return undef; } my $dispatches = [ 'built $job $arch' => \&signevent, 'uploadbuild $job $arch' => \&signevent, ]; my $conf = { 'runname' => 'bs_signer', 'eventdir' => $myeventdir, 'dispatches' => $dispatches, 'maxchild' => $maxchild, 'maxchild_flavor' => $maxchild_flavor, 'getflavor' => \&getflavor, 'inprogress' => 1, }; $conf->{'getflavor'} = $BSConfig::signer_getflavor if $BSConfig::signer_getflavor; die("sign program is not configured!\n") unless $BSConfig::sign; check_sign_S(); print "warning: sign does not seem to support checksum files, please update\n" unless $sign_supports_S; BSStdRunner::run('signer', \@ARGV, $conf); open-build-service-2.9.4/src/backend/bs_srcserver000077500000000000000000007447151332555733200220620ustar00rootroot00000000000000#!/usr/bin/perl -w # # Copyright (c) 2006, 2007 Michael Schroeder, Novell Inc. # Copyright (c) 2008 Adrian Schroeter, Novell Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
#
# You should have received a copy of the GNU General Public License
# along with this program (see the file COPYING); if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#
################################################################
#
# The Source Server
#

BEGIN {
  my ($wd) = $0 =~ m-(.*)/- ;
  $wd ||= '.';
  chdir($wd);
  unshift @INC, "$wd/build";
  unshift @INC, "$wd";
}

use XML::Structured ':bytes';
use POSIX;
use Fcntl qw(:DEFAULT :flock);
use Digest::MD5 ();
use Digest::SHA ();
use Data::Dumper;
use Storable ();
use Symbol;
use JSON::XS ();

use BSConfiguration;
use BSRPC ':https';
use BSServer;
use BSUtil;
use BSFileDB;
use BSXML;
use BSProductXML;
use BSVerify;
use BSHandoff;
use BSWatcher ':https';
use BSXPath;
use BSStdServer;
use BSSrcdiff;
use Build;
use BSNotify;
use BSPGP;
use BSSrcrep;
use BSRevision;
use BSKiwiXML;
use BSUrlmapper;
use BSXPath;
use BSXPathKeys;
use BSDB;
use BSDBIndex;
use BSSrcServer::Remote;
use BSSrcServer::Partition;
use BSSrcServer::Access;
use BSSrcServer::Projlink;
use BSSrcServer::Link;
use BSSrcServer::Service;
use BSSrcServer::Product;
use BSSrcServer::Multibuild;
use BSSrcServer::Blame;

# configure modules
$BSSrcServer::Projlink::getrev = \&getrev;
$BSSrcServer::Projlink::findpackages = \&findpackages;
$BSSrcServer::Projlink::readpackage = \&readpackage;

# links point to service expanded files
$BSSrcServer::Link::getrev = \&getrev;
$BSSrcServer::Link::lsrev_linktarget = \&lsrev_service;

$BSSrcServer::Service::getrev = \&getrev;
$BSSrcServer::Service::readpackage = \&readpackage;
$BSSrcServer::Service::addrev = \&addrev;
$BSSrcServer::Service::notify = \&notify;
$BSSrcServer::Service::notify_repservers = \&notify_repservers;

$BSSrcServer::Blame::getrev = \&getrev;
$BSSrcServer::Blame::lsrev_expanded = \&lsrev_expanded;
$BSSrcServer::Blame::lsrev_service = \&lsrev_service;

$BSSrcServer::Product::notify_repservers = \&notify_repservers;

$Build::Rpm::unfilteredprereqs = 1 if defined $Build::Rpm::unfilteredprereqs;
$Build::Rpm::conflictdeps = 1 if defined $Build::Rpm::conflictdeps;
$Build::Kiwi::repoextras = 1 if defined $Build::Kiwi::repoextras;

use strict;

my $port = 5352;  #'SR'
$port = $1 if $BSConfig::srcserver =~ /:(\d+)$/;
my $proxy;
$proxy = $BSConfig::proxy if defined($BSConfig::proxy);

BSUtil::set_fdatasync_before_rename() unless $BSConfig::disable_data_sync || $BSConfig::disable_data_fsync;

my $projectsdir = "$BSConfig::bsdir/projects";
my $eventdir = "$BSConfig::bsdir/events";
my $srcrep = "$BSConfig::bsdir/sources";
my $treesdir = $BSConfig::nosharedtrees ?
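# layout of the state directories configured here (summary of the
# assignments around this point, all under $BSConfig::bsdir, by default
# /srv/obs):
#   projects/   project/package metadata and revision logs
#   sources/    content-addressed source files ($srcrep)
#   trees/      tree objects, only if $BSConfig::nosharedtrees is set
#   events/     event queues feeding the other backend daemons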
"$BSConfig::bsdir/trees" : $srcrep; my $requestsdir = "$BSConfig::bsdir/requests"; my $oldrequestsdir = "$BSConfig::bsdir/requests.old"; my $rundir = $BSConfig::rundir || "$BSConfig::bsdir/run"; my $diffcache = "$BSConfig::bsdir/diffcache"; my $extrepodir = "$BSConfig::bsdir/repos"; my $reqindexdb = "$BSConfig::bsdir/db/request"; my $extrepodb = "$BSConfig::bsdir/db/published"; my $sourcedb = "$BSConfig::bsdir/db/source"; my $notificationpayloaddir = "$BSConfig::bsdir/notificationpayload"; my $srcrevlay = [qw{rev vrev srcmd5 version time user comment requestid}]; my $eventlay = [qw{number time type project package repository arch}]; my $notificationlay = [qw{number type time data []}]; my $ajaxsocket = "$rundir/bs_srcserver.ajax"; my $uploaddir = "$srcrep/:upload"; my @binsufs = qw{rpm deb pkg.tar.gz pkg.tar.xz}; my $binsufsre = join('|', map {"\Q$_\E"} @binsufs); my $datarepoid; my %packagequota; sub notify { my ($type, $p) = @_; my $time = $p->{'time'} || time(); delete $p->{'time'}; if ($type eq 'PACKTRACK' && BSServer::have_content()) { my $payload = Storable::thaw(BSServer::read_data()); if ($payload) { my $json_payload = JSON::XS::encode_json($payload); my $payloadkey = "$time.".Digest::MD5::md5_hex($json_payload); mkdir_p($notificationpayloaddir); writestr("$notificationpayloaddir/.$payloadkey", "$notificationpayloaddir/$payloadkey", $json_payload); $p->{'payload'} = $payloadkey; print "notification payload for $p->{'project'}/$p->{'repo'} stored in $payloadkey\n" if $p->{'project'} && $p->{'repo'}; } } my @n = map {"$_=$p->{$_}"} grep {defined($p->{$_}) && !ref($p->{$_})} sort keys %{$p || {}}; mkdir_p($eventdir); BSFileDB::fdb_add_i("$eventdir/lastnotifications", $notificationlay, {'type' => $type, 'time' => $time, 'data' => \@n}); } sub prune_notificationpayloads { my ($cuttime) = @_; my @pl = ls($notificationpayloaddir); for my $p (@pl) { next unless $p =~ /^(\d+)\./; unlink("$notificationpayloaddir/$p") if $1 < $cuttime; } } sub prune_lastnotifications { my ($cutoff) = @_; local *F; return unless BSUtil::lockopen(\*F, '<', "$eventdir/lastnotifications", 1); my $filter = sub { $_[0]->{'number'} >= $cutoff ? 1 : 0 }; my @l = BSFileDB::fdb_getall("$eventdir/lastnotifications", $notificationlay, undef, $filter); if (@l) { unlink("$eventdir/.lastnotifications.$$"); if (! -e "$eventdir/.lastnotifications.$$") { BSFileDB::fdb_add_multiple("$eventdir/.lastnotifications.$$", $notificationlay, @l); rename("$eventdir/.lastnotifications.$$", "$eventdir/lastnotifications") || die("rename $eventdir/.lastnotifications.$$ $eventdir/lastnotifications: $!\n"); prune_notificationpayloads($l[0]->{'time'} - 240 * 3600) if -d $notificationpayloaddir; # keep 10 days } } close F; } sub lastnotifications { my ($cgi) = @_; my $view = $cgi->{'view'}; die("unsupported view\n") if $view && $view ne 'json'; if (!$cgi->{'start'}) { # just fetch the current event number my $lastev = BSFileDB::fdb_getlast("$eventdir/lastnotifications", $notificationlay); my $lastno = $lastev ? 
$lastev->{'number'} : 0; my $ret = {'next' => $lastno + 1, 'sync' => 'lost'}; return (JSON::XS::encode_json($ret), 'Content-Type: application/json') if $view && $view eq 'json'; return ($ret, $BSXML::notifications); } # check if we need to prune if (!$BSStdServer::isajax && !$cgi->{'noprune'}) { my $firstev = (BSFileDB::fdb_getall("$eventdir/lastnotifications", $notificationlay, undef, sub {-1}))[0]; if ($firstev && $cgi->{'start'} - $firstev->{'number'} > 1000) { my $lastev = BSFileDB::fdb_getlast("$eventdir/lastnotifications", $notificationlay); prune_lastnotifications($cgi->{'start'} - 500) if $lastev && $cgi->{'start'} <= $lastev->{'number'} + 1; } } my $filter = sub { $cgi->{'start'} > $_[0]->{'number'} ? -2 : 1 }; my @l = BSFileDB::fdb_getall_reverse("$eventdir/lastnotifications", $notificationlay, undef, $filter); if ($cgi->{'block'} && !@l) { # pass on to AJAX if (!$BSStdServer::isajax) { BSHandoff::handoff('/lastnotifications', undef, "start=$cgi->{'start'}", 'block=1'); } # wait untill we have a winner BSWatcher::addfilewatcher("$eventdir/lastnotifications"); my $lastev = BSFileDB::fdb_getlast("$eventdir/lastnotifications", $notificationlay); return undef if !$lastev || $lastev->{'number'} < $cgi->{'start'}; if ($cgi->{'start'} == $lastev->{'number'}) { @l = ($lastev); } else { @l = BSFileDB::fdb_getall_reverse("$eventdir/lastnotifications", $notificationlay, undef, $filter); return undef unless @l; } } my $res = {}; @l = reverse @l; if (@l) { $res->{'next'} = $l[-1]->{'number'} + 1; } else { my $lastev = BSFileDB::fdb_getlast("$eventdir/lastnotifications", $notificationlay); my $no = ($lastev->{'number'} || 0); $no = $cgi->{'start'} - 1 if $no >= $cgi->{'start'}; $res->{'next'} = $no + 1; } if (@l && $l[0]->{'number'} > $cgi->{'start'}) { $res->{'sync'} = 'lost'; @l = (); } # don't send more than 1000 notifications to the poor api if (@l > 1000) { $res->{'limit_reached'} = 1; # tell the api that there is more $res->{'next'} = $l[1000]->{'number'}; @l = splice(@l, 0, 1000); } $res->{'notification'} = \@l; delete $_->{'number'} for @l; # XXX: why? if ($view && $view eq 'json') { for my $l (@l) { my %d; for (@{$l->{'data'} || []}) { my @s = split('=', $_, 2); $d{$s[0]} = $s[1]; } $l->{'data'} = \%d; } return (JSON::XS::encode_json($res), 'Content-Type: application/json'); } for my $l (@l) { for (@{$l->{'data'} || []}) { my @s = split('=', $_, 2); $_ = {'key' => $s[0]}; $_->{'_content'} = $s[1] if defined $s[1]; } } return ($res, $BSXML::notifications); } sub getnotificationpayload { my ($cgi, $payloadkey) = @_; my $file = "$notificationpayloaddir/$payloadkey"; die("404 payload does not exist!\n") unless -e $file; return (readstr($file), 'Content-Type: application/json'); } sub deletenotificationpayload { my ($cgi, $payloadkey) = @_; unlink("$notificationpayloaddir/$payloadkey"); return $BSStdServer::return_ok; } sub notify_repservers { my ($type, $projid, $packid, $job) = @_; my $ev = {'type' => $type, 'project' => $projid}; $ev->{'package'} = $packid if defined $packid; addevent($ev) unless $type eq 'suspendproject'; my @args = ("type=$type", "project=$projid"); push @args, "package=$packid" if defined $packid; push @args, "job=$job" if defined $job; my $reposerver = $BSConfig::partitioning ? 
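# notify_repservers() fans a source change out to the repo server side:
# the event is recorded locally via addevent() and then POSTed in the
# background as simple key=value CGI arguments, along the lines of
#   POST $reposerver/event?type=package&project=<projid>&package=<packid>
# (schematic URL, see @args below); with partitioning enabled the target
# server is resolved per project.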
BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver; my $param = { 'uri' => "$reposerver/event", 'request' => 'POST', 'background' => 1, }; eval { BSWatcher::rpc($param, undef, @args); }; print "warning: $reposerver: $@" if $@; } # this is only used from getfilelist_ajax. # as projid is a remote project, we don't know which reposerver # we need to notify. so send the event to all of them. # we also do not call addevent in this specific case. sub notify_all_repservers { my ($type, $projid, $packid) = @_; my @reposervers = BSSrcServer::Partition::allreposervers(); my @args = ("type=$type", "project=$projid"); push @args, "package=$packid" if defined $packid; for my $reposerver (@reposervers) { my $param = { 'uri' => "$reposerver/event", 'request' => 'POST', 'background' => 1, }; eval { BSWatcher::rpc($param, undef, @args); }; print "warning: $reposerver: $@" if $@; } } sub triggerservicerun { my ($cgi, $projid, $packid) = @_; my $rev = getrev($projid, $packid); my $linkinfo = {}; my $files = BSRevision::lsrev($rev, $linkinfo); $cgi->{'triggerservicerun'} = 1; # hack if ($BSConfig::old_style_services) { # old style, just run the service again... BSSrcServer::Service::runservice($cgi, $rev, $files); } else { my $servicemark = BSSrcServer::Service::genservicemark($projid, $packid, $files); if ($servicemark || $linkinfo->{'xservicemd5'} || $packid eq '_product') { # have to do a new commit... $cgi->{'comment'} ||= 'trigger service run'; $cgi->{'servicemark'} = $servicemark; $rev = addrev($cgi, $projid, $packid, $files); BSSrcServer::Service::runservice($cgi, $rev, $files); } else { die("404 no source service defined!\n"); } } return $BSStdServer::return_ok; } sub waitservicerun { my ($cgi, $projid, $packid) = @_; die("not implemented for old style services\n") if $BSConfig::old_style_services; if (!$BSStdServer::isajax) { my $rev = getrev($projid, $packid); my $linkinfo = {}; my $files = BSRevision::lsrev($rev, $linkinfo); my $servicemark = $linkinfo->{'xservicemd5'}; return $BSStdServer::return_ok unless $servicemark; eval { BSSrcServer::Service::handleservice($rev, $files, $servicemark); }; return $BSStdServer::return_ok unless $@; die($@) if $@ !~ /service in progress/; # pass on to ajax BSHandoff::handoff("/source/$projid/$packid", undef, 'cmd=waitservice', "servicemark=$servicemark"); } my $servicemark = $cgi->{'servicemark'}; die("need a servicemark\n") unless $servicemark; BSWatcher::addfilewatcher(BSSrcrep::serviceerrorfile($projid, $packid, $servicemark)); my $serror = BSSrcrep::getserviceerror($projid, $packid, $servicemark); return $BSStdServer::return_ok unless $serror; return undef if $serror =~ /service in progress/; die("$serror\n"); } sub mergeservicerun { my ($cgi, $projid, $packid) = @_; my $rev = getrev($projid, $packid); my $linkinfo = {}; my $files = BSRevision::lsrev($rev, $linkinfo); my $servicemark = $linkinfo->{'xservicemd5'}; die("package has no service\n") unless $servicemark; $files = BSSrcServer::Service::handleservice($rev, $files, $servicemark); # merge delete $files->{'_service'}; for (sort keys %$files) { next unless /^_service:.*:(.*?)$/s; $files->{$1} = $files->{$_}; delete $files->{$_}; BSSrcrep::copyonefile($projid, $packid, $1, $projid, $packid, $_, $files->{$1}); } $rev = addrev($cgi, $projid, $packid, $files); BSSrcServer::Service::runservice($cgi, $rev, $files); delete $rev->{'project'}; delete $rev->{'package'}; return ($rev, $BSXML::revision); } # # return version and release of commit # sub getcommitinfo { my ($projid, $packid, 
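# getcommitinfo() derives the version/vrev stored with a commit: it looks
# for a matching .spec/.dsc/.kiwi file and parses it with the project's
# build config, sketched as
#   my $bconf = Build::read_config('noarch', $cfile);
#   my $d = Build::parse($bconf, $filename);  # {'version'=>..., 'release'=>...}
# links deliberately report ('unknown', '0') because their expanded
# content is a moving target.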
$srcmd5, $files) = @_; # get version/release from rpm spec/deb dsc/kiwi xml file my $version = 'unknown'; my $release; if ($files->{'_link'}) { # can't know the version/release of a link as it is # a moving target return ('unknown', '0'); } my $cfile; $cfile = "$projectsdir/$projid.conf" if -e "$projectsdir/$projid.conf"; my $bconf = Build::read_config('noarch', $cfile); for my $type ('spec', 'dsc', 'kiwi') { my $rev = {'project' => $projid, 'package' => $packid, 'srcmd5' => $srcmd5}; my $file = findfile($rev, undef, $type, $files); next unless defined $file; my $d = Build::parse($bconf, BSRevision::revfilename($rev, $file, $files->{$file})); next unless defined $d->{'version'}; $version = $d->{'version'}; $release = $d->{'release'} if defined $d->{'release'}; $version = 'unknown' if $d->{'multiversion'}; last; } if (defined($release)) { if ($release =~ /(\d+)\./) { $release = $1; } elsif ($release =~ //) { $release = $1; } elsif ($release =~ /^(\d+)/) { $release = $1; } else { $release = '0'; } } $release ||= '0'; if ($bconf->{'cicntstart'} && $bconf->{'cicntstart'} =~ /(\d+)$/) { my $r = $release; $release = $bconf->{'cicntstart'}; $release =~ s/\d+$/$r/ if $r > $1; } return ($version, $release); } # # get a revision object from a revision identifier # sub getrev { my ($projid, $packid, $revid, $linked, $missingok) = @_; if ($packid =~ /(?{'originpackage'} = $packid if $rev; return $rev; } my $rev = BSRevision::getrev_local($projid, $packid, $revid); return $rev if $rev; my $proj = BSRevision::readproj_local($projid, 1); if ($proj && $proj->{'link'}) { $rev = BSSrcServer::Projlink::getrev_projlink($projid, $proj, $packid, $revid, $linked, $missingok); return $rev if $rev; } if ($revid && $revid =~ /^[0-9a-f]{32}$/) { $rev = BSRevision::getrev_deleted_srcmd5($projid, $packid, $revid); return $rev if $rev; } if (!$proj || $proj->{'remoteurl'}) { $proj = BSSrcServer::Remote::remoteprojid($projid); $rev = BSSrcServer::Remote::getrev_remote($projid, $proj, $packid, $revid, $linked, $missingok) if $proj; return $rev if $rev; } return {'project' => $projid, 'package' => $packid, 'srcmd5' => 'pattern', 'rev' => 'pattern'} if $packid eq '_pattern'; if ($missingok) { $rev = { 'project' => $projid, 'package' => $packid, 'srcmd5' => $BSSrcrep::emptysrcmd5 }; if ($proj && $proj->{'link'}) { my $vrev = BSSrcServer::Projlink::getnewvrev($projid, $proj); $rev->{'vrev'} = $vrev if $vrev; } return $rev; } die("404 package '$packid' does not exist\n") if -e "$projectsdir/$projid.xml"; die("404 project '$projid' does not exist\n"); } sub addmeta_upload { my ($projid, $packid, $files) = @_; # calculate new meta sum my $meta = ''; $meta .= "$files->{$_} $_\n" for sort keys %$files; my $srcmd5 = Digest::MD5::md5_hex($meta); mkdir_p($uploaddir); mkdir_p("$projectsdir/$projid.pkg"); writestr("$uploaddir/addmeta$$", "$projectsdir/$projid.pkg/$packid.upload-MD5SUMS", $meta); return $srcmd5; } ## # generate_commit_flist($files_old, $files_new) # # $files_old/$files_new are hash references as returned by lsrep # # returns a list of changed files categorized similar to svn commit mails # sub generate_commit_flist { my $ret = ""; my %categorized_files; my ($files_old, $files_new) = @_; my %files_all = (%$files_new, %$files_old); for my $fname (sort keys %files_all) { if(!$files_old->{$fname}) { my $flist = $categorized_files{"Added:"} ||= []; push(@$flist, $fname); } elsif(!$files_new->{$fname}) { my $flist = $categorized_files{"Deleted:"} ||= []; push(@$flist, $fname); } elsif($files_old->{$fname} ne 
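# generate_commit_flist() renders the old/new file lists as an svn-style
# commit summary for the notification mails, e.g. (file names are
# illustrative only):
#   Added:
#     feature.patch
#
#   Modified:
#     example.spec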
$files_new->{$fname}) { my $flist = $categorized_files{"Modified:"} ||= []; push(@$flist, $fname); } } for my $cat (sort keys %categorized_files) { $ret .= "$cat\n"; for my $fname (@{$categorized_files{$cat}}) { $ret .= " $fname\n"; } $ret .= "\n"; } return $ret; } # # create a new revision from a file list, returns revision object # sub addrev { my ($cgi, $projid, $packid, $files, $target) = @_; die("404 project '$projid' does not exist\n") unless -e "$projectsdir/$projid.xml"; if ($packid =~ /^_product:/) { die("403 package '$packid' is read-only if a '_product' package exists\n") if -e "$projectsdir/$projid.pkg/_product.xml"; } die("403 package '$packid' is read-only\n") if $packid =~ /(?{'user'}; my $comment = $cgi->{'comment'}; my $requestid = $cgi->{'requestid'}; $user = '' unless defined $user; $user = 'unknown' if $user eq ''; $comment = '' unless defined $comment; $user = str2utf8xml($user); $comment = str2utf8xml($comment); # check if the commit will need a service run my $servicemark; delete $files->{'/SERVICE'}; # just in case... if (!$BSConfig::old_style_services && $packid ne '_project') { if ($cgi->{'noservice'}) { ($servicemark, $files) = BSSrcServer::Service::servicemark_noservice($cgi, $projid, $packid, $files, $target); } else { # we do not want any generated files in the commit! delete $files->{$_} for grep {/^_service[:_]/} keys %$files; $servicemark = exists($cgi->{'servicemark'}) ? $cgi->{'servicemark'} : BSSrcServer::Service::genservicemark($projid, $packid, $files, $target); } } if ($packid eq '_pattern' && ! -e "$projectsdir/$projid.pkg/$packid.xml") { # upgrade pseudo _pattern package to real package my $pack = { 'name' => $packid, 'project' => $projid, 'title' => 'pseudo package to store pattern information', 'description' => "pseudo package to store pattern information\n", }; mkdir_p($uploaddir); writexml("$uploaddir/$$.2", undef, $pack, $BSXML::pack); BSRevision::addrev_meta_replace($cgi, $projid, $packid, [ "$uploaddir/$$.2", "$projectsdir/$projid.pkg/$packid.xml", '_meta' ]); } die("404 package '$packid' does not exist\n") unless $packid eq '_project' || -e "$projectsdir/$projid.pkg/$packid.xml"; if ($target && $target eq 'upload') { my $srcmd5 = addmeta_upload($projid, $packid, $files); my $filenames = join( ', ', keys %$files); notify("SRCSRV_UPLOAD", {project => $projid, package => $packid, filename => $filenames, comment => $comment, target => $target, requestid => $requestid, user => $user}); return {'project' => $projid, 'package' => $packid, 'rev' => 'upload', 'srcmd5' => $srcmd5}; } elsif ($target && $target eq 'repository') { # repository only upload. return {'project' => $projid, 'package' => $packid, 'rev' => 'repository', 'srcmd5' => $BSSrcrep::emptysrcmd5}; } elsif (defined($target)) { # internal version only upload. 
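# recap of the special commit targets handled in addrev() below: 'upload'
# only stages the file list in <package>.upload-MD5SUMS and creates no
# revision, 'repository' stores no sources at all (empty srcmd5), and any
# other defined target just records the tree and returns its md5; only
# the target-less path creates a real revision entry.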
my $srcmd5 = BSSrcrep::addmeta($projid, $packid, $files); return {'project' => $projid, 'package' => $packid, 'rev' => $srcmd5, 'srcmd5' => $srcmd5}; } die("bad projid\n") if $projid =~ /\// || $projid =~ /^\./; die("bad packid\n") if $packid =~ /\// || $packid =~ /^\./; die("bad files (slash)\n") if grep {/\// && $_ ne '/SERVICE'} keys %$files; die("bad files (glyph)\n") if grep {!/^[0-9a-f]{32}$/} values %$files; if ($files->{'_patchinfo'}) { die("bad files in patchinfo container\n") if grep {$_ ne '_patchinfo'} keys %$files; my $p = BSRevision::revreadxml({'project' => $projid, 'package' => $packid}, '_patchinfo', $files->{'_patchinfo'}, $BSXML::patchinfo); BSVerify::verify_patchinfo($p); } # create tree entry $files->{'/SERVICE'} = $servicemark if $servicemark; my $srcmd5 = BSSrcrep::addmeta($projid, $packid, $files); delete $files->{'/SERVICE'}; my $rev = {'srcmd5' => $srcmd5, 'time' => time(), 'user' => $user, 'comment' => $comment, 'requestid' => $requestid}; if ($packid eq '_product') { BSSrcServer::Product::expandproduct($projid, $packid, $rev, $files, $user, 1); } if ($packid ne '_project' && $packid ne '_pattern') { my ($version, $release) = getcommitinfo($projid, $packid, $srcmd5, $files); $rev->{'version'} = $version; $rev->{'vrev'} = $release; } my $rev_old = getrev($projid, $packid); $rev_old->{'keepsignkey'} = 1; my $files_old = BSRevision::lsrev($rev_old); delete $rev_old->{'keepsignkey'}; my $filestr = generate_commit_flist($files_old, $files); $rev->{'version'} = $cgi->{'version'} if defined $cgi->{'version'}; $rev->{'vrev'} = $cgi->{'vrev'} if defined $cgi->{'vrev'}; if ($cgi->{'time'}) { die("specified time is less than time in last commit\n") if ($rev_old->{'time'} || 0) > $cgi->{'time'}; $rev->{'time'} = $cgi->{'time'}; } my $acceptinfo; if ($requestid) { $acceptinfo = {}; $acceptinfo->{'osrcmd5'} = $rev_old->{'srcmd5'} if $rev_old->{'srcmd5'} ne 'empty'; if ($files_old->{'_link'}) { # see if we can expand it eval { my %rev = %$rev_old; BSSrcServer::Link::handlelinks(\%rev, $files_old); $acceptinfo->{'oxsrcmd5'} = $rev{'srcmd5'}; }; } } if ($packid eq '_project') { $rev = BSRevision::addrev_local($cgi, $projid, $packid, $rev); if ($acceptinfo && $cgi->{'withacceptinfo'}) { $acceptinfo->{'rev'} = $rev->{'rev'}; $acceptinfo->{'srcmd5'} = $rev->{'srcmd5'}; $rev->{'acceptinfo'} = $acceptinfo; } notify_repservers('project', $projid); notify("SRCSRV_UPDATE_PROJECT_CONFIG", { "project" => $projid, "files" => $filestr, "comment" => $comment, "sender" => $user }); return $rev; } # help a little with link<->nolink and singleversion<->multiversion changes if (defined($rev->{'version'}) && defined($rev_old->{'version'}) && !defined($cgi->{'vrev'})) { # if this is a known -> unknown version change, max with vrev of last commit # same for unknown -> known if (($rev->{'version'} eq 'unknown' && $rev_old->{'version'} ne 'unknown') || ($rev->{'version'} ne 'unknown' && $rev_old->{'version'} eq 'unknown')) { my $l_old = 0; $l_old = $1 if $rev_old->{'vrev'} =~ /(\d+)$/; my $l_new = 0; $l_new = $1 if $rev->{'vrev'} =~ /(\d+)$/; $rev->{'vrev'} =~ s/\d+$/$l_old + 1/e if $l_old + 1 > $l_new; } } # add to revision database $rev = BSRevision::addrev_local($cgi, $projid, $packid, $rev, $files); # update request acceptinfo if ($acceptinfo && $cgi->{'withacceptinfo'}) { $acceptinfo->{'rev'} = $rev->{'rev'}; $acceptinfo->{'srcmd5'} = $rev->{'srcmd5'}; if ($files->{'_link'}) { # see if we can expand it eval { my %rev = %$rev; BSSrcServer::Link::handlelinks(\%rev, $files); $acceptinfo->{'xsrcmd5'} 
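# acceptinfo bookkeeping: for commits that originate from a request
# ($requestid set), the revision state before and after the commit is
# recorded so the request history can show exactly what was accepted:
#   osrcmd5 / oxsrcmd5  - plain and link-expanded srcmd5 of the old head
#   srcmd5  / xsrcmd5   - the same pair for the newly created revision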
= $rev{'srcmd5'}; }; } $rev->{'acceptinfo'} = $acceptinfo; } # send out notification notify("SRCSRV_COMMIT", {project => $projid, package => $packid, files => $filestr, rev => $rev->{'rev'}, user => $user, comment => $comment, 'requestid' => $requestid}); $rev_old->{'version'} = "unknown" unless defined($rev_old->{'version'}); if (defined($rev->{'version'}) && defined($rev_old->{'version'}) && $rev->{'version'} ne $rev_old->{'version'}) { notify("SRCSRV_VERSION_CHANGE", {project => $projid, package => $packid, files => $filestr, rev => $rev->{'rev'}, oldversion => $rev_old->{'version'}, newversion => $rev->{'version'}, user => $user, comment => $comment, 'requestid' => $requestid}); } notify_repservers('package', $projid, $packid); # put marker back $files->{'/SERVICE'} = $servicemark if $servicemark; return $rev; } # returns service expanded filelist # modifies $rev->{'srcmd5'} sub lsrev_service { my ($rev, $linkinfo) = @_; $linkinfo ||= {}; my $files = BSRevision::lsrev($rev, $linkinfo); $files = BSSrcServer::Service::handleservice($rev, $files, $linkinfo->{'xservicemd5'}) if $linkinfo->{'xservicemd5'}; return $files; } # returns expanded filelist # modifies $rev->{'srcmd5'}, $rev->{'vrev'} sub lsrev_expanded { my ($rev, $linkinfo) = @_; my $files = lsrev_service($rev, $linkinfo); return $files unless $files->{'_link'}; $files = BSSrcServer::Link::handlelinks($rev, $files, $linkinfo); die("$files\n") unless ref $files; return $files; } ########################################################################### ### ### project/package management ### sub findprojects { my ($deleted) = @_; # add support for remote projects? return BSRevision::lsprojects_local($deleted); } sub findpackages { my ($projid, $proj, $nonfatal, $origins, $noexpand, $deleted) = @_; $proj ||= readxml("$projectsdir/$projid.xml", $BSXML::proj, 1); if (!$proj || $proj->{'remoteurl'}) { $proj = BSSrcServer::Remote::remoteprojid($projid); return BSSrcServer::Remote::findpackages_remote($projid, $proj, $nonfatal, $origins, $noexpand, $deleted) if $proj; } # get local packages my @packids = BSRevision::lspackages_local($projid, $deleted); if ($origins) { for (@packids) { $origins->{$_} = $projid unless defined $origins->{$_}; } } @packids = BSSrcServer::Multibuild::addmultibuildpackages($projid, $origins, @packids) unless $deleted; # handle project links (but not if deleted) if ($proj && $proj->{'link'} && !$noexpand && !$deleted) { push @packids, BSSrcServer::Projlink::findpackages_projlink($projid, $proj, $nonfatal, $origins); @packids = sort(BSUtil::unify(@packids)); } return @packids; } sub readpackage { my ($projid, $proj, $packid, $rev, $missingok) = @_; $proj ||= BSRevision::readproj_local($projid, 1); if (!$proj || $proj->{'remoteurl'}) { $proj = BSSrcServer::Remote::remoteprojid($projid); return BSSrcServer::Remote::readpackage_remote($projid, $proj, $packid, $rev, $missingok) if $proj; } my $pack = BSRevision::readpack_local($projid, $packid, 1, $rev); $pack->{'project'} ||= $projid if $pack; if (!$pack && $proj && $proj->{'link'}) { if (!defined($rev) || !BSRevision::readpack_local($projid, $packid, 1)) { $pack = BSSrcServer::Projlink::readpackage_projlink($projid, $proj, $packid, $rev, $missingok); } } die("404 package '$packid' does not exist in project '$projid'\n") if !$missingok && !$pack; return $pack; } sub readproject { my ($projid, $proj, $rev, $missingok) = @_; $proj ||= BSRevision::readproj_local($projid, 1, $rev); if (!$proj || ($proj->{'remoteurl'} && $proj->{'remoteproject'})) { $proj = 
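# lookup order used by getrev()/readpackage()/readproject(): local
# metadata first, then a project link target, then the remote
# interconnect (BSSrcServer::Remote proxies projects that live on another
# OBS instance); a miss is fatal ("404 ...") unless $missingok is set.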
BSSrcServer::Remote::remoteprojid($projid); return BSSrcServer::Remote::readproject_remote($projid, $proj, $rev, $missingok) if $proj && $proj->{'remoteproject'}; } $proj->{'name'} ||= $projid if $proj; die("404 project '$projid' does not exist\n") if !$missingok && !$proj; return $proj; } # collect all global source services via all package and project links sub getprojectservices { my ($cgi, $projid, $packid) = @_; my $services = BSSrcServer::Service::getprojectservices($projid, $packid); return ($services, $BSXML::services); } sub notifypackagechange { my ($cgi, $projid, $packid) = @_; notify_repservers('package', $projid, $packid); return $BSStdServer::return_ok; } # find matching .spec/.dsc/.kiwi file depending on packid and/or repoid sub findfile { my ($rev, $repoid, $ext, $files) = @_; return undef if !$files || !$ext || $ext eq 'none'; # map services files to their real name my %files = map {$_ => $_} keys %$files; if ($files{'_service'}) { for (sort keys %files) { next unless /^_service:.*:(.*?)$/s; $files{$1} = delete($files{$_}) if $files{$_}; } } return $files{'_preinstallimage'} if $ext ne 'kiwi' && keys(%files) == 1 && $files{'_preinstallimage'}; return $files{'simpleimage'} if $files{'simpleimage'}; return $files{'snapcraft.yaml'} if $files{'snapcraft.yaml'} && $ext eq 'snapcraft'; return $files{'appimage.yml'} if $files{'appimage.yml'} && $ext eq 'appimage'; return $files{'Dockerfile'} if $files{'Dockerfile'} && $ext eq 'docker'; return $files{'fissile.yml'} if $files{'fissile.yml'} && $ext eq 'fissile'; if ($ext eq 'arch') { return $files{'PKGBUILD'} if $files{'PKGBUILD'}; return undef; } my $packid = $rev->{'package'}; $packid = $1 if $rev->{'originpackage'} && $rev->{'originpackage'} =~ /:([^:]+)$/; return $files{"$packid-$repoid.$ext"} if defined($repoid) && $files{"$packid-$repoid.$ext"}; # 28.4.2009 mls: deleted "&& defined($repoid)" return $files{"$packid.$ext"} if $files{"$packid.$ext"}; # try again without last components if ($packid =~ /^(.*?)\./) { return $files{"$1.$ext"} if $files{"$1.$ext"}; } my @files = grep {/\.$ext$/} keys %files; @files = grep {/^\Q$packid\E/i} @files if @files > 1; return $files{$files[0]} if @files == 1; if (@files > 1) { if (!defined($repoid)) { # return undef; @files = sort @files; return $files{$files[0]}; } @files = grep {/^\Q$packid-$repoid\E/i} @files if @files > 1; return $files{$files[0]} if @files == 1; } return undef; } ######################################################################### # set up kiwi project callback sub kiwibootcallback { my ($projid, $packid) = @_; BSVerify::verify_projid($projid); BSVerify::verify_packid($packid); BSSrcServer::Access::checksourceaccess($projid, $packid); my $rev = getrev($projid, $packid); my $files = BSRevision::lsrev($rev); my $file = findfile($rev, undef, 'kiwi', $files); die("no kiwi file found\n") unless $file; my $xml = BSRevision::revreadstr($rev, $file, $files->{$file}); return ($xml, {'project' => $projid, 'package' => $packid, 'srcmd5' => $rev->{'srcmd5'}, 'file' => $file}); } $Build::Kiwi::bootcallback = \&kiwibootcallback; $Build::Kiwi::urlmapper = \&BSUrlmapper::urlmapper; ######################################################################### sub getprojquotapackage { my ($projid) = @_; if (!exists($packagequota{':packages'})) { my $quotaxml = readxml($BSConfig::bsquotafile, $BSXML::quota, 1); for my $p (@{$quotaxml->{'project'} || []}) { $packagequota{$p->{'name'}} = $p->{'packages'}; } $packagequota{':packages'} = $quotaxml->{'packages'}; } while ($projid) { return 
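# quota resolution (performed by the loop around this return): the
# project id is shortened colon component by colon component until a
# quota entry matches, schematically
#   home:user:branches:foo -> home:user:branches -> home:user -> home
# falling back to the global ':packages' limit of $BSConfig::bsquotafile
# (the project name above is a made-up example).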
$packagequota{$projid} if exists $packagequota{$projid}; last unless $projid =~ s/:[^:]*$//; } return $packagequota{':packages'}; } sub readbuildenv { my ($pinfo, $rev, $files, $repoid, $arch) = @_; my $bifile = "_buildenv.$repoid.$arch"; $bifile = '_buildenv' unless $files->{$bifile}; eval { die("$bifile does not exist\n") unless $files->{$bifile}; $pinfo->{'hasbuildenv'} = BSRevision::revreadstr($rev, $bifile, $files->{$bifile}); }; if ($@) { $pinfo->{'error'} = "buildenv: $@"; chomp($pinfo->{'error'}); } } sub getprojpack { my ($cgi, $projids, $repoids, $packids, $arch) = @_; local *oldbsrpc = *BSRPC::rpc; local *BSRPC::rpc; die("unsupported view\n") if $cgi->{'view'} && $cgi->{'view'} ne 'storable'; if ($cgi->{'noremote'}) { *BSRPC::rpc = sub {die("400 interconnect error: noremote option\n");}; } else { *BSRPC::rpc = sub { my $r = eval { oldbsrpc(@_) }; if ($@) { $@ = "interconnect error: $@" unless $@ =~ /(?:remote|interconnect) error:/; die($@); } return $r; }; } $arch ||= 'noarch'; if ($cgi->{'buildinfo'}) { die("getprojpack buildinfo requests must have at least one project parameter\n") unless @{$projids || []} >= 1; die("getprojpack buildinfo requests must have exactly one repository parameter\n") unless @{$repoids || []} == 1; die("getprojpack buildinfo requests must not have more than one package parameter\n") unless @{$packids || []} <= 1; $cgi->{'parseremote'} = 1; $cgi->{'withremotemap'} = 1; $cgi->{'withconfig'} = 1; $cgi->{'withrepos'} = 1; $cgi->{'ignoredisable'} = 1; $cgi->{'nopackages'} = 1 unless @{$packids || []}; } if ($cgi->{'remotemaponly'}) { die("getprojpack remotemaponly makes no sense with packages\n") if @{$packids || []}; $cgi->{'withremotemap'} = 1; } my $partition = $cgi->{'partition'}; die("No such partition '$partition'\n") if $partition && $BSConfig::partitionservers && !$BSConfig::partitionservers->{$partition}; my $remotemap = $cgi->{'withremotemap'} ? {} : undef; if ($remotemap && $partition) { $remotemap->{':partition'} = $partition; $remotemap->{':partitions'} = {}; } $projids = [ findprojects() ] unless $projids; if ($partition) { for my $projid (splice @$projids) { my $par = BSSrcServer::Partition::projid2partition($projid); die("cannot determine partition for $projid\n") unless defined $par; if ($par ne $partition) { # check if it is remote, if not then it belongs to another partition my $r = BSRevision::readproj_local($projid, 1); $r = BSSrcServer::Remote::remoteprojid($projid) unless defined $r; if (!($r && $r->{'remoteurl'})) { # not remote, but on wrong partition # if asked for a specific project, put it in remotemap next unless $remotemap && $cgi->{'project'}; BSSrcServer::Partition::checkpartition($remotemap, $projid, $r) if $remotemap->{':partitions'} && !$remotemap->{':partitions'}->{$projid}; if ($remotemap->{$projid} && $cgi->{'withconfig'} && $remotemap->{$projid}->{'partition'}) { # also put config in remotemap if asked for a specific project my $c = readstr("$projectsdir/$projid.conf", 1); $remotemap->{$projid}->{'config'} = defined($c) ? 
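# partition handling (summary): with $BSConfig::partitioning every local
# project belongs to exactly one partition/repo server.  Projects of a
# foreign partition are dropped from the regular result; when a specific
# project was requested they are exported through the remotemap instead
# (optionally with their config, as here), so the scheduler treats them
# like remote projects.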
$c : ''; } next; } } push @$projids, $projid; } } if ($BSConfig::limit_projects && $BSConfig::limit_projects->{$arch}) { my %limit_projids = map {$_ => 1} @{$BSConfig::limit_projects->{$arch}}; $projids = [ grep {$limit_projids{$_}} @$projids ]; } $repoids = { map {$_ => 1} @$repoids } if $repoids; $packids = { map {$_ => 1} @$packids } if $packids; my $bconf = Build::read_config($arch); BSSrcServer::Projlink::enable_frozenlinks_cache(); my %channeldata; my @res; my @projids = @$projids; while (@projids) { my $projid = shift @projids; my $jinfo = { 'name' => $projid }; my $proj = BSRevision::readproj_local($projid, 1); if ($remotemap && (!$proj || $proj->{'remoteurl'})) { if ($cgi->{'project'}) { # fill remote data if asked for that specific project my $r = BSSrcServer::Remote::remoteprojid($projid); if ($r) { eval { BSSrcServer::Remote::fetchremoteproj($r, $projid, $remotemap); BSSrcServer::Remote::fetchremoteconfig($r, $projid, $remotemap) if $cgi->{'withconfig'}; }; } } elsif (!exists($remotemap->{$projid}) ) { # put at least the proto into the remotemap my $r = BSSrcServer::Remote::remoteprojid($projid); $remotemap->{$projid} = {%$r, 'proto' => 1} if $r; } } next if $cgi->{'remotemaponly'}; if (!$proj && $cgi->{'parseremote'} && $cgi->{'project'} && $remotemap && $remotemap->{$projid}) { $proj = $remotemap->{$projid}; } if ($cgi->{'buildinfo'}) { if (!@res) { die("project $projid does not exist\n") unless $proj; die("repository @{[keys %$repoids]} does not exist in $projid\n") unless grep {$repoids->{$_->{'name'}}} @{$proj->{'repository'} || []}; } else { # kiwi projects, we just need the project data and config $packids = undef; $repoids = undef;; $cgi->{'nopackages'} = 1; $cgi->{'parseremote'} = 0; } } next unless $proj; for (qw{kind}) { $jinfo->{$_} = $proj->{$_} if exists $proj->{$_}; } my %expandedrepos; if ($cgi->{'withrepos'}) { $jinfo->{'repository'} = $proj->{'repository'} || []; if ($repoids && !$cgi->{'buildinfo'}) { $jinfo->{'repository'} = [ grep {$repoids->{$_->{'name'}}} @{$proj->{'repository'} || []} ]; } if ($cgi->{'expandedrepos'}) { $jinfo->{'repository'} = Storable::dclone($jinfo->{'repository'}); for my $repo (@{$jinfo->{'repository'}}) { my $base = {}; my @prps = expandsearchpath($projid, $repo->{'name'}, $remotemap, $base); $expandedrepos{"$projid/$repo->{'name'}"} = [ @prps ]; for my $prp (@prps) { my @s = split('/', $prp, 2); $prp = {'project' => $s[0], 'repository' => $s[1]}; } $repo->{'path'} = \@prps; $repo->{'base'} = $base; } } elsif ($remotemap) { for my $repo (@{$jinfo->{'repository'}}) { next if $repoids && !$repoids->{$repo->{'name'}}; eval { my @prps = expandsearchpath($projid, $repo->{'name'}, $remotemap); $expandedrepos{"$projid/$repo->{'name'}"} = [ @prps ]; }; $expandedrepos{"$projid/$repo->{'name'}"} = $@ if $@; } } } if ($cgi->{'buildinfo'} && !@res) { # add all projects from the expanded path my $er = $expandedrepos{"$projid/$cgi->{'repository'}->[0]"}; die($er) unless ref($er); eval { concatconfigs($projid, $cgi->{'repository'}->[0], $remotemap, @$er); }; for my $prp (@$er) { my ($p) = split('/', $prp, 2); next if $remotemap && $remotemap->{$p}; push @projids, $p unless $p eq $projid || grep {$_ eq $p} @projids; } } if ($remotemap) { for my $lprojid (map {$_->{'project'}} @{$proj->{'link'} || []}) { my $lproj = BSSrcServer::Remote::remoteprojid($lprojid); eval { BSSrcServer::Remote::fetchremoteproj($lproj, $lprojid, $remotemap) if $lproj; }; BSSrcServer::Partition::checkpartition($remotemap, $lprojid) if $remotemap->{':partitions'} && 
!$remotemap->{':partitions'}->{$lprojid}; } } if ($cgi->{'withconfig'}) { my $config = readstr("$projectsdir/$projid.conf", 1); if ($config) { #my $s1 = '^\s*macros:\s*$.*?^\s*:macros\s*$'; #my $s2 = '^\s*macros:\s*$.*\Z'; #$config =~ s/$s1//gmsi; #$config =~ s/$s2//gmsi; $jinfo->{'config'} = $config unless $config =~ /^\s*$/s; } } if ($cgi->{'withsrcmd5'} && -s "$projectsdir/$projid.pkg/pattern-MD5SUMS") { my $patterns = readstr("$projectsdir/$projid.pkg/pattern-MD5SUMS", 1); $jinfo->{'patternmd5'} = Digest::MD5::md5_hex($patterns) if $patterns; } elsif ($cgi->{'withsrcmd5'} && $cgi->{'nopackages'}) { # used by publisher to get patternmd5 eval { my $rev = getrev($projid, '_pattern'); my $files = lsrev_expanded($rev); $jinfo->{'patternmd5'} = $rev->{'srcmd5'}; }; } my @packages; @packages = findpackages($projid, $proj, 2) unless $cgi->{'nopackages'} || $proj->{'remoteurl'}; @packages = @{$cgi->{'package'}} if $proj->{'remoteurl'} && $cgi->{'package'} && $cgi->{'parseremote'}; my $missing_packages = grep {$_ eq ':missing_packages'} @packages; if (!$proj->{'remoteurl'} && !$missing_packages && !$cgi->{'nopackages'}) { BSSrcServer::Multibuild::prunemultibuild($projid, \@packages); } if ($missing_packages) { @packages = grep {$_ ne ':missing_packages'} @packages; $jinfo->{'missingpackages'} = 1; } next if $repoids && !grep {$repoids->{$_->{'name'}}} @{$proj->{'repository'} || []}; next if $packids && !grep {$packids->{$_}} @packages; for (qw{title description build publish debuginfo useforbuild remoteurl remoteproject download link sourceaccess privacy access lock}) { $jinfo->{$_} = $proj->{$_} if exists $proj->{$_}; } if ($proj->{'access'}) { # we need the roles if the project is protected, see checkroles() in the scheduler my ($person, $group) = BSSrcServer::Access::mergeroles($projid, $proj); $jinfo->{'person'} = $person if $person && @$person; $jinfo->{'group'} = $group if $group && @$group; } # Check build flags in project meta data # packages inherit the project wide settings and may override them my $pdisabled; my $pdisable = {}; my $penable = {}; undef($penable) if $cgi->{'ignoredisable'}; if ($jinfo->{'build'} && $penable) { for (@{$proj->{'repository'} || []}) { my $disen = BSUtil::enabled($_->{'name'}, $jinfo->{'build'}, 1, $arch); if ($disen) { $penable->{$_->{'name'}} = 1; } else { $pdisable->{$_->{'name'}} = 1; } } $pdisabled = 1 if !keys(%$penable); } else { # build is enabled undef($penable); } # check for a global lock my $plocked; if (!$cgi->{'ignoredisable'} && $jinfo->{'lock'}) { for (@{$proj->{'repository'} || []}) { if (BSUtil::enabled($_->{'name'}, $jinfo->{'lock'}, 0, $arch)) { $plocked = 1; } else { $plocked = undef; # at least one repo is not locked last; } } } # Check package number quota my $quota_exceeded; if ($BSConfig::bsquotafile) { my $pquota = getprojquotapackage($projid); $quota_exceeded = 1 if defined($pquota) && @packages > $pquota; } if (!$cgi->{'ignoredisable'} && !grep {!$_->{'status'} || $_->{'status'} ne 'disabled'} @{$proj->{'repository'} || []}) { # either no repositories or all disabled. 
# No need to check packages
@packages = (); } @packages = () if $cgi->{'nopackages'}; my @pinfo; my %bconfs; my $exclude_all; my $exclude_repos; if (!$cgi->{'ignoredisable'} && defined($cgi->{'arch'})) { $exclude_repos = {}; $exclude_all = 1; for (@{$proj->{'repository'} || []}) { if (grep {$_ eq $arch} @{$_->{'arch'} || []}) { undef $exclude_all; } else { $exclude_repos->{$_->{'name'}} = 1; } } } my %packages_multibuild; for my $packid (@packages) { next unless $packid =~ /(?{$packid}) { next unless $packid =~ /(?{$1}; } my $pinfo = {'name' => $packid}; push @pinfo, $pinfo; my $pack; if ($packid =~ /(?{'remoteurl'} && $cgi->{'parseremote'}; $pack ||= {} if $proj->{'link'}; if (!$pack) { $pinfo->{'error'} = 'no metadata'; next; } for (qw{build publish debuginfo useforbuild bcntsynctag sourceaccess privacy access lock releasename}) { $pinfo->{$_} = $pack->{$_} if $pack->{$_}; } if (!$pinfo->{'build'}) { $pinfo->{'build'}->{'enable'} = $pack->{'enable'} if $pack->{'enable'}; $pinfo->{'build'}->{'disable'} = $pack->{'disable'} if $pack->{'disable'}; } if ($exclude_all) { $pinfo->{'error'} = 'excluded'; next; } if ($plocked && !$pinfo->{'lock'}) { $pinfo->{'error'} = 'locked'; next; } my $enable = defined($penable) ? {%$penable} : undef; my $disable = {%$pdisable}; if (!$cgi->{'ignoredisable'} && $pinfo->{'build'}) { for (@{$proj->{'repository'} || []}) { my $default = exists($disable->{$_->{'name'}}) ? 0 : 1; my $disen = BSUtil::enabled($_->{'name'}, $pinfo->{'build'}, $default, $arch); if ($disen) { $enable->{$_->{'name'}} = 1; delete $disable->{$_->{'name'}}; } else { $disable->{$_->{'name'}} = 1; delete $enable->{$_->{'name'}}; } } } undef($disable) if $enable && !keys(%$enable); undef($enable) if $disable && !keys(%$disable); if ((!$disable || $pdisabled) && $enable && !%$enable) { $pinfo->{'error'} = 'disabled'; next; } if ($quota_exceeded) { $pinfo->{'error'} = 'quota exceeded'; next; } if ($cgi->{'withsrcmd5'} || $cgi->{'withdeps'}) { my $rev; my $linked = []; $BSSrcServer::Remote::collect_remote_getrev = 1 unless $packages_pass; eval { $rev = getrev($projid, $packid, 'build', $linked); }; $BSSrcServer::Remote::collect_remote_getrev = 0; $pinfo->{'originproject'} = $rev->{'originproject'} if $rev && $rev->{'originproject'}; $pinfo->{'linked'} = $linked if @$linked; if ($@) { $pinfo->{'error'} = $@; $pinfo->{'error'} =~ s/\n$//s; if (!$packages_pass && $pinfo->{'error'} =~ /collect_remote_getrev$/) { pop @pinfo; push @packages_delayed, $packid; } next; } if (!$rev || $rev->{'srcmd5'} eq 'empty' || $rev->{'srcmd5'} eq $BSSrcrep::emptysrcmd5) { $pinfo->{'error'} = 'no source uploaded'; next; } $pinfo->{'srcmd5'} = $rev->{'srcmd5'}; $pinfo->{'rev'} = $rev->{'rev'}; $pinfo->{'revtime'} = $rev->{'time'} if $rev->{'time'};
# need the relsynctag/releasename for packages from a project link
if ($rev->{'originproject'} && !%$pack) { # XXX: what about remote projects?
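# A package that is merely inherited through a project link has an empty
# local meta ($pack is {}), so the bcntsynctag/releasename defaults are
# taken from the origin package's metadata below. Presumably readpack_local()
# simply returns undef when the origin project lives on a remote instance,
# in which case these fields just stay unset.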
my $lpack = BSRevision::readpack_local($rev->{'originproject'}, $rev->{'package'}, 1); if ($lpack) { for (qw{bcntsynctag releasename}) { $pinfo->{$_} = $lpack->{$_} if $lpack->{$_}; } } } my $files; eval { my $linkinfo = {}; $files = BSRevision::lsrev($rev, $linkinfo); if ($linkinfo->{'xservicemd5'}) { $files = BSSrcServer::Service::handleservice($rev, $files, $linkinfo->{'xservicemd5'}); $pinfo->{'srcmd5'} = $rev->{'srcmd5'}; } if ($linkinfo->{'xservicemd5'} || $linkinfo->{'lservicemd5'} || $linkinfo->{'lsrcmd5'}) { my $meta = ''; $meta .= "$files->{$_} $_\n" for sort keys %$files; $pinfo->{'verifymd5'} = Digest::MD5::md5_hex($meta); } }; if ($@) { $pinfo->{'error'} = $@; $pinfo->{'error'} =~ s/\n$//s; next; } if ($files->{'_service'} && -e "$eventdir/service/${projid}::$packid") { $pinfo->{'error'} = 'source update running'; next; } if ($files->{'_service_error'}) { $pinfo->{'error'} = 'source service failed'; next; } if ($files->{'_link'}) { $BSSrcServer::Remote::collect_remote_getrev = 1 unless $packages_pass; eval { $files = BSSrcServer::Link::handlelinks($rev, $files, {'linked' => $linked}); }; $BSSrcServer::Remote::collect_remote_getrev = 0; if ($@) { $files = "$@"; $files =~ s/\n$//; } if (@$linked) { $pinfo->{'linked'} = $linked; if ($remotemap && $remotemap->{':partitions'}) { # we need to have all partition infos set for the links for my $li (@$linked) { my $lprojid = $li->{'project'}; next if $remotemap->{$lprojid} || $remotemap->{':partitions'}->{$lprojid}; my $lproj = BSRevision::readproj_local($lprojid, 1); if ($lproj && !$lproj->{'remoteurl'}) { BSSrcServer::Partition::checkpartition($remotemap, $lprojid, $lproj); } else { $remotemap->{':partitions'}->{$lprojid} = 1; # not on a partition } } } } if (!ref $files) { $pinfo->{'error'} = defined($files) ? 
$files : "could not get file list"; if (!$packages_pass && $pinfo->{'error'} =~ /collect_remote_getrev$/) { pop @pinfo; push @packages_delayed, $packid; } next; } $pinfo->{'srcmd5'} = $rev->{'srcmd5'}; my $meta = ''; $meta .= "$files->{$_} $_\n" for sort keys %$files; $pinfo->{'verifymd5'} = Digest::MD5::md5_hex($meta); } # get rid of old multibuild packages, this assumes the main package comes first @packages = grep {!/^\Q$packid\E:/} @packages if $packages_multibuild{$packid}; # get current multibuild state my $mb = BSSrcServer::Multibuild::updatemultibuild($projid, $packid, $files, 1); unshift @packages, map {"$packid:$_"} @{$mb->{'flavor'} || $mb->{'package'} || []} if $mb; if ($packid eq '_pattern') { $jinfo->{'patternmd5'} = $pinfo->{'srcmd5'}; $pinfo->{'error'} = 'excluded'; next; } if ($files->{'_aggregate'}) { my $aggregatelist = BSRevision::revreadxml($rev, '_aggregate', $files->{'_aggregate'}, $BSXML::aggregatelist, 1); if (!$aggregatelist) { $pinfo->{'error'} = "bad aggregatelist data"; next; } eval { BSVerify::verify_aggregatelist($aggregatelist); }; if ($@) { my $err = $@; $err =~ s/\n$//s; $pinfo->{'error'} = "bad aggregatelist: $err"; next; } $pinfo->{'aggregatelist'} = $aggregatelist; if ($remotemap && $aggregatelist) { for my $aggregate (@{$aggregatelist->{'aggregate'} || []}) { my $aprojid = $aggregate->{'project'}; next if $remotemap->{$aprojid} && !$remotemap->{$aprojid}->{'proto'}; my $aproj = BSRevision::readproj_local($aprojid, 1); if (!$aproj || $aproj->{'remoteurl'}) { $aproj = BSSrcServer::Remote::remoteprojid($aprojid); eval { BSSrcServer::Remote::fetchremoteproj($aproj, $aprojid, $remotemap) if $aproj; }; } else { BSSrcServer::Partition::checkpartition($remotemap, $aprojid, $aproj) if $remotemap->{':partitions'} && !$remotemap->{':partitions'}->{$aprojid}; } } } if (($enable && %$enable) || ($disable && %$disable)) { my @dinfo = (); for my $repo (@{$proj->{'repository'} || []}) { my $repoid = $repo->{'name'}; next if $repoids && !$repoids->{$repoid}; if ((!$disable || $disable->{$repoid}) && !(!$enable || $enable->{$repoid})) { push @dinfo, {'repository' => $repoid, 'error' => 'disabled'}; next; } } $pinfo->{'info'} = \@dinfo if @dinfo; } } elsif ($files->{'_patchinfo'}) { my $patchinfo = BSRevision::revreadxml($rev, '_patchinfo', $files->{'_patchinfo'}, $BSXML::patchinfo, 1); if (!$patchinfo) { $pinfo->{'error'} = "bad patchinfo data"; next; } eval { BSVerify::verify_patchinfo($patchinfo); }; if ($@) { my $err = $@; chomp $err; $pinfo->{'error'} = "bad patchinfo: $err"; next; } $pinfo->{'patchinfo'} = $patchinfo; if (($enable && %$enable) || ($disable && %$disable)) { my @dinfo = (); for my $repo (@{$proj->{'repository'} || []}) { my $repoid = $repo->{'name'}; next if $repoids && !$repoids->{$repoid}; if ((!$disable || $disable->{$repoid}) && !(!$enable || $enable->{$repoid})) { push @dinfo, {'repository' => $repoid, 'error' => 'disabled'}; next; } } $pinfo->{'info'} = \@dinfo if @dinfo; } } elsif ($files->{'_channel'}) { if (!exists($channeldata{$files->{'_channel'}})) { eval { my $channel = BSRevision::revreadxml($rev, '_channel', $files->{'_channel'}, $BSXML::channel); BSVerify::verify_channel($channel); $channeldata{$files->{'_channel'}} = $channel; }; if ($@) { my $err = $@; chomp $err; $channeldata{$files->{'_channel'}} = $err; } } my $channel = $channeldata{$files->{'_channel'}} || 'bad data'; if (!ref($channel)) { $pinfo->{'error'} = "bad channel: $channel"; next; } $pinfo->{'channelmd5'} = $files->{'_channel'}; if (($enable && %$enable) || ($disable 
&& %$disable)) { my @dinfo = (); for my $repo (@{$proj->{'repository'} || []}) { my $repoid = $repo->{'name'}; next if $repoids && !$repoids->{$repoid}; if ((!$disable || $disable->{$repoid}) && !(!$enable || $enable->{$repoid})) { push @dinfo, {'repository' => $repoid, 'error' => 'disabled'}; next; } } $pinfo->{'info'} = \@dinfo if @dinfo; } } elsif ($cgi->{'withdeps'}) { my @dinfo; if (!%$files) { $pinfo->{'error'} = 'empty'; next; } $pinfo->{'constraintsmd5'} = $files->{'_constraints'} if $files->{'_constraints'}; if ($files->{'_buildenv'}) { $pinfo->{'hasbuildenv'} = 1; readbuildenv($pinfo, $rev, $files, (keys %$repoids)[0], $arch) if $cgi->{'buildinfo'}; } for my $repo (@{$proj->{'repository'} || []}) { my $repoid = $repo->{'name'}; next if $repoids && !$repoids->{$repoid}; my $rinfo = {'repository' => $repoid}; push @dinfo, $rinfo; if ($exclude_repos && $exclude_repos->{$repoid}) { $rinfo->{'error'} = 'excluded'; next; } if ((!$disable || $disable->{$repoid}) && !(!$enable || $enable->{$repoid})) { $rinfo->{'error'} = 'disabled'; next; } if (!$bconfs{$repoid}) { print "calculating config for $projid/$repoid $arch\n"; my $path = $expandedrepos{"$projid/$repoid"}; if (!$path) { eval { my @path = expandsearchpath($projid, $repoid, $remotemap); $expandedrepos{"$projid/$repoid"} = \@path; }; $expandedrepos{"$projid/$repoid"} = $@ if $@; $path = $expandedrepos{"$projid/$repoid"}; } eval { die($path) unless ref $path; my $c = concatconfigs($projid, $repoid, $remotemap, @$path); $bconfs{$repoid} = Build::read_config($arch, [ split("\n", $c) ]); }; if ($@) { my $err = $@; chomp $err; $bconfs{$repoid} = {'error' => $err}; } } my $conf = $bconfs{$repoid}; if ($conf->{'error'}) { $rinfo->{'error'} = $conf->{'error'}; next; } my $type = $conf->{'type'}; if (!$type || $type eq 'UNDEFINED') { $rinfo->{'error'} = 'bad build configuration, no build type defined or detected'; next; } my $file = findfile($rev, $repoid, $type, $files); if (!defined($file)) { $rinfo->{'error'} = 'excluded'; next; } $rinfo->{'file'} = $file; my $buildtype = Build::recipe2buildtype($file); if (!$buildtype) { $rinfo->{'error'} = "don't know how to build $file"; next; } if (($type eq 'kiwi' || $buildtype eq 'kiwi') && $BSConfig::kiwiprojects && !$cgi->{'ignoredisable'}) { my %kiwiprojects = map {$_ => 1} @$BSConfig::kiwiprojects; if (!$kiwiprojects{$projid}) { $rinfo->{'error'} = 'kiwi image building is not enabled for this project'; next; } } # get build dependency info local $conf->{'buildflavor'}; local $conf->{'obspackage'} = $packid; if ($packid =~ /(?{'obspackage'} = $1; $conf->{'buildflavor'} = $2; } my $d; eval { $d = Build::parse_typed($conf, BSRevision::revfilename($rev, $file, $files->{$file}), $buildtype); }; if ($@) { $d = {'error' => $@}; $d->{'error'} =~ s/\n.*//s; } $d = { 'error' => "can not parse $file" } unless $d; data2utf8xml($d); if ($d->{'error'}) { $rinfo->{'error'} = $d->{'error'}; next; } if (!defined($d->{'name'})) { $rinfo->{'error'} = "can not parse name from $file"; next; } my $version = defined($d->{'version'}) ? 
$d->{'version'} : 'unknown'; $pinfo->{'versrel'} ||= "$version-$rev->{'vrev'}"; $rinfo->{'name'} = $d->{'name'}; $rinfo->{'dep'} = $d->{'deps'}; if ($d->{'prereqs'}) { my %deps = map {$_ => 1} (@{$d->{'deps'} || []}, @{$d->{'subpacks'} || []}); my @prereqs = grep {!$deps{$_} && !/^%/} @{$d->{'prereqs'}}; $rinfo->{'prereq'} = \@prereqs if @prereqs; } # add all source services to be used at build time if ($files->{'_service'}) { my $services = BSRevision::revreadxml($rev, '_service', $files->{'_service'}, $BSXML::services, 1) || {}; for my $service (@{$services->{'service'} || []}) { next unless $service->{'mode'} && $service->{'mode'} eq 'buildtime'; push @{$rinfo->{'buildtimeservice'}}, $service->{'name'}; } } # KIWI Products support debugmedium and sourcemedium filters if ($type eq 'kiwi' && ($d->{'imagetype'}[0] || '') eq 'product') { $rinfo->{'nodbgpkgs'} = 1 if defined($d->{'debugmedium'}) && $d->{'debugmedium'} <= 0; $rinfo->{'nosrcpkgs'} = 1 if defined($d->{'sourcemedium'}) && $d->{'sourcemedium'} <= 0; } # KIWI Images don't build with local arch if ($type eq 'kiwi' && ($d->{'imagetype'}[0] || '') ne 'product') { $rinfo->{'error'} = 'excluded' if defined($BSConfig::localarch) && $arch eq 'local'; } if ($type eq 'kiwi' && ($d->{'imagetype'}[0] || '') eq 'product') { # KIWI Products always build on the first repo arch $rinfo->{'imagearch'} = [ @{$d->{'exclarch'}} ] if $d->{'exclarch'}; } else { my $myarch = $conf->{'target'} ? (split('-', $conf->{'target'}))[0] : $arch; $rinfo->{'error'} = 'excluded' if $d->{'exclarch'} && !grep {$_ eq $myarch} @{$d->{'exclarch'}}; $rinfo->{'error'} = 'excluded' if $d->{'badarch'} && grep {$_ eq $myarch} @{$d->{'badarch'}}; } for ('imagetype', 'extrasource') { $rinfo->{$_} = $d->{$_} if exists $d->{$_}; } for (@{$d->{'path'} || []}) { my $r = { 'project' => $_->{'project'}, 'repository' => $_->{'repository'} }; $r->{'priority'} = $_->{'priority'} if defined $_->{'priority'}; push @{$rinfo->{'path'}}, $r; } for (@{$d->{'containerpath'} || []}) { my $r = { 'project' => $_->{'project'}, 'repository' => $_->{'repository'} }; $r->{'priority'} = $_->{'priority'} if defined $_->{'priority'}; push @{$rinfo->{'containerpath'}}, $r; } if ($remotemap && ($rinfo->{'path'} || $rinfo->{'containerpath'})) { my @kiwipath = (@{$rinfo->{'path'} || []}, @{$rinfo->{'containerpath'} || []}); @kiwipath = map {"$_->{'project'}/$_->{'repository'}"} grep {$_->{'project'} ne '_obsrepositories'} @kiwipath; # simple way to fill the remote map eval { concatconfigs($projid, $repoid, $remotemap, @kiwipath); }; if ($cgi->{'buildinfo'} && !@res) { # add kiwipath projects for my $prp (@kiwipath) { my ($p) = split('/', $prp, 2); next if $remotemap && $remotemap->{$p}; push @projids, $p unless $p eq $projid || grep {$_ eq $p} @projids; } } } } $pinfo->{'info'} = \@dinfo if @dinfo; } } } $jinfo->{'package'} = \@pinfo; push @res, $jinfo; } BSSrcServer::Projlink::disable_frozenlinks_cache(); my $ret = {'repoid' => $datarepoid, 'project' => \@res}; if ($remotemap) { delete $remotemap->{':partition'}; delete $remotemap->{':partitions'}; } #print Dumper($remotemap); if ($remotemap && %$remotemap) { for my $p (sort keys %$remotemap) { next unless $remotemap->{$p}; my $r = {'project' => $p}; # keep in sync with BSXML! 
# (we only use access/publish from the flags)
for (qw{kind root remoteurl remoteproject remoteroot partition proto config publish access person group repository error}) { $r->{$_} = $remotemap->{$p}->{$_} if defined($remotemap->{$p}->{$_}); } $r->{'error'} =~ s/\n$// if $r->{'error'}; push @{$ret->{'remotemap'}}, $r; } } if (%channeldata) { for my $md5 (sort keys %channeldata) { next unless ref($channeldata{$md5}); push @{$ret->{'channeldata'}}, {'md5' => $md5, 'channel' => $channeldata{$md5} }; } } if ($cgi->{'view'} && $cgi->{'view'} eq 'storable') { return ($ret, \&BSUtil::tostorable, 'Content-Type: application/octet-stream'); } return ($ret, $BSXML::projpack); } sub getprojectlist { my ($cgi) = @_; my @projects = findprojects($cgi->{'deleted'}); @projects = map {{'name' => $_}} @projects; return ({'entry' => \@projects}, $BSXML::dir); } sub getproject { my ($cgi, $projid) = @_; my $proj = readproject($projid, undef, $cgi->{'rev'}); return ($proj, $BSXML::proj); }
#########################################################################
sub pubkey2sslcert { my ($projid, $pubkeyfile, $signkeyfile) = @_; die("don't know how to generate a ssl cert\n") unless $BSConfig::sign; $pubkeyfile ||= "$projectsdir/$projid.pkg/_pubkey"; $signkeyfile ||= "$projectsdir/$projid.pkg/_signkey"; my @signargs; push @signargs, '--project', $projid if $BSConfig::sign_project; my $cert = ''; eval { $cert = BSUtil::xsystem(undef, $BSConfig::sign, @signargs, '-P', $signkeyfile, '-C', $pubkeyfile); }; if ($@) { die("Need an RSA key for openssl signing, please create a new key for $projid\n") if $@ =~ /not an RSA private key/i; die($@); } return $cert; } sub updatesslcert { my ($projid, $pubkeyfile, $signkeyfile) = @_; my $rev = BSRevision::getrev_meta($projid, undef); return undef unless $rev; my $files = BSRevision::lsrev($rev); return undef unless $files->{'_sslcert'}; my $cert = pubkey2sslcert($projid, $pubkeyfile, $signkeyfile); mkdir_p($uploaddir); writestr("$uploaddir/sslcert.$$", undef, $cert); return "$uploaddir/sslcert.$$"; } sub createkey { my ($cgi, $projid) = @_; $cgi->{'comment'} ||= 'create sign key'; die("don't know how to create a key\n") unless $BSConfig::sign; die("404 project $projid does not exist\n") unless -s "$projectsdir/$projid.xml"; mkdir_p($uploaddir); unlink("$uploaddir/signkey.$$"); my @keyargs = ('rsa@2048', '800'); my @signargs; push @signargs, '--project', $projid if $BSConfig::sign_project; push @signargs, '-P', "$uploaddir/signkey.$$";
#push @signargs, '-h', 'sha256';
my $obsname = $BSConfig::obsname || 'build.opensuse.org'; my $pubkey = ''; local *F; open(F, '-|', $BSConfig::sign, @signargs, '-g', @keyargs, "$projid OBS Project", "$projid\@$obsname") || die("$BSConfig::sign: $!\n"); 1 while sysread(F, $pubkey, 4096, length($pubkey)); close(F) || die("$BSConfig::sign: $?\n"); die("sign did not create signkey\n") unless -s "$uploaddir/signkey.$$"; mkdir_p("$projectsdir/$projid.pkg"); writestr("$uploaddir/pubkey.$$", undef, $pubkey); my $certfile = updatesslcert($projid, "$uploaddir/pubkey.$$", "$uploaddir/signkey.$$"); BSRevision::addrev_meta_replace($cgi, $projid, undef, [ "$uploaddir/pubkey.$$", "$projectsdir/$projid.pkg/_pubkey", '_pubkey' ], [ "$uploaddir/signkey.$$", "$projectsdir/$projid.pkg/_signkey", '_signkey' ], [ $certfile, undef, '_sslcert' ]); return $BSStdServer::return_ok; } sub extendkey { my ($cgi, $projid) = @_; $cgi->{'comment'} ||= 'extend public key expiry date'; die("don't know how to extend a key\n") unless $BSConfig::sign; die("project does not have a key\n")
unless -s "$projectsdir/$projid.pkg/_pubkey"; die("project does not have a signkey\n") unless -s "$projectsdir/$projid.pkg/_signkey"; my @keyargs = ('800'); my @signargs; push @signargs, '--project', $projid if $BSConfig::sign_project; push @signargs, '-P', "$projectsdir/$projid.pkg/_signkey"; my $pubkey = ''; local *F; open(F, '-|', $BSConfig::sign, @signargs, '-x', @keyargs, "$projectsdir/$projid.pkg/_pubkey") || die("$BSConfig::sign: $!\n"); 1 while sysread(F, $pubkey, 4096, length($pubkey)); close(F) || die("$BSConfig::sign: $?\n"); mkdir_p($uploaddir); writestr("$uploaddir/pubkey.$$", undef, $pubkey); my $certfile = updatesslcert($projid, "$uploaddir/pubkey.$$"); BSRevision::addrev_meta_replace($cgi, $projid, undef, [ "$uploaddir/pubkey.$$", "$projectsdir/$projid.pkg/_pubkey", '_pubkey' ], [ $certfile, undef, '_sslcert' ]); return $BSStdServer::return_ok; } sub deletekey { my ($cgi, $projid) = @_; $cgi->{'comment'} ||= 'delete sign key'; BSConfiguration::check_configuration_once(); if ($BSConfig::forceprojectkeys) { my $pprojid = $projid; $pprojid =~ s/:[^:]*$//; my $sk; ($sk) = getsignkey({}, $pprojid) if $projid ne $pprojid; die("must have a key for signing in this or upper project\n") unless $sk; } BSRevision::addrev_meta_replace($cgi, $projid, undef, [ undef, "$projectsdir/$projid.pkg/_pubkey", '_pubkey' ], [ undef, "$projectsdir/$projid.pkg/_signkey", '_signkey' ], [ undef, undef, '_sslcert' ]); rmdir("$projectsdir/$projid.pkg"); return $BSStdServer::return_ok; } sub getpubkey { my ($cgi, $projid) = @_; my $pubkey; my $proj = BSRevision::readproj_local($projid, 1, $cgi->{'rev'}); $proj = BSSrcServer::Remote::remoteprojid($projid) if !$proj || $proj->{'remoteurl'}; die("404 project '$projid' does not exist\n") unless $proj; if ($proj->{'remoteurl'}) { $pubkey = BSRPC::rpc({'uri' => "$proj->{'remoteurl'}/source/$proj->{'remoteproject'}/_pubkey", 'proxy' => $proxy}, undef); } else { if ($cgi->{'rev'}) { my $rev = BSRevision::getrev_meta($projid, undef, $cgi->{'rev'}); my $files = $rev ? BSRevision::lsrev($rev) : {}; $pubkey = BSRevision::revreadstr($rev, '_pubkey', $files->{'_pubkey'}, 1) if $files->{'_pubkey'}; } else { $pubkey = readstr("$projectsdir/$projid.pkg/_pubkey", 1); } } die("404 $projid: no pubkey available\n") unless $pubkey; return ($pubkey, 'Content-Type: text/plain'); } ######################################################################### sub putproject { my ($cgi, $projid) = @_; mkdir_p($uploaddir); my $uploaded = BSServer::read_file("$uploaddir/$$"); die("upload failed\n") unless $uploaded; my $proj = readxml("$uploaddir/$$", $BSXML::proj); $proj->{'name'} = $projid unless defined $proj->{'name'}; BSVerify::verify_proj($proj, $projid); writexml("$uploaddir/$$.2", undef, $proj, $BSXML::proj); unlink("$uploaddir/$$"); my $oldproj = BSRevision::readproj_local($projid, 1); notify($oldproj ? "SRCSRV_UPDATE_PROJECT" : "SRCSRV_CREATE_PROJECT", { "project" => $projid, "sender" => ($cgi->{'user'} || "unknown") }); mkdir_p($projectsdir) || die("creating $projectsdir: $!\n"); BSRevision::addrev_meta_replace($cgi, $projid, undef, [ "$uploaddir/$$.2", "$projectsdir/$projid.xml", '_meta' ]); BSConfiguration::check_configuration_once(); if ($BSConfig::forceprojectkeys) { my ($sk) = getsignkey({}, $projid); createkey({ %$cgi, 'comment' => 'autocreate key' }, $projid) if $sk eq ''; } my %except = map {$_ => 1} qw{title description person group url attributes}; if (!BSUtil::identical($oldproj, $proj, \%except)) { my $type = ($cgi->{'lowprio'}) ? 
'lowprioproject' : 'project'; if ($proj->{'remoteurl'} || ($oldproj || {})->{'remoteurl'}) { # inform all repserves about a remote project # need to add the event here since notify_all_repservers() doesn't do it my $ev = {'type' => $type, 'project' => $projid}; addevent($ev); notify_all_repservers($type, $projid); } else { notify_repservers($type, $projid); } } $proj = BSRevision::readproj_local($projid); return ($proj, $BSXML::proj); } sub delproject { my ($cgi, $projid) = @_; $cgi->{'comment'} ||= 'project was deleted'; die("404 project '$projid' does not exist\n") unless -e "$projectsdir/$projid.xml"; # add delete commit to both source and meta BSRevision::addrev_local_replace($cgi, $projid, undef); BSRevision::addrev_meta_replace($cgi, $projid, undef); unlink("$projectsdir/$projid.conf"); unlink("$projectsdir/$projid.xml"); notify_repservers('project', $projid); notify("SRCSRV_DELETE_PROJECT", { "project" => $projid, "comment" => $cgi->{'comment'}, "sender" => ($cgi->{'user'} || "unknown"), "requestid" => $cgi->{'requestid'} }); # send reply BSServer::reply($BSStdServer::return_ok); BSServer::done(1); # now delete all packages eval { if (-d "$projectsdir/$projid.pkg") { mkdir_p("$projectsdir/_deleted/$projid.pkg"); # make room in old delete area by deleting all old packages BSRevision::delete_deleted($cgi, $projid); # make sure we have mrev files for all packages for my $f (ls("$projectsdir/$projid.pkg")) { next unless $f =~ /^(.*)\.xml$/; BSRevision::addrev_meta_replace($cgi, $projid, $1) unless -f "$projectsdir/$projid.pkg/$1.mrev"; } # now do the real delete on all of those files for my $f (ls("$projectsdir/$projid.pkg")) { if ($f =~ /^(.*)\.m?rev(?:\.del)?$/) { BSRevision::delete_rev($cgi, $projid, $1, "$projectsdir/$projid.pkg/$f", "$projectsdir/_deleted/$projid.pkg/$f"); } unlink("$projectsdir/$projid.pkg/$f"); } rmdir("$projectsdir/$projid.pkg") || die("rmdir $projectsdir/$projid.pkg: $!\n"); } }; warn($@) if $@; return undef; } sub undeleteproject { my ($cgi, $projid) = @_; die("404 project '$projid' already exists\n") if -e "$projectsdir/$projid.xml"; die("404 project '$projid' is not deleted\n") unless -e "$projectsdir/_deleted/$projid.pkg"; $cgi->{'comment'} ||= 'project was undeleted'; mkdir_p($uploaddir); mkdir_p("$projectsdir/$projid.pkg"); for my $f (ls("$projectsdir/_deleted/$projid.pkg")) { if ($f =~ /\.m?rev\.del$/) { BSUtil::cp("$projectsdir/_deleted/$projid.pkg/$f", "$uploaddir/$$.2", "$projectsdir/$projid.pkg/$f"); } elsif ($f =~ /^(.*)\.(m?rev)$/) { BSRevision::undelete_rev($cgi, $projid, $1, "$projectsdir/_deleted/$projid.pkg/$f", "$projectsdir/$projid.pkg/$f"); } } notify_repservers('project', $projid); notify("SRCSRV_UNDELETE_PROJECT", { "project" => $projid, "comment" => $cgi->{'comment'}, "sender" => ($cgi->{'user'} || "unknown") }); return $BSStdServer::return_ok; } ######################################################################### sub getpackagelist { my ($cgi, $projid, $repoid, $arch) = @_; my $origins = $cgi->{'noorigins'} ? undef : {}; my $proj; $proj = checkprojrepoarch($projid, $repoid, $arch, 1) unless $cgi->{'deleted'}; my @packages = findpackages($projid, $proj, 0, $origins, !$cgi->{'expand'}, $cgi->{'deleted'}); for (@packages) { $_ = {'name' => $_}; $_->{'originproject'} = $origins->{$_->{'name'}} if $origins && $origins->{$_->{'name'}} ne $projid; $_->{'originpackage'} = $1 if $_->{'name'} =~ /^(.*?)(? 
<!^_product)(?<!^_patchinfo):./; } return ({'entry' => \@packages}, $BSXML::dir); } sub getpackage { my ($cgi, $projid, $packid) = @_; if ($cgi->{'meta'} || $cgi->{'view'}) { $cgi->{'meta'} ||= 1; return getfile($cgi, $projid, $packid, '_meta'); } my $proj; $proj = checkprojrepoarch($projid, undef, undef, 1) unless $cgi->{'deleted'}; if ((!$proj || !$proj->{'remoteurl'}) && ($cgi->{'rev'} || $cgi->{'deleted'} || $packid eq '_project')) {
# return the exact file here
# we also do not look at project links
# we return the data as string so that the md5 sum matches
my $rev = BSRevision::getrev_meta($projid, $packid, $cgi->{'rev'}, $cgi->{'deleted'}); my $files = BSRevision::lsrev($rev); die("404 _meta: no such file\n") unless $files->{'_meta'}; my $meta = BSRevision::revreadstr($rev, '_meta', $files->{'_meta'}); return ($meta); } my $pack = readpackage($projid, $proj, $packid, $cgi->{'rev'}); return ($pack, $BSXML::pack); } sub putpackage { my ($cgi, $projid, $packid) = @_; mkdir_p($uploaddir); my $uploaded = BSServer::read_file("$uploaddir/$$"); die("upload failed\n") unless $uploaded; my $pack = readxml("$uploaddir/$$", $BSXML::pack); $pack->{'name'} = $packid unless defined $pack->{'name'}; BSVerify::verify_pack($pack, $packid); writexml("$uploaddir/$$.2", undef, $pack, $BSXML::pack); unlink("$uploaddir/$$"); my $proj = BSRevision::readproj_local($projid); die("$projid is a remote project\n") if $proj->{'remoteurl'}; if ($packid eq '_product' && ! -e "$projectsdir/$projid.pkg/$packid.xml") {
# creating a _product package, make sure that there is no _product:xxx package
my @pkgs = BSRevision::lspackages_local($projid); die("cannot create '$packid' if _product:* packages already exist\n") if grep {/^_product:/} @pkgs; } if (($packid =~ /^_product:/) && ! -e "$projectsdir/$projid.pkg/$packid.xml") { die("403 cannot create '$packid' if a '_product' package exists\n") if -e "$projectsdir/$projid.pkg/_product.xml"; } mkdir_p("$projectsdir/$projid.pkg"); my $oldpack = BSRevision::readpack_local($projid, $packid, 1); notify($oldpack ?
"SRCSRV_UPDATE_PACKAGE" : "SRCSRV_CREATE_PACKAGE", { "project" => $projid, "package" => $packid, "sender" => ($cgi->{'user'} || "unknown")}); BSRevision::addrev_meta_replace($cgi, $projid, $packid, [ "$uploaddir/$$.2", "$projectsdir/$projid.pkg/$packid.xml", '_meta' ]); my %except = map {$_ => 1} qw{title description devel person group url}; if (!BSUtil::identical($oldpack, $pack, \%except)) { notify_repservers('package', $projid, $packid); } $pack = BSRevision::readpack_local($projid, $packid); return ($pack, $BSXML::pack); } sub delpackage { my ($cgi, $projid, $packid) = @_; $cgi->{'comment'} ||= 'package was deleted'; die("404 project '$projid' does not exist\n") unless -e "$projectsdir/$projid.xml"; die("404 package '$packid' does not exist in project '$projid'\n") unless -e "$projectsdir/$projid.pkg/$packid.xml"; if ($packid =~ /^_product:/) { die("403 cannot delete '$packid' if a '_product' package exists\n") if -e "$projectsdir/$projid.pkg/_product.xml"; } # kill upload revision unlink("$projectsdir/$projid.pkg/$packid.upload-MD5SUMS"); # add delete commit to both source and meta BSRevision::addrev_local_replace($cgi, $projid, $packid); BSRevision::addrev_meta_replace($cgi, $projid, $packid); # now do the real delete of the package BSRevision::delete_rev($cgi, $projid, $packid, "$projectsdir/$projid.pkg/$packid.rev", "$projectsdir/$projid.pkg/$packid.rev.del"); BSRevision::delete_rev($cgi, $projid, $packid, "$projectsdir/$projid.pkg/$packid.mrev", "$projectsdir/$projid.pkg/$packid.mrev.del"); # get rid of the generated product packages as well if ($packid eq '_product') { BSSrcServer::Product::expandproduct($projid, $packid, undef, undef); } notify_repservers('package', $projid, $packid); notify("SRCSRV_DELETE_PACKAGE", { "project" => $projid, "package" => $packid, "sender" => ($cgi->{'user'} || "unknown"), "comment" => $cgi->{'comment'}, "requestid" => $cgi->{'requestid'} }); return $BSStdServer::return_ok; } sub undeletepackage { my ($cgi, $projid, $packid) = @_; $cgi->{'comment'} ||= 'package was undeleted'; die("404 project '$projid' does not exist\n") unless -e "$projectsdir/$projid.xml"; die("403 package '$packid' already exists\n") if -e "$projectsdir/$projid.pkg/$packid.xml"; die("403 package '$packid' was not deleted\n") unless -e "$projectsdir/$projid.pkg/$packid.rev.del"; BSRevision::undelete_rev($cgi, $projid, $packid, "$projectsdir/$projid.pkg/$packid.mrev.del", "$projectsdir/$projid.pkg/$packid.mrev"); if (-s "$projectsdir/$projid.pkg/$packid.rev.del") { BSRevision::undelete_rev($cgi, $projid, $packid, "$projectsdir/$projid.pkg/$packid.rev.del", "$projectsdir/$projid.pkg/$packid.rev"); } if ($packid eq '_product') { my $rev = BSRevision::getrev_local($projid, $packid); if ($rev) { my $files = BSRevision::lsrev($rev); BSSrcServer::Product::expandproduct($projid, $packid, $rev, $files, $rev->{'user'}); } } notify_repservers('package', $projid, $packid); notify("SRCSRV_UNDELETE_PACKAGE", { "project" => $projid, "package" => $packid, "sender" => ($cgi->{'user'} || "unknown"), "comment" => $cgi->{'comment'} }); return $BSStdServer::return_ok; } sub getpackagehistory { my ($cgi, $projid, $packid) = @_; my @res; my $revfile; $packid = '_project' unless defined $packid; if (!$cgi->{'deleted'}) { my $proj = checkprojrepoarch($projid, undef, undef, 1); if ($proj->{'remoteurl'}) { my @args = BSRPC::args($cgi, 'rev', 'meta', 'limit'); my $h = BSRPC::rpc({'uri' => "$proj->{'remoteurl'}/source/$proj->{'remoteproject'}/$packid/_history", 'proxy' => $proxy}, $BSXML::revisionlist, 
@args); return ($h, $BSXML::revisionlist); } if ($packid ne '_project' && ! -e "$projectsdir/$projid.pkg/$packid.xml") { # check project links die("404 package '$packid' does not exist\n") unless $proj->{'link'}; $cgi->{'_checked'} ||= {}; $cgi->{'_checked'}->{$projid} = 1; for my $lprojid (map {$_->{'project'}} @{$proj->{'link'}}) { next if $cgi->{'_checked'}->{$lprojid}; $cgi->{'_checked'}->{$lprojid} = 1; my $h; eval { $h = (getpackagehistory($cgi, $lprojid, $packid))[0]; }; die($@) if $@ && $@ !~ /^404/; return ($h, $BSXML::revisionlist) if $h; } die("404 package '$packid' does not exist\n"); } } $revfile = "$projectsdir/$projid.pkg"; $revfile = "$projectsdir/_deleted/$projid.pkg" if $packid eq '_project' && $cgi->{'deleted'}; $revfile .= $cgi->{'meta'} ? "/$packid.mrev" : "/$packid.rev"; if ($packid ne '_project' && $cgi->{'deleted'}) { $revfile .= '.del'; if (! -e $revfile && ! -e "$projectsdir/$projid.xml" && -e "$projectsdir/_deleted/$projid.pkg") { $revfile = "$projectsdir/_deleted/$projid.pkg/$packid.mrev"; } } my $filter; if ($cgi->{'rev'}) { $filter = sub { return $cgi->{'rev'} eq $_[0]->{'rev'} || $cgi->{'rev'} eq $_[0]->{'srcmd5'} ? 1 : 0 }; } for (BSFileDB::fdb_getall_reverse($revfile, $srcrevlay, $cgi->{'limit'}, $filter)) { $_->{'comment'} = str2utf8xml($_->{'comment'}) if $_->{'comment'}; unshift @res, $_; } return ({'revision' => \@res}, $BSXML::revisionlist); } ########################################################################## sub expandsearchpath { my ($projid, $repoid, $remotemap, $base) = @_; my %done; my @ret; my @path = {project => $projid, repository => $repoid}; while (@path) { my $t = shift @path; my $prp = "$t->{'project'}/$t->{'repository'}"; push @ret, $prp unless $done{$prp}; $done{$prp} = 1; if (!@path) { ($base->{'project'}, $base->{'repository'}) = ($t->{'project'}, $t->{'repository'}) if $base; last if $done{"/$prp"}; my ($pid, $tid) = ($t->{'project'}, $t->{'repository'}); my $proj = BSRevision::readproj_local($pid, 1); if (!$proj || $proj->{'remoteurl'}) { undef $proj; $proj = $remotemap->{$pid} if $remotemap && $remotemap->{$pid}; if (!$proj || $proj->{'proto'}) { $proj = BSSrcServer::Remote::remoteprojid($pid); $proj = BSSrcServer::Remote::fetchremoteproj($proj, $pid, $remotemap); die("404 project '$pid' does not exist\n") unless $proj; } } BSSrcServer::Partition::checkpartition($remotemap, $pid, $proj) if $remotemap && $remotemap->{':partitions'} && !$remotemap->{':partitions'}->{$pid}; $done{"/$prp"} = 1; # mark expanded my @repo = grep {$_->{'name'} eq $tid} @{$proj->{'repository'} || []}; push @path, @{$repo[0]->{'path'}} if @repo && $repo[0]->{'path'}; } elsif ($remotemap) { my $pid = $t->{'project'}; my $proj = BSRevision::readproj_local($pid, 1); if ((!$proj || $proj->{'remoteurl'}) && !$remotemap->{$pid}) { my $r = BSSrcServer::Remote::remoteprojid($pid); $remotemap->{$pid} = {%$r, 'proto' => 1} if $r; } BSSrcServer::Partition::checkpartition($remotemap, $pid, $proj) if $remotemap && $remotemap->{':partitions'} && !$remotemap->{':partitions'}->{$pid}; } } return @ret; } sub concatconfigs { my ($projid, $repoid, $remotemap, @path) = @_; my $config = "%define _project $projid\n"; my $macros = ''; #$macros .= "%vendor Open Build Service\n"; # find the sign project, this is what we use as vendor my $vprojid = $projid; while ($vprojid ne '') { last if -s "$projectsdir/$vprojid.pkg/_signkey"; $vprojid =~ s/[^:]*$//; $vprojid =~ s/:$//; } $vprojid = $projid if $vprojid eq ''; my $obsname = $BSConfig::obsname || 'build.opensuse.org'; $macros 
.= "%vendor obs://$obsname/$vprojid\n"; $macros .= "%_download_url $BSConfig::repodownload\n" if $BSConfig::repodownload; $macros .= "%_project $projid\n"; my $lastr = ''; my $distinfo = "$projid / $repoid"; if ($repoid eq 'standard') { $distinfo = $projid; } for my $prp (reverse @path) { if ($prp eq "$projid/$repoid") { $macros .= "\n%distribution $distinfo\n"; $macros .= "%_project $projid\n"; } my ($p, $r) = split('/', $prp, 2); my $c; if (-s "$projectsdir/$p.conf") { $c = readstr("$projectsdir/$p.conf"); } elsif (!-e "$projectsdir/$p.xml") { my $proj = BSSrcServer::Remote::remoteprojid($p); $c = BSSrcServer::Remote::fetchremoteconfig($proj, $p, $remotemap); } if ($remotemap && $remotemap->{':partitions'}) { BSSrcServer::Partition::checkpartition($remotemap, $p) if !$remotemap->{':partitions'}->{$p}; $remotemap->{$p}->{'config'} = defined($c) ? $c : '' if ($remotemap->{$p} || {})->{'partition'}; } next unless defined $c; $config .= "\n### from $p\n"; $config .= "%define _repository $r\n"; if ($c =~ /^\s*:macros\s*$/im) { # probably some multiple macro sections with %if statements # flush out macros $macros .= "\n### from $p\n"; $macros .= "\n%_repository $r\n"; $config .= "\nMacros:\n$macros:Macros\n\n"; $macros = ''; $lastr = $r; my $s1 = '\A(.*^\s*:macros\s*$)(.*?)\Z'; # should always match if ($c =~ /$s1/msi) { $config .= $1; $c = $2; } else { $config .= $c; $c = ''; } } if ($c =~ /^(.*\n)?\s*macros:[^\n]*\n(.*)/si) { # has single macro section at end. cumulate $c = defined($1) ? $1 : ''; $macros .= "\n### from $p\n"; $macros .= "%_repository $r\n"; $macros .= $2; $lastr = $r; } $config .= $c; } if ($lastr ne $repoid) { $macros .= "\n### from $projid\n"; $macros .= "%_repository $repoid\n"; } if (!@path || $path[0] ne "$projid/$repoid") { $macros .= "\n%distribution $distinfo\n"; $macros .= "%_project $projid\n"; } if ($BSConfig::extramacros) { for (sort keys %{$BSConfig::extramacros}) { $macros .= $BSConfig::extramacros->{$_} if $projid =~ /$_/; } } if ($BSConfig::extraconfig) { my $extraconfig = ''; for (sort keys %{$BSConfig::extraconfig}) { $extraconfig .= $BSConfig::extraconfig->{$_} if $projid =~ /$_/; } $config .= "\n$extraconfig" if $extraconfig; } $config .= "\nMacros:\n$macros" if $macros ne ''; return $config; } sub getbuildconfig { my ($cgi, $projid, $repoid) = @_; my @path; if ($cgi->{'path'}) { @path = @{$cgi->{'path'}}; } else { @path = expandsearchpath($projid, $repoid); } my $config = concatconfigs($projid, $repoid, undef, @path); return ($config, 'Content-Type: text/plain'); } sub getprojectconfig { my ($cgi, $projid) = @_; my $proj = checkprojrepoarch($projid, undef, undef, 1); if ($proj->{'remoteurl'}) { my $config = BSRPC::rpc({'uri' => "$proj->{'remoteurl'}/source/$proj->{'remoteproject'}/_config", 'proxy' => $proxy}, undef); return ($config, 'Content-Type: text/plain'); } my $config; if ($cgi->{'rev'}) { my $rev = getrev($projid, '_project', $cgi->{'rev'}); my $files = $rev ? BSRevision::lsrev($rev) : {}; $config = BSRevision::revreadstr($rev, '_config', $files->{'_config'}, 1) if $files->{'_config'}; } else { $config = readstr("$projectsdir/$projid.conf", 1); } $config = '' unless defined $config; return ($config, 'Content-Type: text/plain'); } sub putprojectconfig { my ($cgi, $projid) = @_; my $proj = BSRevision::readproj_local($projid); mkdir_p($uploaddir); my $uploadfile = "$uploaddir/$$"; die("upload failed\n") unless BSServer::read_file($uploadfile); if (! 
-s $uploadfile) { unlink($uploadfile); $uploadfile = undef; } BSRevision::addrev_local_replace($cgi, $projid, undef, [ $uploadfile, "$projectsdir/$projid.conf", '_config' ]); notify_repservers('project', $projid); notify("SRCSRV_UPDATE_PROJECT_CONFIG", { "project" => $projid, "sender" => ($cgi->{'user'} || "unknown") }); return $BSStdServer::return_ok; } sub delprojectconfig { my ($cgi, $projid) = @_; BSRevision::addrev_local_replace($cgi, $projid, undef, [ undef, "$projectsdir/$projid.conf", '_config' ]); notify_repservers('project', $projid); notify("SRCSRV_UPDATE_PROJECT_CONFIG", { "project" => $projid, "sender" => ($cgi->{'user'} || "unknown") }); return $BSStdServer::return_ok; }
##########################################################################
sub getsources { my ($cgi, $projid, $packid, $srcmd5) = @_; $packid =~ s/(?<!^_product)(?<!^_patchinfo):.*//; my $rev = {'project' => $projid, 'package' => $packid, 'srcmd5' => $srcmd5}; my $files = BSRevision::lsrev($rev); my @files = map {BSRevision::revcpiofile($rev, $_, $files->{$_})} sort keys %$files; BSServer::reply_cpio(\@files); return undef; } my %getfilelist_ajax_inprogress; sub getfilelist_ajax { my ($cgi, $projid, $packid) = @_; my $srcmd5 = $cgi->{'rev'}; if ($cgi->{'view'} eq 'cpio') { my $rev = {'project' => $projid, 'package' => $packid, 'srcmd5' => $srcmd5}; my $files = BSRevision::lsrev($rev); my @files = map {BSRevision::revcpiofile($rev, $_, $files->{$_})} sort keys %$files; BSWatcher::reply_cpio(\@files); return undef; } if ($cgi->{'view'} eq 'notify') { my $jev = $BSServerEvents::gev; my $filelist = $jev->{'filelist'}; my $idstring = "$projid/$packid/$srcmd5"; if (!$filelist) { return $BSStdServer::return_ok if $getfilelist_ajax_inprogress{$idstring}; $filelist = BSSrcServer::Remote::remote_getrev_getfilelist($projid, $packid, $srcmd5); return undef unless defined $filelist;
# ok, got the filelist, we're the master. get the missing files in the background.
$jev = BSWatcher::background($BSStdServer::return_ok); $getfilelist_ajax_inprogress{$idstring} = $jev; $jev->{'filelist'} = $filelist; $jev->{'idstring'} = $idstring; $jev->{'handler'} = sub {delete $getfilelist_ajax_inprogress{$idstring}}; }
# here we're the backgrounded master. get missing files and notify
my $ret; eval { $ret = BSSrcServer::Remote::remote_getrev_getfiles($projid, $packid, $srcmd5, $filelist); }; if ($@) { # hmm, what to do here? for now we just notify. this will lead to a retry loop, though.
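# Sketch of the retry loop mentioned above (hypothetical request flow): the
# notified repservers re-request the package sources, the scheduler asks this
# source server again, and the AJAX server re-enters this handler, e.g. with
# something like GET /source/<projid>/<packid>?view=notify&rev=<srcmd5>,
# until the remote instance finally delivers the missing files.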
notify_all_repservers('package', $projid, $packid); die($@); } return undef unless defined $ret; delete $getfilelist_ajax_inprogress{$idstring}; notify_all_repservers('package', $projid, $packid); return $ret; } die("unknown view '$cgi->{'view'}'\n"); } sub getproductrepositories { my ($xml) = @_; my @res; for my $product (@{$xml->{'products'}->{'product'}}) { my @pr; for my $repo (@{$product->{'register'}->{'updates'}->{'repository'}}) { my ($path) = published_path({}, $repo->{'project'}, $repo->{'name'}); $path = { 'path' => $path->{'path'}, 'update' => undef }; $path->{'arch'} = $repo->{'arch'} if $repo->{'arch'}; $path->{'zypp'} = $repo->{'zypp'} if $repo->{'zypp'}; $path->{'debug'} = undef if $repo->{'name'} =~ m/_debug$/; push @pr, $path; } for my $repo (@{$product->{'register'}->{'pool'}->{'repository'}}) { die("getproductrepositories: path AND url is set!\n") if defined ($repo->{'project'}) && defined($repo->{'url'}); my $path; if (defined($repo->{'url'})) { $path = { 'url' => $repo->{'url'} }; } else { ($path) = published_path({ 'medium' => $repo->{'medium'} }, $repo->{'project'}, $repo->{'name'}); $path = { 'path' => $path->{'path'} }; } $path->{'arch'} = $repo->{'arch'} if $repo->{'arch'}; $path->{'zypp'} = $repo->{'zypp'} if $repo->{'zypp'}; $path->{'debug'} = undef if $repo->{'medium'} =~ m/_debug$/; push @pr, $path; } my $prod = { 'name' => $product->{'name'}, 'repository' => \@pr }; $prod->{'distrotarget'} = $product->{'register'}->{'updates'}->{'distrotarget'} if $product->{'register'}->{'updates'}->{'distrotarget'}; push @res, $prod; } return @res; } sub getfilelist { my ($cgi, $projid, $packid) = @_; my $view = $cgi->{'view'}; my $rev; my $linked; $linked = [] if $cgi->{'withlinked'}; my $islatestrev; if ($cgi->{'meta'}) { $rev = BSRevision::getrev_meta($projid, $packid, $cgi->{'rev'}, $cgi->{'deleted'}); } elsif ($cgi->{'deleted'}) { $rev = BSRevision::getrev_local($projid, $packid, $cgi->{'rev'}, $cgi->{'deleted'}); } else { $rev = getrev($projid, $packid, defined($cgi->{'rev'}) ? $cgi->{'rev'} : 'upload', $linked); $islatestrev = 1 if !defined($cgi->{'rev'}) && $rev->{'srcmd5'} && $rev->{'srcmd5'} ne 'pattern' && $rev->{'srcmd5'} ne 'upload'; } my $li = {}; my $files = BSRevision::lsrev($rev, $li); # show state of current source service run, if defined my $serviceinfo; if ($li->{'xservicemd5'} || $li->{'lservicemd5'}) { # new style $serviceinfo = {}; $serviceinfo->{'lsrcmd5'} = $li->{'lservicemd5'} if $li->{'lservicemd5'}; if ($li->{'xservicemd5'}) { if ($cgi->{'expand'}) { $serviceinfo->{'lsrcmd5'} = $rev->{'srcmd5'}; $files = BSSrcServer::Service::handleservice($rev, $files, $li->{'xservicemd5'}); $serviceinfo->{'code'} = 'succeeded'; # otherwise it already died... 
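# Note: with 'expand' set, handleservice() above either succeeds or dies, so
# reaching this point implies success. The unexpanded branch below only
# probes the service state inside an eval and maps 'service in progress' to
# code=running and any other error to code=failed, without aborting the
# whole file listing.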
} else { eval { BSSrcServer::Service::handleservice({ %$rev }, $files, $li->{'xservicemd5'}) }; my $error = $@; chomp $error if $error; if (!$error) { $serviceinfo->{'code'} = 'succeeded'; $serviceinfo->{'xsrcmd5'} = $li->{'xservicemd5'}; } elsif ($error eq 'service in progress') { $serviceinfo->{'code'} = 'running'; } else { $serviceinfo->{'code'} = 'failed'; $serviceinfo->{'xsrcmd5'} = $li->{'xservicemd5'}; $serviceinfo->{'error'} = $error; } } } delete $li->{'xservicemd5'}; delete $li->{'lservicemd5'}; } elsif ($files->{'_service'} && $packid ne '_project' && !$cgi->{'meta'} && !defined($cgi->{'rev'})) { # check error/in progress $serviceinfo = {}; my $lockfile = "$eventdir/service/${projid}::$packid"; if (-e $lockfile) { $serviceinfo->{'code'} = 'running'; } elsif ($files->{'_service_error'}) { $serviceinfo->{'code'} = 'failed'; $serviceinfo->{'error'} = BSRevision::revreadstr($rev, '_service_error', $files->{'_service_error'}); } else { $serviceinfo->{'code'} = 'succeeded'; } } elsif ($files->{'_service_error'}) { $serviceinfo = {'code' => 'failed'}; $serviceinfo->{'error'} = BSRevision::revreadstr($rev, '_service_error', $files->{'_service_error'}); } undef $islatestrev if $serviceinfo && !$cgi->{'expand'}; if ($files->{'_link'}) { if ($cgi->{'emptylink'}) { my $l = BSRevision::revreadxml($rev, '_link', $files->{'_link'}, $BSXML::link); delete $l->{'patches'}; mkdir_p($uploaddir); writexml("$uploaddir/$$", undef, $l, $BSXML::link); $files = {}; $files->{'_link'} = BSSrcrep::addfile($projid, $packid, "$uploaddir/$$", '_link'); $rev = addrev({}, $projid, $packid, $files, ''); undef $islatestrev; } my %lrev = %$rev; $lrev{'linkrev'} = $cgi->{'linkrev'} if $cgi->{'linkrev'}; $li->{'linked'} = $linked if $linked; my $lfiles = BSSrcServer::Link::handlelinks(\%lrev, $files, $li); BSSrcServer::Multibuild::updatemultibuild($projid, $packid, $lfiles) if $islatestrev && !$cgi->{'linkrev'} && ref($lfiles); if ($cgi->{'expand'}) { if (!ref($lfiles)) { if ($cgi->{'withlinked'} && !$view) { my $ret = {}; $ret->{'name'} = $packid; $ret->{'error'} = $lfiles || 'internal error'; $ret->{'linkinfo'} = $li; return ($ret, $BSXML::dir); } die("$lfiles\n"); } $files = $lfiles; %$rev = %lrev; $rev->{'rev'} = $rev->{'srcmd5'}; } else { if (ref $lfiles) { $li->{'xsrcmd5'} = $lrev{'srcmd5'}; } else { # link is broken $li->{'error'} = $lfiles; # set xsrcmd5 if we have a link error file $li->{'xsrcmd5'} = $lrev{'srcmd5'} if $lrev{'srcmd5'} && BSSrcrep::havelinkerror($lrev{'project'}, $lrev{'package'}, $lrev{'srcmd5'}); if ($cgi->{'lastworking'}) { my $lastworking = BSSrcServer::Link::findlastworkinglink($rev); $li->{'lastworking'} = $lastworking if $lastworking; } } } } else { BSSrcServer::Multibuild::updatemultibuild($projid, $packid, $files) if $islatestrev; } if ($islatestrev && $rev->{'originpackage'}) { my $mb = BSSrcServer::Multibuild::getmultibuild($projid, $rev->{'package'}) || {}; die("404 package '$packid' does not exist\n") unless grep {"$rev->{'package'}:$_" eq $packid} @{$mb->{'flavor'} || $mb->{'package'} || []}; } if ($cgi->{'extension'}) { for (keys %$files) { delete $files->{$_} unless /\.\Q$cgi->{'extension'}\E$/; } } if ($view && $view eq 'cpio') { if (!$cgi->{'extension'} && $rev->{'srcmd5'} && $rev->{'srcmd5'} ne 'upload' && $rev->{'srcmd5'} ne 'pattern' && $rev->{'srcmd5'} ne 'empty' && $rev->{'srcmd5'} ne $BSSrcrep::emptysrcmd5) { # hack: we identify remote source downloads by looking at the user agent my $useragent = $BSServer::request->{'headers'}->{'user-agent'} || ''; if ($useragent =~ 
/BSRPC/) { BSHandoff::handoff("/source/$projid/$rev->{'package'}", undef, "rev=$rev->{'srcmd5'}", 'view=cpio'); } } my @files = map {BSRevision::revcpiofile($rev, $_, $files->{$_})} sort keys %$files; BSServer::reply_cpio(\@files); return undef; } if ($view && ($view eq 'products' || $view eq 'productrepositories') ) { my @res; my $reader = sub { return BSRevision::revreadstr($rev, $_[0], $files->{$_[0]}) }; for my $filename (sort keys %$files) { next unless $filename =~ /\.product$/s; next if $cgi->{'product'} && $filename ne "$cgi->{'product'}.product"; my $xml = BSProductXML::readproductxml([$reader, $filename], 1); die("400 Unable to parse $filename\n") unless $xml; push @res, $xml; } if ($view eq 'productrepositories') { @res = map {getproductrepositories($_)} @res; return ({"product" => \@res}, $BSProductXML::productlistrepositories); } return ({'productdefinition' => \@res}, $BSProductXML::products); } my $ret = {}; $ret->{'name'} = $packid; $ret->{'srcmd5'} = $rev->{'srcmd5'} if $rev->{'srcmd5'} ne 'empty'; $ret->{'rev'} = $rev->{'rev'} if exists $rev->{'rev'}; $ret->{'vrev'} = $rev->{'vrev'} if exists $rev->{'vrev'}; $ret->{'serviceinfo'} = $serviceinfo if $serviceinfo; my @res; for my $filename (sort keys %$files) { my @s = BSRevision::revstat($rev, $filename, $files->{$filename}); if (@s) { push @res, {'name' => $filename, 'md5' => $files->{$filename}, 'size' => $s[7], 'mtime' => $s[9]}; } else { push @res, {'name' => $filename, 'md5' => $files->{$filename}, 'error' => "$!"}; } } if (%$li) { BSSrcServer::Link::linkinfo_addtarget($rev, $li); $ret->{'linkinfo'} = $li; } # fake linkinfo element for project links. see comment in remote_getrev if ($linked && @$linked && !$ret->{'linkinfo'}) { $li->{'linked'} = $linked; $ret->{'linkinfo'} = $li; } $ret->{'entry'} = \@res; return ($ret, $BSXML::dir); } sub getfile { my ($cgi, $projid, $packid, $filename) = @_; die("no filename\n") unless defined($filename) && $filename ne ''; die("bad filename\n") if $filename =~ /\// || $filename =~ /^\./; my $rev; if ($cgi->{'meta'}) { $rev = BSRevision::getrev_meta($projid, $packid, $cgi->{'rev'}, $cgi->{'deleted'}); } elsif ($cgi->{'deleted'}) { $rev = BSRevision::getrev_local($projid, $packid, $cgi->{'rev'}, $cgi->{'deleted'}); } else { $rev = getrev($projid, $packid, defined($cgi->{'rev'}) ? $cgi->{'rev'} : 'upload'); } my $view = $cgi->{'view'} || ''; if ($view eq 'blame') { die("cannot blame deleted packages\n") if $cgi->{'deleted'}; my $b = BSSrcServer::Blame::blame($rev, $filename, $cgi->{'expand'}, $cgi->{'meta'}); my $ret = ''; my %links; my $linkno = 1; $links{"$projid/$packid"} = 0; for my $rev (@$b) { next unless ref($rev); next if exists $links{"$rev->{'project'}/$rev->{'package'}"}; $ret .= sprintf("%7s ", "$linkno:")."$rev->{'project'}/$rev->{'package'}\n"; $links{"$rev->{'project'}/$rev->{'package'}"} = $linkno++; } $ret .= "\n" if $linkno > 1; my $li = 1; while (@$b) { my ($rev, $line) = splice(@$b, 0, 2); if ($linkno > 1) { my $lno = $links{"$rev->{'project'}/$rev->{'package'}"}; $lno = $lno ? 
"$lno:" : ''; $ret .= sprintf("%7s ", "$lno$rev->{'rev'}"); } else { $ret .= sprintf("%4d ", $rev->{'rev'}); } my $user = sprintf("%-12s", $rev->{'user'}); my $date = BSUtil::isotime($rev->{'time'}); $ret .= "($user $date ".sprintf("%5d", $li++).") $line\n"; } return $ret; } elsif ($view) { die("unknown view '$view'\n"); } my $files; if ($cgi->{'expand'}) { $files = lsrev_expanded($rev); } else { $files = BSRevision::lsrev($rev); } die("404 $filename: no such file\n") unless $files->{$filename}; my @s = BSRevision::revstat($rev, $filename, $files->{$filename}); die("$projid/$packid/$files->{$filename}-$filename: $!\n") unless @s; if (!$BSStdServer::isajax && $rev->{'srcmd5'} && $rev->{'srcmd5'} ne 'upload' && $rev->{'srcmd5'} ne 'pattern' && $rev->{'srcmd5'} ne 'empty' && $rev->{'srcmd5'} ne $BSSrcrep::emptysrcmd5) { # hack: we identify remote source downloads by looking at the user agent my $useragent = $BSServer::request->{'headers'}->{'user-agent'} || ''; if ($useragent =~ /BSRPC/) { BSHandoff::handoff("/source/$projid/$packid/$filename", undef, "rev=$rev->{'srcmd5'}"); } } my $fd = gensym; BSRevision::revopen($rev, $filename, $files->{$filename}, $fd) || die("$projid/$packid/$files->{$filename}-$filename: $!\n"); BSWatcher::reply_file($fd); return undef; } sub putfile { my ($cgi, $projid, $packid, $filename) = @_; die("no filename\n") unless defined($filename) && $filename ne ''; die("bad filename\n") if $filename =~ /\// || $filename =~ /^\./; mkdir_p($uploaddir); my $uploaded = BSServer::read_file("$uploaddir/$$", 'withmd5' => 1); die("upload failed\n") unless $uploaded; if ($cgi->{'meta'}) { if ($filename eq '_attribute') { my $attribs = readxml("$uploaddir/$$", $BSXML::attributes); BSVerify::verify_attributes($attribs); writexml("$uploaddir/$$", undef, $attribs, $BSXML::attributes); } elsif ($filename eq '_frozenlinks') { my $frozenx = readxml("$uploaddir/$$", $BSXML::frozenlinks); BSVerify::verify_frozenlinks($frozenx); writexml("$uploaddir/$$", undef, $frozenx, $BSXML::frozenlinks); } else { die("unsupported meta operation\n"); } my $rev = BSRevision::addrev_meta_replace($cgi, $projid, $packid, [ "$uploaddir/$$", undef, $filename ]); notify_repservers('package', $projid) if $cgi->{'meta'} && $filename eq '_frozenlinks'; delete $rev->{'project'}; delete $rev->{'package'}; return ($rev, $BSXML::revision); } my $rev = getrev($projid, $packid, defined($cgi->{'rev'}) ? $cgi->{'rev'} : 'upload'); die("file '$filename' is read-only\n") if ($filename =~ /^_service:/) && !$cgi->{'force'}; BSSrcrep::addfile($projid, $packid, "$uploaddir/$$", $filename, $uploaded->{'md5'}); # create new meta file my $files; if ($cgi->{'keeplink'}) { $files = lsrev_expanded($rev); } else { $files = BSRevision::lsrev($rev); } $files->{$filename} = $uploaded->{'md5'}; $files = BSSrcServer::Link::keeplink($cgi, $projid, $packid, $files) if $cgi->{'keeplink'}; $rev = addrev($cgi, $projid, $packid, $files, $cgi->{'rev'}); BSSrcServer::Service::runservice($cgi, $rev, $files); delete $rev->{'project'}; delete $rev->{'package'}; return ($rev, $BSXML::revision); } sub getsourcediffcache { my ($cgi, $cacheid) = @_; my $view = $cgi->{'view'} || ''; my $cn = "$diffcache/".substr($cacheid, 0, 2)."/$cacheid"; BSWatcher::addfilewatcher($cn) if $BSStdServer::isajax; my $lockc = BSUtil::lockcheck('>>', "$cn.run"); my $fd = gensym; if (open($fd, '<', $cn)) { unlink("$cn.run"); utime(time, time, $cn); BSWatcher::reply_file($fd, $view eq 'xml' ? 
'Content-Type: text/xml' : 'Content-Type: text/plain'); return undef; } return undef if $BSStdServer::isajax && !$lockc; die("cache entry '$cacheid' does not exist\n"); } sub sourcediff { my ($cgi, $projid, $packid) = @_; BSVerify::verify_linkrev($cgi->{'olinkrev'}) if defined($cgi->{'olinkrev'}) && $cgi->{'olinkrev'} ne 'linkrev'; my $oprojid = exists($cgi->{'oproject'}) ? $cgi->{'oproject'} : $projid; my $opackid = exists($cgi->{'opackage'}) ? $cgi->{'opackage'} : $packid; my $fmax = 200; my $tmax = 16000; $fmax = $cgi->{'filelimit'} if defined $cgi->{'filelimit'}; $tmax = $cgi->{'tarlimit'} if defined $cgi->{'tarlimit'}; undef $fmax unless $fmax; undef $tmax unless $tmax; my $have0rev = (defined($cgi->{'rev'}) && $cgi->{'rev'} eq '0') || (defined($cgi->{'orev'}) && $cgi->{'orev'} eq '0'); my $rev; if ($cgi->{'meta'}) { $rev = BSRevision::getrev_meta($projid, $packid, $cgi->{'rev'}); } else { $rev = getrev($projid, $packid, defined($cgi->{'rev'}) ? $cgi->{'rev'} : 'upload', undef, $cgi->{'missingok'}); } my $linkinfo = {}; my $files = BSRevision::lsrev($rev, $linkinfo); $files = BSSrcServer::Service::handleservice($rev, $files, $linkinfo->{'xservicemd5'}) if $cgi->{'expand'} && $linkinfo->{'xservicemd5'}; my $orev = $cgi->{'orev'}; if (!defined($cgi->{'oproject'}) && !defined($cgi->{'opackage'}) && !defined($cgi->{'orev'}) && $rev->{'rev'}) { die("revision is not a simple commit\n") unless $rev->{'rev'} =~ /^\d+$/s; $orev = $rev->{'rev'} - 1; $have0rev = 1 if $orev == 0; $cgi->{'olinkrev'} = 'linkrev' if !defined($cgi->{'olinkrev'}); } if ($cgi->{'meta'}) { $orev = BSRevision::getrev_meta($oprojid, $opackid, $orev); } else { $orev = getrev($oprojid, $opackid, defined($orev) ? $orev : 'latest', undef, $cgi->{'missingok'}); } my $olinkinfo = {}; my $ofiles = BSRevision::lsrev($orev, $olinkinfo); $ofiles = BSSrcServer::Service::handleservice($orev, $ofiles, $olinkinfo->{'xservicemd5'}) if $cgi->{'expand'} && $olinkinfo->{'xservicemd5'}; if ($cgi->{'expand'} || (!$have0rev && $files->{'_link'} && !$ofiles->{'_link'}) || (!$have0rev && $ofiles->{'_link'} && !$files->{'_link'})) { # expand links if ($files->{'_link'}) { $rev->{'linkrev'} = $cgi->{'linkrev'} if $cgi->{'linkrev'}; my %li; my $l = BSRevision::revreadxml($rev, '_link', $files->{'_link'}, $BSXML::link, 1); if ($l) { $l->{'project'} = $rev->{'project'} unless defined $l->{'project'}; $l->{'package'} = $rev->{'package'} unless defined $l->{'package'}; } $files = BSSrcServer::Link::handlelinks($rev, $files, \%li); die("bad link: $files\n") unless ref $files; # some nasty magic to improve diff usability if ($l && $cgi->{'linkrev'} && $l->{'project'} eq $oprojid && $l->{'package'} eq $opackid && !$l->{'rev'} && !$cgi->{'orev'}) { # we're diffing against the link target. 
# As the user specified a baserev, we should use it
# instead of the latest source
$orev = getrev($oprojid, $opackid, $li{'srcmd5'}); $ofiles = BSRevision::lsrev($orev); }
# olinkrev=linkrev: reuse same linkrev if the link target matches
if ($cgi->{'olinkrev'} && $cgi->{'olinkrev'} eq 'linkrev' && $ofiles->{'_link'}) { my $ol = BSRevision::revreadxml($orev, '_link', $ofiles->{'_link'}, $BSXML::link, 1); if ($ol) { $ol->{'project'} = $orev->{'project'} unless defined $ol->{'project'}; $ol->{'package'} = $orev->{'package'} unless defined $ol->{'package'}; } $cgi->{'olinkrev'} = $li{'srcmd5'} if $l && $ol && $l->{'project'} eq $ol->{'project'} && $l->{'package'} eq $ol->{'package'}; } } if ($ofiles->{'_link'}) { $orev->{'linkrev'} = $cgi->{'olinkrev'} if $cgi->{'olinkrev'} && $cgi->{'olinkrev'} ne 'linkrev'; $ofiles = BSSrcServer::Link::handlelinks($orev, $ofiles); die("bad link: $ofiles\n") unless ref $ofiles; } } my $view = $cgi->{'view'} || ''; $view = 'unified' if $cgi->{'unified'}; die("unsupported view '$view'\n") if $view && ($view ne 'xml' && $view ne 'unified'); my $cacheid = "//cacheversion:2/"; $cacheid .= "$orev->{'srcmd5'}/$rev->{'srcmd5'}"; $cacheid .= "/unified:1" if $view && $view eq 'unified'; $cacheid .= "/view:$cgi->{'view'}" if $view && $view ne 'unified'; $cacheid .= "/fmax:$fmax" if defined $fmax; $cacheid .= "/tmax:$tmax" if defined $tmax; $cgi->{'withissues'} = 1 if $cgi->{'onlyissues'}; if ($cgi->{'withissues'}) { my @s = stat("$BSConfig::bsdir/issuetrackers.xml"); $cacheid .= "/withissues:$s[9]/$s[7]/$s[1]" if @s; $cacheid .= "/onlyissues" if $cgi->{'onlyissues'}; } if ($cgi->{'file'}) { my %file = map {$_ => 1} @{$cgi->{'file'}}; $cacheid .= "/file:$_" for sort keys %file; for (keys %$ofiles) { delete $ofiles->{$_} unless $file{$_}; } for (keys %$files) { delete $files->{$_} unless $file{$_}; } } $cacheid = Digest::MD5::md5_hex($cacheid); my $xmlret; if ($view eq 'xml') { $xmlret = {}; $xmlret->{'key'} = $cacheid; $rev->{'rev'} ||= 0; $rev->{'srcmd5'} = $BSSrcrep::emptysrcmd5 if $rev->{'srcmd5'} eq 'empty'; $orev->{'rev'} ||= 0; $orev->{'srcmd5'} = $BSSrcrep::emptysrcmd5 if $orev->{'srcmd5'} eq 'empty'; $xmlret->{'old'} = { 'project' => $orev->{'project'}, 'package' => $orev->{'package'}, 'rev' => $orev->{'rev'}, 'srcmd5' => $orev->{'srcmd5'} }; $xmlret->{'new'} = { 'project' => $rev->{'project'}, 'package' => $rev->{'package'}, 'rev' => $rev->{'rev'}, 'srcmd5' => $rev->{'srcmd5'} }; $xmlret->{'files'} = {}; } if (!grep {($ofiles->{$_} || '') ne ($files->{$_} || '')} (keys %$ofiles, keys %$files)) {
# all files identical, don't bother
return ($xmlret, $BSXML::sourcediff) if $view eq 'xml'; return ('', 'Content-Type: text/plain'); } local *F; my $cn = "$diffcache/".substr($cacheid, 0, 2)."/$cacheid"; if (open(F, '<', $cn)) { utime(time, time, $cn); BSServer::reply_file(\*F, $view eq 'xml' ? 'Content-Type: text/xml' : 'Content-Type: text/plain'); return undef; } local *LF; mkdir_p("$diffcache/".substr($cacheid, 0, 2)); if (!BSUtil::lockcheck('>>', "$cn.run")) { my @args; push @args, "view=$view" if $view; BSHandoff::handoff("/sourcediffcache/$cacheid", undef, @args); } BSUtil::lockopen(\*LF, '>>', "$cn.run");
# retry open, maybe somebody else has created the diff meanwhile
if (open(F, '<', $cn)) { unlink("$cn.run"); close LF; utime(time, time, $cn); BSServer::reply_file(\*F, $view eq 'xml' ?
'Content-Type: text/xml' : 'Content-Type: text/plain'); return undef; } my $tmpdir = "$uploaddir/srcdiff$$"; my $d; my %xobscpio; mkdir_p($uploaddir); my $xobscpio = sub { return BSRevision::revfilename($_[0], $_[1], $_[2]) if $_[1] !~ /\.obscpio$/; my $tmp = "$uploaddir/sourcediff.obscpio.$$.$_[2]-$_[1]"; BSSrcrep::copyonefile_tmp($_[0]->{'project'}, $_[0]->{'package'}, $_[1], $_[2], $tmp) unless $xobscpio{$tmp}; $xobscpio{$tmp} = 1; return $tmp; }; my $ofn = sub { $xobscpio->($orev, $_[0], $ofiles->{$_[0]}) }; my $fn = sub { $xobscpio->($rev, $_[0], $files->{$_[0]}) }; my %opts = ('edir' => $tmpdir, 'fmax' => $fmax, 'tmax' => $tmax, 'tfmax' => $fmax, 'doarchive' => 1, 'similar' => 1); if ($view eq 'xml') { if (!$cgi->{'onlyissues'}) { $xmlret->{'files'} = { 'file' => BSSrcdiff::datadiff($ofn, $ofiles, $fn, $files, %opts) }; } if ($cgi->{'withissues'}) { my $trackers = readxml("$BSConfig::bsdir/issuetrackers.xml", $BSXML::issue_trackers, 1) || {}; $opts{'trackers'} = $trackers->{'issue-tracker'} || []; $xmlret->{'issues'} = { 'issue' => BSSrcdiff::issuediff($ofn, $ofiles, $fn, $files, %opts) }; } BSUtil::data2utf8xml($xmlret); $d = XMLout($BSXML::sourcediff, $xmlret); } elsif ($view eq 'unified') { delete $opts{'doarchive'}; delete $opts{'similar'}; $opts{'nodecomp'} = 1; $opts{'oldrevision'} = $orev->{'rev'} if defined $orev->{'rev'}; $opts{'newrevision'} = $rev->{'rev'} if defined $rev->{'rev'}; $d = BSSrcdiff::unifieddiff($ofn, $ofiles, $fn, $files, %opts); } else { $d = BSSrcdiff::srcdiff($ofn, $ofiles, $fn, $files, %opts); } unlink($_) for keys %xobscpio; mkdir_p("$diffcache/".substr($cacheid, 0, 2)); writestr("$diffcache/.new$$", $cn, $d); unlink("$cn.run"); close LF; return ($d, $view eq 'xml' ? 'Content-Type: text/xml' : 'Content-Type: text/plain'); } sub linkdiff { my ($cgi, $projid, $packid) = @_; my $rev = getrev($projid, $packid, defined($cgi->{'rev'}) ? $cgi->{'rev'} : 'upload'); $rev->{'linkrev'} = $cgi->{'linkrev'} if $cgi->{'linkrev'}; my $linkinfo = {}; my $files = lsrev_expanded($rev, $linkinfo); die("not a link\n") unless $linkinfo->{'srcmd5'}; BSSrcServer::Link::linkinfo_addtarget($rev, $linkinfo); return sourcediff({ %$cgi, 'expand' => 0, 'oproject' => $linkinfo->{'project'}, 'opackage' => $linkinfo->{'package'}, 'orev' => $linkinfo->{'srcmd5'}, 'missingok' => $linkinfo->{'missingok'}, 'rev' => $rev->{'srcmd5'}, }, $projid, $packid); } sub servicediff { my ($cgi, $projid, $packid) = @_; die("servicediff only works for new style services\n") if $BSConfig::old_style_services; my $rev = getrev($projid, $packid, defined($cgi->{'rev'}) ? $cgi->{'rev'} : 'upload'); my $linkinfo = {}; my $files = BSRevision::lsrev($rev, $linkinfo); if ($linkinfo->{'xservicemd5'}) { return sourcediff({%$cgi, 'expand' => 0, 'orev' => $rev->{'srcmd5'}, 'rev' => $linkinfo->{'xservicemd5'}}, $projid, $packid); } elsif ($linkinfo->{'lservicemd5'}) { return sourcediff({%$cgi, 'expand' => 0, 'orev' => $linkinfo->{'lservicemd5'}, 'rev' => $rev->{'srcmd5'}}, $projid, $packid); } else { die("no service was run for this revision\n"); } } sub sourcecommit { my ($cgi, $projid, $packid) = @_; my $rev = getrev($projid, $packid, defined($cgi->{'rev'}) ? 
$cgi->{'rev'} : 'upload'); my $files = BSRevision::lsrev($rev); $files = BSSrcServer::Link::keeplink($cgi, $projid, $packid, $files) if $cgi->{'keeplink'}; $rev = addrev($cgi, $projid, $packid, $files); BSSrcServer::Service::runservice($cgi, $rev, $files) unless $cgi->{'noservice'}; delete $rev->{'project'}; delete $rev->{'package'}; return ($rev, $BSXML::revision); } sub sourcecommitfilelist { my ($cgi, $projid, $packid) = @_; BSVerify::verify_md5($cgi->{'servicemark'}) if $cgi->{'servicemark'}; mkdir_p($uploaddir); my $uploaded = BSServer::read_file("$uploaddir/$$"); die("upload failed\n") unless $uploaded; my $fl = readxml("$uploaddir/$$", $BSXML::dir); unlink("$uploaddir/$$"); # make sure we know every file my @missing; my $files = {}; my $ofiles = {}; my $ofiles_expanded = {}; my $orev = {'project' => $projid, 'package' => $packid}; if ($cgi->{'withvalidate'}) { eval { my $rev_old = getrev($projid, $packid); $ofiles = BSRevision::lsrev($rev_old); $ofiles_expanded = lsrev_expanded($rev_old); }; } for my $entry (@{$fl->{'entry'} || []}) { BSVerify::verify_filename($entry->{'name'}); BSVerify::verify_md5($entry->{'md5'}); if (! -e BSRevision::revfilename($orev, $entry->{'name'}, $entry->{'md5'})) { $entry->{'hash'} = 'new' if $cgi->{'withvalidate'}; push @missing, $entry; } else { die("duplicate file: $entry->{'name'}\n") if exists $files->{$entry->{'name'}}; if ($entry->{'hash'}) { my $fd = gensym; BSRevision::revopen($orev, $entry->{'name'}, $entry->{'md5'}, $fd); my $sha256 = Digest::SHA->new(256); my $hash_to_check = "sha256:" . $sha256->addfile($fd)->hexdigest; if ($hash_to_check ne $entry->{'hash'}) { die("SHA mismatch for same md5sum in $packid for file $entry->{'name'} with sum $entry->{'md5'}\n"); } } elsif ($cgi->{'withvalidate'}) { if ((!$ofiles->{$entry->{'name'}} || $ofiles->{$entry->{'name'}} ne $entry->{'md5'}) && (!$ofiles_expanded->{$entry->{'name'}} || $ofiles_expanded->{$entry->{'name'}} ne $entry->{'md5'})) { $entry->{'hash'} = 'missing'; push @missing, $entry; } } $files->{$entry->{'name'}} = $entry->{'md5'}; } } if (@missing) { my $res = {'name' => $packid, 'error' => 'missing', 'entry' => \@missing}; return ($res, $BSXML::dir); } $files = BSSrcServer::Link::keeplink($cgi, $projid, $packid, $files) if $cgi->{'keeplink'}; if (-e "$projectsdir/$projid.pkg/$packid.upload-MD5SUMS") { # autocommit old update revision so that it doesn't get lost my $uploadrev = {'project' => $projid, 'package' => $packid, 'srcmd5' => 'upload'}; my $uploadfiles = BSRevision::lsrev($uploadrev); addrev({ %$cgi, 'comment' => 'autocommit update revision'}, $projid, $packid, $uploadfiles); } my $rev = addrev($cgi, $projid, $packid, $files); BSSrcServer::Service::runservice($cgi, $rev, $files) unless $cgi->{'noservice'}; $cgi->{'rev'} = $rev->{'rev'}; return getfilelist($cgi, $projid, $packid); } # admin only, move entire project sub moveproject { my ($cgi, $projid) = @_; my $oprojid = $cgi->{'oproject'}; return $BSStdServer::return_ok if $oprojid eq $projid; my $oproj = BSRevision::readproj_local($oprojid); my $reposerver = $BSConfig::partitioning ? BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver; my $oreposerver = $BSConfig::partitioning ? 
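# With $BSConfig::partitioning enabled, every project is pinned to one
# repository server via BSSrcServer::Partition::projid2reposerver(); without it
# everything talks to the single $BSConfig::reposerver. A hypothetical BSConfig
# sketch of a partitioned setup (names and exact shape are illustrative only,
# see BSConfig.pm.template for the real format):
#   our $partitioning = [ 'home:' => 'home', '.*' => 'main' ];
#   our $partitionservers = { 'home' => 'http://home-repserver:5252',
#                             'main' => 'http://main-repserver:5252' };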
    BSSrcServer::Partition::projid2reposerver($oprojid) : $BSConfig::reposerver;
  if ($reposerver ne $oreposerver) {
    die("cannot copy binaries between different repository servers yet\n");
  }
  if (-e "$projectsdir/$projid.pkg" || -e "$projectsdir/$projid.conf" || -e "$projectsdir/$projid.xml") {
    die("target project already exists\n");
  }
  rename("$projectsdir/$oprojid.xml", "$projectsdir/$projid.xml");
  rename("$projectsdir/$oprojid.pkg", "$projectsdir/$projid.pkg") if -e "$projectsdir/$oprojid.pkg";
  rename("$projectsdir/$oprojid.conf", "$projectsdir/$projid.conf") if -e "$projectsdir/$oprojid.conf";
  rename("$treesdir/$oprojid", "$treesdir/$projid") if $BSConfig::nosharedtrees && -e "$treesdir/$oprojid";
  # move entries in linkinfo database
  BSRevision::movelinkinfos($projid, $oprojid);
  # move in the backend as well
  my @args;
  push @args, "cmd=move";
  push @args, "oproject=$oprojid";
  my $param = {
    'uri' => "$reposerver/build/$projid",
    'request' => 'POST',
  };
  eval {
    # ignore failures for now
    BSWatcher::rpc($param, undef, @args);
  };
  warn($@) if $@;
  # check all packages in project
  notify_repservers('package', $projid);
  notify_repservers('package', $oprojid);
  return $BSStdServer::return_ok;
}

# copy sources of entire project, project exists ensured by api.
sub copyproject {
  my ($cgi, $projid) = @_;
  my $oprojid = $cgi->{'oproject'};
  return $BSStdServer::return_ok if $oprojid eq $projid;
  die("copyproject cannot have both makeolder and makeoriginolder\n") if $cgi->{'makeolder'} && $cgi->{'makeoriginolder'};
  my $proj = BSRevision::readproj_local($projid);
  my $oproj = BSRevision::readproj_local($oprojid);
  my $reposerver = $BSConfig::partitioning ? BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver;
  my $oreposerver = $BSConfig::partitioning ? BSSrcServer::Partition::projid2reposerver($oprojid) : $BSConfig::reposerver;
  if ($cgi->{'withbinaries'} && $reposerver ne $oreposerver) {
    die("cannot copy binaries between different repository servers yet\n");
  }
  my $user = defined($cgi->{'user'}) && $cgi->{'user'} ne '' ? $cgi->{'user'} : 'unknown';
  my $comment = defined($cgi->{'comment'}) ? $cgi->{'comment'} : '';
  $user = str2utf8xml($user);
  $comment = str2utf8xml($comment);
  # copy _project data
  if (-e "$projectsdir/$oprojid.pkg/_project.rev" || -e "$projectsdir/$oprojid.conf") {
    my $lastorev = getrev($oprojid, '_project');
    my $files = BSRevision::lsrev($lastorev);
    BSSrcrep::copyfiles($projid, '_project', $oprojid, '_project', $files);
    addrev($cgi, $projid, '_project', $files);
  }
  # signal start of project copy
  notify_repservers('suspendproject', $projid, undef, 'copyproject in progress');
  # local packages only
  my @pkgs = BSRevision::lspackages_local($oprojid);
  delete $cgi->{'servicemark'};	# just in case...
  for my $packid (@pkgs) {
    if (! -e "$projectsdir/$projid.pkg/$packid.xml") {
      # new package, create.
hopefully the API can deal with this my $opack = BSRevision::readpack_local($oprojid, $packid); my $pack = { 'project' => $projid, 'name' => $packid, }; # everything except person, group, devel and lock for (keys %$opack) { next if $_ eq 'project' || $_ eq 'name'; next if $_ eq 'person' || $_ eq 'group' || $_ eq 'devel' || $_ eq 'lock'; $pack->{$_} = $opack->{$_} if defined $opack->{$_}; } mkdir_p($uploaddir); writexml("$uploaddir/copyproject$$", undef, $pack, $BSXML::pack); BSRevision::addrev_meta_replace($cgi, $projid, $packid, [ "$uploaddir/copyproject$$", "$projectsdir/$projid.pkg/$packid.xml", '_meta' ]); # need to do this now because the binary copy will fail otherwise notify_repservers('package', $projid, $packid) if $cgi->{'withbinaries'}; } if ($cgi->{'makeolder'} || $cgi->{'makeoriginolder'} || -s "$projectsdir/$oprojid.pkg/$packid.rev") { my $lastorev; if ($cgi->{'withhistory'}) { # FIXME: races ahead # history copying is a bit tricky, as it renumbers the revisions my @allrevs = BSFileDB::fdb_getall("$projectsdir/$oprojid.pkg/$packid.rev", $srcrevlay); if (-e "$projectsdir/$projid.pkg/$packid.rev") { my $lastrev = BSFileDB::fdb_getlast("$projectsdir/$projid.pkg/$packid.rev", $srcrevlay); if ($lastrev && $lastrev->{'rev'}) { for my $rev (@allrevs) { $rev->{'rev'} += $lastrev->{'rev'}; } } } # make trees available in new project for my $rev (@allrevs) { BSSrcrep::copytree($projid, $packid, $oprojid, $packid, $rev->{'srcmd5'}); } BSFileDB::fdb_add_multiple("$projectsdir/$projid.pkg/$packid.rev", $srcrevlay, @allrevs); $lastorev = $allrevs[-1]; } else { $lastorev = BSFileDB::fdb_getlast("$projectsdir/$oprojid.pkg/$packid.rev", $srcrevlay); } if (!$lastorev || !$lastorev->{'rev'}) { next unless $cgi->{'makeolder'} || $cgi->{'makeoriginolder'}; # fake empty commit $lastorev = { 'version' => 'unknown', 'rev' => 0, 'vrev' => 0, 'srcmd5' => $BSSrcrep::emptysrcmd5 }; } # always do one new commit, we don't use addrev to have full control over vrev my $linkinfo = {}; my $frev = { %$lastorev, 'project' => $oprojid, 'package' => $packid }; my $files = BSRevision::lsrev($frev, $linkinfo); my $servicemark; if ($linkinfo->{'xservicemd5'}) { if ($cgi->{'noservice'}) { eval { $files = BSSrcServer::Service::handleservice($frev, $files, $linkinfo->{'xservicemd5'}); }; if ($@) { warn($@); # hmm, could not expand service $servicemark = BSSrcServer::Service::genservicemark($projid, $packid, $files, undef, 1); } else { BSSrcrep::copyfiles($projid, $packid, $oprojid, $packid, $files); ($servicemark, $files) = BSSrcServer::Service::servicemark_noservice($cgi, $projid, $packid, $files, undef, $linkinfo->{'xservicemd5'}); } } else { $servicemark = BSSrcServer::Service::genservicemark($projid, $packid, $files, undef, 1); } } BSSrcrep::copyfiles($projid, $packid, $oprojid, $packid, $files); $files->{'/SERVICE'} = $servicemark if $servicemark; my $newrev = { %$lastorev }; $newrev->{'srcmd5'} = BSSrcrep::addmeta($projid, $packid, $files); $newrev->{'user'} = $user; $newrev->{'comment'} = $comment; $newrev->{'requestid'} = $cgi->{'requestid'}; $newrev->{'time'} = time(); if ($cgi->{'makeolder'}) { $newrev->{'vrev'} =~ s/(\d+)$/($1+1).".1"/e; } elsif ($cgi->{'makeoriginolder'} && !$files->{'_link'}) { $newrev->{'vrev'} =~ s/(\d+)$/$1+2/e; } else { $newrev->{'vrev'} =~ s/(\d+)$/$1+1/e; } delete $newrev->{'rev'}; $newrev = BSRevision::addrev_local({'vrev' => 1}, $projid, $packid, $newrev, $files); if ($cgi->{'makeolder'} || ($cgi->{'makeoriginolder'} && !$files->{'_link'})) { $lastorev->{'user'} = $user; 
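# The s/(\d+)$/.../e substitutions below rewrite the trailing number of the
# vrev string in place (/e evaluates the replacement as Perl code).
# Illustrative results, assuming a vrev of "7" (a vrev like "2.5" would only
# have its last component touched, e.g. "2.5" -> "2.6"):
#   s/(\d+)$/$1+1/e         # "7" -> "8"    plain bump
#   s/(\d+)$/$1+2/e         # "7" -> "9"    leave extra room below the copy
#   s/(\d+)$/($1+1).".1"/e  # "7" -> "8.1"  branch off a sub-counter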
$lastorev->{'comment'} = $comment; $lastorev->{'requestid'} = $cgi->{'requestid'}; $lastorev->{'time'} = time(); $lastorev->{'user'} = $user; if ($cgi->{'makeoriginolder'}) { $lastorev->{'vrev'} =~ s/(\d+)$/($1+1).".1"/e; } else { $lastorev->{'vrev'} =~ s/(\d+)$/$1+2/e; } delete $lastorev->{'rev'}; $lastorev = BSFileDB::fdb_add_i("$projectsdir/$oprojid.pkg/$packid.rev", $srcrevlay, $lastorev); } if ($packid ne '_product') { my $omb = BSSrcServer::Multibuild::getmultibuild($oprojid, $packid); my $mb = BSSrcServer::Multibuild::getmultibuild($projid, $packid); if ($mb || $omb) { BSSrcServer::Multibuild::setmultibuild($projid, $packid, $omb); notify_repservers('package', $projid, $packid) if $cgi->{'withbinaries'}; } } } # XXX: does this make any sense? if ($cgi->{'withbinaries'}) { for my $repo (@{$proj->{'repository'} || []}) { my $orepo = (grep {$_->{'name'} eq $repo->{'name'}} @{$oproj->{'repository'} || []})[0]; next unless $orepo; for my $arch (@{$repo->{'arch'} || []}) { next unless grep {$_ eq $arch} @{$orepo->{'arch'} || []}; # same source and target repo/arch in both projects exists for my $mpackid (BSSrcServer::Multibuild::addmultibuildpackages($projid, undef, $packid)) { my @args; push @args, "cmd=copy"; push @args, "oproject=$oprojid"; push @args, "opackage=$mpackid"; # same package name push @args, "orepository=$repo->{'name'}"; # same repo name push @args, 'resign=1' if $cgi->{'resign'}; my $param = { 'uri' => "$reposerver/build/$projid/$repo->{'name'}/$arch/$mpackid", 'request' => 'POST', }; eval { # ignore failures for now BSWatcher::rpc($param, undef, @args); }; warn($@) if $@; } } } } } # check all packages in project notify_repservers('package', $projid); # also resumes the project if ($cgi->{'makeolder'} || $cgi->{'makeoriginolder'}) { notify_repservers('package', $oprojid); } return $BSStdServer::return_ok; } sub freezeprojectlink { my ($cgi, $projid) = @_; my %origins; my $proj = BSRevision::readproj_local($projid); my @frozen; for my $lprojid (map {$_->{'project'}} @{$proj->{'link'} || []}) { my @frozenp; my @lpackids = findpackages($projid, undef, -1); for my $packid (sort(@lpackids)) { my $rev = getrev($projid, $packid); lsrev_expanded($rev); push @frozenp, { name => $packid, srcmd5 => $rev->{'srcmd5'}, 'vrev' => $rev->{'vrev'} }; } push @frozen, { 'project' => $lprojid, 'package' => \@frozenp }; } my $frozenlinks = { 'frozenlink' => \@frozen }; mkdir_p($uploaddir); writexml("$uploaddir/freezeproject.$$", undef, $frozenlinks, $BSXML::frozenlinks); $cgi->{'comment'} ||= 'freeze project'; BSRevision::addrev_meta_replace($cgi, $projid, undef, [ "$uploaddir/freezeproject.$$", undef, '_frozenlinks' ]); return $BSStdServer::return_ok; } # we're going to auto-update a link. 
this means we must also # auto-update the corresponding service result sub update_link_in_service { my ($rev, $files, $xservicemd5, $isbranch) = @_; return undef unless defined $xservicemd5; return $xservicemd5 if $BSConfig::old_style_services; return $xservicemd5 unless $files->{'_link'}; my $sfiles; eval { $sfiles = BSRevision::lsrev({%$rev, 'srcmd5' => $xservicemd5}); }; return $xservicemd5 unless $sfiles && $sfiles->{'_link'}; return $xservicemd5 if $sfiles->{'_link'} && $sfiles->{'_link'} eq $files->{'_link'}; # nothing changed # okay, we need to generate a new service commit my $servicemark = BSSrcServer::Service::genservicemark($rev->{'project'}, $rev->{'package'}, $files, undef, 1); return undef unless $servicemark; # delete all non-service files unless it's a branch if (!$isbranch) { delete $sfiles->{$_} for grep {!/^_service[_:]/} keys %$sfiles; } # copy new link $sfiles->{'_link'} = $files->{'_link'}; # write back new service result BSSrcServer::Service::fake_service_run($rev->{'project'}, $rev->{'package'}, $files, $sfiles, $servicemark); return $servicemark; } sub sourcecopy { my ($cgi, $projid, $packid) = @_; die("illegal rev parameter\n") if $cgi->{'rev'} && $cgi->{'rev'} ne 'upload'; my $oprojid = exists($cgi->{'oproject'}) ? $cgi->{'oproject'} : $projid; my $opackid = exists($cgi->{'opackage'}) ? $cgi->{'opackage'} : $packid; die("makeoriginolder only makes sense with withvrev\n") if $cgi->{'makeoriginolder'} && !$cgi->{'withvrev'}; die("vrevbump only makes sense with withvrev\n") if $cgi->{'vrevbump'} && !$cgi->{'withvrev'}; my $orev = $cgi->{'orev'}; $orev = getrev($oprojid, $opackid, defined($orev) ? $orev : 'latest'); if ($cgi->{'instantiate'}) { my $proj = BSRevision::readproj_local($projid, 1); if ($proj && $proj->{'link'}) { my $irev = BSSrcServer::Projlink::getrev_projlink($projid, $proj, $packid, undef, undef, 1); if ($irev) { $orev->{'vrev'} = $irev->{'vrev'} if $irev; # hack: overwrite vrev with instantiated vrev $cgi->{'withvrev'} = 1; } } } die("origin must not be virtual for makeoriginolder\n") if $cgi->{'makeoriginolder'} && $orev->{'originproject'}; $orev->{'linkrev'} = $cgi->{'olinkrev'} if $cgi->{'olinkrev'}; my $orev_srcmd5 = $orev->{'srcmd5'}; # so that we can restore it later my $linkinfo = {}; my $files = $cgi->{'noservice'} || $cgi->{'expand'} ? 
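# Hedged summary: update_link_in_service() above keeps an auto-updated _link
# consistent with its recorded service result. If the stored service run still
# carries the old _link, a fresh service mark is generated, the new _link is
# spliced into the service file list, and fake_service_run() records the result
# without actually re-running the services.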
lsrev_service($orev, $linkinfo) : BSRevision::lsrev($orev, $linkinfo); die("need a revision to copy\n") if !$cgi->{'rev'} && !$cgi->{'orev'} && $oprojid eq $projid && $opackid eq $packid && !$cgi->{'instantiate'} && !($files->{'_link'} && $cgi->{'expand'}); die("makeoriginolder currently does not work on links\n") if $cgi->{'makeoriginolder'} && $files->{'_link'}; my $autosimplifylink; my $autosimplifylink_lrev; my $freezelink; my $freezelinkfiles; if ($files->{'_link'} && !$cgi->{'dontupdatesource'} && !$cgi->{'rev'}) { # fix me: do this in a more generic way my $olink = BSRevision::revreadxml($orev, '_link', $files->{'_link'}, $BSXML::link, 1); if ($olink) { my $lprojid = $oprojid; my $lpackid = $opackid; my $lrev = $olink->{'rev'}; $lprojid = $olink->{'project'} if exists $olink->{'project'}; $lpackid = $olink->{'package'} if exists $olink->{'package'}; if ($cgi->{'freezelink'}) { # we're going to freeze the link in the source die("400 freezelink needs expand or noservice\n") unless $cgi->{'noservice'} || $cgi->{'expand'}; $lrev = getrev($lprojid, $lpackid, $lrev, undef, $olink->{'missingok'}); my %lrev = %$lrev; lsrev_expanded(\%lrev); die("400 freezelink refusing to change rev from $olink->{'rev'} to $lrev{'srcmd5'}\n") if $olink->{'rev'} && $lrev{'srcmd5'} ne $olink->{'rev'}; if (!$olink->{'rev'} || $lrev{'srcmd5'} ne $olink->{'rev'}) { # halt, freeze! $olink->{'rev'} = $lrev{'srcmd5'}; $olink->{'vrev'} = $lrev{'vrev'} if defined $lrev{'vrev'}; if ($lprojid eq $projid && $lpackid eq $packid) { eval { BSSrcServer::Access::checksourceaccess($projid, $packid); delete $olink->{'missingok'}; }; } $freezelink = $olink; } $freezelinkfiles = { %$files }; } elsif ($lprojid eq $projid && $lpackid eq $packid) { # copy destination is target of link # we're integrating this link $lrev = getrev($lprojid, $lpackid, $lrev); $autosimplifylink_lrev = { %$lrev }; my $lfiles = $cgi->{'noservice'} && !$cgi->{'expand'} ? 
          lsrev_service({ %$lrev }) : BSRevision::lsrev($lrev);
        if ($lfiles->{'_link'} && !$cgi->{'expand'}) {
          # link to a link, join
          $files = BSSrcServer::Link::integratelink($lfiles, $lprojid, $lpackid, $lrev, $files, $oprojid, $opackid, $olink, $orev);
        } else {
          # auto expand
          $cgi->{'expand'} = 1;
        }
        $autosimplifylink = $olink;
      }
    }
  }
  die("400 freezelink: origin provides no link\n") if $cgi->{'freezelink'} && !$freezelinkfiles;
  my $oldvrev = $orev->{'vrev'};
  if ($files->{'_link'} && $cgi->{'expand'}) {
    my %olrev = %$orev;	# copy so that orev still points to unexpanded sources
    $files = BSSrcServer::Link::handlelinks(\%olrev, $files);
    die("broken link in $oprojid/$opackid: $files\n") unless ref $files;
    $oldvrev = $olrev{'vrev'};
  }
  BSSrcrep::copyfiles($projid, $packid, $oprojid, $opackid, $files);
  # copy multibuild data
  if ($cgi->{'expand'} && $cgi->{'noservice'} && $packid ne '_product') {
    my $omb = BSSrcServer::Multibuild::getmultibuild($oprojid, $opackid);
    BSSrcServer::Multibuild::setmultibuild($projid, $packid, $omb);
  }
  if ($cgi->{'withvrev'} && !$cgi->{'vrev'} && defined($oldvrev)) {
    $cgi->{'vrev'} = $oldvrev;
    my $vrevbump = $cgi->{'vrevbump'} || 1;
    $vrevbump = 2 if $cgi->{'makeoriginolder'} && $vrevbump < 2;
    # bump vrev so that new builds will have a bigger release number
    # (just like in copyproject)
    $cgi->{'vrev'} =~ s/(\d+)$/$1+$vrevbump/e;
  }
  $files = BSSrcServer::Link::keeplink($cgi, $projid, $packid, $files) if $cgi->{'keeplink'};
  my $rev = addrev($cgi, $projid, $packid, $files, $cgi->{'rev'});
  delete $cgi->{'vrev'};
  if ($cgi->{'makeoriginolder'}) {
    # add dummy commit
    my $lastline = BSFileDB::fdb_getlast("$projectsdir/$oprojid.pkg/$opackid.rev", $srcrevlay);
    die("makeoriginolder: $oprojid/$opackid does not exist?\n") unless defined $lastline;
    delete $lastline->{'requestid'};
    delete $lastline->{'rev'};
    $lastline->{'user'} = 'buildservice-autocommit';
    $lastline->{'comment'} = "makeoriginolder vrev update for $projid/$packid";
    $lastline->{'requestid'} = $cgi->{'requestid'} if $cgi->{'requestid'};
    $lastline->{'vrev'} =~ s/(\d+)$/($1+1).".1"/e;
    BSFileDB::fdb_add_i("$projectsdir/$oprojid.pkg/$opackid.rev", $srcrevlay, $lastline);
    notify_repservers('package', $oprojid, $opackid);
  }
  if ($freezelink) {
    mkdir_p($uploaddir);
    writexml("$uploaddir/$$", undef, $freezelink, $BSXML::link);
    $freezelinkfiles->{'_link'} = BSSrcrep::addfile($oprojid, $opackid, "$uploaddir/$$", '_link');
    my $isbranch = grep {(keys %$_)[0] eq 'branch'} @{$freezelink->{'patches'}->{''} || []};
    my $servicemark = update_link_in_service($orev, $freezelinkfiles, $linkinfo->{'xservicemd5'}, $isbranch);
    addrev({ %$cgi, 'user' => 'buildservice-autocommit', 'comment' => 'freeze link', 'servicemark' => $servicemark }, $oprojid, $opackid, $freezelinkfiles);
  } elsif ($autosimplifylink && !defined($autosimplifylink->{'rev'})) {
    $orev->{'srcmd5'} = $orev_srcmd5;	# back to unexpanded
    # make sure that vrev doesn't decrease when copying to the
    # link target
    my $vrevbump = 0;
    if ($rev && $autosimplifylink_lrev && $rev->{'version'} ne $autosimplifylink_lrev->{'version'}) {
      # version change, check if vrev went down
      my $vrev1 = $rev->{'vrev'} || '0';
      my $vrev2 = $autosimplifylink_lrev->{'vrev'} || '0';
      $vrev1 =~ s/.*?(\d+)$/$1/;
      $vrev2 =~ s/.*?(\d+)$/$1/;
      $vrevbump = $vrev2 > $vrev1 ? $vrev2 - $vrev1 : 0;
    }
    my $isbranch = grep {(keys %$_)[0] eq 'branch'} @{$autosimplifylink->{'patches'}->{''} || []};
    if ($isbranch) {
      # update base rev so that there are no changes
      # FIXME: this is a gross hack...
# we should not need to update the baserev, instead we should change # the way branches get applied my $ofiles = BSRevision::lsrev($orev); delete $ofiles->{'_link'}; BSSrcrep::copyfiles($projid, $packid, $oprojid, $opackid, $ofiles); my $newbase = BSSrcrep::addmeta($projid, $packid, $ofiles); if ($autosimplifylink->{'baserev'} ne $newbase) { eval { my $latestorev = getrev($oprojid, $opackid); my $latestlinkinfo = {}; my $latestfiles = BSRevision::lsrev($latestorev, $latestlinkinfo); if ($latestfiles->{'_link'}) { my $latestl = BSRevision::revreadxml($latestorev, '_link', $latestfiles->{'_link'}, $BSXML::link, 1); my $latestisbranch = grep {(keys %$_)[0] eq 'branch'} @{$latestl->{'patches'}->{''} || []}; if ($latestisbranch && $latestl->{'baserev'} eq $autosimplifylink->{'baserev'}) { $latestl->{'baserev'} = $newbase; $latestl->{'patches'}->{''} = [ { 'branch' => undef} ]; # work around xml problem if ($latestl->{'missingok'} && (defined($latestl->{'project'}) ? $latestl->{'project'} : $oprojid) eq $projid && (defined($latestl->{'package'}) ? $latestl->{'package'} : $opackid) eq $packid) { eval { BSSrcServer::Access::checksourceaccess($projid, $packid); delete $latestl->{'missingok'}; }; } mkdir_p($uploaddir); writexml("$uploaddir/$$", undef, $latestl, $BSXML::link); $latestfiles->{'_link'} = BSSrcrep::addfile($oprojid, $opackid, "$uploaddir/$$", '_link'); my $servicemark = update_link_in_service($latestorev, $latestfiles, $latestlinkinfo->{'xservicemd5'}, 1); if ($vrevbump) { $cgi->{'vrev'} = $latestorev->{'vrev'}; $cgi->{'vrev'} =~ s/(\d+)$/$1 + $vrevbump/e; } addrev({ %$cgi, 'user' => 'buildservice-autocommit', 'comment' => 'baserev update by copy to link target', 'servicemark' => $servicemark }, $oprojid, $opackid, $latestfiles); } } }; warn($@) if $@; } } else { eval { my $latestorev = getrev($oprojid, $opackid); if ($latestorev->{'srcmd5'} eq $orev->{'srcmd5'}) { # simplify link my $latestlinkinfo = {}; my $latestfiles = BSRevision::lsrev($latestorev, $latestlinkinfo); my $nl = { %$autosimplifylink }; delete $nl->{'patches'}; delete $nl->{'baserev'}; mkdir_p($uploaddir); writexml("$uploaddir/$$", undef, $nl, $BSXML::link); my $ofiles = {}; $ofiles->{'_link'} = BSSrcrep::addfile($oprojid, $opackid, "$uploaddir/$$", '_link'); my $servicemark = update_link_in_service($latestorev, $ofiles, $latestlinkinfo->{'xservicemd5'}, 0); if ($vrevbump) { $cgi->{'vrev'} = $latestorev->{'vrev'}; $cgi->{'vrev'} =~ s/(\d+)$/$1 + $vrevbump/e; } addrev({ %$cgi, 'user' => 'buildservice-autocommit', 'comment' => 'auto commit by copy to link target', 'servicemark' => $servicemark }, $oprojid, $opackid, $ofiles); } }; warn($@) if $@; } delete $cgi->{'vrev'} if $vrevbump; } BSSrcServer::Service::runservice($cgi, $rev, $files) unless $cgi->{'noservice'}; delete $rev->{'project'}; delete $rev->{'package'}; return ($rev, $BSXML::revision_acceptinfo); } sub sourcebranch { my ($cgi, $projid, $packid) = @_; my $usebranch = 1; my $oprojid = exists($cgi->{'oproject'}) ? $cgi->{'oproject'} : $projid; my $opackid = exists($cgi->{'opackage'}) ? $cgi->{'opackage'} : $packid; my $orev = $cgi->{'orev'}; die("cannot branch myself\n") if $oprojid eq $projid && $opackid eq $packid; $orev = getrev($oprojid, $opackid, defined($orev) ? 
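# For orientation (hedged): sourcebranch() below materializes a branch as a
# _link whose only patch is a <branch/> element, plus a baserev recording the
# sources that were copied. The XML written via $BSXML::link looks roughly like
# this (project/package/baserev values are made up for illustration):
#   <link project="openSUSE:Factory" package="foo" baserev="d41d8cd98f00b204e9800998ecf8427e">
#     <patches>
#       <branch/>
#     </patches>
#   </link>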
$orev : 'latest', undef, $cgi->{'missingok'}); $opackid = $orev->{'package'} if $opackid =~ /(?{'linkrev'} = $cgi->{'olinkrev'} if $cgi->{'olinkrev'}; my $files = lsrev_expanded($orev); # modifies srcmd5, thus also needed for keepcontent case if ($cgi->{'keepcontent'}) { die("keepcontent is only supported for branches\n") unless $usebranch; my $nrev = getrev($projid, $packid, 'latest'); $files = lsrev_expanded($nrev); } my $l = {}; $l->{'project'} = $oprojid if $oprojid ne $projid; $l->{'package'} = $opackid if $opackid ne $packid; # a missing package entry is bad if the project has sourceaccess # disabled, so check if that's the case eval { BSSrcServer::Access::checksourceaccess($oprojid, $opackid) if $opackid eq $packid && $oprojid ne $projid; }; $l->{'package'} = $opackid if $@; $l->{'missingok'} = 'true' if defined $cgi->{'missingok'} && !exists($orev->{'rev'}) && $orev->{'srcmd5'} eq $BSSrcrep::emptysrcmd5; $l->{'rev'} = $cgi->{'orev'} if defined $cgi->{'orev'}; $l->{'baserev'} = $orev->{'srcmd5'}; if ($cgi->{'extendvrev'}) { $l->{'vrev'} = $orev->{'vrev'}; $l->{'vrev'} ||= '0'; die("extendvrev error for $l->{'vrev'}\n") unless $l->{'vrev'} =~ s/^(\d+).*?$/($1+1).'.1'/e; } my $lfiles = {}; if ($usebranch) { $l->{'patches'}->{''} = [ { 'branch' => undef} ]; BSSrcrep::copyfiles($projid, $packid, $oprojid, $opackid, $files) unless $cgi->{'keepcontent'}; $lfiles->{$_} = $files->{$_} for keys %$files; } mkdir_p($uploaddir); writexml("$uploaddir/$$", undef, $l, $BSXML::link); $lfiles->{'_link'} = BSSrcrep::addfile($projid, $packid, "$uploaddir/$$", '_link'); my $rev = addrev($cgi, $projid, $packid, $lfiles); BSSrcServer::Service::runservice($cgi, $rev, $lfiles) unless $cgi->{'noservice'}; delete $rev->{'project'}; delete $rev->{'package'}; return ($rev, $BSXML::revision_acceptinfo); } sub linktobranch { my ($cgi, $projid, $packid) = @_; my $rev = getrev($projid, $packid); $rev->{'linkrev'} = $cgi->{'linkrev'} if $cgi->{'linkrev'}; my $files = BSRevision::lsrev($rev); die("package is not a link\n") unless $files->{'_link'}; my $l = BSRevision::revreadxml($rev, '_link', $files->{'_link'}, $BSXML::link); die("package is already a branch\n") if $l->{'patches'} && grep {(keys %$_)[0] eq 'branch'} @{$l->{'patches'}->{''} || []}; my $linkinfo = {}; $files = lsrev_expanded($rev, $linkinfo); $l->{'baserev'} = $linkinfo->{'srcmd5'}; $l->{'patches'}->{''} = [ { 'branch' => undef} ]; mkdir_p($uploaddir); writexml("$uploaddir/$$", undef, $l, $BSXML::link); $files->{'_link'} = BSSrcrep::addfile($projid, $packid, "$uploaddir/$$", '_link'); $cgi->{'comment'} ||= 'converted link to branch'; $rev = addrev($cgi, $projid, $packid, $files); BSSrcServer::Service::runservice($cgi, $rev, $files); delete $rev->{'project'}; delete $rev->{'package'}; return ($rev, $BSXML::revision); } sub sourcecollectbuildenv { my ($cgi, $projid, $packid) = @_; my $oprojid = $cgi->{'oproject'} || $projid; my $opackid = $cgi->{'opackage'} || $packid; die("cannot collect from myself\n") if $oprojid eq $projid && $opackid eq $packid; my $proj = checkprojrepoarch($projid); my $oproj = checkprojrepoarch($oprojid); my %orepoids; for (@{$oproj->{'repository'} || []}) { $orepoids{"$oprojid/$_->{'name'}"} = $_; } for (@{$oproj->{'repository'} || []}) { for my $rt (@{$_->{'releasetarget'} || []}) { $orepoids{"$rt->{'project'}/$rt->{'repository'}"} ||= $_; } } my %buildenvs; for my $repo (@{$proj->{'repository'} || []}) { next unless @{$repo->{'arch'} || []}; my $repoid = $repo->{'name'}; my @xpath = expandsearchpath($projid, $repoid); my 
$orepo; for my $xr (@xpath) { $orepo = $orepoids{$xr}; last if $orepo; } if ($orepo) { my $orepoid = $orepo->{'name'}; my %oarchs = map {$_ => 1} @{$orepo->{'arch'} || []}; for my $arch (@{$repo->{'arch'}}) { my $be; if (!$oarchs{$arch}) { # arch not included, use error buildenv $be = { 'error', "$arch missing in $oprojid/$orepoid" }; } else { my $reposerver = $BSConfig::partitioning ? BSSrcServer::Partition::projid2reposerver($oprojid) : $BSConfig::reposerver; eval { $be = BSRPC::rpc("$reposerver/build/$oprojid/$orepoid/$arch/$opackid/_buildenv", $BSXML::buildinfo); }; if ($@) { die($@) if $@ !~ /^404/; $be = { 'error', "_buildenv missing in $oprojid/$orepoid" }; } $be ||= { 'error', "could not get _buildenv in $oprojid/$orepoid" }; } $buildenvs{"_buildenv.$repoid.$arch"} = BSUtil::toxml($be, $BSXML::buildinfo); } } } die("could not get any buildenv, something is wrong\n") unless %buildenvs; # add master buildenv, in our case a "fallback error" buildenv my $be = { 'error', "no buildenv for this repo/arch" }; $buildenvs{'_buildenv'} = BSUtil::toxml($be, $BSXML::buildinfo); # now add all the buildenvs to the last commit (unexpanded is enough for us) my $rev = getrev($projid, $packid); my $files = BSRevision::lsrev($rev); delete $files->{$_} for grep {/^_buildenv/} keys %$files; mkdir_p($uploaddir); for my $file (sort keys %buildenvs) { writestr("$uploaddir/_be$$", undef, $buildenvs{$file}); $files->{$file} = BSSrcrep::addfile($projid, $packid, "$uploaddir/_be$$", $file); } $rev = addrev($cgi, $projid, $packid, $files); BSSrcServer::Service::runservice($cgi, $rev, $files); delete $rev->{'project'}; delete $rev->{'package'}; return ($rev, $BSXML::revision); } sub deleteuploadrev { my ($cgi, $projid, $packid) = @_; unlink("$projectsdir/$projid.pkg/$packid.upload-MD5SUMS"); return $BSStdServer::return_ok; } sub unknowncmd { my ($cgi, $projid, $packid) = @_; die("unknown command \"$cgi->{'cmd'}\"\n"); } sub delfile { my ($cgi, $projid, $packid, $filename) = @_; die("no filename\n") unless defined($filename) && $filename ne ''; die("bad filename\n") if $filename =~ /\// || $filename =~ /^\./; if ($cgi->{'meta'}) { if ($filename ne '_attribute' && $filename ne '_frozenlinks') { die("unsupported meta operation\n"); } my $rev = BSRevision::addrev_meta_replace($cgi, $projid, $packid, [ undef, undef, $filename ]); notify_repservers('package', $projid) if $filename eq '_frozenlinks'; delete $rev->{'project'}; delete $rev->{'package'}; return ($rev, $BSXML::revision); } die("file '$filename' is read-only\n") if ($filename =~ /^_service:/) && !$cgi->{'force'}; my $rev = getrev($projid, $packid, defined($cgi->{'rev'}) ? 
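# Note (hedged): like most write paths in this file, delfile() defaults the
# revision to the transient 'upload' revision (the in-progress commit kept in
# "$packid.upload-MD5SUMS") rather than 'latest'. Files matching /^_service:/
# are generated by source services, which is why the check right above refuses
# to delete them unless force=1 is given.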
$cgi->{'rev'} : 'upload'); my $files; if ($cgi->{'keeplink'}) { $files = lsrev_expanded($rev); } else { $files = BSRevision::lsrev($rev); } die("404 file '$filename' does not exist\n") unless $files->{$filename}; delete $files->{$filename}; $files = BSSrcServer::Link::keeplink($projid, $packid, $files) if $cgi->{'keeplink'}; $rev = addrev($cgi, $projid, $packid, $files, $cgi->{'rev'}); BSSrcServer::Service::runservice($cgi, $rev, $files); delete $rev->{'project'}; delete $rev->{'package'}; return ($rev, $BSXML::revision); } sub getrepositorylist { my ($cgi, $projid) = @_; my $proj = checkprojrepoarch($projid, undef, undef, 1); if ($proj->{'remoteurl'}) { return (BSRPC::rpc({'uri' => "$proj->{'remoteurl'}/build/$proj->{'remoteproject'}", 'proxy' => $proxy}, $BSXML::dir), $BSXML::dir); } my @res = map {{'name' => $_->{'name'}}} @{$proj->{'repository'} || []}; return ({'entry' => \@res}, $BSXML::dir); } sub getrepository { my ($cgi, $projid, $repoid) = @_; my $proj = BSRevision::readproj_local($projid); my $repo = (grep {$_->{'name'} eq $repoid} @{$proj->{'repository'} || []})[0]; die("404 $repoid: no such repository\n") unless $repo; return ($repo, $BSXML::repo); } sub getarchlist { my ($cgi, $projid, $repoid) = @_; my $proj = checkprojrepoarch($projid, $repoid, undef, 1); if ($proj->{'remoteurl'}) { return (BSRPC::rpc({'uri' => "$proj->{'remoteurl'}/build/$proj->{'remoteproject'}/$repoid", 'proxy' => $proxy}, $BSXML::dir), $BSXML::dir); } my @repo = grep {$_->{'name'} eq $repoid} @{$proj->{'repository'} || []}; die("404 $repoid: no such repository\n") unless @repo; my @res = map {{'name' => $_}} @{$repo[0]->{'arch'} || []}; return ({'entry' => \@res}, $BSXML::dir); } sub getresult { my ($cgi, $projid) = @_; my $proj = checkprojrepoarch($projid, undef, undef, 1); if ($proj->{'remoteurl'}) { die("oldstate not supported for remote projects\n") if $cgi->{'oldstate'}; my @args = BSRPC::args($cgi, 'lastbuild', 'view', 'repository', 'arch', 'package', 'code', 'multibuild', 'locallink'); return (BSRPC::rpc({'uri' => "$proj->{'remoteurl'}/build/$proj->{'remoteproject'}/_result", 'proxy' => $proxy}, $BSXML::resultlist, @args), $BSXML::resultlist); } if ($cgi->{'multibuild'} && $cgi->{'package'} && !$BSStdServer::isajax) { $cgi->{'package'} = [ BSSrcServer::Multibuild::addmultibuildpackages($projid, undef, @{$cgi->{'package'}}) ]; } if ($cgi->{'locallink'} && $cgi->{'package'} && !$BSStdServer::isajax) { my @packages; # find all packages that are links my $db = BSDB::opendb($sourcedb, ''); for my $packid (@{$cgi->{'package'}}) { push @packages, $packid; my @l = grep {s/^\Q$projid\///} BSDBIndex::getvalues($db, 'linkinfo/package', $packid); push @packages, @l; } @packages = BSUtil::unify(@packages); $cgi->{'package'} = \@packages; } if ($cgi->{'oldstate'} && !$BSStdServer::isajax) { my @args = BSRPC::args($cgi, 'oldstate', 'lastbuild', 'view', 'repository', 'arch', 'package', 'code'); BSHandoff::handoff("/build/$projid/_result", undef, @args); } my %repoidfilter = map {$_ => 1} @{$cgi->{'repository'} || []}; my %archfilter = map {$_ => 1} @{$cgi->{'arch'} || []}; my %view = map {$_ => 1} @{$cgi->{'view'} || ['status']}; $view{'status'} = 1 if $view{'versrel'}; my %code = map {$_ => 1} @{$cgi->{'code'} || []}; if ($cgi->{'repository'}) { my %knownrepoids = map {$_->{'name'} => 1} @{$proj->{'repository'} || []}; for (@{$cgi->{'repository'}}) { die("404 unknown repository '$_'\n") if !$knownrepoids{$_}; } } if ($cgi->{'package'}) { my %knownpackids = map {$_ => 1} findpackages($projid, $proj, 1); for 
(@{$cgi->{'package'}}) { die("404 unknown package '$_'\n") if !$knownpackids{$_}; } } my @prpas; for my $repo (@{$proj->{'repository'} || []}) { next if %repoidfilter && !$repoidfilter{$repo->{'name'}}; my @archs = @{$repo->{'arch'} || []}; @archs = grep {$archfilter{$_}} @archs if %archfilter; push @prpas, map {"$projid/$repo->{'name'}/$_"} @archs; } BSWatcher::addfilewatcher("$projectsdir/$projid.xml") if $BSStdServer::isajax; if (!@prpas) { my $state = "00000000000000000000000000000000"; return undef if $BSStdServer::isajax && $cgi->{'oldstate'} && $state eq $cgi->{'oldstate'}; return ({'state' => $state}, $BSXML::resultlist); } my $ps = {}; my $reposerver = $BSConfig::partitioning ? BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver; my @args; push @args, 'lastbuild' if $cgi->{'lastbuild'}; push @args, "oldstate=$cgi->{'oldstate'}" if $cgi->{'oldstate'}; push @args, map {"prpa=$_"} @prpas; push @args, map {"package=$_"} @{$cgi->{'package'} || []}; push @args, map {"code=$_"} @{$cgi->{'code'} || []}; push @args, 'withbinarylist' if $view{'binarylist'}; push @args, 'withstats' if $view{'stats'}; push @args, 'withversrel' if $view{'versrel'}; push @args, 'summary' if $view{'summary'} && !$view{'status'}; eval { $ps = BSWatcher::rpc("$reposerver/_result", $BSXML::resultlist, @args); }; if ($@) { print "warning: $reposerver: $@"; $ps = {}; } return if $BSStdServer::isajax && !defined($ps); if ($view{'summary'} && $view{'status'}) { my @order = ('succeeded', 'failed', 'unresolvable', 'broken', 'scheduled'); my %order = map {$_ => 1} @order; for my $p (@{$ps->{'result'} || []}) { my %sum; for my $pp (@{$p->{'status'} || []}) { $sum{$pp->{'code'}}++ if $pp->{'code'}; } my @sum = grep {exists $sum{$_}} @order; push @sum, grep {!$order{$_}} sort keys %sum; $p->{'summary'} = {'statuscount' => [ map {{'code' => $_, 'count' => $sum{$_}}} @sum ] }; } } if (!$view{'status'}) { for my $p (@{$ps->{'result'} || []}) { delete $p->{'status'}; } } return ($ps, $BSXML::resultlist); } sub docommand { my ($cgi, $projid) = @_; my %repoidfilter = map {$_ => 1} @{$cgi->{'repository'} || []}; my %archfilter = map {$_ => 1} @{$cgi->{'arch'} || []}; my $proj = BSRevision::readproj_local($projid); if ($cgi->{'cmd'} eq 'suspendproject') { notify_repservers('suspendproject', $projid, undef, $cgi->{'comment'} || 'suspendproject'); return $BSStdServer::return_ok; } if ($cgi->{'cmd'} eq 'resumeproject') { notify_repservers('resumeproject', $projid, undef, $cgi->{'comment'} || 'suspendproject'); return $BSStdServer::return_ok; } my @prpas; for my $repo (@{$proj->{'repository'} || []}) { next if %repoidfilter && !$repoidfilter{$repo->{'name'}}; my @archs = @{$repo->{'arch'} || []}; @archs = grep {$archfilter{$_}} @archs if %archfilter; push @prpas, map {"$projid/$repo->{'name'}/$_"} @archs; } die("no repository defined\n") unless @prpas; my @packids = @{$cgi->{'package'} || []}; if ($cgi->{'cmd'} eq 'wipepublishedlocked') { die("wipepublishedlocked can only wipe complete repos\n") if $cgi->{'arch'} || $cgi->{'code'} || @packids; } else { if (@packids) { my %packids = map {$_ => 1} findpackages($projid, $proj, 1); my @badpacks = grep {!$packids{$_}} @packids; die("404 unknown package: @badpacks\n") if @badpacks; } else { @packids = findpackages($projid, $proj); } die("no packages defined\n") unless @packids; } die("illegal wipe parameter\n") if $cgi->{'wipe'} && $cgi->{'cmd'} ne 'wipe'; my $reposerver = $BSConfig::partitioning ? 
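# Note (hedged): docommand() forwards build commands to the repo server's
# /_command route as a single POST with repeated prpa= parameters
# (project/repository/arch triples) plus package=, code=, wipe= and cmd=.
# An illustrative request line (values made up):
#   POST /_command?prpa=home:foo/openSUSE_Leap/x86_64&package=bar&cmd=rebuild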
BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver; my $res; my @args; push @args, map {"prpa=$_"} @prpas; push @args, map {"package=$_"} @packids; push @args, map {"code=$_"} @{$cgi->{'code'} || []}; push @args, map {"wipe=$_"} @{$cgi->{'wipe'} || []}; push @args, "cmd=$cgi->{'cmd'}"; my $param = { 'uri' => "$reposerver/_command", 'request' => 'POST', }; $res = BSWatcher::rpc($param, undef, @args); return $res; } sub checkprojrepoarch { my ($projid, $repoid, $arch, $remoteok) = @_; my $proj = BSRevision::readproj_local($projid, 1); $proj = BSSrcServer::Remote::remoteprojid($projid) if $remoteok && (!$proj || $proj->{'remoteurl'}); die("404 project '$projid' does not exist\n") if !$proj; die("404 project '$projid' is remote\n") if $proj->{'remoteurl'} && !$remoteok; return $proj if $proj->{'remoteurl'}; return $proj unless defined $repoid; my $repo = (grep {$_->{'name'} eq $repoid} @{$proj->{'repository'} || []})[0]; die("404 project '$projid' has no repository '$repoid'\n") unless $repo; return $proj unless defined $arch; die("404 repository '$projid/$repoid' has no architecture '$arch'\n") unless grep {$_ eq $arch} @{$repo->{'arch'} || []}; return $proj; } sub getbuilddepinfo { my ($cgi, $projid, $repoid, $arch) = @_; checkprojrepoarch($projid, $repoid, $arch); my $reposerver = $BSConfig::partitioning ? BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver; my @args = BSRPC::args($cgi, 'package', 'view'); my $param = { 'uri' => "$reposerver/build/$projid/$repoid/$arch/_builddepinfo", }; if (BSServer::have_content()) { $param->{'request'} = 'POST'; $param->{'data'} = BSServer::read_data(10000000); $param->{'headers'} = [ 'Content-Type: application/octet-stream' ]; } my $res = BSWatcher::rpc($param, $BSXML::builddepinfo, @args); return ($res, $BSXML::builddepinfo); } sub getjobhistory { my ($cgi, $projid, $repoid, $arch) = @_; checkprojrepoarch($projid, $repoid, $arch); my $reposerver = $BSConfig::partitioning ? BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver; my @args = BSRPC::args($cgi, 'limit', 'package', 'code'); my $res = BSWatcher::rpc("$reposerver/build/$projid/$repoid/$arch/_jobhistory", $BSXML::jobhistlist, @args); return ($res, $BSXML::jobhistlist); } sub getpackagelist_build { my ($cgi, $projid, $repoid, $arch) = @_; if ($cgi->{'view'}) { die("unknown view '$cgi->{'view'}'\n") unless $cgi->{'view'} eq 'binaryversions' || $cgi->{'view'} eq 'binaryversionscode'; my $proj = checkprojrepoarch($projid, $repoid, $arch, 1); my $reposerver = $BSConfig::partitioning ? BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver; my @args = BSRPC::args($cgi, 'view', 'package'); my $param = { 'uri' => "$reposerver/build/$projid/$repoid/$arch", 'ignorestatus' => 1, 'receiver' => \&BSServer::reply_receiver, }; if ($proj->{'remoteurl'}) { if (!$BSStdServer::isajax) { BSHandoff::handoff("/build/$projid/$repoid/$arch", undef, @args); } $param->{'uri'} = "$proj->{'remoteurl'}/build/$proj->{'remoteproject'}/$repoid/$arch"; $param->{'proxy'} = $proxy; } BSWatcher::rpc($param, undef, @args); return undef; } return getpackagelist({ %$cgi, 'expand' => 1, 'noorigins' => 1 }, $projid, $repoid, $arch); } sub getbinarylist { my ($cgi, $projid, $repoid, $arch, $packid) = @_; my $view = $cgi->{'view'}; my $nosource = $cgi->{'nosource'}; my $reposerver = $BSConfig::partitioning ? 
BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver; my @args = BSRPC::args($cgi, 'view', 'nosource', 'withmd5', 'binary'); if ($view && ($view eq 'cache' || $view eq 'cpio' || $view eq 'solv' || $view eq 'solvstate')) { # do not check arch in interconnect mode my $proj = checkprojrepoarch($projid, $repoid, undef, 1); if ($view eq 'cpio' && $packid eq '_repository' && !@{$cgi->{'binary'} || []}) { if (!$proj->{'remoteurl'} || $proj->{'partition'}) { my $repo = (grep {$_->{'name'} eq $repoid} @{$proj->{'repository'} || []})[0]; my $doddata = (grep {($_->{'arch'} || '') eq $arch} @{($repo || {})->{'download'} || []})[0]; die("will not get all dod packages\n") if $doddata; } } if (!$BSStdServer::isajax) { if ($proj->{'remoteurl'} && $view eq 'cpio' && $packid eq '_repository' && !$nosource && @{$cgi->{'binary'} || []}) { # hand over to worker_getbinaries to get the answer cached @args = (); push @args, "project=$projid"; push @args, "repository=$repoid"; push @args, "arch=$arch"; push @args, "binaries=".join(',', @{$cgi->{'binary'} || []}); BSHandoff::handoff('/getbinaries', undef, @args); } BSHandoff::handoff("/build/$projid/$repoid/$arch/$packid", undef, @args); } my $param = { 'uri' => "$reposerver/build/$projid/$repoid/$arch/$packid", 'ignorestatus' => 1, 'receiver' => \&BSServer::reply_receiver, }; if ($proj->{'remoteurl'}) { $param->{'uri'} = "$proj->{'remoteurl'}/build/$proj->{'remoteproject'}/$repoid/$arch/$packid"; $param->{'proxy'} = $proxy; } BSWatcher::rpc($param, undef, @args); return undef; } my $proj = checkprojrepoarch($projid, $repoid, $arch, 1); my $param = { 'uri' => "$reposerver/build/$projid/$repoid/$arch/$packid", }; if ($proj->{'remoteurl'}) { $param->{'uri'} = "$proj->{'remoteurl'}/build/$proj->{'remoteproject'}/$repoid/$arch/$packid"; $param->{'proxy'} = $proxy; } if ($view && $view eq 'binaryversions') { push @args, 'nometa=1' if $cgi->{'nometa'}; if (!$BSStdServer::isajax && $packid eq '_repository' && $proj->{'remoteurl'} && !$proj->{'partition'}) { # hand over to getbinaryversions for chunking @args = (); push @args, "project=$projid"; push @args, "repository=$repoid"; push @args, "arch=$arch"; push @args, 'nometa=1' if $cgi->{'nometa'}; push @args, "binaries=".join(',', @{$cgi->{'binary'} || []}); BSHandoff::handoff('/getbinaryversions', undef, @args); } if (!$BSStdServer::isajax && $packid eq '_repository') { # this can take a while if we have dod configured, in that case handoff my $repo = (grep {$_->{'name'} eq $repoid} @{$proj->{'repository'} || []})[0]; if ($repo) { my $doddata = (grep {($_->{'arch'} || '') eq $arch} @{$repo->{'download'} || []})[0]; if ($doddata) { die("will not get all dod packages\n") unless @{$cgi->{'binary'} || []}; BSHandoff::handoff("/build/$projid/$repoid/$arch/$packid", undef, @args); } } } my $bvl = BSWatcher::rpc($param, $BSXML::binaryversionlist, @args); return ($bvl, $BSXML::binaryversionlist); } if ($view && $view eq 'names' && $cgi->{'binary'} && !$BSStdServer::isajax && $packid eq '_repository') { # this can take a while if we have dod configured, in that case handoff my $repo = (grep {$_->{'name'} eq $repoid} @{$proj->{'repository'} || []})[0]; if ($repo) { my $doddata = (grep {($_->{'arch'} || '') eq $arch} @{$repo->{'download'} || []})[0]; if ($doddata) { BSHandoff::handoff("/build/$projid/$repoid/$arch/$packid", undef, @args); } } } if ($view && ($view eq 'cpioheaders' || $view eq 'cpioheaderchksums')) { $param->{'ignorestatus'} = 1; $param->{'receiver'} = \&BSServer::reply_receiver; 
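# Note (hedged): setting 'receiver' to \&BSServer::reply_receiver (together
# with 'ignorestatus') turns the RPC into a passthrough: the repo server's
# response body is streamed straight back to our client instead of being
# parsed, which is why these branches return undef rather than a (data, dtd)
# pair.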
BSWatcher::rpc($param, undef, @args); return undef; } my $bl = BSWatcher::rpc($param, $BSXML::binarylist, @args); return ($bl, $BSXML::binarylist); } sub getbinary { my ($cgi, $projid, $repoid, $arch, $packid, $filename) = @_; my $proj = checkprojrepoarch($projid, $repoid, $arch, 1); my $view = $cgi->{'view'} || ''; if ($proj->{'remoteurl'} && $packid eq '_repository' && !$view) { # hack: reroute to /getbinaries so that our local cache is used die("need the raw package name as filename for remote repository access\n") if $filename =~ /\.(?:$binsufsre)$/; my @args; push @args, "project=$projid"; push @args, "repository=$repoid"; push @args, "arch=$arch"; push @args, "binaries=$filename"; push @args, "raw=1"; BSHandoff::handoff('/getbinaries', undef, @args); } if ($view eq 'publishedpath') { die("publishedpath does not work for _repository\n") if $packid eq '_repository'; return published_path($cgi, $projid, $repoid); } my $reposerver = $BSConfig::partitioning ? BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver; my @args; push @args, "view=$view" if $view; my $param = { 'uri' => "$reposerver/build/$projid/$repoid/$arch/$packid/$filename", 'ignorestatus' => 1, 'receiver' => \&BSServer::reply_receiver, }; if ($view && $view eq 'fileinfo_ext') { my $partition = BSSrcServer::Partition::projid2partition($projid); my $projpack = (getprojpack({'nopackages' => 1, 'withrepos' => 1, 'expandedrepos' => 1, 'withremotemap' => 1, 'withconfig' => 1, 'partition' => $partition}, [ $projid ], [ $repoid ], undef, $arch))[0]; if ($projpack) { if ($projpack->{'project'} && $projpack->{'project'}->[0]->{'name'} eq $projid) { my $config = (getbuildconfig({}, $projid, $repoid))[0]; $projpack->{'project'}->[0]->{'config'} = $config if $config; } $param->{'request'} = 'POST'; $param->{'data'} = BSUtil::toxml($projpack, $BSXML::projpack); $param->{'headers'} = [ 'Content-Type: application/octet-stream' ]; } } if ($proj->{'remoteurl'}) { $param->{'uri'} = "$proj->{'remoteurl'}/build/$proj->{'remoteproject'}/$repoid/$arch/$packid/$filename"; $param->{'proxy'} = $proxy; } BSWatcher::rpc($param, undef, @args); return undef; } sub putbinary { my ($cgi, $projid, $repoid, $arch, $filename) = @_; checkprojrepoarch($projid, $repoid, $arch); my $reposerver = $BSConfig::partitioning ? BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver; my @args = BSRPC::args($cgi, 'ignoreolder', 'wipe'); my $param = { 'uri' => "$reposerver/build/$projid/$repoid/$arch/_repository/$filename", 'request' => 'PUT', 'data' => \&BSServer::forward_sender, 'chunked' => 1, }; # XXX add return type checking return BSWatcher::rpc($param, undef, @args); } sub delbinary { my ($cgi, $projid, $repoid, $arch, $filename) = @_; checkprojrepoarch($projid, $repoid, $arch); my $reposerver = $BSConfig::partitioning ? BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver; my $param = { 'uri' => "$reposerver/build/$projid/$repoid/$arch/_repository/$filename", 'request' => 'DELETE', }; return BSWatcher::rpc($param, undef); } sub copybuild { my ($cgi, $projid, $repoid, $arch, $packid) = @_; die("illegal package '$packid'\n") if $packid =~ /^_/ && !($packid =~ /^_product:/); checkprojrepoarch($projid, $repoid, $arch); my $oprojid = defined($cgi->{'oproject'}) ? $cgi->{'oproject'} : $projid; my $opackid = defined($cgi->{'opackage'}) ? $cgi->{'opackage'} : $packid; my $orepoid = defined($cgi->{'orepository'}) ? 
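# Note (hedged): with multibuild=1 the copy below also fans out over the
# flavors declared in the origin's _multibuild file, pairing "pkg:flavor"
# names one to one. Illustratively, flavors qw(doc test) for a copy of "foo"
# to "bar" yield:
#   %tocopy = ('foo' => 'bar', 'foo:doc' => 'bar:doc', 'foo:test' => 'bar:test')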
    $cgi->{'orepository'} : $repoid;
  die("nothing to do\n") if "$oprojid/$opackid/$orepoid" eq "$projid/$packid/$repoid";
  checkprojrepoarch($oprojid, $orepoid, $arch);
  # make sure the packages exist. not cheap, but does everything we need
  getrev($projid, $packid);
  getrev($oprojid, $opackid);
  my $reposerver = $BSConfig::partitioning ? BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver;
  my $oreposerver = $BSConfig::partitioning ? BSSrcServer::Partition::projid2reposerver($oprojid) : $BSConfig::reposerver;
  if ($reposerver ne $oreposerver) {
    die("cannot copy binaries between different repository servers yet\n");
  }
  my %tocopy = ($opackid => $packid);
  if ($cgi->{'multibuild'}) {
    my $mb = BSSrcServer::Multibuild::getmultibuild($oprojid, $opackid) || {};
    $tocopy{"$opackid:$_"} = "$packid:$_" for @{$mb->{'flavor'} || $mb->{'package'} || []};
  }
  for $opackid (sort keys %tocopy) {
    my @args;
    push @args, "cmd=copy";
    push @args, "oproject=$oprojid";
    push @args, "opackage=$opackid";
    push @args, "orepository=$orepoid";
    push @args, "setupdateinfoid=$cgi->{'setupdateinfoid'}" if $cgi->{'setupdateinfoid'};
    push @args, "setrelease=$cgi->{'setrelease'}" if $cgi->{'setrelease'};
    push @args, 'resign=1' if $cgi->{'resign'};
    my $param = {
      'uri' => "$reposerver/build/$projid/$repoid/$arch/$tocopy{$opackid}",
      'request' => 'POST',
    };
    BSWatcher::rpc($param, undef, @args);
  }
  return $BSStdServer::return_ok;
}

sub uploadbuild {
  my ($cgi, $projid, $repoid, $arch, $packid) = @_;
  die("illegal package '$packid'\n") if $packid =~ /^_/ && !($packid =~ /^_product:/);
  checkprojrepoarch($projid, $repoid, $arch);
  my $reposerver = $BSConfig::partitioning ? BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver;
  my $param = {
    'uri' => "$reposerver/build/$projid/$repoid/$arch/$packid",
    'request' => 'POST',
    'data' => \&BSServer::forward_sender,
    'chunked' => 1,
  };
  # XXX add return type checking
  return BSWatcher::rpc($param, undef);
}

sub getlogfile {
  my ($cgi, $projid, $repoid, $arch, $packid) = @_;
  checkprojrepoarch($projid, $repoid, $arch);
  if (!$cgi->{'start'}) {
    # check if the package is broken
    my $rev = getrev($projid, $packid, 'build');
    eval { lsrev_expanded($rev); };
    if ($@) {
      my $error = $@;
      if ($rev->{'srcmd5'}) {
        my $files = BSRevision::lsrev($rev);
        if ($files->{'_serviceerror'}) {
          $error = BSRevision::revreadstr($rev, '_serviceerror', $files->{'_serviceerror'});
        } elsif ($files->{'_linkerror'}) {
          $error = BSRevision::revreadstr($rev, '_linkerror', $files->{'_linkerror'});
        }
      }
      if ($cgi->{'view'} && $cgi->{'view'} eq 'entry') {
        my $entry = {'name' => '_log', 'size' => length($error)};
        return ({'entry' => [ $entry ]}, $BSXML::dir);
      }
      return $error;
    }
  }
  my @args = BSRPC::args($cgi, 'last', 'nostream', 'start', 'end', 'view');
  if (!$BSStdServer::isajax && !$cgi->{'view'}) {
    BSHandoff::handoff("/build/$projid/$repoid/$arch/$packid/_log", undef, @args);
  }
  my $reposerver = $BSConfig::partitioning ? BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver;
  my $param = {
    'uri' => "$reposerver/build/$projid/$repoid/$arch/$packid/_log",
    'ignorestatus' => 1,
    'receiver' => \&BSServer::reply_receiver,
    'joinable' => 1,
  };
  BSWatcher::rpc($param, undef, @args);
  return undef;	# always streams result
}

sub getjobstatus {
  my ($cgi, $projid, $repoid, $arch, $packid) = @_;
  checkprojrepoarch($projid, $repoid, $arch);
  my $reposerver = $BSConfig::partitioning ?
BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver; my $jobstatus = BSWatcher::rpc("$reposerver/build/$projid/$repoid/$arch/$packid/_jobstatus", $BSXML::jobstatus); return ($jobstatus, $BSXML::jobstatus); } sub getbuildhistory { my ($cgi, $projid, $repoid, $arch, $packid) = @_; checkprojrepoarch($projid, $repoid, $arch); my $reposerver = $BSConfig::partitioning ? BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver; my @args = BSRPC::args($cgi, 'limit'); my $buildhist = BSWatcher::rpc("$reposerver/build/$projid/$repoid/$arch/$packid/_history", $BSXML::buildhist, @args); return ($buildhist, $BSXML::buildhist); } sub getbuildinfo { my ($cgi, $projid, $repoid, $arch, $packid) = @_; checkprojrepoarch($projid, $repoid, $arch, 1); my $reposerver = $BSConfig::partitioning ? BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver; my @args = BSRPC::args($cgi, 'internal', 'debug', 'add'); my $buildinfo = BSWatcher::rpc("$reposerver/build/$projid/$repoid/$arch/$packid/_buildinfo", $BSXML::buildinfo, @args); return ($buildinfo, $BSXML::buildinfo); } sub getbuildinfo_post { my ($cgi, $projid, $repoid, $arch, $packid) = @_; checkprojrepoarch($projid, $repoid, $arch, 1); my $reposerver = $BSConfig::partitioning ? BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver; my @args = BSRPC::args($cgi, 'debug', 'add'); my $param = { 'uri' => "$reposerver/build/$projid/$repoid/$arch/$packid/_buildinfo", 'request' => 'POST', 'data' => \&BSServer::forward_sender, 'chunked' => 1, }; my $buildinfo = BSWatcher::rpc($param, $BSXML::buildinfo, @args); return ($buildinfo, $BSXML::buildinfo); } sub getbuildreason { my ($cgi, $projid, $repoid, $arch, $packid) = @_; checkprojrepoarch($projid, $repoid, $arch); my $reposerver = $BSConfig::partitioning ? BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver; my $reason = BSWatcher::rpc("$reposerver/build/$projid/$repoid/$arch/$packid/_reason", $BSXML::buildreason); return ($reason, $BSXML::buildreason); } sub getmultibuildpackages { my ($cgi, $projid, $packid) = @_; my $pack_hash = {}; my $mb = BSSrcServer::Multibuild::getmultibuild($projid, $packid) || {}; foreach my $pack (@{$mb->{'package'}}) { my $entry = {'name' => $pack}; push @{$pack_hash->{'entry'}}, $entry; } return ($pack_hash, $BSXML::dir); } sub getbuildstatus { my ($cgi, $projid, $repoid, $arch, $packid) = @_; checkprojrepoarch($projid, $repoid, $arch); my $reposerver = $BSConfig::partitioning ? BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver; my $status = BSWatcher::rpc("$reposerver/build/$projid/$repoid/$arch/$packid/_status", $BSXML::buildstatus); return ($status, $BSXML::buildstatus); } sub add_daemondata { my ($ws, $daemondata) = @_; my $part = (grep {!defined($_->{'name'})} @{$ws->{'partition'} || []})[0]; if (!$part) { $part = {}; $ws->{'partition'} ||= []; unshift @{$ws->{'partition'}}, $part; } $part->{'daemon'} ||= []; unshift(@{$part->{'daemon'}}, $daemondata); return $ws; } sub add_localworkerstatus { my ($cgi, $ws, $type, $lock) = @_; return $ws if $cgi->{'type'} && !grep {$_ eq $type} @{$cgi->{'type'}}; return $ws if $cgi->{'arch'} && !grep {$_ eq $type} @{$cgi->{'arch'}}; return $ws if $type eq 'servicedispatch' && !$BSConfig::servicedispatch; return $ws if $type ne 'srcserver' && ! 
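# Note (hedged): daemon liveness below is probed through the lock file a
# running daemon keeps flocked. If a non-blocking LOCK_EX attempt fails, some
# process still holds the lock, so the daemon is reported as 'running', and
# the lock file's mtime ($s[9]) doubles as its start time. The probe, as a
# sketch:
#   open(my $fh, '<', $lockfile) or return 'dead';
#   return flock($fh, LOCK_EX | LOCK_NB) ? 'dead' : 'running';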
-e $lock; my $daemondata = {'state' => 'dead', 'type' => $type}; if ($type eq 'srcserver') { my $req = $BSServer::request; $daemondata->{'starttime'} = $req->{'server'}->{'starttime'} if $req && $req->{'server'}; if ($req && $req->{'conf'} && $req->{'conf'}->{'handoffpath'}) { $lock = "$req->{'conf'}->{'handoffpath'}.lock"; } $daemondata->{'state'} = 'running' unless $lock; } local *F; if ($lock && open(F, '<', $lock)) { if (!flock(F, LOCK_EX | LOCK_NB)) { my @s = stat(F); $daemondata->{'state'} = 'running'; $daemondata->{'starttime'} ||= $s[9] if @s; } close F; } return add_daemondata($ws, $daemondata); } sub add_serviceserverstatus { my ($cgi, $ws) = @_; return $ws unless $BSConfig::serviceserver; return $ws if $cgi->{'type'} && !grep {$_ eq 'service'} @{$cgi->{'type'}}; return $ws if $cgi->{'arch'} && !grep {$_ eq 'service'} @{$cgi->{'arch'}}; my $daemondata = {'state' => 'dead', 'type' => 'service'}; my $param = { 'uri' => "$BSConfig::serviceserver/serverstatus", 'timeout' => 60, }; eval { my $sta = BSWatcher::rpc($param, $BSXML::serverstatus); $daemondata->{'state'} = 'running'; $daemondata->{'starttime'} = $sta->{'starttime'}; }; warn($@) if $@; return add_daemondata($ws, $daemondata); } sub add_clouduploadserverstatus { my ($cgi, $ws) = @_; return $ws unless $BSConfig::clouduploadserver; return $ws if $cgi->{'type'} && !grep {$_ eq 'clouduploadserver' || $_ eq 'clouduploadworker'} @{$cgi->{'type'}}; return $ws if $cgi->{'arch'} && !grep {$_ eq 'clouduploadserver' || $_ eq 'clouduploadworker'} @{$cgi->{'arch'}}; my $daemondatas = [ {'state' => 'dead', 'type' => 'clouduploadserver'} ]; my $param = { 'uri' => "$BSConfig::clouduploadserver/workerstatus", 'timeout' => 60, }; eval { my $sws = BSWatcher::rpc($param, $BSXML::workerstatus); $daemondatas = $sws->{'partition'}->[0]->{'daemon'} }; warn($@) if $@; for my $daemondata (reverse(@$daemondatas)) { $ws = add_daemondata($ws, $daemondata); } return $ws; } sub getworkerstatus { my ($cgi) = @_; # compat if ($cgi->{'scheduleronly'} && !$cgi->{'daemonsonly'}) { $cgi->{'daemonsonly'} = delete $cgi->{'scheduleronly'}; } my @args = BSRPC::args($cgi, 'daemonsonly', 'arch'); if (!$BSConfig::partitioning || !$BSConfig::partitionservers) { my $reposerver = $BSConfig::reposerver; my $param = { 'uri' => "$reposerver/workerstatus", 'timeout' => 60, }; my $ws = { 'partition' => [ { 'daemon' => [ { 'state' => 'dead', 'type' => 'repserver' } ] } ] }; eval { $ws = BSWatcher::rpc($param, $BSXML::workerstatus, @args); }; warn($@) if $@; delete $_->{'uri'} for @{$ws->{'idle'} || []}; delete $_->{'uri'} for @{$ws->{'building'} || []}; add_clouduploadserverstatus($cgi, $ws); add_serviceserverstatus($cgi, $ws); add_localworkerstatus($cgi, $ws, 'deltastore', "$rundir/bs_deltastore.lock"); add_localworkerstatus($cgi, $ws, 'servicedispatch', "$rundir/bs_servicedispatch.lock"); add_localworkerstatus($cgi, $ws, 'srcserver'); return ($ws, $BSXML::workerstatus); } # cumulative worker status my $cws; if (!$cgi->{'daemonsonly'}) { $cws->{'clients'} = 0; $cws->{'building'} = []; $cws->{'waiting'} = []; $cws->{'blocked'} = []; $cws->{'buildavg'} = []; $cws->{'idle'} = []; } $cws->{'partition'} = []; my %reposervers = map {$_ => 1} values(%$BSConfig::partitionservers); for my $reposerver (sort keys %reposervers) { my $param = { 'uri' => "$reposerver/workerstatus", 'timeout' => 60, }; my $ws; eval { $ws = BSWatcher::rpc($param, $BSXML::workerstatus, @args); }; warn($@) if $@; if (!$ws) { for my $par (grep {$BSConfig::partitionservers->{$_} eq
$reposerver} sort(keys(%$BSConfig::partitionservers))) { push @{$ws->{'partition'}}, { 'name' => $par, 'daemon' => [ { 'state' => 'dead', 'type' => 'repserver' } ] }; } } push @{$cws->{'partition'}}, @{$ws->{'partition'}}; next if $cgi->{'daemonsonly'}; delete $_->{'uri'} for @{$ws->{'idle'} || []}; delete $_->{'uri'} for @{$ws->{'building'} || []}; for my $workerstate (qw{idle building down dead away}) { push @{$cws->{$workerstate}}, @{$ws->{$workerstate}} if $ws->{$workerstate}; } for my $b (@{$ws->{'waiting'} || []}) { my $ob = (grep {$_->{'arch'} eq $b->{'arch'}} @{$cws->{'waiting'} || []})[0]; if (!$ob) { $ob = {'arch' => $b->{'arch'}, 'jobs' => 0}; push @{$cws->{'waiting'}}, $ob; } $ob->{'jobs'} += $b->{'jobs'}; } for my $b (@{$ws->{'blocked'} || []}) { my $ob = (grep {$_->{'arch'} eq $b->{'arch'}} @{$cws->{'blocked'} || []})[0]; if (!$ob) { $ob = {'arch' => $b->{'arch'}, 'jobs' => 0}; push @{$cws->{'blocked'}}, $ob; } $ob->{'jobs'} += $b->{'jobs'}; } for my $b (@{$ws->{'buildavg'} || []}) { my $ob = (grep {$_->{'arch'} eq $b->{'arch'}} @{$cws->{'buildavg'} || []})[0]; if (!$ob) { $ob = {'arch' => $b->{'arch'}, 'buildavg' => 0, 'count' => 0}; push @{$cws->{'buildavg'}}, $ob; } $ob->{'buildavg'} += $b->{'buildavg'}; $ob->{'count'} += 1; } $cws->{'clients'} += $ws->{'clients'} if $ws->{'clients'}; } for my $b (@{$cws->{'buildavg'} || []}) { $b->{'buildavg'} /= delete $b->{'count'}; } # sort if (!$cgi->{'daemonsonly'}) { $cws->{'idle'} = [ sort {$a->{'workerid'} cmp $b->{'workerid'} || $a->{'uri'} cmp $b->{'uri'} || $a cmp $b} @{$cws->{'idle'}} ]; $cws->{'building'} = [ sort {$a->{'workerid'} cmp $b->{'workerid'} || $a->{'uri'} cmp $b->{'uri'} || $a cmp $b} @{$cws->{'building'}} ]; $cws->{'waiting'} = [ sort {$a->{'arch'} cmp $b->{'arch'} || $a cmp $b} @{$cws->{'waiting'}} ]; $cws->{'blocked'} = [ sort {$a->{'arch'} cmp $b->{'arch'} || $a cmp $b} @{$cws->{'blocked'}} ]; $cws->{'buildavg'} = [ sort {$a->{'arch'} cmp $b->{'arch'} || $a cmp $b} @{$cws->{'buildavg'}} ]; } add_clouduploadserverstatus($cgi, $cws); add_serviceserverstatus($cgi, $cws); add_localworkerstatus($cgi, $cws, 'deltastore', "$rundir/bs_deltastore.lock"); add_localworkerstatus($cgi, $cws, 'servicedispatch', "$rundir/bs_servicedispatch.lock"); add_localworkerstatus($cgi, $cws, 'srcserver'); return ($cws, $BSXML::workerstatus); } sub getajaxstatus { my ($cgi) = @_; BSHandoff::handoff('/ajaxstatus') if !$BSStdServer::isajax; my $r = BSWatcher::getstatus(); return ($r, $BSXML::ajaxstatus); } #################################################################### sub search_proj { my ($cgi, $match, $id) = @_; $match =~ s/^\[(.*)\]$/$1/s; my $data = []; for my $projid (findprojects()) { my $proj = BSRevision::readproj_local($projid); push @$data, $proj; } $data = BSXPath::match($data, $match); if ($id) { for (@{$data || []}) { $_ = {'name' => $_->{'name'}}; } } my $res = {'project' => $data}; return ($res, $BSXML::collection); } sub pkgsearch_fetch { my ($db, $k) = @_; my ($projid, $packid) = split('/', $k, 2); my $pack = BSRevision::readpack_local($projid, $packid, 1) || {'name' => $packid}; $pack->{'project'} = $projid; #my @linkinfo = BSDBIndex::getvalues($db, 'linkinfo', $k); #$pack->{'linkinfo'} = $linkinfo[0] if @linkinfo; return $pack; } sub pkgsearch_indexfunc { my ($db, $path, $value, $lkeys) = @_; if (!defined($path)) { return @{$db->{'_allkeys'}} if $db->{'_allkeys'}; my @projids = findprojects(); my @r; for my $projid (@projids) { push @r, map {"$projid/$_"} BSRevision::lspackages_local($projid); } $db->{'_allkeys'} 
= \@r; return @r; } elsif (!defined($value)) { return BSDBIndex::getkeys($db, "$db->{'index'}$path") if $path =~ /^linkinfo\//; return findprojects() if $path eq 'project'; if ($path eq 'name') { $lkeys = [ pkgsearch_indexfunc($db) ] unless $lkeys; my %v = map {$_ => 1} grep {s/^.*\///} map {$_} @$lkeys; return sort keys %v; } } else { return BSDBIndex::getvalues($db, "$db->{'index'}$path", $value) if $path =~ /^linkinfo\//; return map {"$value/$_"} BSRevision::lspackages_local($value) if $path eq 'project'; if ($path eq 'name') { $lkeys = [ pkgsearch_indexfunc($db) ] unless $lkeys; return grep {/\Q$value\E$/} @$lkeys; } } return (); } sub search_pack { my ($cgi, $match, $id) = @_; $match =~ s/^\[(.*)\]$/$1/s; # really ugly hack to speed up needed api call if ($match =~ /^\@project='(.+)' and starts-with\(\@name,'(.+)'\)$/) { my $projid = $1; my $startswith = $2; $projid =~ s/''/'/g; $startswith =~ s/''/'/g; my @packages = BSRevision::lspackages_local($projid); my $data = []; for my $packid (grep {/^\Q$startswith\E/} @packages) { my ($pack, undef) = getpackage($cgi, $projid, $packid); $pack->{'project'} = $projid; push @$data, $pack; } my $res = {'package' => $data}; return ($res, $BSXML::collection); } my $db = BSDB::opendb($sourcedb, ''); $db->{'indexfunc'} = { 'project' => \&pkgsearch_indexfunc, 'name' => \&pkgsearch_indexfunc, 'linkinfo/project' => \&pkgsearch_indexfunc, 'linkinfo/package' => \&pkgsearch_indexfunc, 'linkinfo/rev' => \&pkgsearch_indexfunc, }; $db->{'noindexatall'} = 1; $db->{'fetch'} = \&pkgsearch_fetch; my $data = BSXPathKeys::node($db, ''); if ($id) { $data = $data->keymatch($match); for (@$data) { my @p = split('/', $_, 2); $_ = {'name' => $p[1], 'project' => $p[0]}; } } else { $data = BSXPath::match($data, $match); delete $_->{'linkinfo'} for @$data; } my $res = {'package' => $data}; return ($res, $BSXML::collection); } sub search_proj_id { return search_proj(@_, 1); } sub search_pack_id { return search_pack(@_, 1); } ############################################################################# sub search_published_updatedb { my ($cgi) = @_; die("unknown command '$cgi->{'cmd'}'\n") unless $cgi->{'cmd'} eq 'updatedb'; my $data = BSServer::read_data(); $data = Storable::thaw($data); die("no data\n") unless $data && @$data; my $patterndb; my $binarydb; my $repoinfodb; mkdir_p($extrepodb) unless -d $extrepodb; while (@$data) { my ($w, $k, $v) = splice(@$data, 0, 3); if ($w eq 'binary') { $binarydb = BSDB::opendb($extrepodb, 'binary') unless $binarydb; $binarydb->updateindex_rel($k || [], $v || []); } elsif ($w eq 'pattern') { $patterndb = BSDB::opendb($extrepodb, 'pattern') unless $patterndb; $patterndb->store($k, $v); } elsif ($w eq 'repoinfo') { if (!$repoinfodb) { $repoinfodb = BSDB::opendb($extrepodb, 'repoinfo'); $repoinfodb->{'noindexatall'} = 1; } $repoinfodb->store($k, $v); } else { die("bad data type: '$w'\n"); } } return $BSStdServer::return_ok; } #sub search_published_id { # my ($cgi, $what, $match) = @_; # my $res; # for my $rrserver ($BSConfig::reposerver) { # $res = BSRPC::rpc("$rrserver/search/published/$what/id", $BSXML::collection, "match=$match"); # last if $res; # } # return ($res, $BSXML::collection); #} # #sub search_published_binary_id { # return search_published_id($_[0], 'binary', $_[1]); #} # #sub search_published_pattern_id { # return search_published_id($_[0], 'pattern', $_[1]); #} my %prp_to_repoinfo; sub prp_to_repoinfo { my ($prp) = @_; my $repoinfo = $prp_to_repoinfo{$prp}; if (!$repoinfo) { my $repoinfodb = BSDB::opendb($extrepodb, 
'repoinfo'); $repoinfo = $repoinfodb->fetch($prp); if ($repoinfo) { for (@{$repoinfo->{'prpsearchpath'} || []}) { next if ref($_); # legacy my ($p, $r) = split('/', $_, 2); $_ = {'project' => $p, 'repository' => $r}; } } else { $repoinfo = {'binaryorigins' => {}}; } $prp_to_repoinfo{$prp} = $repoinfo; } return $repoinfo; } sub binary_key_to_data { my ($db, $key) = @_; my @p = split('/', $key); my $binary = pop(@p); my $name = $binary; my $versrel = ''; if ($name =~ s/-([^-]+-[^-]+)\.[^\.]+\.rpm$//) { $versrel = $1; } elsif ($name =~ s/_([^_]+)_[^_]+\.deb$//) { $versrel = $1; } elsif ($name =~ s/-([^-]+-[^-]+)-[^-]+\.pkg\.tar\..z$//) { $versrel = $1; } my ($version, $release) = ($versrel, undef); ($version, $release) = ($1, $2) if $version =~ /^(.*)-(.*?)$/; my $arch = pop(@p); $arch = $1 if ($arch eq 'RPMS' || $arch eq 'SRPMS') && $binary =~ /\.([^\.]+)\.rpm$/; while (@p > 1 && $p[0] =~ /:$/) { splice(@p, 0, 2, "$p[0]$p[1]"); } my $project = shift(@p); while (@p > 1 && $p[0] =~ /:$/) { splice(@p, 0, 2, "$p[0]$p[1]"); } my $repository = shift(@p); my $prp = "$project/$repository"; my $repoinfo = $prp_to_repoinfo{$prp} || prp_to_repoinfo($prp); my $type; $type = 'rpm' if $binary =~ /\.rpm$/; $type = 'deb' if $binary =~ /\.deb$/; $type = 'arch' if $binary =~ /\.pkg\.tar\..z$/; my $res = { 'name' => $name, 'versrel' => $versrel, 'version' => $version, 'arch' => $arch, 'type' => $type, 'project' => $project, 'repository' => $repository, 'filename' => $binary, 'filepath' => $key, }; $res->{'release'} = $release if defined $release; $res->{'path'} = $repoinfo->{'prpsearchpath'} if $repoinfo->{'prpsearchpath'}; my $location = join('/', @p, $arch, $binary); $res->{'package'} = $repoinfo->{'binaryorigins'}->{$location} if defined $repoinfo->{'binaryorigins'}->{$location}; if ($repoinfo->{'base'}) { $res->{'baseproject'} = $repoinfo->{'base'}->{'project'}; } elsif ($res->{'path'}) { $res->{'baseproject'} = $res->{'path'}->[-1]->{'project'}; } return $res; } sub binary_key_to_project { my ($db, $key) = @_; my @p = split('/', $key); while (@p > 1 && $p[0] =~ /:$/) { splice(@p, 0, 2, "$p[0]$p[1]"); } return shift @p; } sub pattern_key_to_data { my ($db, $key) = @_; my @p = split('/', $key); my $filename = pop(@p); while (@p > 1 && $p[0] =~ /:$/) { splice(@p, 0, 2, "$p[0]$p[1]"); } my $project = shift(@p); while (@p > 1 && $p[0] =~ /:$/) { splice(@p, 0, 2, "$p[0]$p[1]"); } my $repository = shift(@p); my @v = BSDBIndex::getvalues($db, $db->{'table'}, $key); return {} unless @v; my $res = $v[0]; $res->{'baseproject'} = $res->{'path'}->[-1]->{'project'} if $res->{'path'}; $res->{'project'} = $project; $res->{'repository'} = $repository; $res->{'filename'} = $filename; $res->{'filepath'} = $key; return $res; } sub published_projectindexfunc { my ($db, $path, $value) = @_; return findprojects() unless defined $value; my $proj = BSRevision::readproj_local($value); return () unless $proj; my @repoids = map {$_->{'name'}} @{$proj->{'repository'} || []}; my %bins; for my $repoid (@repoids) { my $prp = "$value/$repoid"; my $prp_ext = $prp; $prp_ext =~ s/:/:\//g; my $repoinfo = $prp_to_repoinfo{$prp} || prp_to_repoinfo($prp); for (keys %{$repoinfo->{'binaryorigins'} || {}}) { next unless /\//; # keep in sync with updatebinaryindex in bs_publish next unless /\.(?:rpm|deb|pkg\.tar\..z)$/; $bins{"$prp_ext/$_"} = 1; } } return sort keys %bins; } sub search_add_downloadurl { my ($data) = @_; for my $d (@$data) { my @p = split('/', $d->{'filepath'}); my $downloadurl = 
BSUrlmapper::get_downloadurl("$d->{'project'}/$d->{'repository'}"); next unless $downloadurl; $downloadurl =~ s/\/$//; $d->{'downloadurl'} = "$downloadurl/".join('/', splice(@p, -2)); } } sub search_published_binary_id { my ($cgi, $match) = @_; my $binarydb = BSDB::opendb($extrepodb, 'binary'); $binarydb->{'allkeyspath'} = 'name'; $binarydb->{'noindex'} = {'version' => 1, 'release' => 1, 'versrel' => 1, 'arch' => 1, 'project' => 1, 'repository' => 1, 'package' => 1, 'type' => 1, 'path/project' => 1, 'path/repository' => 1, 'baseproject' => 1}; $binarydb->{'indexfunc'} = {'project' => \&published_projectindexfunc }; $binarydb->{'fetch'} = \&binary_key_to_data; $binarydb->{'fetch_project'} = \&binary_key_to_project; $binarydb->{'cheapfetch'} = 1; my $limit = defined($cgi->{'limit'}) ? $cgi->{'limit'} : 1000; my $rootnode = BSXPathKeys::node($binarydb, '', $limit && $limit < 10 ? 1000 : $limit * 100); my $data = BSXPath::match($rootnode, $match) || []; # epoch? @$data = sort {Build::Rpm::verscmp($b->{'version'}, $a->{'version'}) || $a->{'name'} cmp $b->{'name'} || $a->{'arch'} cmp $b->{'arch'}} @$data; delete $_->{'versrel'} for @$data; my $res = {}; $res->{'matches'} = @$data; $res->{'limited'} = 'true' if $limit && @$data > $limit; splice(@$data, $limit) if $limit && @$data > $limit; delete $_->{'path'} for @$data; search_add_downloadurl($data) if $cgi->{'withdownloadurl'}; $res->{'binary'} = $data; return ($res, $BSXML::collection); } sub search_published_pattern_id { my ($cgi, $match) = @_; my $patterndb = BSDB::opendb($extrepodb, 'pattern'); $patterndb->{'noindex'} = {'project' => 1, 'repository' => 1}; $patterndb->{'fetch'} = \&pattern_key_to_data; my $limit = defined($cgi->{'limit'}) ? $cgi->{'limit'} : 1000; my $rootnode = BSXPathKeys::node($patterndb, '', $limit && $limit < 10 ? 1000 : $limit * 100); my $data = BSXPath::match($rootnode, $match) || []; my $res = {}; $res->{'matches'} = @$data; $res->{'limited'} = 'true' if $limit && @$data > $limit; splice(@$data, $limit) if $limit && @$data > $limit; for (@$data) { delete $_->{'path'}; delete $_->{'description'}; delete $_->{'summary'}; } search_add_downloadurl($data) if $cgi->{'withdownloadurl'}; $res->{'pattern'} = $data; return ($res, $BSXML::collection); } ############################################################################# sub search { my ($cgi, $in, $match) = @_; # gather all data my $data = []; if ($in eq 'projects') { for my $projid (findprojects()) { my $proj = BSRevision::readproj_local($projid); push @$data, $proj; } } elsif ($in eq 'packages') { for my $projid (findprojects()) { my @packages = BSRevision::lspackages_local($projid); for my $packid (@packages) { my ($pack, undef) = getpackage($cgi, $projid, $packid); $pack->{'project'} = $projid; push @$data, $pack; } } } else { die("'in' parameter needs to be either 'projects' or 'packages'\n"); } my $res; if ($cgi->{'values'}) { $data = BSXPath::valuematch($data, $match); $res = {'value' => $data}; } else { $data = BSXPath::match($data, $match); if (exists $cgi->{'return'}) { $data = BSXPath::valuematch($data, $cgi->{'return'}); $res = {'value' => $data}; } elsif ($in eq 'projects') { $res = {'project' => $data}; } else { $res = {'package' => $data}; } } return ($res, $BSXML::collection); } sub postrepo { my ($cgi, $projid, $repoid, $arch) = @_; my $reposerver = $BSConfig::partitioning ? 
BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver; my $param = { 'uri' => "$reposerver/build/$projid/$repoid/$arch/_repository", 'request' => 'POST', }; my $res = BSWatcher::rpc($param, $BSXML::collection, "match=$cgi->{'match'}"); return ($res, $BSXML::collection); } #################################################################### sub service { my ($cgi, $service) = @_; die("404 no such service '$service'\n") unless $BSConfig::serviceserver; return BSWatcher::rpc("$BSConfig::serviceserver/service/$service", undef); } sub listservices { my ($cgi) = @_; return "<servicelist />\n" unless $BSConfig::serviceserver; return BSWatcher::rpc("$BSConfig::serviceserver/service", undef); } #################################################################### sub published { my ($cgi, $projid, $repoid, $arch, $filename, $subfilename) = @_; my $projpack; die("unknown view '$cgi->{'view'}'\n") if $cgi->{'view'} && $cgi->{'view'} ne 'ymp' && $cgi->{'view'} ne 'fileinfo'; if (defined($projid) && defined($repoid) && $cgi->{'view'} && $cgi->{'view'} eq 'ymp') { # attach projpack data so that the repo server does not need to # reconnect to us $projpack = (getprojpack({'nopackages' => 1, 'withrepos' => 1, 'expandedrepos' => 1}, [ $projid ], [ $repoid ], undef, 'noarch'))[0]; my $proj = $projpack->{'project'}->[0]; die("no such project\n") unless $proj && $proj->{'name'} eq $projid; my $repo = $proj->{'repository'}->[0]; die("no such repository\n") unless $repo && $repo->{'name'} eq $repoid; $projpack->{'project'} = [ $proj ]; my @nprojids = grep {$_ ne $projid} map {$_->{'project'}} @{$repo->{'path'} || []}; @nprojids = BSUtil::unify(@nprojids); for my $nprojid (@nprojids) { my $nproj = (getproject({}, $nprojid))[0]; push @{$projpack->{'project'}}, { 'name' => $nprojid, 'title' => $nproj->{'title'} || '', 'description' => $nproj->{'description'} || '', }; } } my @args; push @args, "view=$cgi->{'view'}" if $cgi->{'view'}; my $p = "/published"; $p .= "/$projid" if defined $projid; $p .= "/$repoid" if defined $repoid; $p .= "/$arch" if defined $arch; $p .= "/$filename" if defined $filename; $p .= "/$subfilename" if defined $subfilename; if (defined($projid) || !$BSConfig::partitioning || !$BSConfig::partitionservers) { my $reposerver = $BSConfig::partitioning ?
BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver; my $param = { 'uri' => "$reposerver$p", 'ignorestatus' => 1, 'receiver' => \&BSServer::reply_receiver, }; if ($projpack) { $param->{'request'} = 'POST'; $param->{'data'} = BSUtil::toxml($projpack, $BSXML::projpack); $param->{'headers'} = [ 'Content-Type: application/octet-stream' ]; } BSWatcher::rpc($param, undef, @args); return undef; } my %pubprojids; my @reposervers = BSSrcServer::Partition::allreposervers(); for my $reposerver (@reposervers) { my $res; eval { $res = BSWatcher::rpc("$reposerver/published", $BSXML::dir, @args); }; warn($@) if $@; next unless $res; $pubprojids{$_->{'name'}} = 1 for @{$res->{'entry'} || []}; } my @res = sort(keys %pubprojids); @res = map {{'name' => $_}} @res; return ({'entry' => \@res}, $BSXML::dir); } sub published_path { my ($cgi, $projid, $repoid) = @_; my $medium = $cgi->{'medium'}; my $prp = "$projid/$repoid"; my $ret = {'project' => $projid, 'repository' => $repoid}; $ret->{'medium'} = $medium if $medium; # update to get fresh version of repodownload BSConfiguration::check_configuration_once(); my ($path, $url) = BSUrlmapper::get_path_downloadurl($prp); if ($cgi->{'filename'}) { # called from getbinary my $bin = $cgi->{'filename'}; my $p; if ($bin =~ /^.+-[^-]+-[^-]+\.([a-zA-Z][^\/\.\-]*)\.d?rpm$/) { $p = "$1/$bin"; } elsif ($bin =~ /^.+_[^_]+_([^_\.]+)\.deb$/) { $p = "$1/$bin"; } elsif ($bin =~ /\.exe$/) { $p = $bin; } elsif ($bin =~ /\.(?:pkg\.tar\.gz|pkg\.tar\.xz)$/) { $p = ($cgi->{'arch'} eq 'i586' ? 'i686' : $cgi->{'arch'})."/$bin"; } elsif ($bin =~ /\.iso(?:\.report)?$/) { $p = "iso/$bin"; } elsif ($bin =~ /-Media\d+$/) { $medium = $bin; } if (defined($p) && !$medium) { $path .= "/$p" if defined $path; $url .= BSRPC::urlencode($p) if defined $url; } } if ($medium && $medium =~ /\.iso$/) { $medium = "iso/$medium"; } elsif ($medium) { my @path = expandsearchpath($projid, $repoid); my $c = concatconfigs($projid, $cgi->{'repository'}, undef, @path); my $bconf = Build::read_config('noarch', [ split("\n", $c) ]); my %repotype; for (@{$bconf->{'repotype'} || []}) { if (/^(.*?):(.*)$/) { $repotype{$1} = [ split(':', $2) ]; } else { $repotype{$_} = []; } } if ($repotype{'slepool'}) { my $name = $repotype{'slepool'}->[0] || 'product'; if ($medium =~ /-Media1$/) { $medium = $name; } elsif ($medium =~ /-Media3$/) { $medium = "${name}_debug"; } elsif ($medium =~ /-Media2$/) { my $repoinfo; eval { $repoinfo = prp_to_repoinfo($prp); }; my $binaryorigins = ($repoinfo || {})->{'binaryorigins'}; $medium = $binaryorigins->{"${name}_source"} ?
"${name}_source" : "${name}_debug"; } } else { $medium = "repo/$medium"; } } if ($medium) { $path .= "/$medium" if defined $path; $url .= BSRPC::urlencode($medium) if defined $url; } $url =~ s/([^\/])\/$/$1/ if defined $url; # compat $ret->{'path'} = $path if defined $path; $ret->{'url'} = $url if defined $url; return ($ret, $BSXML::publishedpath); } #################################################################### sub autoextend_check { my ($projid, $pk) = @_; return $pk unless $pk; my $ex = 0; eval { $ex = BSPGP::pk2expire(BSPGP::unarmor($pk)) }; if ($ex && $ex < time() + 14 * 24 * 3600) { extendkey({'comment' => 'auto-extend public key expiry date'}, $projid); $pk = readstr("$projectsdir/$projid.pkg/_pubkey", 1); } return $pk; } sub getsignkey { my ($cgi, $projid) = @_; while ($projid ne '') { my $sk = readstr("$projectsdir/$projid.pkg/_signkey", 1); if ($sk) { if ($cgi->{'withpubkey'} || $cgi->{'withalgo'}) { my $pk = readstr("$projectsdir/$projid.pkg/_pubkey", 1); $pk = autoextend_check($projid, $pk) if $cgi->{'withpubkey'} && $cgi->{'autoextend'}; if ($cgi->{'withalgo'} && $sk !~ /^\S+:/) { my $algo = '?'; if ($pk) { eval { $algo = BSPGP::pk2algo(BSPGP::unarmor($pk)) }; } $sk = "$algo:$sk"; } if ($cgi->{'withpubkey'}) { $sk .= "\n" unless $sk =~ /\n$/s; $sk .= $pk if defined $pk; } } return ($sk, 'Content-Type: text/plain') if $sk; } $projid =~ s/[^:]*$//; $projid =~ s/:$//; } return ('', 'Content-Type: text/plain'); } sub projid2sslcert { my ($projid, $origprojid, $sk, $nocreate) = @_; my $rev = BSRevision::getrev_meta($projid, undef); my $files = BSRevision::lsrev($rev); if (!$files->{'_sslcert'}) { return undef if $nocreate; # length(signkey) <= 2 means special handling, don't commit it if (!$sk || length($sk) <= 2) { return pubkey2sslcert($origprojid || $projid, "$projectsdir/$projid.pkg/_pubkey"); } my $cert = pubkey2sslcert($projid); mkdir_p($uploaddir); writestr("$uploaddir/sslcert.$$", undef, $cert); BSRevision::addrev_meta_replace({'comment' => 'automatic cert creation'}, $projid, undef, [ "$uploaddir/sslcert.$$", undef, '_sslcert']); return $cert; } return BSRevision::revreadstr($rev, '_sslcert', $files->{'_sslcert'}); } sub getsslcert { my ($cgi, $projid) = @_; my $origprojid = $projid; while ($projid ne '') { my $sk = readstr("$projectsdir/$projid.pkg/_signkey", 1); if (!$sk) { $projid =~ s/[^:]*$//; $projid =~ s/:$//; next; } my $pk = readstr("$projectsdir/$projid.pkg/_pubkey", 1); $pk = autoextend_check($projid, $pk) if $cgi->{'autoextend'}; my $cert = projid2sslcert($projid, $origprojid, $sk); return ($cert, 'Content-Type: text/plain'); } if ($BSConfig::sign_project && $BSConfig::sign) { # request default cert my $cert = ''; local *F; open(F, '-|', $BSConfig::sign, '--project', $origprojid, '-C') || die("$BSConfig::sign: $!\n"); 1 while sysread(F, $cert, 4096, length($cert)); close(F) || die("$BSConfig::sign: $?\n"); return ($cert, 'Content-Type: text/plain'); } return ('', 'Content-Type: text/plain'); } sub pubkeyinfo { my ($pk) = @_; my $algo; my $keysize; my $fingerprint; my $expire; eval { my $pku = BSPGP::unarmor($pk); eval { $algo = BSPGP::pk2algo($pku) }; eval { $keysize = BSPGP::pk2keysize($pku) }; eval { $fingerprint = BSPGP::pk2fingerprint($pku) }; eval { $expire = BSPGP::pk2expire($pku) }; }; my $pubkey = { '_content' => $pk }; $pubkey->{'algo'} = $algo if $algo; $pubkey->{'keysize'} = $keysize if $keysize; if ($fingerprint) { $pubkey->{'keyid'} = substr($fingerprint, -8, 8); $fingerprint =~ s/(....)/$1 /g; $fingerprint =~ s/ $//; $pubkey->{'fingerprint'} 
= $fingerprint; } $pubkey->{'expires'} = $expire if $expire; return $pubkey; } sub getkeyinfo { my ($cgi, $projid) = @_; my $origprojid = $projid; my ($sk, $pk, $cert); while ($projid ne '') { $sk = readstr("$projectsdir/$projid.pkg/_signkey", 1); last if $sk; $projid =~ s/[^:]*$//; $projid =~ s/:$//; } if ($projid) { $pk = readstr("$projectsdir/$projid.pkg/_pubkey", 1); undef $pk if $pk && length($pk) <= 2; $pk = autoextend_check($projid, $pk) if $cgi->{'autoextend'}; } if ($cgi->{'withsslcert'} && $projid && $pk) { $cert = projid2sslcert($projid, $origprojid, $sk, $cgi->{'donotcreatecert'}); } if ($cgi->{'withsslcert'} && !$projid && $BSConfig::sign_project && $BSConfig::sign) { local *S; open(S, '-|', $BSConfig::sign, '--project', $origprojid, '-C') || die("$BSConfig::sign: $!\n"); $cert = ''; 1 while sysread(S, $cert, 4096, length($cert)); $cert = undef unless close(S); } if (!$pk && $BSConfig::sign_project && $BSConfig::sign) { local *S; open(S, '-|', $BSConfig::sign, '--project', $origprojid, '-p') || die("$BSConfig::sign: $!\n"); $pk = ''; 1 while sysread(S, $pk, 4096, length($pk)); $pk = undef unless close(S); } elsif (!$pk && $BSConfig::keyfile) { $pk = readstr($BSConfig::keyfile, 1); } my $keyinfo = {}; $keyinfo->{'project'} = $projid if $projid; $keyinfo->{'pubkey'} = pubkeyinfo($pk) if $pk; $keyinfo->{'sslcert'} = $cert if $cert; return ($keyinfo, $BSXML::keyinfo); } #################################################################### # next two functions needed for migrations to 2.4 sub getlastidrequest { my $lastid = (readstr("$requestsdir/.nextid", 1) || 0) - 1; return ("$lastid\n", 'Content-Type: text/plain'); } sub getrequest { my ($cgi, $id) = @_; local *F; if (!open(F, '<', "$requestsdir/$id") && !open(F, '<', "$oldrequestsdir/$id")) { die("404 no such request '$id'\n"); } my $reqxml = ''; 1 while sysread(F, $reqxml, 8192, length($reqxml)); close F; return ($reqxml, 'Content-Type: text/xml'); } #################################################################### sub findremote { my ($projid) = @_; my $proj = BSRevision::readproj_local($projid, 1); $proj = BSSrcServer::Remote::remoteprojid($projid) if !$proj || $proj->{'remoteurl'}; die("404 project '$projid' does not exist\n") unless $proj; if (!$proj->{'remoteurl'}) { $proj->{'remoteurl'} = $BSConfig::partitioning ? 
BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver; $proj->{'remoteproject'} = $projid; $proj->{'remoteproxy'} = undef; } return $proj; } sub worker_getbinaries { my ($cgi, $projid, $repoid, $arch) = @_; if (!$BSStdServer::isajax) { my @args; push @args, "project=$projid"; push @args, "repository=$repoid"; push @args, "arch=$arch"; push @args, "binaries=$cgi->{'binaries'}"; BSHandoff::handoff('/getbinaries', undef, @args); } my @binaries = split(',', $cgi->{'binaries'}); my $proj = findremote($projid); my $binarylist = BSSrcServer::Remote::getremotebinarylist($proj, $projid, $repoid, $arch, \@binaries); return undef unless $binarylist; my $reply = BSSrcServer::Remote::getremotebinaries($proj, $projid, $repoid, $arch, \@binaries, $binarylist); return undef unless $reply; if ($cgi->{'raw'}) { die("can only transport one binary in raw mode\n") unless @$reply == 1; my $f = $reply->[0]; die("$f->{'name'}: $f->{'error'}\n") if $f->{'error'}; die("$f->{'name'}: not found\n") unless $f->{'filename'}; BSWatcher::reply_file($f->{'filename'}); return undef; } BSWatcher::reply_cpio($reply); return undef; } sub worker_getbinaryversions { my ($cgi, $projid, $repoid, $arch) = @_; if (!$BSStdServer::isajax) { my @args; push @args, "project=$projid"; push @args, "repository=$repoid"; push @args, "arch=$arch"; push @args, "binaries=$cgi->{'binaries'}"; push @args, "nometa=1" if $cgi->{'nometa'}; BSHandoff::handoff('/getbinaryversions', undef, @args); } my @binaries = split(',', $cgi->{'binaries'}); my $proj = findremote($projid); my $binaryversions = BSSrcServer::Remote::getremotebinaryversions($proj, $projid, $repoid, $arch, \@binaries); return undef unless $binaryversions; my $bvl = {}; $bvl->{'binary'} = [ map {$binaryversions->{$_}} @binaries]; return ($bvl, $BSXML::binaryversionlist); } #################################################################### # this is shared for AJAX requests my @lastev_cache; my @lastev_stat; sub lastevents { my ($cgi, $filter) = @_; if (!$cgi->{'start'}) { # just fetch the current event number my $lastev = BSFileDB::fdb_getlast("$eventdir/lastevents", $eventlay); my $lastno = $lastev ? $lastev->{'number'} : 0; my $ret = {'next' => $lastno, 'sync' => 'lost'}; return ($ret, $BSXML::events); } if (!$BSStdServer::isajax) { my @args; push @args, "obsname=$cgi->{'obsname'}" if $cgi->{'obsname'}; push @args, map {"filter=$_"} @{$filter || []}; push @args, "start=$cgi->{'start'}"; BSHandoff::handoff('/lastevents', undef, @args); } BSWatcher::addfilewatcher("$eventdir/lastevents", 120); my @s = stat("$eventdir/lastevents"); my @events; my ($firstno, $nextno); if (@s && @lastev_stat && "$s[9]/$s[7]/$s[1]" eq "$lastev_stat[9]/$lastev_stat[7]/$lastev_stat[1]") { @events = @lastev_cache; } else { my $lastev = BSFileDB::fdb_getlast("$eventdir/lastevents", $eventlay); push @events, $lastev if $lastev; @lastev_cache = @events; @lastev_stat = @s; } $firstno = @events ? $events[0]->{'number'} : 0; $nextno = @events ? $events[-1]->{'number'} + 1 : 1; if ($cgi->{'start'} < $firstno) { # get last 5 @events = BSFileDB::fdb_getall_reverse("$eventdir/lastevents", $eventlay, 5); @events = reverse @events; @lastev_cache = @events; @lastev_stat = @s; $firstno = @events ? $events[0]->{'number'} : 0; $nextno = @events ? 
$events[-1]->{'number'} + 1 : 1; } if ($cgi->{'start'} < $firstno) { my $cnt = $nextno - $cgi->{'start'}; if ($cnt > 5) { @events = BSFileDB::fdb_getall_reverse("$eventdir/lastevents", $eventlay, $cnt); @events = reverse @events; if (@events < 20) { @lastev_cache = @events; @lastev_stat = @s; } $firstno = @events ? $events[0]->{'number'} : 0; $nextno = @events ? $events[-1]->{'number'} + 1 : 1; } } if ($cgi->{'start'} < $firstno) { # we have to get them all @events = BSFileDB::fdb_getall("$eventdir/lastevents", $eventlay); # re-calculate in case something has changed $firstno = @events ? $events[0]->{'number'} : 0; $nextno = @events ? $events[-1]->{'number'} + 1 : 1; if ($firstno > $cgi->{'start'}) { # out of sync! return ({'next' => $nextno, 'sync' => 'lost'}, $BSXML::events); } } # filter @events = grep {$_->{'number'} >= $cgi->{'start'}} @events; if ($filter && @events) { my %filter = map {$_ => 1} @$filter; for my $ev (splice @events) { if ($ev->{'type'} eq 'package') { next unless defined $ev->{'package'}; next unless $filter{"package/$ev->{'project'}/$ev->{'package'}"} || $filter{"package/$ev->{'project'}"}; } elsif ($ev->{'type'} eq 'project') { next unless $filter{"project/$ev->{'project'}"}; } elsif ($ev->{'type'} eq 'repository' || $ev->{'type'} eq 'repoinfo') { next unless $filter{"repository/$ev->{'project'}/$ev->{'repository'}/$ev->{'arch'}"} || $filter{"repository/$ev->{'project'}/$ev->{'repository'}"}; } else { next; } push @events, $ev; } } # return a sync reply every 100 events / 5 minutes for two reasons # - get rid of old peers # - survive history truncation $cgi->{'start_orig'} ||= $cgi->{'start'}; $cgi->{'req_time'} ||= time(); if ($BSStdServer::isajax && !@events && $nextno < $cgi->{'start_orig'} + 100 && time() < $cgi->{'req_time'} + 300) { # small hack: update cgi to the next event number $cgi->{'start'} = $nextno if $cgi->{'start'} < $nextno; return undef; } for (@events) { $_ = { %$_ }; # clone em # delete unwanted fields delete $_->{'time'}; delete $_->{'number'}; # clean up a bit delete $_->{'package'} unless defined($_->{'package'}) && $_->{'package'} ne ''; } my $ret = {'next' => $nextno}; $ret->{'event'} = \@events if @events; return ($ret, $BSXML::events); } # # add an event to the "lastevents" queue used in the build service # interconnect implementation # sub addevent { my ($ev) = @_; # check the "access" flag. if the project has access turned # off, do not add it to lastevents. 
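# (illustrative note: an event here is a plain hash; a hypothetical package
# event would look roughly like
#   { 'type' => 'package', 'project' => 'home:user', 'package' => 'pkg' }
# with 'time' stamped below and 'number' presumably assigned by
# BSFileDB::fdb_add_i according to $eventlay)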
# XXX: maybe better to add a "noaccess" marker to the event # and filter in the request if (defined($ev->{'project'})) { my $access = 1; my $proj = BSRevision::readproj_local($ev->{'project'}, 1); if ($proj && $proj->{'access'}) { $access = BSUtil::enabled('', $proj->{'access'}, $access, ''); } # XXX: may also check packages in the future return unless $access; } $ev->{'time'} = time(); mkdir_p($eventdir); my $size = 262144; #keep at least 256k of data if (-s "$eventdir/lastevents" && -s _ >= $size * 2) { local *F; BSUtil::lockopen(\*F, '+>>', "$eventdir/lastevents"); my $events = readstr("$eventdir/lastevents"); if (length($events) >= $size * 2) { $events = substr($events, -$size); $events =~ s/^[^\n]*\n//s; writestr("$eventdir/.lastevents", "$eventdir/lastevents", $events); } close F; } BSFileDB::fdb_add_i("$eventdir/lastevents", $eventlay, $ev); } sub newevent { my ($cgi) = @_; my $ev = {}; for ('type', 'project', 'package', 'repository', 'arch', 'job') { $ev->{$_} = $cgi->{$_} if defined $cgi->{$_}; } addevent($ev); return $BSStdServer::return_ok; } #################################################################### sub getrelsync { my ($cgi, $projid, $repoid, $arch) = @_; checkprojrepoarch($projid, $repoid, $arch); my $reposerver = $BSConfig::partitioning ? BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver; my $param = { 'uri' => "$reposerver/build/$projid/$repoid/$arch/_relsync", 'ignorestatus' => 1, 'receiver' => \&BSServer::reply_receiver, }; BSWatcher::rpc($param, undef); return undef; } sub postrelsync { my ($cgi, $projid, $repoid, $arch) = @_; my $proj = checkprojrepoarch($projid, $repoid, $arch); my $repo = (grep {$_->{'name'} eq $repoid} @{$proj->{'repository'} || []})[0]; my $relsyncdata = BSServer::read_data(10000000); for my $a (@{$repo->{'arch'} || []}) { next if $a eq $arch; next if $BSConfig::relsync_pool && ($BSConfig::relsync_pool->{$arch} || '') ne ($BSConfig::relsync_pool->{$a} || ''); my $reposerver = $BSConfig::partitioning ? BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver; my $param = { 'uri' => "$reposerver/build/$projid/$repoid/$a/_relsync", 'request' => 'POST', 'data' => $relsyncdata, }; eval { BSRPC::rpc($param); }; if ($@) { warn($@); } } return $BSStdServer::return_ok; } #################################################################### # XXX: support multiple dispatchers sub putdispatchprios { my ($cgi) = @_; my $dispatcher = $BSConfig::masterdispatcher || $BSConfig::reposerver; my $param = { 'uri' => "$dispatcher/build/_dispatchprios", 'request' => 'PUT', 'data' => \&BSServer::forward_sender, 'chunked' => 1, }; return BSWatcher::rpc($param, undef); } sub getdispatchprios { my ($cgi) = @_; my $dispatcher = $BSConfig::masterdispatcher || $BSConfig::reposerver; my $param = { 'uri' => "$dispatcher/build/_dispatchprios", 'ignorestatus' => 1, 'receiver' => \&BSServer::reply_receiver, }; BSWatcher::rpc($param, undef); return undef; } #################################################################### sub sourceinfo { my ($cgi, $projid, $packid, $bconf) = @_; my $r = {'package' => $packid}; my $linked = []; my $rev; my $files; eval { $rev = getrev($projid, $packid, defined($cgi->{'rev'}) ? 
$cgi->{'rev'} : 'build', $linked); $r->{'srcmd5'} = $rev->{'srcmd5'} if $rev->{'srcmd5'} ne 'empty'; $r->{'rev'} = $rev->{'rev'} if $rev->{'rev'}; $r->{'vrev'} = $rev->{'vrev'} if $rev->{'vrev'}; if (!$rev || $rev->{'srcmd5'} eq 'empty' || $rev->{'srcmd5'} eq $BSSrcrep::emptysrcmd5) { die("no source uploaded\n") unless $cgi->{'nofilename'}; $rev = {'srcmd5' => $BSSrcrep::emptysrcmd5, 'project' => $projid, 'package' => $packid }; } my $linkinfo = {}; $files = BSRevision::lsrev($rev, $linkinfo); if ($linkinfo->{'xservicemd5'}) { $files = BSSrcServer::Service::handleservice($rev, $files, $linkinfo->{'xservicemd5'}); $r->{'srcmd5'} = $rev->{'srcmd5'}; } my $meta = ''; $meta .= "$files->{$_} $_\n" for sort keys %$files; $r->{'verifymd5'} = Digest::MD5::md5_hex($meta); die("source update running\n") if $files->{'_service'} && -e "$eventdir/service/${projid}::$packid"; die("source update failed\n") if $files->{'_service_error'}; }; $r->{'originproject'} = $rev->{'originproject'} if $rev && $rev->{'originproject'}; $r->{'originpackage'} = $rev->{'package'} if $rev && $rev->{'originpackage'}; $r->{'linked'} = $linked if @$linked; if ($@) { $r->{'error'} = $@; $r->{'error'} =~ s/\n$//s; return $r; } if ($files->{'_link'}) { $rev->{'linkrev'} = $cgi->{'linkrev'} if $cgi->{'linkrev'}; eval { $files = BSSrcServer::Link::handlelinks($rev, $files, {'linked' => $linked}); }; if ($@) { $files = "$@"; $files =~ s/\n$//; } $r->{'linked'} = $linked if @$linked; $r->{'vrev'} = $rev->{'vrev'} if $rev->{'vrev'}; if (!ref $files) { $r->{'error'} = $files || 'could not get file list'; return $r; } $r->{'lsrcmd5'} = $r->{'srcmd5'}; $r->{'srcmd5'} = $rev->{'srcmd5'}; my $meta = ''; $meta .= "$files->{$_} $_\n" for sort keys %$files; $r->{'verifymd5'} = Digest::MD5::md5_hex($meta); } if ($cgi->{'withchangesmd5'}) { $r->{'revtime'} = $rev->{'time'}; $r->{'changesmd5'} = $files->{"$packid.changes"} if $files->{"$packid.changes"}; } if (!$cgi->{'rev'} && !$cgi->{'linkrev'}) { BSSrcServer::Multibuild::updatemultibuild($projid, $packid, $files); if ($rev->{'originpackage'}) { my $mb = BSSrcServer::Multibuild::getmultibuild($projid, $rev->{'package'}) || {}; if (!grep {"$rev->{'package'}:$_" eq $packid} @{$mb->{'flavor'} || $mb->{'package'} || []}) { $r->{'error'} = "package '$packid' does not exist"; return $r; } } } return $r if $cgi->{'nofilename'}; return $r if $packid eq '_pattern'; if ($files->{'_aggregate'}) { $r->{'filename'} = '_aggregate'; return $r; } elsif ($files->{'_patchinfo'}) { $r->{'filename'} = '_patchinfo'; return $r; } my $type = $bconf->{'type'}; my $file; if (!$type || $type eq 'UNDEFINED') { undef $type; for my $t ('spec', 'dsc', 'kiwi') { $file = findfile($rev, $cgi->{'repository'}, $t, $files); next unless defined $file; $type = $t; last; } } else { $file = findfile($rev, $cgi->{'repository'}, $type, $files); } if (!$type) { $r->{'error'} = 'bad build configuration, no build type defined or detected'; return $r; } if (!$file) { $r->{'error'} = "no file found for build type '$type'"; return $r; } $r->{'filename'} = $file; return $r unless $cgi->{'parse'}; my $buildtype = Build::recipe2buildtype($file); if (!$buildtype) { $r->{'error'} = "don't know how to build $file"; return $r; } local $bconf->{'obspackage'} = $packid; local $bconf->{'buildflavor'}; if ($packid =~ /(?<!^_product)(?<!^_patchinfo):./ && $packid =~ /^(.*):(.*?)$/) { $bconf->{'obspackage'} = $1; $bconf->{'buildflavor'} = $2; } my $d; eval { $d = Build::parse_typed($bconf, BSRevision::revfilename($rev, $file, $files->{$file}), $buildtype); }; if ($@) { $r->{'error'} = "parse error: $@"; $r->{'error'} =~
s/\n.*//s; return $r; } if (!$d) { $r->{'error'} = 'parse error'; return $r; } for (qw{name version release subpacks deps prereqs exclarch badarch}) { $r->{$_} = $d->{$_} if defined $d->{$_}; } return $r; } sub getprojectsourceinfo { my ($cgi, $projid) = @_; my $proj = checkprojrepoarch($projid, $cgi->{'repository'}, $cgi->{'arch'}, 1); my @packages = @{$cgi->{'package'} || []}; BSSrcServer::Projlink::enable_frozenlinks_cache(); @packages = findpackages($projid, $proj) unless @packages; my $bconf; if (!$cgi->{'nofilename'}) { if (!$cgi->{'repository'}) { my $cfile; $cfile = "$projectsdir/$projid.conf" if -e "$projectsdir/$projid.conf"; $bconf = Build::read_config($cgi->{'arch'} || 'noarch', $cfile); } else { my @path = expandsearchpath($projid, $cgi->{'repository'}); my $c = concatconfigs($projid, $cgi->{'repository'}, undef, @path); $bconf = Build::read_config($cgi->{'arch'} || 'noarch', [ split("\n", $c) ]); } } my @res; if (@packages > 1) { $BSSrcServer::Remote::collect_remote_getrev = 1; for my $packid (splice @packages) { my $r = sourceinfo($cgi, $projid, $packid, $bconf); if ($r->{'error'} && $r->{'error'} =~ /collect_remote_getrev$/) { push @packages, $packid; next; } push @res, $r; } $BSSrcServer::Remote::collect_remote_getrev = 0; BSSrcServer::Remote::fill_remote_getrev_cache(); } for my $packid (@packages) { push @res, sourceinfo($cgi, $projid, $packid, $bconf); } BSSrcServer::Projlink::disable_frozenlinks_cache(); return ({'sourceinfo' => \@res}, $BSXML::sourceinfolist); } sub getpackagesourceinfo { my ($cgi, $projid, $packid) = @_; checkprojrepoarch($projid, $cgi->{'repository'}, $cgi->{'arch'}, 1); #remoteok my $bconf; if (!$cgi->{'nofilename'}) { my $cfile; if (!$cgi->{'repository'}) { $cfile = "$projectsdir/$projid.conf" if -e "$projectsdir/$projid.conf"; } else { print "expandsearchpath $projid $cgi->{'repository'}...\n"; my @path = expandsearchpath($projid, $cgi->{'repository'}); my $c = concatconfigs($projid, $cgi->{'repository'}, undef, @path); $cfile = [ split("\n", $c) ]; } $bconf = Build::read_config($cgi->{'arch'} || 'noarch', $cfile); } my $res = sourceinfo($cgi, $projid, $packid, $bconf); return ($res, $BSXML::sourceinfo); } #################################################################### sub putconfiguration { my ($cgi) = @_; mkdir_p($uploaddir); my $uploaded = BSServer::read_file("$uploaddir/$$"); die("upload failed\n") unless $uploaded; my $configuration = readxml("$uploaddir/$$", $BSXML::configuration); unlink("$uploaddir/$$"); my $configurationxml = BSUtil::toxml($configuration, $BSXML::configuration); writestr("$BSConfig::bsdir/.configuration.xml", "$BSConfig::bsdir/configuration.xml", $configurationxml); # distribute to repo servers my @servers = BSSrcServer::Partition::allreposervers(); push @servers, $BSConfig::serviceserver if $BSConfig::serviceserver; for my $server (@servers) { my $param = { 'uri' => "$server/configuration", 'request' => 'PUT', 'data' => $configurationxml, }; eval { BSRPC::rpc($param, undef); }; warn($@) if $@; # XXX: what now? 
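# note: the new configuration.xml has already been written locally above, so
# a push that fails here presumably just leaves this server with a stale copy
# until the next successful update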
} return $BSStdServer::return_ok; } sub getconfiguration { my $configuration = readxml("$BSConfig::bsdir/configuration.xml", $BSXML::configuration, 1) || {}; return ($configuration, $BSXML::configuration); } #################################################################### sub putissuetrackers { my ($cgi) = @_; mkdir_p($uploaddir); my $uploaded = BSServer::read_file("$uploaddir/$$"); die("upload failed\n") unless $uploaded; my $trackers = readxml("$uploaddir/$$", $BSXML::issue_trackers); unlink("$uploaddir/$$"); writexml("$BSConfig::bsdir/.issuetrackers.xml", "$BSConfig::bsdir/issuetrackers.xml", $trackers, $BSXML::issue_trackers); return $BSStdServer::return_ok; } sub getissuetrackers { my $trackers = readxml("$BSConfig::bsdir/issuetrackers.xml", $BSXML::issue_trackers, 1) || {}; return ($trackers, $BSXML::issue_trackers); } #################################################################### sub orderkiwirepos_for_prio { my (@repos) = @_; my @prps; my %prps; for my $repo (@repos) { my $prp; my $url = ($repo->{'source'} || {})->{'path'}; if ($url =~ /^obs:\/\/\/?([^\/]+)\/([^\/]+)\/?$/) { $prp = "$1/$2"; } else { $prp = BSUrlmapper::urlmapper($url); } $prp ||= ':unknown/unknown'; push @prps, $prp; push @{$prps{$prp}}, $repo; } @prps = BSUtil::unify(@prps); my %deps; for my $prp (@prps) { my ($projid, $repoid) = split('/', $prp, 2); my @path; eval { @path = expandsearchpath($projid, $repoid); }; if ($@) { die($@) unless $@ =~ /^404/; warn($@); } my $oldrprp; for my $rprp (@path) { unshift @{$deps{$rprp}}, $oldrprp if $oldrprp && $rprp ne $oldrprp; $oldrprp = $rprp; if (!$prps{$rprp}) { $prps{$rprp} = []; push @prps, $rprp; } } } $deps{$_} = [ BSUtil::unify(@{$deps{$_}}) ] for keys %deps; # hmm, depsort is not really stable... my @cycs; @prps = BSSolv::depsort(\%deps, undef, \@cycs, @prps); print "orderkiwirepo_for_prio cycle: ".join(' -> ', @$_)."\n" for @cycs; return map {@{$prps{$_}}} @prps; } sub orderkiwirepos { my ($cgi) = @_; mkdir_p($uploaddir); die("content read failed\n") unless BSServer::read_file("$uploaddir/$$"); my $kiwi = readxml("$uploaddir/$$", $BSKiwiXML::kiwidesc); unlink("$uploaddir/$$"); my %prios; my $pkgmanager; $pkgmanager = $kiwi->{'preferences'}->{'packagemanager'} if $kiwi->{'preferences'}; $pkgmanager ||= 'zypper'; for (@{$kiwi->{'repository'} || []}) { my $prio = $_->{'priority'}; $prio = $pkgmanager eq 'smart' ? 
0 : 99 unless defined $prio; $prio = 0 + $prio; $prio = -$prio if $pkgmanager eq 'smart'; push @{$prios{$prio}}, $_; } if (%prios) { $kiwi->{'repository'} = []; for my $prio (sort {$a <=> $b} keys %prios) { push @{$kiwi->{'repository'}}, orderkiwirepos_for_prio(@{$prios{$prio}}); } } return ($kiwi, $BSKiwiXML::kiwidesc); } #################################################################### sub external_notification { my ($cgi, $type) = @_; my $param = {}; for (keys %$cgi) { $param->{$_} = $cgi->{$_} unless $_ eq '_type' || /^\./; } notify($type, $param); return $BSStdServer::return_ok; } sub notify_plugins { my ($cgi, $type) = @_; my $param = {}; if (BSServer::have_content()) { my $jsonbody = BSServer::read_data(10000000); $param = JSON::XS::decode_json($jsonbody); } else { for (keys %$cgi) { $param->{$_} = $cgi->{$_} unless $_ eq '_type' || /^\./; } } BSNotify::notify_plugins($type, $param); return $BSStdServer::return_ok; } #################################################################### sub listjobs { my ($cgi, $arch) = @_; die("not masterdispatching\n") unless $BSConfig::masterdispatcher; my $dir = BSRPC::rpc({ 'uri' => "$BSConfig::masterdispatcher/jobs/$arch", 'timeout' => 60, }, $BSXML::dir); my @jobs = sort(map {$_->{'name'}} @{$dir->{'entry'} || []}); if ($cgi->{'partition'}) { my %good = map {$_ => 1} @{$cgi->{'partition'}}; my %projid2partition; for my $job (splice @jobs) { my $jn = $job; $jn =~ s/-[0-9a-f]{32}$//s; my ($projid, $repoid, $packid) = split('::', $jn); next unless defined $repoid; my $part = $projid2partition{$projid}; $part = $projid2partition{$projid} = BSSrcServer::Partition::projid2partition($projid) unless defined $part; push @jobs, $job if $good{$part}; } } @jobs = map {{'name' => $_}} @jobs; return ({'entry' => \@jobs}, $BSXML::dir); } #################################################################### sub hello { my ($cgi) = @_; return "<hello />\n" if $BSStdServer::isajax; return "<hello />\n"; } #################################################################### sub getworkercap { my ($cgi, $workerid) = @_; my $reposerver = $BSConfig::masterdispatcher || $BSConfig::reposerver; # no need to look for partitioning here my $param = { 'uri' => "$reposerver/worker/$workerid", 'timeout' => 60, }; my $worker_cap = BSWatcher::rpc($param, $BSXML::worker); return ($worker_cap, $BSXML::worker); } sub checkconstraints { my ($cgi) = @_; checkprojrepoarch($cgi->{'project'}, $cgi->{'repository'}, $cgi->{'arch'}); my $reposerver = $BSConfig::masterdispatcher; $reposerver ||= $BSConfig::partitioning ?
BSSrcServer::Partition::projid2reposerver($cgi->{'project'}) : $BSConfig::reposerver; my @args = BSRPC::args($cgi, 'project', 'repository', 'arch', 'package'); my $param = { 'uri' => "$reposerver/worker", 'request' => 'POST', }; if (BSServer::have_content()) { $param->{'data'} = \&BSServer::forward_sender; $param->{'chunked'} = 1; } my $ret = BSWatcher::rpc($param, $BSXML::dir, "cmd=checkconstraints", @args); return ($ret, $BSXML::dir); } sub getavailable { my ($in, $available) = @_; for my $i (@{$in || []}) { for my $arch (@{$i->{'arch'} || []}) { $available->{$_}->{$arch} = 1 for @{$i->{'name'} || []}; } } } sub processavailable { my ($available) = @_; my %archlist; my @res; for my $bin (sort keys %$available) { my $archlist = join(',', sort keys %{$available->{$bin}}); $archlist{$archlist}->{$bin} = 1; } for my $archlist (sort keys %archlist) { my @archs = split(',', $archlist); push @res, {'arch' => \@archs, 'name' => [ sort keys %{$archlist{$archlist}} ]}; } return \@res; } sub getavailablebinaries { my ($cgi, $projid) = @_; my @path = @{$cgi->{'path'} || []}; for my $url (@{$cgi->{'url'} || []}) { if ($url =~ /^obs:\/\/\/?([^\/]+)\/([^\/]+)\/?$/) { push @path, "$1/$2"; } else { my $prp = BSUrlmapper::urlmapper($url); push @path, $prp if $prp; } } my @prpa; my @arch = @{$cgi->{'arch'} || []}; my %archfilter = map {$_ => 1} @arch; # hack ahead my $proj = BSRevision::readproj_local($projid, 1) || {}; for my $repo (@{$proj->{'repository'} || []}) { my @repoarch = @{$repo->{'arch'} || []}; next unless @repoarch; if (@{$proj->{'repository'} || []} > 1) { next unless $repo->{'name'} && $repo->{'name'} =~ /^images/; } push @arch, @repoarch unless %archfilter; if (!@path) { for my $prp (expandsearchpath($projid, $repo->{'name'})) { push @prpa, map {"$prp/$_"} @repoarch; } } } if (@path) { @arch = BSUtil::unify(@arch); for my $prp (@path) { push @prpa, map {"$prp/$_"} @arch; } } @prpa = BSUtil::unify(@prpa); # now partition the prpas my %projid2reposerver; my %prpas_by_reposerver; my %remoteprojects; for my $prpa (@prpa) { my ($aprojid, $arepoid, $aarch) = split('/', $prpa, 3); my $reposerver = $projid2reposerver{$aprojid}; if (!$reposerver) { # check if it's a remote repo my $proj = BSRevision::readproj_local($aprojid, 1); $proj = BSSrcServer::Remote::remoteprojid($aprojid) if !$proj || $proj->{'remoteurl'}; if ($proj->{'remoteurl'}) { $reposerver = "remote://$proj->{'remoteurl'}"; $remoteprojects{$aprojid} = $proj; } else { $reposerver = $BSConfig::partitioning ? 
BSSrcServer::Partition::projid2reposerver($aprojid) : $BSConfig::reposerver; } $projid2reposerver{$aprojid} = $reposerver; } push @{$prpas_by_reposerver{$reposerver}}, $prpa; } # go get em my (%available, %available_pattern, %available_product); for my $reposerver (sort keys %prpas_by_reposerver) { my @args; push @args, map {"prpa=$_"} @{$prpas_by_reposerver{$reposerver}}; push @args, "cmd=availablebinaries"; my $param = { 'uri' => "$reposerver/_command", 'timeout' => 30, 'request' => 'POST', }; if ($reposerver =~ /^remote:\/\//) { my $firstproj; my @remotepath; my @remotearch; for my $prpa (@{$prpas_by_reposerver{$reposerver}}) { my ($aprojid, $arepoid, $aarch) = split('/', $prpa, 3); my $proj = $remoteprojects{$aprojid}; next unless $proj; $firstproj ||= $proj; push @remotepath, "$proj->{'remoteproject'}/$arepoid"; push @remotearch, $aarch; } next unless $firstproj && @remotepath && @remotearch; $param = { 'uri' => "$firstproj->{'remoteurl'}/build/$firstproj->{'remoteproject'}/_availablebinaries", 'timeout' => 30, 'request' => 'GET', 'proxy' => $firstproj->{'remoteproxy'}, }; @args = (); push @args, map {"path=$_"} BSUtil::unify(@remotepath); push @args, map {"arch=$_"} BSUtil::unify(@remotearch); } my $res; eval { $res = BSWatcher::rpc($param, $BSXML::availablebinaries, @args); }; warn($@) if $@; next unless $res; #return ($res, $BSXML::availablebinaries) if keys(%prpas_by_reposerver) == 1; getavailable($res->{'packages'}, \%available) if $res->{'packages'}; getavailable($res->{'patterns'}, \%available_pattern) if $res->{'patterns'}; getavailable($res->{'products'}, \%available_product) if $res->{'products'}; } my %res; $res{'packages'} = processavailable(\%available) if %available; $res{'patterns'} = processavailable(\%available_pattern) if %available_pattern; $res{'products'} = processavailable(\%available_product) if %available_product; return (\%res, $BSXML::availablebinaries); } #################################################################### sub cloudupload_create_receiver { my ($req, $param) = @_; my $hdr = $req->{'headers'}; die("cannot do chunked uploads\n") if $hdr->{'transfer-encoding'} && lc($hdr->{'transfer-encoding'}) eq 'chunked'; my $cl = $hdr->{'content-length'}; die("need a content length\n") unless defined $cl; die("upload image is empty\n") unless $cl; local *S = $req->{'__socket'}; # XXX: fix BSRPC $req->{'__socket'} = \*S; # create a job $param->{'uploadjob'}->{'size'} = $cl; # we now know the size my $createparam = { 'uri' => "$BSConfig::clouduploadserver/cloudupload", 'request' => 'POST', 'timeout' => 60, 'data' => BSUtil::toxml($param->{'uploadjob'}, $BSXML::clouduploadjob), }; my $job = BSWatcher::rpc($createparam, $BSXML::clouduploadjob); # reply the job right away BSStdServer::stdreply($job, $BSXML::clouduploadjob); BSServer::done(1); # do the upload in the background my $uploadparam = { 'uri' => "$BSConfig::clouduploadserver/cloudupload/$job->{'name'}", 'request' => 'PUT', 'timeout' => 600, 'headers' => [ "Content-Length: $cl" ], 'data' => \&BSHTTP::reply_sender, 'reply_req' => $req, }; BSWatcher::rpc($uploadparam); # nothing else to reply return undef; } sub cloudupload_create { my ($cgi, $projid, $repoid, $arch, $packid, $filename) = @_; die("no cloud upload server configured\n") unless $BSConfig::clouduploadserver; my $targetdata; $targetdata = BSServer::read_data(10000000) if BSServer::have_content(); $targetdata = '' unless defined $targetdata; my $reposerver = $BSConfig::partitioning ?
BSSrcServer::Partition::projid2reposerver($projid) : $BSConfig::reposerver; my $uploadjob = { 'project' => $projid, 'repository' => $repoid, 'arch' => $arch, 'package' => $packid, 'filename' => $filename, 'user' => $cgi->{'user'}, 'target' => $cgi->{'target'}, 'details' => unpack('H*', $targetdata), }; my $param = { 'uri' => "$reposerver/build/$projid/$repoid/$arch/$packid/$filename", 'receiver' => \&cloudupload_create_receiver, 'timeout' => 600, 'uploadjob' => $uploadjob, }; return BSWatcher::rpc($param, undef); } sub cloudupload_status { my ($cgi, $jobid) = @_; die("no cloud upload server configured\n") unless $BSConfig::clouduploadserver; my $param = { 'uri' => "$BSConfig::clouduploadserver/cloudupload/$jobid", 'ignorestatus' => 1, 'receiver' => \&BSServer::reply_receiver, }; BSWatcher::rpc($param, undef); return undef; } sub cloudupload_pubkey { my ($cgi) = @_; die("no cloud upload server configured\n") unless $BSConfig::clouduploadserver; return cloudupload_status($cgi, '_pubkey') unless $cgi->{'view'}; die("unsupported view '$cgi->{'view'}'\n") if $cgi->{'view'} ne 'info'; my $pk = BSWatcher::rpc("$BSConfig::clouduploadserver/cloudupload/_pubkey"); die("no pubkey configured\n") unless $pk; my $pubkey = pubkeyinfo($pk); return ($pubkey, $BSXML::pubkeyinfo); } sub cloudupload_kill { my ($cgi, $jobid) = @_; die("no cloud upload server configured\n") unless $BSConfig::clouduploadserver; my $param = { 'uri' => "$BSConfig::clouduploadserver/cloudupload/$jobid", 'request' => 'POST', 'ignorestatus' => 1, 'receiver' => \&BSServer::reply_receiver, }; BSWatcher::rpc($param, undef, 'cmd=kill'); return undef; } sub cloudupload_joblist { my ($cgi) = @_; die("no cloud upload server configured\n") unless $BSConfig::clouduploadserver; my $param = { 'uri' => "$BSConfig::clouduploadserver/cloudupload", }; return BSRPC::rpc($param, undef, BSRPC::args($cgi, 'name')); } sub cloudupload_log { my ($cgi, $jobid) = @_; die("no cloud upload server configured\n") unless $BSConfig::clouduploadserver; my @args = BSRPC::args($cgi, 'nostream', 'start', 'end', 'view'); if (!$BSStdServer::isajax && !$cgi->{'nostream'} && !$cgi->{'view'}) { BSHandoff::handoff("/cloudupload/$jobid/_log", undef, @args); } my $param = { 'uri' => "$BSConfig::clouduploadserver/cloudupload/$jobid/_log", 'ignorestatus' => 1, 'receiver' => \&BSServer::reply_receiver, 'joinable' => 1, }; BSWatcher::rpc($param, undef, @args); return undef; # always streams result } #################################################################### my $dispatches = [ '/' => \&hello, '!rw :' => undef, '!- GET:' => undef, '!- HEAD:' => undef, 'POST:/source cmd=orderkiwirepos' => \&orderkiwirepos, 'POST:/source cmd: *:*' => \&unknowncmd, # /source name space: manage project and package data '/source deleted:bool?' => \&getprojectlist, 'POST:/source/$project cmd=createkey user:? comment:?' => \&createkey, 'POST:/source/$project cmd=extendkey user:? comment:?' => \&extendkey, 'POST:/source/$project cmd=undelete user:? comment:?' => \&undeleteproject, 'POST:/source/$project cmd=copy user:? comment:? oproject:project withbinaries:bool? withhistory:bool? makeolder:bool? makeoriginolder:bool? resign:bool? noservice:bool?' => \&copyproject, 'POST:/source/$project cmd=move oproject:project' => \&moveproject, 'POST:/source/$project cmd=freezelink user:? comment:? requestid:num?' => \&freezeprojectlink, 'POST:/source/$project cmd=notifypackagechange' => \&notifypackagechange, 'POST:/source/$project cmd: *:*' => \&unknowncmd, '/source/$project view=info parse:bool?
nofilename:bool? repository? arch? package* withchangesmd5:bool?' => \&getprojectsourceinfo, '/source/$project deleted:bool? expand:bool? noorigins:bool?' => \&getpackagelist, 'DELETE:/source/$project user:? comment:? requestid:num?' => \&delproject, '/source/$project/_meta rev?' => \&getproject, 'PUT:/source/$project/_meta user:? comment:? requestid:num? lowprio:bool?' => \&putproject, '/source/$project/_pubkey rev?' => \&getpubkey, 'DELETE:/source/$project/_pubkey user:? comment:?' => \&deletekey, '/source/$project/_config rev?' => \&getprojectconfig, 'PUT:/source/$project/_config user:? comment:?' => \&putprojectconfig, 'DELETE:/source/$project/_config user:? comment:?' => \&delprojectconfig, '/source/$project/_history rev? meta:bool? deleted:bool? limit:num?' => \&getpackagehistory, '/source/$project/_keyinfo withsslcert:bool? autoextend:bool? donotcreatecert:bool?' => \&getkeyinfo, 'POST:/source/$project/$package cmd=diff rev? orev:rev? oproject:project? opackage:package? expand:bool? linkrev? olinkrev:? unified:bool? missingok:bool? meta:bool? file:filename* filelimit:num? tarlimit:num? view:? withissues:bool? onlyissues:bool?' => \&sourcediff, 'POST:/source/$project/$package cmd=linkdiff rev? linkrev? unified:bool? file:filename* filelimit:num? tarlimit:num? view:? withissues:bool? onlyissues:bool?' => \&linkdiff, 'POST:/source/$project/$package cmd=servicediff rev? unified:bool? file:filename* filelimit:num? tarlimit:num? view:? withissues:bool? onlyissues:bool?' => \&servicediff, 'POST:/source/$project/$package cmd=commit rev? user:? comment:? keeplink:bool? repairlink:bool? linkrev? setrev:bool? requestid:num? noservice:bool?' => \&sourcecommit, 'POST:/source/$project/$package cmd=commitfilelist rev? user:? comment:? keeplink:bool? repairlink:bool? linkrev? setrev:bool? requestid:num? time:num? version:? vrev:? noservice:bool? servicemark:? withvalidate:?' => \&sourcecommitfilelist, 'POST:/source/$project/$package cmd=copy rev? user:? comment:? orev:rev? oproject:project? opackage:package? expand:bool? keeplink:bool? repairlink:bool? linkrev? setrev:linkrev? olinkrev:linkrev? requestid:num? dontupdatesource:bool? noservice:bool? withvrev:bool? withacceptinfo:bool? makeoriginolder:bool? freezelink:bool? vrevbump:num? instantiate:bool?' => \&sourcecopy, 'POST:/source/$project/$package cmd=collectbuildenv user:? comment:? orev:rev? oproject:project? opackage:package?' => \&sourcecollectbuildenv, 'POST:/source/$project/$package cmd=branch rev? user:? comment:? orev:rev? oproject:project? opackage:package? olinkrev:linkrev? requestid:num? force:bool? keepcontent:bool? missingok:bool? noservice:bool? withacceptinfo:bool? time:num? extendvrev:bool?' => \&sourcebranch, 'POST:/source/$project/$package cmd=linktobranch rev? user:? comment:? linkrev?' => \&linktobranch, 'POST:/source/$project/$package cmd=deleteuploadrev' => \&deleteuploadrev, 'POST:/source/$project/$package cmd=undelete user:? comment:? time:num?' => \&undeletepackage, 'POST:/source/$project/$package cmd=runservice user:? comment:?' => \&triggerservicerun, 'POST:/source/$project/$package cmd=waitservice' => \&waitservicerun, 'POST:/source/$project/$package cmd=mergeservice user:? comment:?' => \&mergeservicerun, 'POST:/source/$project/$package cmd=getprojectservices' => \&getprojectservices, 'POST:/source/$project/$package cmd=notifypackagechange' => \&notifypackagechange, 'POST:/source/$project/$package cmd: *:*' => \&unknowncmd, 'PUT:/source/$project/$package cmd: rev? user:? comment:?'
=> \&sourcecommitfilelist, # obsolete '/source/$project/$package view=getmultibuild' => \&getmultibuildpackages, '/source/$project/$package view=info rev? linkrev? parse:bool? nofilename:bool? repository? arch? withchangesmd5:bool?' => \&getpackagesourceinfo, '/source/$project/$package rev? linkrev? emptylink:bool? deleted:bool? expand:bool? view:? extension:? lastworking:bool? withlinked:bool? meta:bool? product:?' => \&getfilelist, '/source/$project/$package/_history rev? meta:bool? deleted:bool? limit:num?' => \&getpackagehistory, '/source/$project/$package/_meta rev? expand:bool? meta:bool? deleted:bool? view:?' => \&getpackage, 'PUT:/source/$project/$package/_meta user:? comment:? requestid:num?' => \&putpackage, 'DELETE:/source/$project/$package user:? comment:? requestid:num?' => \&delpackage, '/source/$project/$package/$filename rev? expand:bool? meta:bool? deleted:bool? view:?' => \&getfile, 'PUT:/source/$project/$package/$filename rev? user:? comment:? keeplink:bool? force:bool? meta:bool?' => \&putfile, 'DELETE:/source/$project/$package/$filename rev? user:? comment:? keeplink:bool? force:bool? meta:bool?' => \&delfile, # /published name space: access published binaries '/published' => \&published, '/published/$project' => \&published, '/published/$project/$repository view=publishedpath medium:?' => \&published_path, '/published/$project/$repository' => \&published, '/published/$project/$repository/$arch:filename view:?' => \&published, '/published/$project/$repository/$arch:filename/$filename view:?' => \&published, '/published/$project/$repository/$arch:filename/$filename/$subfilename:filename view:?' => \&published, # scheduler calls '/getprojpack $project* $repository* $package* $arch? withrepos:bool? withsrcmd5:bool? withdeps:bool? withconfig:bool? expandedrepos:bool? ignoredisable:bool? nopackages:bool? withremotemap:bool? remotemaponly:bool? noremote:bool? parseremote:bool? buildinfo:bool? partition:? view:?' => \&getprojpack, 'POST:/relsync $project $repository $arch' => \&postrelsync, '/relsync $project $repository $arch' => \&getrelsync, # worker capabilities '/worker/$workerid' => \&getworkercap, 'POST:/worker cmd=checkconstraints $project $repository $arch $package' => \&checkconstraints, # worker calls '/getsources $project $package $srcmd5:md5' => \&getsources, '/getconfig $project $repository path:prp*' => \&getbuildconfig, '/getsignkey $project withpubkey:bool? autoextend:bool? withalgo:bool?' => \&getsignkey, '/getsslcert $project autoextend:bool?' => \&getsslcert, '/getbinaries $project $repository $arch binaries: nometa:bool?' => \&worker_getbinaries, '/getbinaryversions $project $repository $arch binaries: nometa:bool?' => \&worker_getbinaryversions, '!- /lastevents $filter:* start:num? obsname:?' => \&lastevents, '/lastnotifications start:num? view:? block:bool? noprune:bool?' => \&lastnotifications, '/notificationpayload/$payloadkey:filename' => \&getnotificationpayload, 'DELETE:/notificationpayload/$payloadkey:filename' => \&deletenotificationpayload, 'POST:/event type: project: package:? repository:? arch:? job:?' => \&newevent, # tmp until lighttpd gets fixed '/public/lastevents $filter:* start:num? obsname:?' => \&lastevents, # search interface '/search $in: $match: return:? values:bool?'
=> \&search, '/search/project $match:' => \&search_proj, '/search/project/id $match:' => \&search_proj_id, '/search/package $match:' => \&search_pack, '/search/package/id $match:' => \&search_pack_id, 'POST:/search/published cmd:' => \&search_published_updatedb, '/search/published/binary/id $match: limit:num? withdownloadurl:bool?' => \&search_published_binary_id, '/search/published/pattern/id $match: limit:num? withdownloadurl:bool?' => \&search_published_pattern_id, # service interface, just for listing for now '/service' => \&listservices, # '/service/$service' => \&service, # configuration 'PUT:/configuration' => \&putconfiguration, '/configuration' => \&getconfiguration, # issue trackers 'PUT:/issue_trackers' => \&putissuetrackers, '/issue_trackers' => \&getissuetrackers, # build calls for binary files '/build' => \&getprojectlist, '/build/_workerstatus scheduleronly:bool? daemonsonly:bool? arch*' => \&getworkerstatus, 'PUT:/build/_dispatchprios' => \&putdispatchprios, '/build/_dispatchprios' => \&getdispatchprios, '/build/$project/_availablebinaries url:* path:prp* arch*' => \&getavailablebinaries, 'POST:/build/$project cmd: repository* arch* package* code:* wipe:* comment:?' => \&docommand, '/build/$project' => \&getrepositorylist, '/build/$project/_result oldstate:md5? view:resultview* lastbuild:bool? repository* arch* package* code:* multibuild:bool? locallink:bool?' => \&getresult, '/build/$project/$repository' => \&getarchlist, '/build/$project/$repository/_buildconfig path:prp*' => \&getbuildconfig, '/build/$project/$repository/$arch package* view:?' => \&getpackagelist_build, '!- /build/$project/$repository/$arch/_builddepinfo package* view:?' => \&getbuilddepinfo, '/build/$project/$repository/$arch/_jobhistory package* code:* limit:num?' => \&getjobhistory, 'POST:/build/$project/$repository/$arch/_repository match:' => \&postrepo, 'POST:/build/$project/$repository/$arch/$package cmd=copy oproject:project? opackage:package? orepository:repository? setupdateinfoid:? resign:bool? setrelease:? multibuild:bool?' => \&copybuild, 'POST:/build/$project/$repository/$arch/$package' => \&uploadbuild, '/build/$project/$repository/$arch/$package_repository view:? binary:filename* nometa:bool? nosource:bool? withmd5:bool?' => \&getbinarylist, 'POST:/build/$project/$repository/$arch/$package_repository/_buildinfo add:* debug:bool?' => \&getbuildinfo_post, '/build/$project/$repository/$arch/$package/_buildinfo add:* internal:bool? debug:bool?' => \&getbuildinfo, '/build/$project/$repository/$arch/$package/_jobstatus' => \&getjobstatus, '/build/$project/$repository/$arch/$package/_log nostream:bool? last:bool? start:intnum? end:num? view:?' => \&getlogfile, '/build/$project/$repository/$arch/$package/_reason' => \&getbuildreason, '/build/$project/$repository/$arch/$package/_status' => \&getbuildstatus, '/build/$project/$repository/$arch/$package/_history limit:num?' => \&getbuildhistory, '/build/$project/$repository/$arch/$package_repository/$filename view:?' => \&getbinary, 'PUT:/build/$project/$repository/$arch/_repository/$filename ignoreolder:bool? wipe:bool?' => \&putbinary, 'DELETE:/build/$project/$repository/$arch/_repository/$filename' => \&delbinary, '/request/_lastid' => \&getlastidrequest, # just required for migration into api '/request/$id:num' => \&getrequest, # just required for migration into api # for masterdispatcher syncing '/jobs/$arch partition:*' => \&listjobs, # notifications from publisher/repserver - CGI 'POST:/notify/$_type: *:?'
=> \&external_notification, # called from the API to notify hermes/rabbitmq 'POST:/notify_plugins/$_type:' => \&notify_plugins, # cloud upload calls 'POST:/cloudupload $project $repository $arch $package $filename user: target:' => \&cloudupload_create, 'POST:/cloudupload/$job cmd=kill' => \&cloudupload_kill, '/cloudupload/_pubkey view:?' => \&cloudupload_pubkey, '/cloudupload/$job' => \&cloudupload_status, '/cloudupload/$job/_log nostream:bool? start:intnum? end:num? view:?' => \&cloudupload_log, '/cloudupload name:num*' => \&cloudupload_joblist, '/ajaxstatus' => \&getajaxstatus, '/serverstatus' => \&BSStdServer::serverstatus, ]; #################################################################### my $dispatches_ajax = [ '/' => \&hello, '/ajaxstatus' => \&getajaxstatus, '/build/$project/_result oldstate:md5? view:resultview* repository* arch* package* code:*' => \&getresult, '/build/$project/$repository/$arch package* view:?' => \&getpackagelist_build, '/build/$project/$repository/$arch/$package/_log nostream:bool? last:bool? start:intnum? end:num?' => \&getlogfile, '/build/$project/$repository/$arch/$package_repository view:? binary:filename* nometa:bool? nosource:bool? withmd5:bool?' => \&getbinarylist, '/getbinaries $project $repository $arch binaries: nometa:bool? raw:bool?' => \&worker_getbinaries, '/getbinaryversions $project $repository $arch binaries: nometa:bool?' => \&worker_getbinaryversions, '/lastevents $filter:* start:num? obsname:?' => \&lastevents, '/lastnotifications start:num? view:? block:bool?' => \&lastnotifications, '/source/$project/$package cmd=waitservice servicemark:' => \&waitservicerun, '/source/$project/$package rev view:' => \&getfilelist_ajax, '/source/$project/$package:package/$filename rev?' => \&getfile, '/request/$id:num withkey:bool? oldkey:md5?' => \&getrequest, '/sourcediffcache/$cacheid:md5 view:?' => \&getsourcediffcache, '/cloudupload/$job/_log nostream:bool? start:intnum? end:num? view:?' => \&cloudupload_log, ]; #################################################################### my $conf = { 'port' => $port, 'dispatches' => $dispatches, 'maxchild' => 20, 'maxchild2' => 20, 'slowrequestthr' => 10, }; my $aconf = { 'socketpath' => $ajaxsocket, 'dispatches' => $dispatches_ajax, }; if ($BSConfig::workersrcserver) { my $wport = $port; $wport = $1 if $BSConfig::workersrcserver =~ /:(\d+)$/; $conf->{'port2'} = $wport if $wport != $port; } # create bsdir before root privileges are dropped BSUtil::mkdir_p_chown($BSConfig::bsdir, $BSConfig::bsuser, $BSConfig::bsgroup); # set a repoid for identification of this data repository if (! -e "$projectsdir/_repoid") { BSUtil::mkdir_p_chown($projectsdir, $BSConfig::bsuser, $BSConfig::bsgroup); $datarepoid = sprintf("%09d", int(rand(1000000000))); writestr("$projectsdir/._repoid$$", "$projectsdir/_repoid", $datarepoid); } $datarepoid = readstr("$projectsdir/_repoid"); BSStdServer::server('bs_srcserver', \@ARGV, $conf, $aconf);
open-build-service-2.9.4/src/backend/bs_warden
#!/usr/bin/perl -w # # Copyright (c) 2009 Michael Schroeder, Novell Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program (see the file COPYING); if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA # ################################################################ # # Check if all jobs in state building are really built on the workers # BEGIN { my ($wd) = $0 =~ m-(.*)/- ; $wd ||= '.'; unshift @INC, "$wd/build"; unshift @INC, "$wd"; } use POSIX; use Data::Dumper; use Digest::MD5 (); use Fcntl qw(:DEFAULT :flock); use XML::Structured ':bytes'; use BSConfiguration; use BSRPC; use BSUtil; use BSXML; use strict; my $bsdir = $BSConfig::bsdir || "/srv/obs"; BSUtil::mkdir_p_chown($bsdir, $BSConfig::bsuser, $BSConfig::bsgroup); BSUtil::drop_privs_to($BSConfig::bsuser, $BSConfig::bsgroup); my $rundir = $BSConfig::rundir || "$BSConfig::bsdir/run"; my $workersdir = "$BSConfig::bsdir/workers"; my $jobsdir = "$BSConfig::bsdir/jobs"; $| = 1; $SIG{'PIPE'} = 'IGNORE'; BSUtil::restartexit($ARGV[0], 'warden', "$rundir/bs_warden"); BSUtil::printlog("starting build service worker warden"); # get lock mkdir_p($rundir); open(RUNLOCK, '>>', "$rundir/bs_warden.lock") || die("$rundir/bs_warden.lock: $!\n"); flock(RUNLOCK, LOCK_EX | LOCK_NB) || die("worker warden is already running!\n"); utime undef, undef, "$rundir/bs_warden.lock"; my %building; my $nextorphan = 0; my $xcheck = 0; my %orphanchecks; while (1) { my $now = time(); my %nbuilding; for my $wname (ls("$workersdir/building")) { next if $wname =~ /^\./; $nbuilding{$wname} = $building{$wname} || {'lastcheck' => $now}; } %building = %nbuilding; %nbuilding = (); for my $wname (sort keys %building) { my $b = $building{$wname}; my $lastcheck = $b->{'lastcheck'}; $lastcheck += rand(60 * 60); next if $lastcheck > $now; last if -e "$rundir/bs_warden.restart"; last if -e "$rundir/bs_warden.exit"; my $worker = readxml("$workersdir/building/$wname", $BSXML::worker, 1); next unless $worker && $worker->{'job'} && $worker->{'arch'}; my $job = $worker->{'job'}; my $arch = $worker->{'arch'}; $building{$wname}->{'job'} = "$arch/$job"; my $js; if ($worker->{'reposerver'}) { # masterdispatched job. ask slave about job status. my $param = { 'uri' => "$worker->{'reposerver'}/jobs/$arch/$job", 'timeout' => 60, }; eval { $js = BSRPC::rpc($param, $BSXML::jobstatus, 'view=status'); }; } else { $js = readxml("$jobsdir/$arch/$job:status", $BSXML::jobstatus, 1); } next unless $js && $js->{'code'} eq 'building'; next unless $js->{'workerid'} eq $worker->{'workerid'}; #print "checking worker $wname\n"; my $param = { 'uri' => "$js->{'uri'}/worker", 'timeout' => 60, }; eval { BSRPC::rpc($param, undef, "jobid=$js->{'jobid'}"); }; if ($@) { warn($@); # worker is down or doing something weird. 
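# Recovery takes one of two paths, depending on who dispatched the job
# (a summary of the branches below): for masterdispatched jobs the slave
# reposerver is asked to idle the worker and the worker entry is parked
# in workers/down; for local jobs the :status file is re-checked under a
# lock (the job may have finished or moved to another worker in the
# meantime) and then removed, which lets the job be dispatched again.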
if ($worker->{'reposerver'}) { BSUtil::printlog("restarting build of $arch/$job building on $js->{'workerid'}"); my $param = { 'uri' => "$worker->{'reposerver'}/jobs/$arch/$job", 'request' => 'POST', 'timeout' => 60, }; eval { BSRPC::rpc($param, undef, 'cmd=idleworker', "workerid=$worker->{'workerid'}", "jobid=$js->{'jobid'}"); mkdir_p("$workersdir/down"); rename("$workersdir/building/$wname", "$workersdir/down/$wname"); delete $building{$wname}; }; warn($@) if $@; } else { local *F; my $js2 = BSUtil::lockopenxml(\*F, '<', "$jobsdir/$arch/$job:status", $BSXML::jobstatus, 1); if (!$js2 || $js2->{'code'} ne 'building' || $js2->{'jobid'} ne $js->{'jobid'} || $js2->{'workerid'} ne $js->{'workerid'}) { print "build of $job is done on a different worker\n"; close F; next; } BSUtil::printlog("restarting build of $arch/$job building on $js->{'workerid'}"); unlink("$jobsdir/$arch/$job:status"); mkdir_p("$workersdir/down"); rename("$workersdir/building/$wname", "$workersdir/down/$wname"); delete $building{$wname}; close F; } } else { $b->{'lastcheck'} = $now; } } if ($now > $nextorphan) { $nextorphan = $now + 60; # every minute $xcheck = 0 if $xcheck++ > 10; my %buildingjobs = map {($_->{'job'} || '') => 1} values %building; for my $arch (sort(ls($jobsdir))) { next unless -d "$jobsdir/$arch"; my @b = sort(grep {!/^\./} ls("$jobsdir/$arch")); my %locked = map {$_ => 1} grep {/:status$/} @b; # check for orphaned jobs my %norphanchecks; $orphanchecks{$arch} ||= {}; for my $job (grep {!/:(?:dir|status|new)$/} @b) { next unless $locked{"$job:status"}; next if $buildingjobs{"$arch/$job"}; if (!$orphanchecks{$arch}->{$job}) { my @s = stat("$jobsdir/$arch/$job:status"); $norphanchecks{$job} = 1 + (((@s ? $s[9] : 0) / 60) % 30); } else { $norphanchecks{$job} = $orphanchecks{$arch}->{$job}; } next if $norphanchecks{$job}++ < 30; # check every 30 minutes $norphanchecks{$job} = 1; my $js = readxml("$jobsdir/$arch/$job:status", $BSXML::jobstatus, 1); if ($js && $js->{'code'} ne 'building') { my @s = stat("$jobsdir/$arch/$job:status"); next if !@s || $s[9] + 86400 > $now; if (($js->{'code'} || '') eq 'finished' && !$js->{'endtime'}) { # no endtime, this is probably a fake job we cannot restart. also remove the job.
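# (a job that reports 'finished' but never recorded an endtime never ran
# on a real worker, so restarting it would achieve nothing; its leftover
# job file, result directory and status file are cleaned up instead)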
BSUtil::printlog("removing stuck fake job $arch/$job"); unlink("$jobsdir/$arch/$job"); BSUtil::cleandir("$jobsdir/$arch/$job:dir"); rmdir("$jobsdir/$arch/$job:dir"); unlink("$jobsdir/$arch/$job:status"); next; } BSUtil::printlog("restarting build of $arch/$job stuck in code $js->{'code'}"); unlink("$jobsdir/$arch/$job:status"); next; } next unless $js && $js->{'code'} eq 'building'; #print "orphan check for $arch/$job...\n"; my $param = { 'uri' => "$js->{'uri'}/worker", 'timeout' => 60, }; eval { BSRPC::rpc($param, undef, "jobid=$js->{'jobid'}"); }; if ($@) { warn($@); local *F; my $js2 = BSUtil::lockopenxml(\*F, '<', "$jobsdir/$arch/$job:status", $BSXML::jobstatus, 1); if (!$js2 || $js2->{'code'} ne 'building' || $js2->{'jobid'} ne $js->{'jobid'} || $js2->{'workerid'} ne $js->{'workerid'}) { print "build of $job is done on a different worker\n"; close F; next; } BSUtil::printlog("restarting orphaned build of $arch/$job building on $js->{'workerid'}"); unlink("$jobsdir/$arch/$job:status"); close F; } } $orphanchecks{$arch} = \%norphanchecks; if (!$xcheck) { # check orphaned :dir or :status files my %jobs = map {$_ => 1} grep {!/:(?:dir|status|new)$/} @b; for my $job (grep {s/:dir$//} @b) { next if $jobs{$job}; my @s = stat("$jobsdir/$arch/$job:dir"); if (@s && $s[9] + 86400 < $now) { BSUtil::printlog("removing orphaned $arch/$job result directory"); BSUtil::cleandir("$jobsdir/$arch/$job:dir"); rmdir("$jobsdir/$arch/$job:dir"); } } for my $job (grep {s/:status$//} @b) { next if $jobs{$job}; my @s = stat("$jobsdir/$arch/$job:status"); if (@s && $s[9] + 86400 < $now) { BSUtil::printlog("removing orphaned $arch/$job status file"); unlink("$jobsdir/$arch/$job:status"); } } } } } #print "sleeping\n"; for my $i (qw{1 2 3 4 5}) { if (-e "$rundir/bs_warden.exit") { close(RUNLOCK); unlink("$rundir/bs_warden.exit"); BSUtil::printlog("exiting..."); exit(0); } if (-e "$rundir/bs_warden.restart") { close(RUNLOCK); unlink("$rundir/bs_warden.restart"); BSUtil::printlog("restarting..."); exec($0); die("$0: $!\n"); } sleep(1); } } open-build-service-2.9.4/src/backend/bs_worker000077500000000000000000003652701332555733200213500ustar00rootroot00000000000000#!/usr/bin/perl -w # # Copyright (c) 2006-2009 Michael Schroeder, Novell Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program (see the file COPYING); if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA # ################################################################ # # Worker build process. Builds jobs received from a Repository Server, # sends build binary packages back. 
# BEGIN { my ($wd) = $0 =~ m-(.*)/- ; $wd ||= '.'; unshift @INC, "$wd/build"; unshift @INC, "$wd"; } use Digest::MD5 (); use XML::Structured ':bytes'; use Data::Dumper; use POSIX; use Fcntl qw(:DEFAULT :flock); BEGIN { Fcntl->import(':seek') unless defined &SEEK_SET; } use Storable; use BSRPC; use BSServer; use BSDispatch; use BSConfiguration; use BSUtil; use BSXML; use BSKiwiXML; use BSHTTP; use BSBuild; use BSCando; use strict; my @binsufs = qw{rpm deb pkg.tar.gz pkg.tar.xz}; my $binsufsre = join('|', map {"\Q$_\E"} @binsufs); my $buildroot; my $port; my $statedir; my $hostarch; my $vm = ''; my $vm_tmpfs_mode; my $vm_root = ''; my $vm_swap = ''; my $vm_kernel; my $vm_initrd; my $vm_memory; my $vm_custom_option; my $vm_enable_console; my $vm_worker_name; my $vm_worker_instance; my $vmdisk_rootsize; my $vmdisk_swapsize; my $vmdisk_filesystem; my $vmdisk_mount_options; my $vmdisk_clean; my $emulator_script; my $hugetlbfs; my $workerid; my $srcserver; my @reposervers; my $testmode; my $noworkercheck; my $nobuildcodecheck; my $oneshot; my $silent; my $hostcheck; my $localkiwi; my $owner; my @hostlabel; my $getbinariesproxy; my $hardstatus; my $jobs; my $threads; my $cachedir; my $cachesize; my $cleanup_chroot; my $wipeafterbuild; my $openstack_flavor; my $vm_server; # current XEN has no xenstore anymore my $xenstore_maxsize = 20 * 1000000; # 1 hour timeout to avoid forever hanging workers my $gettimeout = 1 * 3600; my $buildlog_maxsize = $BSConfig::buildlog_maxsize ? $BSConfig::buildlog_maxsize : 500 * 1000000; my $buildlog_maxidle = $BSConfig::buildlog_maxidle ? $BSConfig::buildlog_maxidle : 8 * 3600; # for collecting statistics my $binariesdownload; my $binariescachehits; my $binariesdownloadsize; my $workercode; my $buildcode; $hostcheck = $BSConfig::workerhostcheck if defined($BSConfig::workerhostcheck); sub lockstate { while (1) { open(STATELOCK, '>>', "$statedir/state") || die("$statedir/state: $!\n"); flock(STATELOCK, LOCK_EX) || die("flock $statedir/state: $!\n"); my @s = stat(STATELOCK); last if $s[3]; # check nlink close(STATELOCK); # race, try again } my $oldstate = readxml("$statedir/state", $BSXML::workerstate, 1); $oldstate = {} unless $oldstate; return $oldstate; } sub unlockstate { close(STATELOCK); } sub commitstate { my ($newstate) = @_; writexml("$statedir/state.new", "$statedir/state", $newstate, $BSXML::workerstate) if $newstate; close(STATELOCK); } sub hardstatus { return unless $hardstatus; my $l = $_[0] || ''; $l =~ s/[\000-\037\177-\377]//g; # ascii only, please $l = substr($l, 0, 40) if length($l) > 40; print "\033_$l\033\\"; } sub trunc_logfile { my ($lf) = @_; open(LF, '<', $lf) || return; my $buf; sysread(LF, $buf, 1000000); $buf .= "\n\n[truncated]\n\n"; sysseek(LF, -1000000, 2); sysread(LF, $buf, 1000000, length($buf)); close LF; $buf .= "\nLogfile got too big, killed job.\n"; open(LF, ">$lf.new") || return; syswrite(LF, $buf); close LF; rename("$lf.new", $lf); } sub tail_logfile { my ($lf, $size) = @_; local *LF; open(LF, '<', $lf) || return ''; if (-s LF > $size) { defined(sysseek(LF, -$size, 2)) || return ''; } my $buf = ''; sysread(LF, $buf, $size); close LF; return $buf; } sub cleanup_job { if ($vm_tmpfs_mode) { # no need to umount if $buildroot is already umounted if (not qsystem('mountpoint', '-q', $buildroot)) { qsystem("umount", "-l", $buildroot) && die("umount tmpfs failed: $!\n"); } } # if this is the chroot case, get rid of it after the build if ((!$vm || $vm eq 'lxc') && $cleanup_chroot) { print "Removing buildroot $buildroot\n"; rm_rf($buildroot);
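# recreate an empty buildroot directory so the next job starts clean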
mkdir_p($buildroot); } } sub kill_job { my @args; # same args as in the build call if ($vm =~ /(xen|kvm|zvm|emulator|pvm|openstack)/) { push @args, '--root', "$buildroot/.mount"; push @args, '--vm-type', $vm; push @args, '--vm-disk', $vm_root; push @args, '--emulator-script', $emulator_script if $vm eq 'emulator' && $emulator_script; push @args, '--vm-worker', $vm_worker_name if $vm_worker_name; push @args, '--vm-worker-nr', $vm_worker_instance if $vm_worker_instance; } else { push @args, '--root', $buildroot; push @args, '--vm-type', $vm if $vm eq 'lxc' || $vm eq 'docker'; } if (system("$statedir/build/build", @args, "--kill")) { return 0; } cleanup_job(); return 1; } sub wipe_all { my @args; if ($vm =~ /(xen|kvm|zvm|emulator|pvm|openstack)/) { push @args, '--root', "$buildroot/.mount"; push @args, '--vm-type', $vm; push @args, '--vm-disk', $vm_root; push @args, '--vm-swap', $vm_swap; } else { push @args, '--root', $buildroot; } if (system("$statedir/build/build", @args, "--wipe")) { return 0; } return 1; } my @saveargv = @ARGV; # so we can restart ourselves sub usage { my ($ret) = @_; print <<EOF; Usage: $0 --root <buildroot> --statedir <statedir> --root <buildroot> : buildroot directory --port <port> : fixed port number --statedir <statedir> : state directory --id <id> : worker id --reposerver <url>: define reposerver, can be used multiple times --arch <arch> : define hostarch (overrides 'uname -m') currently supported architectures: @{[sort keys %BSCando::cando]} --kvm : enable kvm --pvm : enable pvm --xen : enable xen --lxc : enable lxc --docker : enable docker --emulator : enable emulator (requires prepared emulator.sh script) --zvm : enable z/VM --tmpfs : uses tmpfs (memory) for the build root --device <device> : set kvm or xen root device (default is /root file) --swap <device> : set kvm or xen swap device (default is /swap file) --hostlabel <label> : define a host label for build constraints, can be used multiple times --vm-kernel <kernel> : set kernel to use (xen/kvm) --vm-initrd <initrd> : set initrd to use (xen/kvm) --vm-memory <size> : set amount of memory to use (xen/kvm) --vm-worker NAME : (z/VM) set name of the actual worker --vm-worker-nr NUMBER : (z/VM) set instance number of the actual worker --vm-custom-option SWITCHES : hand over custom switches to vm handler (e.g. qemu) --vmdisk-rootsize <size> : size of the root disk image (default 4096M) --vmdisk-swapsize <size> : size of the swap disk image (default 1024M) --vmdisk-filesystem <fs> : filesystem to use for autosetup root disk image --emulator-script