#!/usr/bin/perl

# apt-cacher-report.pl
# Script to generate usage reports for the Apt-cacher package caching system.
#
# Copyright (C) 2002,2004 Jonathan Oxer
# Distributed under the terms of the GNU Public Licence (GPL).

use strict;
use warnings;

#############################################################################
### configuration ###########################################################

# Include the library for the config file parser
require('/usr/share/apt-cacher/lib/apt-cacher.pl');
use POSIX qw(strftime);

# Read in the config file and set the necessary variables
my $configfile = '/etc/apt-cacher/apt-cacher.conf';

while (scalar(@ARGV)) {
    my $arg = shift(@ARGV);
    if ($arg eq '-c') {
	$configfile = shift(@ARGV) || die '-c option requires an argument';
	die "$configfile not a file" if ! -f $configfile;
	die "$configfile unreadable" if ! -r $configfile;
    }
    elsif ($arg eq '-h' || $arg eq '--help') {
	print <<EOM;
Usage: $0 [ -c configfile ]

Options:
 -c configfile	Custom config file (default: $configfile)
 -h|--help	Print this help message
EOM
	exit(0);
    }
    else {
	die "Unknown parameter $arg\n";
    }
}

# Needs to be global for setup_ownership()
our $cfg = eval{ read_config($configfile) };
# not sure what to do if we can't read the config file...
die "Could not read config file: $@" if $@;

# check whether we're actually meant to generate a report
if ( $cfg->{generate_reports} ne 1 ){
    exit 0;
}

# Give up root
setup_ownership();

# Now set some things from the config file
# $logfile used to be set in the config file: now we derive it from $log_dir
my $logfile = "$cfg->{log_dir}/access.log";

###################################################
# Read in the logfiles if they exist, from oldest to newest
# First we look for rolled and compressed logfiles, from
# /var/log/apt-cacher/access.log.12.gz to access.log.2.gz
my $logcount = 12;
my @logdata;
while ($logcount > 1) {
    if (-f "${logfile}.$logcount.gz") {
	my $logdataraw = `zcat ${logfile}.$logcount.gz`;
	push (@logdata, split("\n", $logdataraw));
    }
    $logcount--;
}
# Then the immediately rolled (but uncompressed) log
if (-f "${logfile}.1") {
    open(my $lfh, '<', "${logfile}.1");
    push(@logdata, <$lfh>);
    close($lfh);
}
# Then finally the current working log
if (-f "${logfile}") {
    open(my $lfh, '<', $logfile);
    push(@logdata, <$lfh>);
    close($lfh);
}

# read current time
my $datetime = strftime("%Y-%m-%d %H:%M:%S", localtime());

my $hit_count = 0;
my $hit_bytes = 0;
my $miss_count = 0;
my $miss_bytes = 0;
my ($firstrecord,$lastrecord);

# parse logfile:
foreach (@logdata) {
    my @line = split /\|/;
    if ($#line==4) { # Old format without PID
	splice @line, 1, 0, ''; # Insert empty field
    }
    next unless $#line == 5; # Skip if parsing fails
    my $req_date = $line[0];
    # my $req_pid = $line[1];
    # my $req_ip = $line[2];
    my $req_result = $line[3];
    my $req_bytes = $line[4] =~ /^\d+$/ ? $line[4] : 0;
    # my $req_object = $line[5];

    $lastrecord = $req_date;
    if(!$firstrecord) {
	$firstrecord = $req_date;
    }

    if ( $req_result =~ /HIT|NOTMOD|HEAD/ ) {
	$hit_count++;
	$hit_bytes += $req_bytes;
    }
    else {
	$miss_count++;
	$miss_bytes += $req_bytes;
    }
}

my $total_count = $hit_count + $miss_count;
my ($hit_count_percent,$miss_count_percent);
if($total_count eq 0) {
    $hit_count_percent = 0;
    $miss_count_percent = 0;
}
else {
    $hit_count_percent = (int(($hit_count / $total_count) * 10000)) / 100;
    $miss_count_percent = (int(($miss_count / $total_count) * 10000)) / 100;
}
my $total_bytes = $hit_bytes + $miss_bytes;

##################################################
# At this point we have hit/miss/total counts, and hit/miss/total traffic.
# So now we need to decide what units to use for each one, and set a
# human-readable string. Displays as MB unless > 2000MB, in which case it
# displays as GB.
# Yes, I know this really should be a subroutine. Sigh. One day. Maybe.
my ($tx,$total_trafficstring,$hit_trafficstring,$miss_trafficstring,$hit_data_percent,$miss_data_percent);
if($total_bytes > 2097152000) {
    $tx = (int(($total_bytes/1073741824) * 1000)) / 1000;
    $total_trafficstring = "$tx GB";
}
else {
    $tx = (int(($total_bytes/1048576) * 1000)) / 1000;
    $total_trafficstring = "$tx MB";
}
if($hit_bytes > 2097152000) {
    $tx = (int(($hit_bytes/1073741824) * 1000)) / 1000;
    $hit_trafficstring = "$tx GB";
}
else {
    $tx = (int(($hit_bytes/1048576) * 1000)) / 1000;
    $hit_trafficstring = "$tx MB";
}
if($miss_bytes > 2097152000) {
    $tx = (int(($miss_bytes/1073741824) * 1000)) / 1000;
    $miss_trafficstring = "$tx GB";
}
else {
    $tx = (int(($miss_bytes/1048576) * 1000)) / 1000;
    $miss_trafficstring = "$tx MB";
}

##################################################
# Set percentages to 0 if no records, otherwise calculate
if($total_bytes eq 0) {
    $hit_data_percent = 0;
    $miss_data_percent = 0;
}
else {
    $hit_data_percent = (int(($hit_bytes / $total_bytes) * 10000)) / 100;
    $miss_data_percent = (int(($miss_bytes / $total_bytes) * 10000)) / 100;
}

##################################################
# If there weren't actually any logfiles processed these will be null, so
# we'll set them to strings
if(!$firstrecord) {
    $firstrecord = "unknown";
}
if(!$lastrecord) {
    $lastrecord = "unknown";
}

##################################################
# spit out the report
my $output = "<html>
<title>Apt-cacher traffic report</title>
";
# print "Apt-cacher traffic report\n";
# print "\n";
$output .= "

<h1>Apt-cacher traffic report</h1>

<p>For more information on apt-cacher visit http://packages.debian.org/apt-cacher.</p>
"; $output .= "

<h2>summary</h2>

"; $output .= ""; $output .= ""; $output .= ""; $output .= ""; $output .= "
Item Value
Report generated $datetime
Administrator {admin_email}\">$cfg->{admin_email}
First request $firstrecord
Last request $lastrecord
Total requests $total_count
Total traffic $total_trafficstring
"; $output .= "

<h2>cache efficiency</h2>

<table border=1>
<tr><th></th><th>Cache hits</th><th>Cache misses</th><th>Total</th></tr>
<tr><td>Requests</td><td>$hit_count ($hit_count_percent%)</td><td>$miss_count ($miss_count_percent%)</td><td>$total_count</td></tr>
<tr><td>Transfers</td><td>$hit_trafficstring ($hit_data_percent%)</td><td>$miss_trafficstring ($miss_data_percent%)</td><td>$total_trafficstring</td></tr>
</table>
"; $output .= "\n"; #print $output; my $report_file = "$cfg->{log_dir}/report.html"; unlink $report_file; open(my $rfh, '>', $report_file) or die "Unable to open $report_file"; print $rfh "$output\n"; close $rfh; exit 0; apt-cacher-1.7.8/lib/0000755000000000000000000000000012231245642011173 5ustar apt-cacher-1.7.8/lib/Linux/0000755000000000000000000000000011717045647012305 5ustar apt-cacher-1.7.8/lib/Linux/IO_Prio.pm0000644000000000000000000001207311717045647014146 0ustar package Linux::IO_Prio; use strict; use warnings; require Exporter; use vars qw(@ISA @EXPORT_OK %EXPORT_TAGS $VERSION); use POSIX qw(ENOSYS); use Carp; $VERSION = '0.03'; @ISA = qw(Exporter); %EXPORT_TAGS = (ionice => [qw(&ionice &ionice_class &ionice_data)], c_api => [qw(&ioprio_set &ioprio_get)], macros => [qw(IOPRIO_PRIO_VALUE IOPRIO_PRIO_CLASS IOPRIO_PRIO_DATA)], who => [qw(IOPRIO_WHO_PROCESS IOPRIO_WHO_PGRP IOPRIO_WHO_USE)], class => [qw(IOPRIO_CLASS_NONE IOPRIO_CLASS_RT IOPRIO_CLASS_BE IOPRIO_CLASS_IDLE)] ); # The tag lists are exclusive at the moment, so don't worry about duplicates. push @{$EXPORT_TAGS{all}}, @{$EXPORT_TAGS{$_}} foreach keys %EXPORT_TAGS; Exporter::export_ok_tags($_) foreach keys %EXPORT_TAGS; use constant IOPRIO_CLASS_SHIFT => 13; use constant IOPRIO_PRIO_MASK => ((1 << IOPRIO_CLASS_SHIFT) - 1); use constant { IOPRIO_WHO_PROCESS => 1, IOPRIO_WHO_PGRP => 2, IOPRIO_WHO_USER => 3 }; use constant { IOPRIO_CLASS_NONE => 0, IOPRIO_CLASS_RT => 1, IOPRIO_CLASS_BE => 2, IOPRIO_CLASS_IDLE => 3 }; if ($^O eq 'linux') { _load_syscall(); } else { warn "Linux::IO_Prio: unsupported operating system -- $^O\n"; } # Load syscall.ph sub _load_syscall { return eval{require('syscall.ph') || require('sys/syscall.ph')}; } # C API functions # int ioprio_get(int which, int who); sub ioprio_get { my ($which, $who) = @_; if (defined &SYS_ioprio_get) { return syscall(SYS_ioprio_get(), $which, $who); } else { return _not_implemented(); } } # int ioprio_set(int which, int who, int ioprio); sub ioprio_set { my ($which, $who, $ioprio) = @_; if (defined &SYS_ioprio_set) { return syscall(SYS_ioprio_set(), $which, $who, $ioprio); } else { return _not_implemented(); } } # C API Macros sub IOPRIO_PRIO_VALUE { my ($class, $data) = @_; return ($class << IOPRIO_CLASS_SHIFT) | $data; } sub IOPRIO_PRIO_CLASS { my ($mask) = @_; return ($mask >> IOPRIO_CLASS_SHIFT); } sub IOPRIO_PRIO_DATA { my ($mask) = @_; return ($mask & IOPRIO_PRIO_MASK); } # Wrapper functions sub ionice { my ($which, $who, $class, $data) = @_; carp "Data not permitted for class IOPRIO_CLASS_IDLE" if $class == IOPRIO_CLASS_IDLE && $data; return ioprio_set($which, $who, IOPRIO_PRIO_VALUE($class, $data)); } sub ionice_class { my ($which, $who) = @_; if((my $priority = ioprio_get($which, $who)) < 0) { return $priority; } else { return IOPRIO_PRIO_CLASS($priority); } } sub ionice_data { my ($which, $who) = @_; if((my $priority = ioprio_get($which, $who)) < 0) { return $priority; } else { return IOPRIO_PRIO_DATA($priority); } } # Stub for not implemented sub _not_implemented { $! = ENOSYS; return -1; } 1; __END__ =head1 NAME Linux::IO_Prio - Interface to Linux ioprio_set and ioprio_get via syscall or ionice wrapper. =head1 SYNOPSIS use Linux::IO_Prio qw(:all); my $status = ioprio_set(IOPRIO_WHO_PROCESS, $$, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); my $status = ionice(IOPRIO_WHO_PROCESS, $$, IOPRIO_CLASS_IDLE, 0); =head1 DESCRIPTION Use L and L from Perl. Only Linux is supported currently. Support for other unices will be added once the kernel capabilities are available. 
=head1 Exports

Nothing by default. The required exports can be specified individually or by
tag:

=over 4

=item :ionice -- ionice ionice_data ionice_class

=item :c_api -- ioprio_set ioprio_get

=item :macros -- IOPRIO_PRIO_VALUE IOPRIO_PRIO_CLASS IOPRIO_PRIO_DATA

=item :who -- IOPRIO_WHO_PROCESS IOPRIO_WHO_PGRP IOPRIO_WHO_USER

=item :class -- IOPRIO_CLASS_NONE IOPRIO_CLASS_RT IOPRIO_CLASS_BE IOPRIO_CLASS_IDLE

=item :all -- all the above

=back

ionice(), ionice_class() and ionice_data() are thin wrappers around the C API
allowing convenient single function calls. All of the other exports have the
same meaning and prototypes as the C API equivalents. See man
L<ioprio_set(2)> for further details.

=head2 Functions

=head3 C API

=over

=item $priority = ioprio_get($which, $who)

=item $status = ioprio_set($which, $who, $priority)

=back

=head3 Wrappers

=over

=item $status = ionice($which, $who, $class, $data)

=item $class = ionice_class($which, $who)

=item $data = ionice_data($which, $who)

=back

=head2 MACROS

=over 4

=item $priority = IOPRIO_PRIO_VALUE($class, $data)

=item $class = IOPRIO_PRIO_CLASS($mask)

=item $data = IOPRIO_PRIO_DATA($mask)

=back

=head2 CONSTANTS

=over 4

=item IOPRIO_WHO_PROCESS

=item IOPRIO_WHO_PGRP

=item IOPRIO_WHO_USER

=item IOPRIO_CLASS_NONE

=item IOPRIO_CLASS_RT

=item IOPRIO_CLASS_BE

=item IOPRIO_CLASS_IDLE

=back

=head1 COPYRIGHT

This module is Copyright (c) 2011 Mark Hindley. All rights reserved.

You may distribute under the terms of either the GNU General Public License
or the Artistic License, as specified in the Perl README file.

If you need more liberal licensing terms, please contact the maintainer.

=head1 WARRANTY

This is free software. IT COMES WITHOUT WARRANTY OF ANY KIND.

=head1 AUTHOR

Mark Hindley

#! /usr/bin/perl
#
# lib/apt-cacher-cs.pl
#
# Library file for apt-cacher with checksum specific common code

use strict;
use warnings;
use BerkeleyDB;
use Digest::SHA;
use Digest::MD5;
use IO::Uncompress::AnyUncompress qw($AnyUncompressError);
use Fcntl qw(:DEFAULT :flock);
use IPC::SysV qw(IPC_CREAT IPC_EXCL SEM_UNDO);
use IPC::Semaphore;

our $cfg;

BEGIN {
    foreach (\&db, \&import_sums, \&check_sum) {
	# Silence redefinition warning
	undef &$_;
    }
}

sub sig_handler {
    warn "Got SIG@_. Exiting gracefully!\n" if $cfg->{debug};
    exit 1;
}

# Need to handle non-catastrophic signals so that END blocks get executed
local $SIG{$_} = \&sig_handler foreach qw{INT TERM PIPE QUIT HUP SEGV};

# Returns a DB handle
#
# Note: BerkeleyDB is not reentrant/fork safe, so avoid forking or calling this
# function again whilst a previously returned handle is still in scope.
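#
# A minimal caller sketch (hypothetical usage, shown as a comment only;
# assumes $cfg has been populated via read_config() from apt-cacher.pl and
# that the key 'foo_1.0_all.deb' exists in the database):
#
#   my $dbh = db();                    # locks and opens {cache_dir}/sums.db
#   my $data;
#   if ($dbh->db_get('foo_1.0_all.deb', $data) == 0) {   # 0 means success
#       my $sums = hashify(\$data);    # e.g. $sums->{md5}, $sums->{size}
#   }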
sub db { my ($nolock) = @_; my $dbfile="$cfg->{cache_dir}/sums.db"; debug_message('Init checksum database') if defined &debug_message && $cfg->{debug}; # Serialise enviroment handling my $envlock; unless ($nolock) { sysopen($envlock, "$cfg->{cache_dir}/private/dbenvlock", O_RDONLY|O_CREAT) || die "Unable to open DB environment lockfile: $!\n"; _flock($envlock, LOCK_EX)|| die "Unable to lock DB environment: $!\n"; } my @envargs = ( -Home => $cfg->{cache_dir}, -Flags => DB_CREATE | DB_INIT_MPOOL | DB_INIT_CDB, -ThreadCount => 64 ); my $logfile; push (@envargs, (-ErrFile => $logfile, -ErrPrefix => "[$$]")) if open($logfile, '>>', "$cfg->{log_dir}/db.log"); debug_message('Create DB environment') if defined &debug_message && $cfg->{debug}; my $env; eval { local $SIG{__DIE__} = 'IGNORE'; # Prevent log verbosity local $SIG{ALRM} = sub { die "timeout\n" }; # NB: \n required alarm $cfg->{request_timeout}; $env = BerkeleyDB::Env->new(@envargs); alarm 0; }; if ($@) { die unless $@ eq "timeout\n"; # propagate unexpected errors } unless ($env) { warn "Failed to create DB environment: $BerkeleyDB::Error. Attempting recovery...\n"; db_recover(); $env = BerkeleyDB::Env->new(@envargs); } die "Unable to create DB environment: $BerkeleyDB::Error\n" unless $env; $env->set_isalive; failchk($env); # Take shared lock. This protects verify which requests LOCK_EX db_flock(LOCK_SH)|| die "Shared lock failed: $!\n"; unless ($nolock) { _flock($envlock, LOCK_UN)||die "Unable to unlock DB environment: $!\n"; close($envlock); } debug_message('Open database') if defined &debug_message && $cfg->{debug}; my $dbh = BerkeleyDB::Btree->new(-Filename => $dbfile, -Flags => DB_CREATE, -Env => $env) or die "Unable to open DB file, $dbfile $BerkeleyDB::Error\n"; return $dbh; } # Arg is DB handle # Arg is not undef for DB_WRITECURSOR sub get_cursor { my ($dbh,$write)=@_; my $cursor = $dbh->db_cursor($write?DB_WRITECURSOR:undef) or die $BerkeleyDB::Error; return $cursor; } # Arg is cursor # Arg is key reference # Arg is data reference sub cursor_next { my ($cursor,$keyref,$dataref) = @_; return $cursor->c_get($$keyref, $$dataref, DB_NEXT) } # Arg is the environment object sub failchk { my ($e) = @_; # warn "$$ failchk on $e\n"; if ($e->failchk == DB_RUNRECOVERY) { warn "Failed thread detected. Running database recovery\n"; db_recover(); } return; } # Arg is flock flags my $dblock; sub db_flock { my ($flags) = @_; if (!$dblock){ sysopen($dblock, "$cfg->{cache_dir}/private/dblock", O_RDONLY|O_CREAT) || die "Unable to open lockfile: $!\n"; } return _flock($dblock, $flags); } sub db_recover { env_remove(); my @envargs = ( -Home => $cfg->{cache_dir}, -Flags => DB_CREATE | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_RECOVER | DB_PRIVATE | DB_USE_ENVIRON ); # Avoid leaving DB log on filesystem if possible if ($BerkeleyDB::db_version <= 4.6) { # Cannot use for db4.7. 
Requires log_set_config() to be called before Env open eval {push(@envargs, (-SetFlags => DB_LOG_INMEMORY))}; } elsif ($BerkeleyDB::VERSION >= 0.40) { eval {push(@envargs, (-LogConfig => DB_LOG_IN_MEMORY))} } my $logfile; push(@envargs, (-ErrFile => $logfile)) if open($logfile, '>>', "$cfg->{log_dir}/db.log"); my $renv = BerkeleyDB::Env->new(@envargs) or die "Unable to create recovery environment: $BerkeleyDB::Error\n"; unlink "$cfg->{cache_dir}/private/dblock"; return defined $renv; } sub env_remove { return unlink <$$cfg{cache_dir}/__db.*>; # Remove environment } sub temp_env { # From db_verify.c # Return an unlocked environment # First try to attach to an existing MPOOL my $tempenv; $tempenv = BerkeleyDB::Env->new(-Home => $cfg->{cache_dir}, -Flags => DB_INIT_MPOOL | DB_USE_ENVIRON) or # Else create a private region $tempenv = BerkeleyDB::Env->new(-Home => $cfg->{cache_dir}, -Flags => DB_CREATE | DB_INIT_MPOOL | DB_USE_ENVIRON | DB_PRIVATE) or die "Unable to create temporary DB environment: $BerkeleyDB::Error\n"; return $tempenv; } sub db_error { return $BerkeleyDB::Error; } sub db_verify { my ($file,$env) = @_; return BerkeleyDB::db_verify (-Filename=>$file, -Env=>$env); } # Returns reference to status and hash of compaction data # Arg: DB handle ref sub _db_compact { my ($dbh) = @_; my %hash; my $status; return (\'DB not initialised in _db_compact', undef) unless $dbh; SWITCH: for ($dbh->type) { /1/ && do { # Btree $status = $dbh->compact(undef,undef,\%hash,DB_FREE_SPACE); last SWITCH; }; /2/ && do { # Hash $status = $dbh->compact(undef,undef,\%hash,DB_FREELIST_ONLY); last SWITCH; }; } return [$status, %hash]; } sub get_sem { my ($no_create) = @_; # If set, don't create, only return exisiting segment my $key = IPC::SysV::ftok("$cfg->{cache_dir}/sums.db", 1); my $sem; # First try to create new segment if (!$no_create && ($sem = IPC::Semaphore->new($key, 1, oct(666) | IPC_CREAT | IPC_EXCL))) { $sem->setall($cfg->{concurrent_import_limit}); } else { # Use existing $sem = IPC::Semaphore->new($key, 1, oct(666)); } return $sem; } sub get_existing_sem { return get_sem(1); } # arg: name with optional filehandle to be scanned and added to DB sub import_sums { my ($name, $fh) = @_; my %temp; my $sem; return unless $cfg->{checksum}; if ($cfg->{concurrent_import_limit}) { if ($sem = get_sem()) { # Take semaphore $sem->op(0, -1, SEM_UNDO); } else { warn "Failed to get IPC::Semaphore: $!"; } } if (extract_sums($name, $fh, \%temp)) { my $dbh = db(); while (my ($filename,$data) = each %temp){ $dbh->db_put($filename,$data) == 0 || warn "db_put $filename, $data failed with $BerkeleyDB::Error" ; } } # Release semaphore $sem->op(0, 1, SEM_UNDO) if $sem; return; } # arg: name # arg: filehandle # arg: DB handle sub check_sum { my ($name, $fh) = @_; return 1 unless $cfg->{checksum}; if (ref $fh ne 'GLOB') { warn "Not a filehandle"; return 1; } unless ($name) { info_message('Empty filename in check_sum()'); return 1; # Ignore } seek($fh,0,0); # Rewind if ($name =~ /2\d{3}-\d{2}-\d{2}-\d{4}\.\d{2}\.gz$/) { # pdiffs need decompressing $fh = IO::Uncompress::AnyUncompress->new($fh) or die "Decompression failed: $AnyUncompressError\n"; } my $data; my $dbh = db(); if (my $status = $dbh->db_get($name, $data) != 0) { # Returns 0 on success. 
$cfg->{debug} && debug_message("db_get for $name failed: $status ($BerkeleyDB::Error)"); return 1; } my $href = hashify(\$data); foreach (qw/sha1 md5 sha256/) { # Try algorithms in order if($href->{$_}) { # now check file my $digest; if (/^sha(\d+)/) { $digest = Digest::SHA->new($1)->addfile($fh)->hexdigest; } else { $digest = Digest::MD5->new->addfile($fh)->hexdigest; } $cfg->{debug} && debug_message("Verify $name $_: db $href->{$_}, file $digest"); return ($href->{$_} eq $digest); } } $cfg->{debug} && debug_message("No stored checksum found for $name. Ignoring"); return 1; } 1; apt-cacher-1.7.8/lib/apt-cacher.pl0000755000000000000000000005112712231245470013547 0ustar #! /usr/bin/perl # # lib/apt-cacher.pl # # This is a library file for apt-cacher to allow code common to apt-cacher # itself plus its supporting scripts to be maintained in one location. use strict; use warnings; use POSIX (); use Fcntl qw/:DEFAULT :flock/; use FreezeThaw qw(freeze thaw); use HTTP::Response; use URI; use IO::Uncompress::AnyUncompress qw($AnyUncompressError); use Module::Load::Conditional; use File::Spec; use Carp; our $cfg; sub read_config { (my $config_file) = @_; # set the default config variables my %config = ( # General admin_email => 'root@localhost', allowed_hosts => '', allowed_ssl_locations => '', allowed_ssl_ports => '443', cache_dir => '/var/cache/apt-cacher', clean_cache => 1, concurrent_import_limit => eval {my $count = 0; if (open(my $fh, '<', '/proc/cpuinfo')){ /^processor\s*:/i && $count++ foreach <$fh>; } $count}, curl_idle_timeout => 120, curl_throttle => 10, daemon_port => 3142, debug => 0, denied_hosts => '', distinct_namespaces => 0, expire_hours => 0, data_timeout => 120, generate_reports => 1, group => eval {my $g = $); $g =~ s/\s.*$//; $g}, http_proxy => '', http_proxy_auth => '', limit => 0, limit_global => 0, log_dir => '/var/log/apt-cacher', request_empty_lines => 5, request_timeout => 30, return_buffer_size => 1048576, # 1Mb reverse_path_map => 1, ubuntu_release_names => join(', ', qw( dapper edgy feisty gutsy hardy intrepid jaunty karmic lucid maverick natty oneiric precise quantal raring saucy trusty )), use_proxy => 0, use_proxy_auth => 0, user => $>, # Private _config_file => $config_file, _path_map => { 'debian-changelogs' => ['packages.debian.org'], 'ubuntu-changelogs' => ['changelogs.ubuntu.com'] }, # Regexps checksum_files_regexp => '^(?:' . join('|', qw(Packages(?:\.gz|\.bz2)? Sources(?:\.gz|\.bz2)? (?:In)?Release Index(?:\.bz2)? ) ) . ')$', skip_checksum_files_regexp => '^(?:' . join('|', qw((?:In)?Release Release\.gpg ) ) . ')$', index_files_regexp => '^(?:' . join('|', qw(Index(?:\.bz2)? Packages(?:\.gz|\.bz2)? Release(?:\.gpg)? InRelease Sources(?:\.gz|\.bz2)? Contents-(?:[a-z]+-)?[a-zA-Z0-9]+\.gz (?:srclist|pkglist)\.[a-z-]+\.bz2 release(?:\.gz|\.bz2)? ), # This needs to be a separate item to avoid a warning from the # comma within qw() q(Translation-[a-z]{2,3}(?:_[A-Z]{2})?(?:\.gz|\.bz2|\.xz)?) ) . ')$', installer_files_regexp => '^(?:' . join('|', qw(vmlinuz linux initrd\.gz changelog NEWS.Debian UBUNTU_RELEASE_NAMES\.tar\.gz(?:\.gpg)? (?:Devel|EOL)?ReleaseAnnouncement(?:\.html)? meta-release(?:-lts)?(?:-(?:development|proposed))? ) ) . ')$', package_files_regexp => '(?:' . 
join('|', qw(^[-+.a-z0-9]+_(?:\d:)?[-+.~a-zA-Z0-9]+(?:_[-a-z0-9]+\.(?:u|d)?deb|\.dsc|\.tar(?:\.gz|\.bz2|\.xz)|\.diff\.gz) \.rpm index\.db-.+\.gz \.jigdo \.template ) ) .')$', pdiff_files_regexp => '^2\d{3}-\d{2}-\d{2}-\d{4}\.\d{2}\.gz$', soap_url_regexp => '^(?:http://)?bugs\.debian\.org(?::80)?/cgi-bin/soap.cgi$', ); CONFIGFILE: foreach my $file ($config_file, grep {!/(?:\.(?:disabled|dpkg-(?:old|dist|new|tmp))|~)$/} glob((File::Spec->splitpath($config_file))[1].'conf.d/*')) { open my $fh, '<', $file or die $!; local $/; # Slurp if (my $buf = $fh->getline) { $buf=~s/\\\n#/\n#/mg; # fix broken multilines $buf=~s/\\\n//mg; # merge multilines for(split(/\n/, $buf)) { next if(/^#/); # weed out whole comment lines immediately next unless $_; # weed out empty lines immediately s/#.*//; # kill off comments s/^\s+//; # kill off leading spaces s/\s+$//; # kill off trailing spaces if (!/[a-z_6]{4,}\s*=/) { # Shortest configuration option is 4 # Invalid line if ($file eq $config_file) { # Main configfile: warn and skip just this line warn "Invalid line in main configuration file $config_file: \Q$_\E. Ignoring line\n"; next; } else { # conf.d file, skip file warn "Invalid configuration line in $file: \Q$_\E. Skipping file\n"; next CONFIGFILE; } } if (my ($key, $value) = split(/\s*=\s*/)) { # split into key and value pair if ($key =~ /^_/) { warn "Can't set private configuration option $key. Ignoring\n"; next; } $value = 0 unless ($value); #print "key: $key, value: $value\n"; $config{$key} = $value; #print "$config{$key}\n"; } } } close $fh; } # Recognise old/renamed configuration options foreach (['logdir' => 'log_dir'], ['fetch_timeout' => 'data_timeout']) { if ($config{@$_[0]}) { $config{@$_[1]} = $config{@$_[0]}; delete $config{@$_[0]}; } } return \%config; } sub cfg_split { my ($item) = @_; return $item ? grep {!/^$/} split(/\s*[,;]\s*/, $item) : undef; } sub private_config { if($cfg->{path_map}) { for(cfg_split($cfg->{path_map})) { my @tmp = split(/\s+/, $_); next unless my $key=shift(@tmp); if (@tmp) { s#/+$## foreach @tmp; # No trailing / $cfg->{_path_map}{$key} = [@tmp]; } else { # Unset predefined? delete $cfg->{_path_map}{$key} } } } # Handle libcurl configuration if ($cfg->{libcurl}) { for (cfg_split($cfg->{libcurl})) { my @tmp = split(/\s+/, $_); next unless my $key=uc(shift(@tmp)); if (@tmp) { $cfg->{_libcurl}{$key} = shift(@tmp); } } } # Expand PATH_MAP in allowed_hosts if ($cfg->{allowed_locations}) { $cfg->{allowed_locations} =~ s/\bPATH_MAP\b/join(', ', keys %{$cfg->{_path_map}})/ge; } # Expand UBUNTU_RELEASE_NAMES in installer_files_regexp $cfg->{installer_files_regexp} =~ s/UBUNTU_RELEASE_NAMES/'(?:' . join('|', grep { m%[^a-z]% ? warn "Ignoring invalid Ubuntu release: $_\n" : $_ } cfg_split($cfg->{ubuntu_release_names})) . ')'/ge; # Precompile regexps so they will not be recompiled each time $cfg->{$_} = qr/$cfg->{$_}/ foreach glob('{{{skip_,}checksum,index,installer,package,pdiff}_files,soap_url}_regexp'); if ($cfg->{interface}) { # If we can't resolve item, see if it is an interface name unless (inet_aton($cfg->{interface})) { require IO::Interface::Simple; my $if = IO::Interface::Simple->new($cfg->{interface}); if ($if) { $cfg->{interface} = $if->address; } else { $cfg->{interface} = ''; } } } # Proxy support foreach ('proxy', 'proxy_auth') { if ($cfg->{"use_$_"} && !$cfg->{"http_$_"}) { warn "use_$_ specified without http_$_ being set. 
Disabling."; $cfg->{"use_$_"}=0; } } # Rate limit and disk_usage_limit support foreach (qw(limit disk_usage_limit)) { next unless exists $cfg->{$_}; if (defined(my $e = expand_byte_suffix($cfg->{$_}))) { # Test defined() as 0 is valid $cfg->{"_$_"} = $e; # Set private variable } else { warn "Unrecognised $_: $cfg->{$_}. Ignoring."; } } # convert curl_throttle from milliseconds to seconds $cfg->{_curl_throttle} = $cfg->{curl_throttle}/1000; return; } sub expand_byte_suffix { my ($bstring) = @_; my $ret; # The standards are pretty confused here between SI, IEC and JDEC. # # Rationale: # Pre-1.7 configuration was based on wget(1) (which uses lowercase k, m, g, t): decimal. # Standard SI prefixes: decimal. # Support *bibytes as binary # K on its own isn't SI, so: binary for ($bstring) { /^(\d+)$/ && do {$ret = $1; last}; /^(\d+)\s*kB?$/ && do {$ret = $1 * 1000; last}; /^(\d+)\s*Ki?B?$/ && do {$ret = $1 * 1024; last}; /^(\d+)\s*[mM]B?$/ && do {$ret = $1 * 1000**2; last}; /^(\d+)\s*MiB?$/ && do {$ret = $1 * 1024**2; last}; /^(\d+)\s*[gG]B?$/ && do {$ret = $1 * 1000**3; last}; /^(\d+)\s*GiB?$/ && do {$ret = $1 * 1024**3; last}; /^(\d+)\s*[tT]B?$/ && do {$ret = $1 * 1000**4; last}; /^(\d+)\s*TiB?$/ && do {$ret = $1 * 1024**4; last}; } return $ret; } # check directories exist and are writable # Needs to run as root as parent directories may not be writable sub check_install { # Die if we have not been configured correctly die "$0: No cache_dir directory!\n" if (!-d $cfg->{cache_dir}); my $uid = $cfg->{user}=~/^\d+$/ ? $cfg->{user} : POSIX::getpwnam($cfg->{user}); my $gid = $cfg->{group}=~/^\d+$/ ? $cfg->{group} : POSIX::getgrnam($cfg->{group}); if (!defined ($uid || $gid)) { die "Unable to get user:group"; } my @dir = ($cfg->{cache_dir}, $cfg->{log_dir}, "$cfg->{cache_dir}/private", "$cfg->{cache_dir}/import", "$cfg->{cache_dir}/packages", "$cfg->{cache_dir}/headers"); foreach my $dir (@dir) { if (!-d $dir) { print "Info: $dir missing. Doing mkdir($dir, 0755)\n"; mkdir($dir, 0755) || die "Unable to create $dir: $!"; } if ((stat($dir))[4] != $uid || (stat(_))[5] != $gid) { print "Warning: $dir -- setting ownership to $uid:$gid\n"; chown ($uid, $gid, $dir) || die "Unable to set ownership for $dir: $!"; } } for my $file ("$cfg->{log_dir}/access.log", "$cfg->{log_dir}/error.log") { if(!-e $file) { print "Warning: $file missing. Creating.\n"; open(my $tmp, '>', $file) || die "Unable to create $file: $!"; close($tmp); } if ((stat($file))[4] != $uid || (stat(_))[5] != $gid) { print "Warning: $file -- setting ownership to $uid:$gid\n"; chown ($uid, $gid, $file) || die "Unable to set ownership for $file: $!"; } } return; } # Arg is ref to flattened hash. Returns hash ref sub hashify { my ($href) = @_; return unless $$href; if ($$href =~ /^FrT;/) { # New format: FreezeThaw return (thaw($$href))[0]; } elsif ($$href =~ /. ./) { # Old format: join return {split(/ /, $$href)}; } else { return; } } # Get filename from filehandle sub filename_fh { my ($fh) = @_; return readlink fd_path($fh); } # Get path of fildescriptor # VERY Linux specific sub fd_path { my ($fh) = @_; die 'Not a GLOB' unless ref $fh eq 'GLOB'; return '/proc/self/fd/' . 
$fh->fileno; } # Delete cached files by filehandle sub unlink_by_fh { my @fh = @_; my $count; foreach (@fh) { unless ((my $ref = ref) eq 'GLOB') { warn "Not a GLOB, skipping: $ref \n"; next; } $_ = filename_fh($_); next unless -f; # Skip already deleted debug_message("Deleting $_") if defined &debug_message && $cfg->{debug}; ($count += unlink $_) || warn "Failed to delete $_: $!"; } return $count; } # Verbose wrapper to flock sub _flock { my ($fh, $flags) = @_; my $ret; unless ($ret = flock($fh, $flags | LOCK_NB)) { if ($cfg->{debug}) { # Hash for decoding flag symbols. # # __PACKAGE__->$sym references flock constants without needing to # disable strict refs my %h = map {$_ => __PACKAGE__->$_} glob("LOCK_{SH,EX,UN,NB}"); debug_message('Waiting for ' . join ('|', grep {$h{$_} & $flags && $_} keys %h) . ' on ' . filename_fh($fh)) if defined &debug_message && $cfg->{debug}; } $ret = flock($fh, $flags); debug_message('Got it!') if defined &debug_message && $cfg->{debug}; } return $ret; } # Argument is filehandle or filename, returns HTTP::Response sub read_header { my ($file) = @_; my ($r, $fh); if (ref $file eq 'GLOB') { $fh = $file; } else { open($fh, '<', $file) || die "Open header $file failed: $!"; } if ($fh) { for ($fh->getline) { last unless defined; if (/^(HTTP\/1\.[01]\s+)?\d{3}\s+/) { # Valid seek($fh,0,0) || die "Seek failed: $!"; { local $/; # Slurp $r = HTTP::Response->parse(<$fh>); } chomp_message($r); # Fake Client-Date if not specified $r->client_date($r->date || time) unless $r->client_date; $r->header('Age' => $r->current_age); } else { # Invalid warn "Invalid/corrupt header file: $file"; undef $r; undef $fh; } } } # Don't explicitly close $fh return $r; } # Args are filename/filehandle and HTTP::Response sub write_header { my ($file, $response) = @_; debug_message('Writing header') if defined &debug_message && $cfg->{debug}; # Remove Connection header and options foreach ($response->header('Connection')) { $response->remove_header($_) } $response->remove_header('Connection','Age'); if ($response->request && (my $request_url = $response->request->uri)) { # Add Request URL to headers $response->header('X-AptCacher-URL' => $request_url); } $response->client_date(time); # HTTP::Response uses this to calculate current_age my $chfh; if (ref $file eq 'GLOB'){ $chfh = $file; $chfh->truncate(0) || die "Truncate failed: $!"; seek($chfh,0,0) || die "Seek failed: $!"; } else { open ($chfh, '>', $file) || die "Unable to open $file, $!"; } print $chfh $response->status_line, "\n"; print $chfh $response->headers->as_string; # No explicit close. Rely on gc or explicit close in caller return; } # HTTP::Response->parse is leaving \r on the end of the message! 
sub chomp_message { my ($r) = @_; for ($r->message) { last unless defined; local $/ = "\r"; redo if chomp; $r->message($_); } return $r; } # Returns valid namespace from URI sub get_namespace { my ($uri) = @_; if ($cfg->{distinct_namespaces}) { my @path = ($uri->host, $uri->path_segments); # Use path_map, if defined if (defined $cfg->{_path_map}{$path[0]}) { return $path[0]; } # Work from the end while (defined(local $_ = pop @path)) { last if /^(?:pool|dists)$/; } return join('_', grep {!/^$/} @path); } return; } # Returns URI object of url used by libcurl to fetch file sub get_upstream_url { my ($filename) = @_; my $uri; # Try cached headers first if (my $response = read_header("$cfg->{cache_dir}/headers/$filename")) { $uri = URI->new($response->header('X-AptCacher-URL')); } unless ($uri) { # Old complete file if (open (my $cfh, '<', my $complete_file = "$cfg->{cache_dir}/private/$filename.complete")) { $uri = URI->new(<$cfh>); close($cfh); } else { # Assume same as request $uri = get_original_url($filename); } } return $uri; } # Returns URI object of url used to request file sub get_original_url { my ($filename) = @_; # Infer from filename, assume HTTP return URI->new('http://' . join('/', split(/_/, $filename))); } # Stores data flattened for use in tied hashes # Arg $fh is optional sub extract_sums { my ($name, $fh, $hashref) = @_; if ($fh) { seek($fh,0,0) || die "Seek failed: $!"; } else { open($fh, '<', $name) || die "Open $name failed: $!"; } my $raw = IO::Uncompress::AnyUncompress->new($fh) or die "Decompression failed: $AnyUncompressError\n"; # Name is just the cached filename without path $name = (File::Spec->splitpath($name))[2]; # Determine namespace my $namespace; if ($namespace = get_namespace(get_original_url($name)) || ''){ # Default empty, not undef $namespace .= '/'; } my ($indexbase) = ($name =~ /([^\/]+_)(?:Index|(?:In)?Release)$/); $indexbase = '' unless $indexbase; # Empty by default (for Sources) my ($skip,%data); while(<$raw>) { last if $AnyUncompressError; chomp; # This flag prevents us bothering with the History section of diff_Index files if (/^SHA1-(?:Current|History)/) { $skip = 1; } elsif (/^SHA1-Patches:/) { $skip = 0; } elsif (/^\s(\w{32}|\w{40}|\w{64})\s+(\d+)\s(\S+)$/) { # diff_Index/Release/Sources next if $skip; my $hexdigest=$1; my $size=$2; my $file=$indexbase.$3; $file=~s!/!_!g; # substitute any separators in indexed filename if ($name =~ /Index$/) { $file.=".gz"; } elsif ($name =~ /_Sources(?:\.gz|\.bz2)?$/) { # Prepend namespace, if set $file = $namespace . $file; } $data{$file}{size} = $size; for (my $len = length($hexdigest)) { # Select algorithm based on hex length $len == 32 # md5 && do { $data{$file}{md5}=$hexdigest; last; }; $len == 40 # sha1 && do { $data{$file}{sha1}=$hexdigest; last; }; $len == 64 # sha256 && do { $data{$file}{sha256}=$hexdigest; last; }; warn "Unrecognised algorithm length: $len. Ignoring."; } } elsif(/^MD5sum:\s+([a-z0-9]{32})$/) { # Packages $data{md5}=$1; } elsif(/^SHA1:\s+([a-z0-9]{40})$/) { $data{sha1}=$1; } elsif(/^SHA256:\s+([a-z0-9]{64})$/) { $data{sha256}=$1; } elsif(/^Size:\s+([0-9]+)$/) { $data{size}=$1; } elsif(/^Filename:\s+.*?([^\/]+)$/) { # Non-greedy quantifier essential # Prepend namespace, if set $data{file} = $namespace . $1; } # diff_Index and Release files have no empty line at the end, so test eof() for them if(/^$/ || ($name =~ /(?:(?:In)?Release|diff_Index)$/ && $raw->eof())) { # End of record/file if (exists $data{file}) { # From Packages. 
Convert to hash of hashes with filename as key foreach (qw(size md5 sha1 sha256)) { $data{$data{file}}{$_} = $data{$_}; delete $data{$_}; } delete $data{file}; } foreach (keys %data) { $hashref->{$_} = freeze($data{$_}); } undef %data; # Reset } }; if ($AnyUncompressError) { warn "$name Read failed: $AnyUncompressError. Aborting read\n"; return; } return 1; } { # Scoping block my $glock; sub set_global_lock { my ($msg)=@_; my $glockfile="$cfg->{cache_dir}/private/glock"; $msg='Unspecified' if !$msg; debug_message("Global lock: \u$msg") if defined &debug_message && $cfg->{debug}; # May need to create it if the file got lost sysopen($glock, $glockfile, O_CREAT) || die "Unable to open lockfile: $!"; _flock($glock, LOCK_EX) || die "Unable to lock $glockfile for \u$msg: $!"; return defined($glock); } sub release_global_lock { unless ($glock->opened) { carp('Attmept to free lock not held'); return; } _flock($glock, LOCK_UN) || die "Unable to release lock: $!"; close $glock || die "Unable to close lock: $!"; debug_message("Release global lock") if defined &debug_message && $cfg->{debug}; return; } sub global_lock_fh { return $glock; } } sub setup_ownership { my $uid=$cfg->{user}; my $gid=$cfg->{group}; if($cfg->{chroot}) { if($uid || $gid) { # open them now, before it is too late # FIXME: reopening won't work, but the lose of file handles needs to be # made reproducible first open_log_files(); } chroot $cfg->{chroot} || die "Unable to chroot: $1"; chdir $cfg->{chroot}; } if($gid) { if($gid=~/^\d+$/) { my $name = POSIX::getgrgid($gid); die "Unknown group ID: $gid (exiting)\n" if !$name; } else { $gid = POSIX::getgrnam($gid); die "No such group (exiting)\n" if !defined($gid); } POSIX::setgid($gid) || die "setgid failed: $!"; $) =~ /^$gid\b/ && $( =~ /^$gid\b/ || die "Unable to change group id"; } if($uid) { if($uid=~/^\d+$/) { my $name = POSIX::getpwuid($uid); die "Unknown user ID: $uid (exiting)\n" if !$name; } else { $uid = POSIX::getpwnam($uid); die "No such user (exiting)\n" if !defined($uid); } POSIX::setuid($uid) || die "setuid failed: $!"; $> == $uid && $< == $uid || die "Unable to change user id"; } return; } # Still matches against the filename only if called with a fully qualified path sub is_file_type { my ($type,$file) = @_; $type .= '_files_regexp'; die "Regexp $type not defined in config" if !exists($cfg->{$type}); return ((File::Spec->splitpath($file))[2] =~ $cfg->{$type}); } sub load_checksum { return unless $cfg->{checksum}; if (Module::Load::Conditional::check_install(module => 'BerkeleyDB')) { require('apt-cacher-cs.pl'); } else { warn "Checksum disabled as BerkeleyDB not found. Install libberkeleydb-perl\n"; $cfg->{checksum}=0; } return; } ######### HOOKS ########### # # arg: file to be scanned and added to DB sub import_sums { return 1; } # purpose: ?create?, lock the DB file and establish DB connection sub db { return 1; } # args: filehandle and DB handle sub check_sum { return 1; } 1; apt-cacher-1.7.8/apt-cacher-import.pl0000755000000000000000000002665712231245457014330 0ustar #!/usr/bin/perl # apt-cacher-import.pl # # Script to import APT package files for into the apt-cacher cache. # # It is not necessary to run this script when setting up apt-cacher for the # first time: its purpose is to introduce APT packages from some other source, # such as a local mirror. Along with each cached file, apt-cacher also caches # the HTTP headers to send out to clients when the package is requested. 
# If package files are just copied straight into the cache, apt-cacher won't
# use them because the relevant headers are missing. Having copied some
# package files to the import directory, or by specifying the directory to
# use on the command line, utilise this script to generate the HTTP headers
# and copy the header and package files to the correct location.
#
# Basic usage:
# 1. Place package files into {cache_dir}/import
# 2. Run the script: /usr/share/apt-cacher/apt-cacher-import.pl
#
# It can also be used in several ways to import packages within namespaces
# when distinct_namespaces is set in the apt-cacher configuration.
#
# To place imported packages in a specified namespace:
# 1. As basic usage above, but also use -n|--namespace foobar
#
# To migrate an existing apt-cacher cache to support multiple distributions:
# 1. Configure distinct_namespaces = 1 in the configuration file
# 2. Run the script with the -u|--url option and specify the cache package
#    directory as the import directory, e.g
#
#    /usr/share/apt-cacher/apt-cacher-import.pl -u /var/cache/apt-cacher/packages
#
# To import only packages which are in the current cache index files:
# 1. Configure apt-cacher with distinct_namespaces = 1 in the configuration file
# 2. Run apt-get update on your clients to populate the apt-cacher index files
# 3. Run the script with the -d|--digest option and optionally specify a
#    package import directory, e.g
#
#    /usr/share/apt-cacher/apt-cacher-import.pl -d [/tmp/obsolete_cache]
#
# Copyright (C) 2004, Jonathan Oxer
# Copyright (C) 2005, Eduard Bloch
# Copyright (C) 2011, Mark Hindley
#
# Distributed under the terms of the GNU Public Licence (GPL).

use strict;
use warnings;

# Include the library file
use lib '/usr/share/apt-cacher/lib';
require('apt-cacher.pl');

use Getopt::Long qw(:config no_ignore_case bundling);
use File::Copy;
use File::Spec;
use Cwd 'abs_path';
use HTTP::Date;
use HTTP::Response;
use URI;
use Digest::MD5;

my $configfile = '/etc/apt-cacher/apt-cacher.conf';

my $help;
my $quiet;
my $sim;
my $force;
my @namespaces;
my $digest;
my $url;
my $recursive;
my $ro_mode;
my $symlink_mode;

local $| = 1;

my %options = (
    "h|help" => \$help,
    "q|quiet" => \$quiet,
    "s|show" => \$sim,
    "f|force" => \$force,
    "n|namespace=s" => \@namespaces,
    "d|digest" => \$digest,
    "u|url" => \$url,
    "R|recursive" => \$recursive,
    "r|readonly" => \$ro_mode,
    "l|symlinks" => \$symlink_mode,
    "c|cfg|conf=s" => \$configfile
);

if (!GetOptions(%options) || $help) {
    die <<EOM
Usage: $0 [-c <configfile>] [-d|--digest] [-f|--force] [-h|--help]
  [-l|--symlinks] [-n|--namespace=<namespace>] [-q|--quiet] [-R|--recursive]
  [-r|--readonly] [-s|--show] [-u|--url] [<import_dir> ...]

Options:
 -c	Use the specified configuration file (default '$configfile').
 -d	Automatically try to determine the correct namespace for the packages
	by matching MD5 digests with those in Packages and Sources indices
	(requires distinct_namespaces).
 -f	Force continue, even if it seems unwise.
 -h	Show this usage help.
 -l	Do not move the source files. Instead, create symlinks to them. If the
	target symlink already exists, it will be removed.
 -n	Use the specified namespace subdirectory (requires
	distinct_namespaces).
 -q	Less verbose.
 -R	Recurse into subdirectories below the import directory.
 -r	Do not move the source files. Instead, create hardlinks or real copies.
 -s	Just show what would be done.
 -u	Automatically try to determine the correct namespace for the packages
	from their original URL (requires distinct_namespaces).

If <import_dir> is omitted, {cache_dir}/import will be used.
EOM } our $cfg = eval{ read_config($configfile) }; # not sure what to do if we can't read the config file... die "Could not read configuration file '$configfile': $@" if $@; private_config(); # change uid and gid setup_ownership($cfg); check_install(); # Sanity checks if(!$ARGV[0]) { my $import_dir = "$cfg->{cache_dir}/import"; print "No import directory specified, using $import_dir\n" unless $quiet; push @ARGV, $import_dir; sleep 2; } if (@namespaces || $digest || $url) { if (!$cfg->{distinct_namespaces}) { die "Namespace import requested without distinct_namespaces being set in the configuration.\n"; } elsif ((grep {defined && $_ > 0} (scalar @namespaces, $url, $digest)) > 1) { die "Cannot specify multiple simultaneous namespace mechanisms\n"; } elsif ($url && $recursive) { die "Cannot act recursively with -u|--url\n"; } elsif (@namespaces){ foreach (@namespaces) { if (!exists $cfg->{_path_map}{$_}) { print "The namespace specified ($_) is not a path_map key. This is almost certainly not what was intended.\n"; die "Use -f|--force option to import anyway.\n" unless $force; print "Continuing anyway as -f|--force specified.\n"; } } } elsif ($digest && ! ($ro_mode || $symlink_mode)) { warn "Digest mode requires symlinking or copying: enabling symlink option\n"; $symlink_mode = 1; } } die "Cannot specify -s|--symlink and -r|--readonly together.\n" if $symlink_mode && $ro_mode; my $imported = 0; # common dummy data for all imported packages my @info = stat("$cfg->{cache_dir}/private"); my $headerdate = time2str(); my %digest_map; # To map MD5 digests to namespaces for -d|--digest read_indices(); foreach (@ARGV) { if (!-d) { warn "$_ is not a directory -- skipping\n"; next; } print "Importing from $_\n" unless $quiet; import($_); } print "Done.\n" , ($sim ? "Simulation mode so nothing actually done. $imported files would have been imported\n" : "$imported files imported\n") unless $quiet; exit 0; ### Subroutines ### sub read_indices { if ($digest) { my $cwd=Cwd::getcwd(); chdir("$cfg->{cache_dir}/packages") || die "Unable to chdir() to $cfg->{cache_dir}/packages/: $!"; # Read the index files my @ifiles = glob("*{Packages,Sources}{,.bz2,.gz}"); die "No index files found in $cfg->{cache_dir}/packages for automatic digest import into namespaces\n" unless @ifiles; print "Reading index files from $cfg->{cache_dir}/packages/\n", "This can take some time, so be patient....\n" unless $quiet; foreach my $indexfile (@ifiles) { my %sums; # Get namespace my $namespace; if (my $uri = get_original_url($indexfile)) { unless ($namespace = get_namespace($uri)) { die "No namespace found for $indexfile. Check path_map setting or delete the file. 
Aborting\n"; } } else { warn "Unable to get original URL for $indexfile\n"; next; } print "Reading: $indexfile [namespace $namespace]\n" unless $quiet; # Parse extract_sums($indexfile, undef, \%sums) || die("Error processing $indexfile, automatic digest import abandoned.\nRemove the file if the packages to be imported are not associated with this repository.\n"); while (my ($package, $ice) = each %sums) { if (my $md5 = hashify(\$ice)->{md5}) { # Use a reverse hash to prevent duplicates my %h = map {$_ => 1} @{$digest_map{$md5}{namespaces}}; next if $h{$namespace}; push @{$digest_map{$md5}{namespaces}}, $namespace; $digest_map{$md5}{filename} = (File::Spec->splitpath($package))[2]; } else { warn "MD5 digest for $package not included in indexfile.\n"; } } } chdir $cwd; } return; } sub import { my $import_dir=shift; chdir($import_dir) || die "Can't change to the import directory ($import_dir)"; if($recursive) { my $cwd=Cwd::getcwd(); foreach (glob('*')) { if(-d && !-l) { import("$cwd/$_"); chdir $cwd; } } } print "Simulation mode: just showing what would be done\n" if $sim; print(($symlink_mode ? 'Symlinking' : ($ro_mode ? 'Hard linking or copying' : 'Moving')), " package files from $import_dir to $cfg->{cache_dir}\n") unless $quiet; # Loop through all the package files in the current directory foreach my $packagefile (glob('*')) { next unless -f $packagefile; next unless $digest || is_file_type('package', $packagefile); # By default use the current name my $targetfile = $packagefile; if($digest) { open(my $pfh, '<', $packagefile) || die "Unable to open $packagefile: $!"; my $md5 = Digest::MD5->new()->addfile($pfh)->hexdigest; close($pfh); if (! exists $digest_map{$md5}) { print "Cannot import $packagefile: file MD5 digest $md5 not found in index files\n"; next; } @namespaces = @{$digest_map{$md5}{namespaces}}; $targetfile = $digest_map{$md5}{filename}; # Just in case filename is mangled } elsif ($url) { if (my $uri = get_original_url($packagefile)) { if (my $namespace = get_namespace($uri)) { push @namespaces, $namespace; } else { print "Unable to determine namespace from URL for $packagefile. Skipping.\n"; next } } else { die "Unable to get original URL for $packagefile\n"; } } @namespaces = (undef) unless @namespaces; NAMESPACE: foreach (@namespaces) { my $package_dir = join('/', grep {defined} $cfg->{cache_dir}, 'packages', $_); my $header_dir = join('/', grep {defined} $cfg->{cache_dir}, 'headers', $_); foreach ($package_dir, $header_dir) { next if $sim; mkdir $_ or my $error = $! unless -d; die "Failed to create directory $_: $error\n" unless -d; die "Cannot write to $_: $!\n" if !-w; } foreach ("$header_dir/$targetfile", "$package_dir/$targetfile"){ if (-f) { # Don't overwrite files print "$_ already exists: skipping\n"; next NAMESPACE; } } print "Importing: $packagefile", ($packagefile ne $targetfile ? " as $targetfile" : ''), ($_ ? " [namespace $_]\n" : "\n") unless $quiet; unless ($sim) { import_file($packagefile, "$package_dir/$targetfile"); if ($url && -f "../headers/$packagefile") { # Upgrading an existing cache, use the header file import_file("../headers/$packagefile", "$header_dir/$packagefile"); # } else { # Generate a header write_header("$header_dir/$targetfile", HTTP::Response->new(200, 'OK', ['Date' => $headerdate, 'Last-Modified' => $headerdate, 'Content-Length' => -s $packagefile])); } # copy the ownership of the private directory foreach (glob("{$header_dir,$package_dir}/$targetfile")) { chown($info[4], $info[5], $_) || warn "Failed to set ownership for $_. 
You need to correct this manually\n"; } } $imported++; } } return; } sub import_file { my ($source, $target) = @_; if($symlink_mode) { symlink(abs_path($source), $target) || (unlink($target) && symlink(abs_path($source), $target)) || die "Failed to create the symlink $target"; } elsif($ro_mode) { link($source, $target) || copy($source, $target) || die "Failed to copy $source"; } else { rename($source, $target) || die "Failed to move $source to $target: $!.\n Try read-only (-r) or symlink (-l) options."; } return 1; } apt-cacher-1.7.8/apt-cacher-format-transition.pl0000755000000000000000000000246311725372474016472 0ustar #!/usr/bin/perl use warnings; use strict; die "Please specify the cache directory!\n" if !$ARGV[0]; chdir $ARGV[0] || die "Could not enter the cache directory!"; my @info = stat("private"); mkdir "packages"; mkdir "headers"; chown $info[4], $info[5], "packages", "headers"; for my $fname (glob('*.deb *pgp *gz *bz2 *Release')) { my $data=0; my $size=0; open(my $in, '<', $fname); open(my $daten, '>', "packages/$fname"); open(my $header, '>', "headers/$fname"); while(<$in>) { if($data) { print $daten $_; next; }; s/\r$//; # Some combined files have /r/n terminated headers. See bug # 355157. Not needed in new split format. print $header $_; $size=$1 if /^Content-Length: (\d+)/; $data=1 if /^$/; } close($daten); close($header); if (!$data) { print "Not found header/data boundary in file $fname. Skipping\n"; unlink "packages/$fname", "headers/$fname"; next; } my @statinfo = stat("packages/$fname"); if($size == $statinfo[7]) { chown $info[4], $info[5], "packages/$fname", "headers/$fname"; utime $statinfo[9], $statinfo[9], "packages/$fname", "headers/$fname"; print "Processed $fname.\n"; unlink $fname; } else { unlink "packages/$fname"; unlink "headers/$fname"; } } apt-cacher-1.7.8/apt-cacher-precache.pl0000755000000000000000000001310212103452313014532 0ustar #!/usr/bin/perl # # apt-cacher-precache.pl # Script for pre-fetching of package data that may be used by users RSN # # Copyright (C) 2005, Eduard Bloch # Distributed under the terms of the GNU Public Licence (GPLv2). use strict; use warnings; use Getopt::Long qw(:config no_ignore_case bundling pass_through); #use File::Basename; use Cwd 'abs_path'; use strict; my $distfilter='testing|etch'; my $quiet=0; my $priofilter=''; #my $expireafter=0; my $help; my $noact=0; my $uselists=0; my $configfile = '/etc/apt-cacher/apt-cacher.conf'; my %options = ( "h|help" => \$help, "d|dist-filter=s" => \$distfilter, "q|quiet" => \$quiet, "p|by-priority=s" => \$priofilter, "n|no-act" => \$noact, "c|cfgfile=s" => \$configfile, "l|list-dir=s" => \$uselists ); &help unless ( GetOptions(%options)); &help if ($help); # Include the library for the config file parser require('/usr/share/apt-cacher/lib/apt-cacher.pl'); my $cfgref; eval { $cfgref = read_config($configfile); }; # not sure what to do if we can't read the config file... die "Could not read config file: $@" if $@; $configfile=abs_path($configfile); # now pick up what we need my $cachedir=$$cfgref{cache_dir}; sub help { print " USAGE: $0 [ options ] Options: -d, --dist-filter=RE Perl regular experession, applied to the URL of Packages files to select only special versions. Example: 'sid|unstable|experimental' (default: 'testing|etch') -q, --quiet suppress verbose output -l, --list-dir=DIR also use pure/compressed files from the specified dir (eg. /var/log/apt-cacher) to get the package names from. Words before | are ignored (in apt-cacher logs). To create a such list from clients, see below. 
-p, --by-priority=RE Perl regular expression for priorities to be looked for when selecting packages. Implies threating all packages with this priority as installation candidates. (default: scanning the cache for candidates without looking at priority) NOTE: the options may change in the future. You can feed existing package lists or old apt-cacher logs into the selection algorithm by using the -l option above. If the version is omited (eg. for lists created with \"dpkg --get-selections\" then the packages may be redownloaded). To avoid this, install libdpkg-perl and use following one-liner to fake a list with version infos: dpkg -l | perl -MDpkg::Arch -ne 'if(/^(i.|.i)\\s+(\\S+)\\s+(\\S+)/) { print \"\$2_\$3_\", Dpkg::Arch::get_host_arch, \".deb\\n\$2_\$3_all.deb\\n\"}' "; exit 1;}; syswrite(STDOUT, "This is an experimental script. You have been warned. Run before apt-cacher-cleanup.pl, otherwise it cannot track old downloads. ") if !$quiet; my $pcount=0; chdir "$cachedir/packages" || die "cannot enter $cachedir/packages" ; my %having; # remember seen packages, just for debugging/noact, emulate what -f would do for us otherwise sub get { my ($path_info, $filename) = @_; if(!defined $having{$filename}) { print "I: downloading $path_info\n" if !$quiet; $pcount++; } $having{$filename}=1; if(!$noact) { open(my $fh, '|', "REMOTE_ADDR=PRECACHING /usr/share/apt-cacher/apt-cacher -i -c $configfile >/dev/null"); print $fh "GET /$path_info\r\nConnection: Close\r\n\r\n"; close($fh); } return; } my %pkgs; for (glob('*')) { s/_.*//g; $pkgs{$_}=1; } if($uselists) { for(glob("$uselists/*")) { my $cat = (/bz2$/ ? "bzcat" : (/gz$/ ? "zcat" : "cat")); #open(catlists, "/bin/cat $$cfg{log_dir}/access.log $$cfg{log_dir}/access.log.1 2>/dev/null ; zcat $$cfg{log_dir}/access.log.*.gz 2>/dev/null |"); if(open(my $catlists,"-|",$cat,$_)) { while(<$catlists>){ chomp; s/.*\|//g; s/\s.*//g; $having{$_}=1; # filter the packages we already have installed s/_.*//g; $pkgs{$_}=1; } } } } PKGITER: for my $pgz (glob('*Packages*')) { # ignore broken files next PKGITER if(!-f "../private/$pgz.complete"); if(length($distfilter)) { if($pgz =~ /$distfilter/) { print "I: distfilter passed, $pgz\n" if !$quiet; } else { next PKGITER; } } my $pgz_path_info=$pgz; $pgz_path_info =~ s!_!/!g; my $root_path_info = $pgz_path_info; $root_path_info =~ s!/dists/.*!!g; # that sucks, pure guessing $root_path_info =~ s!/project/experimental/.*!!g; # that sucks, pure guessing my ($cat, $listpipe); $_=$pgz; $cat = (/bz2$/ ? "bzcat" : (/gz$/ ? 
"zcat" : "cat")); &get($pgz_path_info, $_); print "I: processing $_\n" if !$quiet; if(open(my $pfile,"-|",$cat,$pgz)) { my $prio; while(<$pfile>) { chomp; if(/^Priority:\s+(.*)/) { $prio=$1; } if(s/^Filename:.//) { my $deb_path_info="$root_path_info/$_"; # purify the name s!.*/!!g; my $filename=$_; s!_.*!!g; my $pkgname=$_; if(length($priofilter)) { if(!-e $filename && $prio=~/$priofilter/ ) { &get($deb_path_info, $filename); } } elsif($pkgs{$pkgname}) { if(!-e $filename) { &get($deb_path_info, $filename); } } } } } } print "Downloaded: $pcount files.\n" if !$quiet; apt-cacher-1.7.8/apt-proxy-to-apt-cacher0000755000000000000000000001035710277371137014760 0ustar #!/usr/bin/perl use strict; use Getopt::Long qw(:config no_ignore_case bundling pass_through); use Cwd 'abs_path'; my $help; my $configfile = '/etc/apt-cacher/apt-cacher.conf'; my $apconfigfile = '/etc/apt-proxy/apt-proxy-v2.conf'; my %options = ( "h|help" => \$help, "c|cfgfile=s" => \$configfile, "C|apconfigfile=s" => \$apconfigfile, ); &help unless ( GetOptions(%options)); &help if ($help); sub help { die " USAGE: $0 [ options ] Transforms configuration and cached data from apt-proxy v2 to apt-cacher 1.x Options: -c apt-cacher's config file -C apt-proxy's config file "; } print "Reading apt-proxy's configuration from $apconfigfile\n"; open(my $apc, $apconfigfile) || die "Could not open $apconfigfile. Use the -C option\n"; my %config; print "Adopting options:\n"; my $cache_dir; my $prevkey; LINE: while (<$apc>) { chomp; if ( /^\t(.*)$/ && defined $prevkey ) { $config{$prevkey}.= " $1 "; next LINE; } s/^;.*$//; # kill off comments s/^\s+//; # kill off leading spaces s/\s+$//; # kill off trailing spaces next if /^\[DEFAULT/; if(/^\[(.*)\]/) { $config{path_map} .=" ; " if $config{path_map}; $config{path_map} .= " $1 "; } if ($_) { my ($key, $value) = split(/\s*=\s*/); # split into key and value pair #print "key: $key, value: $value\n"; $prevkey=$key; if($key eq "port") { $config{daemon_port} = $value; print "Port: $value\n"; } if($key eq "address") { $config{daemon_addr} = $value; print "Address: $value\n"; } if($key eq "http_proxy") { $config{http_proxy} = $value; $config{use_proxy} = 1; print "Proxy: $value\n"; } if($key eq "backends") { $prevkey = "path_map"; $config{path_map} .= " $value "; } if($key eq "cache_dir") { $cache_dir=$value; } } } my @map = split(/\s+/, $config{path_map}); for(@map) { # just try to use http on ftp servers and drop rsync versions # s#^ftp:#http:#; # s#^rsync.*##; s#^.*://##; } $config{path_map} = join(" ", @map); #for(keys %config) { # print "hm, $_: $config{$_}\n"; #} print "Reading apt-cacher's configuration from $configfile\n"; open(CONFIG, $configfile) || die "Unable to open the apt-cacher config file template\n"; my $buf; read(CONFIG, $buf, 50000); close(CONFIG); $buf=~s/\\\n#/\n#/mg; # fix broken multilines $buf=~s/\\\n//mg; # merge multilines my @out = ("# This file has been modified by $0\n# Some lines may have been appended at the bottom of this file\n"); for(split(/\n/, $buf)) { my $orig=$_; s/#.*//; # kill off comments s/^\s+//; # kill off leading spaces s/\s+$//; # kill off trailing spaces if ($_) { my ($key, $value) = split(/\s*=\s*/); # split into key and value pair if(exists $config{$key}) { push @out, "$key = $config{$key}\n"; delete $config{$key}; } else { push @out, "$orig\n"; } } else { push @out, "$orig\n"; } } # append the remaining settings for(keys %config) { push @out, "\n# extra setting from apt-proxy configuration\n$_ = $config{$_}\n"; } print "\n$0 will now modify the 
#for(keys %config) {
#    print "hm, $_: $config{$_}\n";
#}

print "Reading apt-cacher's configuration from $configfile\n";
open(CONFIG, $configfile) || die "Unable to open the apt-cacher config file template\n";
my $buf;
read(CONFIG, $buf, 50000);
close(CONFIG);
$buf=~s/\\\n#/\n#/mg; # fix broken multilines
$buf=~s/\\\n//mg;     # merge multilines

my @out = ("# This file has been modified by $0\n# Some lines may have been appended at the bottom of this file\n");
for(split(/\n/, $buf)) {
    my $orig=$_;
    s/#.*//;  # kill off comments
    s/^\s+//; # kill off leading spaces
    s/\s+$//; # kill off trailing spaces
    if ($_) {
        my ($key, $value) = split(/\s*=\s*/); # split into key and value pair
        if(exists $config{$key}) {
            push @out, "$key = $config{$key}\n";
            delete $config{$key};
        }
        else {
            push @out, "$orig\n";
        }
    }
    else {
        push @out, "$orig\n";
    }
}

# append the remaining settings
for(keys %config) {
    push @out, "\n# extra setting from apt-proxy configuration\n$_ = $config{$_}\n";
}

print "\n$0 will now modify the apt-cacher.conf file\nand import the data from apt-proxy's cache. Do you wish to continue? [y/n] ";
my $answer = <STDIN>;
if($answer eq "y\n") {
    open(CONFIG, ">$configfile") || die "Unable to write apt-cacher config file\n";
    print CONFIG @out;
    close(CONFIG);
    #print join(" ", "Running: ", "/usr/share/apt-cacher/apt-cacher-import.pl", "-c", $configfile, "-r", "-R" , $cache_dir, "\n");
    system("/usr/share/apt-cacher/apt-cacher-import.pl", "-c", $configfile, "-r", "-R" , $cache_dir);
}

print "\nStop apt-proxy and start apt-cacher now? [y/n] ";
$answer = <STDIN>;
if($answer eq "y\n") {
    system "/etc/init.d/apt-proxy stop";
    system "echo AUTOSTART=1 >> /etc/default/apt-cacher";
    system "/etc/init.d/apt-cacher restart";
}

print "\nDisable the apt-proxy in the init configuration (update-rc.d remove)? [y/n] ";
$answer = <STDIN>;
if($answer eq "y\n") {
    system "update-rc.d -f apt-proxy remove";
}
apt-cacher-1.7.8/config/0000755000000000000000000000000012032314423011663 5ustar apt-cacher-1.7.8/config/apt-cacher.default.md5sum0000644000000000000000000000031412032314423016447 0ustar afc7a4b065275465c1eeb5a09c985bde AUTOSTART=0
f269a1c735ae47d7068db3ba5641a08b AUTOSTART=1
1207bbf54d26ab191dbac80fe336dc48 pre 1.7: AUTOSTART=0
046661f9e728b783ea90738769219d71 pre 1.7: AUTOSTART=1
apt-cacher-1.7.8/config/apt-cacher.default0000644000000000000000000000060612032313622015242 0ustar # apt-cacher daemon startup configuration file

# Set to 1 to run apt-cacher as a standalone daemon, set to 0 if you are going
# to run apt-cacher from /etc/inetd or in CGI mode (deprecated). Alternatively,
# invoking "dpkg-reconfigure apt-cacher" should do the work for you.
#
AUTOSTART=0

# extra settings to override the ones in apt-cacher.conf
# EXTRAOPT=" daemon_port=3142 limit=30 "
apt-cacher-1.7.8/config/apt-cacher.conf0000755000000000000000000003021612014364024014550 0ustar #################################################################################
# This is the config file for apt-cacher. On most Debian systems you can safely #
# leave the defaults alone.                                                     #
#                                                                               #
# Commented defaults or examples are given. They can be changed here, or        #
# overridden using a fragment placed in ./conf.d/                               #
#################################################################################

### GENERAL ###

# The location of the local cache/working directory. This can become quite
# large, so make sure it is somewhere with plenty of space.
#
#cache_dir = /var/cache/apt-cacher

# The directory to use for apt-cacher access and error logs.
# The access log records every request in the format:
#
# date-time|PID|client IP address|HIT/HEAD/MISS/EXPIRED/NOTMOD|object size|object name
#
# The error log is slightly more free-form, and is also used for debug messages
# if debug mode is turned on.
#
#log_dir = /var/log/apt-cacher
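# An access.log line in that format might look like this (illustrative
# values only):
#
#   Sun Nov 10 12:00:00 2013|1234|192.168.0.10|MISS|30426|debian/dists/stable/main/binary-i386/Packages.gz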
# The email address of the administrator is displayed in the info page and
# traffic reports.
#
#admin_email = root@localhost

# Daemon port setting, only useful in stand-alone mode. You need to run the
# daemon as root to use privileged ports (<1024).
#
# For standalone daemon auto startup settings please edit the file
# /etc/default/apt-cacher.
#
#daemon_port = 3142

# Optional settings, user and group to run the daemon as. Make sure they have
# sufficient permissions within the cache and log directories. Comment the
# settings to run apt-cacher as the invoking user.
#
group = www-data
user = www-data

# Optional setting, binds the listening daemon to specified IP(s).
#
#daemon_addr = localhost

# Apt-cacher can be used in offline mode, which only serves files already
# cached and makes no new outgoing connections. Set this to 1 to enable.
#
#offline_mode = 1

# To enable data checksumming, install libberkeleydb-perl and set this option to
# 1. Then wait until the Packages/Sources files have been refreshed once (and so
# the database has been built up). You can also delete them from the cache to
# trigger the database update.
#
#checksum = 1

# Importing checksums from new index files into the checksum database can cause
# high CPU usage on slower systems. This option sets a limit to the number of
# index files that are imported simultaneously, thereby limiting the CPU load
# average but possibly taking longer. Set to 0 for no limit.
#
#concurrent_import_limit = 1

# CGI mode is deprecated.
#
# Send a 410 (Gone) HTTP message with the specified text when accessed via
# CGI. Useful to tell users to adapt their sources.list files when the
# apt-cacher server is being relocated (via apt-get's error messages while
# running "update")
#
#cgi_advise_to_use = Please use http://cacheserver:3142/ as apt-cacher access URL
#cgi_advise_to_use = Server relocated. To change sources.list, run
#   perl -pe "s,/apt-cacher??,:3142," -i /etc/apt/sources.list
#
# To further facilitate migration from CGI to daemon mode this setting will
# automatically redirect incoming CGI requests to the specified daemon URL.
#
#cgi_redirect = http://localhost:3142/

### UPSTREAM PROXY ###

# Apt-cacher can pass all its requests to an external HTTP proxy like Squid,
# which could be very useful if you are using an ISP that blocks port 80 and
# requires all web traffic to go through its proxy. The format is
# 'hostname:port', e.g. 'proxy.example.com:8080'.
#
#http_proxy = proxy.example.com:8080

# External HTTP proxies sometimes need authentication to get full access. The
# format is 'username:password'.
#
#http_proxy_auth = proxyuser:proxypass

# Use of the configured external proxy can be turned on or off with this flag.
# Value should be either 0 (off) or 1 (on).
#
#use_proxy = 0

# Use of external proxy authentication can be turned on or off with this flag.
# Value should be either 0 (off) or 1 (on).
#
#use_proxy_auth = 0

# This sets the interface to use for the upstream connection.
# Specify an interface name, an IP address or a host name.
# If unset, the default route is used.
#
#interface = eth0

# Rate limiting sets the maximum bandwidth in bytes per second to use for
# fetching packages. Use a value of 0 for no rate limiting.
#
#limit = 0

### ACCESS and SECURITY ###

# Server mapping - this allows mapping virtual paths that appear in the access
# URL to real server names. The syntax is the initial part of the URL to be
# replaced (the key), followed by a list of mirror URLs, all space
# separated. Multiple mappings are separated by semicolons or commas, as
# usual. Note that you need to specify all keys (or use the 'PATH_MAP'
# shorthand) in the allowed_locations option, if you make use of it. Also note
# that the paths should not overlap each other.
#
# The keys are also used to separate the caching of multiple distributions
# within a single apt-cacher instance if distinct_namespaces is also set.
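# For instance, with the 'debian' key from the example below, a client's
# sources.list would use the key as a virtual path on the cache host
# (host name hypothetical):
#
#   deb http://yourcache.example.com:3142/debian stable main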
#
#path_map = debian ftp.uni-kl.de/pub/linux/debian ftp2.de.debian.org/debian ;
#           ubuntu archive.ubuntu.com/ubuntu ;
#           security security.debian.org/debian-security ftp2.de.debian.org/debian-security
#
# There are 2 default internal path_map settings for the Debian and Ubuntu
# changelog servers which will be merged with this option.
#
# debian-changelogs packages.debian.org
# ubuntu-changelogs changelogs.ubuntu.com
#
# These can be overridden by specifying an alternative mirror for that key, or
# deleted by just specifying the key with no mirror.
#
#path_map = debian-changelogs

# From version 1.7.0 there is support for caching multiple distributions (e.g.
# Debian and Ubuntu) within the same apt-cacher instance. Enable this by setting
# distinct_namespaces to 1. Distribution package files are cached in separate
# directories whose names are derived from the relevant path_map key. So
# generally there will be a path_map key => server(s) setting for each
# distribution that is cached. Having enabled distinct_namespaces, existing
# packages can be imported into the correct directory by running (as root)
#
# /usr/share/apt-cacher/apt-cacher-import.pl -u {cache_dir}/packages
#
#distinct_namespaces = 0

# If the apt-cacher machine is directly exposed to the Internet and you are
# worried about unauthorised machines fetching packages through it, you can
# specify a list of IP addresses which are allowed to use it and another list of
# IP addresses which are prohibited.
#
# The localhost addresses (127.0.0.1/8, ::ffff:127.0.0.1/8 and ::1) are always
# allowed. Other addresses must be matched by allowed_hosts and not by
# denied_hosts to be permitted to use the cache. Setting allowed_hosts to "*"
# means "allow all" (which was the default before version 1.7.0). The default
# is now ''.
#
# The format is a comma-separated list containing addresses, optionally with
# masks (like 10.0.0.0/24 or 10.0.0.0/255.255.255.0), or ranges of addresses
# (two addresses separated by a hyphen with no masks, specifying a valid subnet,
# like '192.168.0.0-63' or '192.168.0.0 - 192.168.0.63') or a DNS resolvable
# hostname. The corresponding IPv6 options allowed_hosts_6 and denied_hosts_6
# are deprecated (but will still be honoured, if set). IPv6 addresses can now be
# added directly to allowed_hosts and denied_hosts along with IPv4 addresses.
#
#allowed_hosts = *
#denied_hosts =

# Only allow HTTPS/SSL proxy CONNECT to hosts or IPs which match an item in this
# list.
#
#allowed_ssl_locations =

# Only allow HTTPS/SSL proxy CONNECT to ports which match an item in this list.
# Adding further items to this option can pose a significant security risk. DO
# NOT do it unless you understand the full implications.
#
#allowed_ssl_ports = 443

# Optional setting to limit access to upstream mirrors based on server names in
# the URLs. This is matched before any path_map settings are expanded. If
# 'PATH_MAP' is included in this option, it will be expanded to the keys of the
# path_map setting. Note these items are strings, not regexps.
#
#allowed_locations = ftp.uni-kl.de, ftp.nerim.net, debian.tu-bs.de/debian
#allowed_locations = ftp.debian.org, PATH_MAP
#allowed_locations = PATH_MAP

# List of Ubuntu release names used to expand UBUNTU_RELEASE_NAMES in
# installer_files_regexp (see below). This is required to allow the Ubuntu
# installer to fetch upgrade information. As the naming scheme is unpredictable,
# new release names need to be added to this list.
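# For example, a release newer than those listed below would be enabled by
# appending its codename, e.g. 'raring' for Ubuntu 13.04:
#
#   ubuntu_release_names = dapper, ..., quantal, raring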
#
#ubuntu_release_names = dapper, edgy, feisty, gutsy, hardy, intrepid, jaunty, karmic, lucid, maverick, natty, oneiric, precise, quantal

### HOUSEKEEPING ###

# Apt-cacher can generate usage reports every 24 hours if you set this directive
# to 1. You can view the reports in a web browser by pointing to your cache
# machine with 'report' on the end, like this:
#
# http://yourcache.example.com:3142/report
#
# Generating reports is very fast even with many thousands of logfile lines, so
# you can safely turn this on without creating much additional system load.
#
#generate_reports = 1

# Apt-cacher can clean up its cache directory every 24 hours if you set this
# directive to 1. Cleaning the cache can take some time to run (generally in the
# order of a few minutes) and removes all package files that are not mentioned
# in any existing 'Packages' lists. This has the effect of deleting packages
# that have been superseded by an updated 'Packages' list.
#
#clean_cache = 1

### INTERNALS ###

# Debug mode makes apt-cacher write a lot of extra debug information to the
# error log (whose location is defined with the 'log_dir' directive). Leave
# this off unless you need it, or your error log will get very big. Acceptable
# values are 0 or an integer up to 7. See man apt-cacher (1) for further
# details.
#
#debug = 0

# You shouldn't need to change anything below here. If you do, ensure you
# understand the full implications of doing so.

# Permitted package files -- this is a perl regular expression which matches all
# package-type files (files that are uniquely identified by their filename).
#
#package_files_regexp = (?:^[-+.a-z0-9]+_(?:\d:)?[-+.~a-zA-Z0-9]+(?:_[-a-z0-9]+\.(?:u|d)?deb|\.dsc|\.tar(?:\.gz|\.bz2|\.xz)|\.diff\.gz)|\.rpm|index\.db-.+\.gz|\.jigdo|\.template)$

# Permitted APT pdiff files -- this is a perl regular expression which matches
# APT pdiff files, which are ed(1) scripts used to patch index files rather than
# redownloading the whole file afresh.
#
#pdiff_files_regexp = ^2\d{3}-\d{2}-\d{2}-\d{4}\.\d{2}\.gz$

# Permitted index files -- this is the perl regular expression which matches all
# index-type files (files that are uniquely identified by their full path and
# need to be checked for freshness).
#
#index_files_regexp = ^(?:Index(?:\.bz2)?|Packages(?:\.gz|\.bz2)?|Release(?:\.gpg)?|InRelease|Sources(?:\.gz|\.bz2)?|Contents-(?:[a-z]+-)?[a-zA-Z0-9]+\.gz|(?:srclist|pkglist)\.[a-z-]+\.bz2|release(?:\.gz|\.bz2)?|Translation-[a-z]{2,3}(?:_[A-Z]{2})?(?:\.gz|\.bz2|\.xz)?)$

# Permitted installer files -- this is the perl regular expression which matches
# all installer-type files (files that are uniquely identified by their full
# path but don't need to be checked for freshness). These are typically files
# used by the Debian/Ubuntu installer, Debian Live and apt. Within this option,
# the shorthand 'UBUNTU_RELEASE_NAMES' will be expanded to the list configured
# in ubuntu_release_names as regexp alternatives.
#
#installer_files_regexp = ^(?:vmlinuz|linux|initrd\.gz|changelog|NEWS.Debian|UBUNTU_RELEASE_NAMES\.tar\.gz(?:\.gpg)?|(?:Devel|EOL)?ReleaseAnnouncement(?:\.html)?|meta-release(?:-lts)?(?:-(?:development|proposed))?)$

# Perl regular expression which matches index files from which to read checksums
# if checksum is enabled.
#
#checksum_files_regexp = ^(?:Packages(?:\.gz|\.bz2)?|Sources(?:\.gz|\.bz2)?|(?:In)?Release|Index(?:\.bz2)?)$
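# A candidate filename can be tested against one of these patterns with a
# one-liner from the shell (illustrative filename; pattern copied from
# pdiff_files_regexp above):
#
#   perl -e 'print "ok\n" if "2012-01-31-1459.30.gz" =~ /^2\d{3}-\d{2}-\d{2}-\d{4}\.\d{2}\.gz$/'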
NB files matched by installer_files_regexp are skipped # automatically and do not need to be added here as well. # #skip_checksum_files_regexp = ^(?:(?:In)?Release|Release\.gpg)$ # Perl regular expression which matches URLs to be permitted for Debian bugs # SOAP requests as made by apt-listbugs(1). # #soap_url_regexp = ^(?:http://)?bugs\.debian\.org(?::80)?/cgi-bin/soap.cgi$ apt-cacher-1.7.8/config/apache.conf0000755000000000000000000000033011717045647013774 0ustar Alias /apt-cacher /usr/share/apt-cacher/apt-cacher-cgi.pl Options ExecCGI AddHandler cgi-script .pl AllowOverride None order allow,deny allow from all apt-cacher-1.7.8/apt-cacher-cgi.pl0000755000000000000000000000044011664536462013546 0ustar #!/usr/bin/perl # cgi.pl - CGI to provide a local cache for debian packages and # release files and .deb files. Actually just a wrapper to set CGI mode flag # for the real script. $ENV{CGI_MODE}=1; # identify as CGI and run the actual script require "/usr/share/apt-cacher/apt-cacher"; apt-cacher-1.7.8/apt-cacher0000755000000000000000000022516012231245457012374 0ustar #!/usr/bin/perl # ---------------------------------------------------------------------------- =head1 NAME apt-cacher =head1 DESCRIPTION Caching HTTP proxy optimized for use with APT =head1 DOCUMENTATION Detailed, full usage and configuration information for both servers and clients is contained in the L manpage. There are additional notes in F. The default server configuration file, F, also contains further server configuration examples. =head1 COPYRIGHT Copyright (C) 2005 Eduard Bloch Copyright (C) 2007-2011 Mark Hindley Distributed under the terms of the GNU Public Licence (GPL). =cut # ---------------------------------------------------------------------------- use strict; use warnings; use lib '/usr/share/apt-cacher/lib'; use Fcntl qw(:DEFAULT :flock); use WWW::Curl::Easy; use WWW::Curl::Multi; use WWW::Curl::Share; use FreezeThaw qw(freeze thaw); use IO::Socket::INET; use IO::Select; use IO::Interface::Simple; use HTTP::Request; use HTTP::Response; use HTTP::Date; use Time::Piece; use Sys::Hostname (); use Filesys::Df; use Time::HiRes qw(sleep); use NetAddr::IP; use NetAddr::IP::Util; use List::Util; use Getopt::Long qw(:config no_ignore_case bundling); use Sys::Syscall; use POSIX (); use Hash::Util; # Include the library for the config file parser require('apt-cacher.pl'); # Set some defaults my $version='devel'; # this will be auto-replaced when the Debian package is being built my $mode; # cgi|inetd|undef # Needs to be global for setup_ownership() our $cfg; # Data shared between functions my ($aclog_fh, $erlog_fh); my ($con, $source); my $listeners; # Subroutines sub setup { my $configfile_default = '/etc/apt-cacher/apt-cacher.conf'; my $configfile = $configfile_default; my $pidfile; my $chroot; my $retnum; my $fork; my @extraconfig; if($ENV{CGI_MODE}) { # yahoo, back to the roots, CGI mode $mode='cgi'; } else { local @ARGV = @ARGV; # Use a copy so @ARGV not destroyed my $help; my $inetd; my $show_version; my %options = ( 'h|help' => \$help, 'c|cfg|conf=s' => \$configfile, 'i|inetd' => \$inetd, 'r|chroot=s' => \$chroot, 'd|daemon' => \$fork, 'p|pidfile=s' => \$pidfile, 't|try|tries|R|retry|retries=i' => \$retnum, 'v|version' => \$show_version ); if (!GetOptions(%options) || $help) { die <] [-i|--inetd] [-d|--daemon] [-r|--chroot ] [-p|--pidfile] ] [-t|--tries|-R|--retry ] [-v|--version] [