Exists in fs -> | YES | NO * |
---|---|---|
not seen before | \c N | \c - * |
committed | \c C, \c R | \c D * |
unversioned | \c d | \c d (or D?, or with !?) * |
added | \c n | \c n (with !) * |
\e md5 | * The \b MD5 of the new file is identical to that of one or more already * committed files; there is no percentage. * * |
\e inode | * The \b device/inode number is identical to the given known entry; this * could mean that the old entry has been renamed or hardlinked. * \b Note: Not all filesystems have persistent inode numbers (eg. NFS) - * so depending on your filesystems this might not be a good indicator! * * |
\e name | * The entry has the same name as another entry. * * |
\e manber | * Analysing files of similar size shows some percentage of * (variable-sized) common blocks (ignoring the order of the * blocks). * * |
\e dirlist | * The new directory has similar files to the old directory.\n * The percentage is (number_of_common_entries)/(files_in_dir1 + * files_in_dir2 - number_of_common_entries). * * |
anymore.
s,(\S)(\),$1 $2,g;
print $w $_;
}
close $w;
#open(STDOUT, "> $output") || die $!;
# Cut until first header
# Filter the lynx -dump output read from $r: strip link markers, skip the
# reference list, and print only the text between a heading and the next
# underscore ruler line.
while (<$r>)
{
# I'd thought lynx had an option to not print these?
# yes ... -nonumbers.
s#\[\d+\]##;
# Skip the footnote/link list lines, which start with "[".
next if m#^\[#;
# Replace stray 0xe2 bytes (first byte of UTF-8 dashes) with "--".
s/\xe2/--/g;
# $p=m#^SYNOPSIS# .. m#^\s*-{30,}#;
# Flip-flop operator: true from a line beginning with a word character
# up to (and including) a ruler of 30+ underscores.
$p=m#^\w# .. m#^\s*_{30,}#;
# The flip-flop yields a plain sequence number while inside the range and
# "NE0" on the closing (ruler) line - so this prints everything inside
# the range except the closing ruler.
print if ($p =~ m#^\d+$#);
}
fsvs-fsvs-1.2.12/src/dev/gcov-summary.pl 0000775 0000000 0000000 00000006231 14536317137 0020107 0 ustar 00root root 0000000 0000000 #!/usr/bin/perl
# gcov-summary: summarize *.gcov.smry files given on the command line.
# Prints a per-file coverage table, a total line, a coverage figure that
# counts error-handling lines (STOPIF/BUG/DEBUGP) as executed, and the
# longest runs of consecutive untested lines.

# Read whole files at once (slurp mode) - each <> iteration gets one
# complete .gcov.smry file.
undef $/;

$exe_lines = $sum_lines = 0;
%runs = ();

while (<>)
{
	($c_file = $ARGV) =~ s#\.gcov\.smry$##;

	# The summary file looks like:
	#   File 'warnings.c'
	#   Lines executed:85.71% of 28
	#   warnings.c:creating 'warnings.c.gcov'
	($pct, $lines) = (m#File '$c_file'\s+Lines executed:([\d\.]+)% of (\d+)#);
	if (!$lines)
	{
		warn "Cannot parse (or no lines executed) for $ARGV.\n";
		next;
	}

	# Functions marked "/// FSVS GCOV MARK:" in the source are expected to
	# never run; their line counts get subtracted from the total below.
	open(SRC, "< " . $c_file) || die $!;
	# BUGFIX: the <SRC> read was missing (with $/ undef it slurps the file).
	@funcs_to_ignore = map {
		m#\s*/// FSVS GCOV MARK: (\w+)# ? $1 : ();
	} split(/\n/, <SRC>);
	close(SRC);

	$ignored = 0;
	for $func (@funcs_to_ignore)
	{
		($fexec, $flines) =
			m#Function '$func'\s+Lines executed:([\d\.]+)\% of (\d+)#;
		if (!defined($flines))
		{
			warn "Function $func should be ignored, but was not found!\n";
		}
		elsif ($fexec > 0)
		{
			warn "Function $func should be ignored, but was run!\n";
		}
		else
		{
			$ignored += $flines;
		}
	}

	# A gcov line looks like:
	#   #####:   77:  STOPIF( st__status(sts, path), NULL);
	# TODO: Count the whole block; eg. DEBUG normally has more than a single
	# line.
	open(GCOV, "< $c_file.gcov");
	{
		# Back to line-wise reading, but only within this scope.
		local($/) = "\n";
		$last_line = $cur = 0;
		# Find untested lines, and count runs of consecutive ones.
		$this_run = 0;
		# BUGFIX: the <GCOV> read was missing here.
		while (<GCOV>)
		{
			$cur++;
			if (/^\s*(#####|-):\s+\d+:\s+(STOPIF|BUG|BUG_ON|DEBUGP)?/)
			{
				# Error-handling lines are counted separately, see below.
				$stopif_lines++ if $2;
				if ($last_line == $cur - 1)
				{
					$old = delete $runs{$c_file . "\0" . $last_line};
					# A line without executable code (mark '-') is taken as
					# continuation, but doesn't add to unexecuted lines.
					$runs{$c_file . "\0" . $cur} =
						[ $old->[0] + ($1 eq "#####" ? 1 : 0),
							$old->[1] || $cur ];
				}
				$last_line = $cur;
			}
		}
	}
	close(GCOV);

	$covered = int($lines * $pct / 100.0 + 0.5);
	$lines -= $ignored;
	# BUGFIX: guard against division by zero when every line is ignored.
	$pct = $lines ? $covered / $lines * 100.0 : 0;
	# Key sorts by percentage first, so the report is ordered by coverage.
	$cover{sprintf("%9.5f-%s", $pct, $ARGV)} =
		[$lines, $pct, $ARGV, $covered, $ignored];
	$sum_lines += $lines;
	$exe_lines += $covered;
}

die "No useful information found!!\n" if !$sum_lines;

$delim = "---------+--------+--------+--------------------------------------------------\n";
print "\n\n", $delim;
for (reverse sort keys %cover)
{
	($lines, $pct, $name, $covered, $ignored) = @{$cover{$_}};
	$ntest = $lines - $covered;
	$name =~ s#\.gcov\.smry$##i;
	write;
}

format STDOUT_TOP=
Percent | exec'd | #lines | #!test | #ignrd | Filename
---------+--------+--------+--------+--------+----------------------------
.
format STDOUT=
@##.##% | @##### | @##### | @##### | @##### | @<<<<<<<<<<<<<<<<<<<<<<<<<<
$pct, $covered, $lines, $ntest, $ignored, $name
.

print $delim;
$pct = 100.0 * $exe_lines / $sum_lines;
$covered = $exe_lines;
$lines = $sum_lines;
# BUGFIX: recompute $ntest for the total row (it held the last file's value).
$ntest = $lines - $covered;
$name = "Total";
write;
print $delim;

printf " %6.2f%% coverage when counting %d error handling lines as executed\n",
	100.0 * ($exe_lines + $stopif_lines) / $sum_lines, $stopif_lines;
print "-" x (length($delim) - 1), "\n\n";

# Print the longest runs of consecutive untested lines, longest first,
# up to roughly 10 locations.
@runs_by_length = ();
map { $runs_by_length[$runs{$_}[0]]{$_} = $runs{$_}; } keys %runs;
$max = 10;
print "Longest runs:\n";
while ($max > 0 && @runs_by_length)
{
	$this_length = $#runs_by_length;
	printf " %3d# ", $this_length;
	# Guard against holes in the array (lengths with no runs).
	$length_arr = delete $runs_by_length[$this_length] || {};
	for (sort keys %$length_arr)
	{
		($file, $last) = split(/\0/);
		print " ", $file, ":", $length_arr->{$_}[1];
		$max--;
	}
	print "\n";
}
print "\n\n";
fsvs-fsvs-1.2.12/src/dev/make_doc.pl 0000775 0000000 0000000 00000001244 14536317137 0017217 0 ustar 00root root 0000000 0000000 #!/usr/bin/perl
# make_doc: converts preformatted help text (read from the files/STDIN
# given on the command line) into C string constants. Every section
# (recognized by a short identifier at the start of a line) becomes a
# "const char hlp_<section>[]" definition.
print "/* This file is generated, do not edit!\n",
" * Last done on ", scalar(gmtime(time())),"\n",
" * */\n",
"\n\n";
while (<>)
{
chomp;
# Skip ruler lines of 30+ underscores.
next if /(_{30,})/;
# Skip blank lines before a section has collected any text.
next if /^\s*$/ && !@text;
# A short word (1-6 chars, optionally prefixed by "_") at the start of a
# line opens a new section.
$sect=$1 if /^_?([\w\-]{1,5}[a-zA-Z0-9])/;
# print STDERR "sect=$sect old=$old_sect\n";
if ($sect ne $old_sect)
{
# Flush the previous section as a C string constant; only sections whose
# name starts with a lowercase letter are emitted.
print "const char hlp_${old_sect}[]=\"" .
join("\"\n \"", @text),"\";\n\n"
if ($old_sect && $old_sect =~ /^[a-z]/);
@text=();
# "-" is not valid in a C identifier.
$sect =~ s#-#_#g;
$old_sect=$sect;
# NOTE(review): the final section is only flushed when a later section
# starts; presumably the input ends with a sentinel section - verify.
}
else
{
# make \ safe
s#\\#\\\\#g;
# make " safe
s#"#\\"#g;
# remove space at beginning
# s#^ ##;
push(@text,$_ . "\\n");
}
}
print "\n\n// vi: filetype=c\n";
fsvs-fsvs-1.2.12/src/dev/make_fsvs_release.pl 0000775 0000000 0000000 00000002521 14536317137 0021132 0 ustar 00root root 0000000 0000000 #!/usr/bin/perl
# make_fsvs_release: tag a release in git, export the tagged tree into a
# temporary directory, run autoconf, and build .tar.bz2/.tar.gz archives
# plus MD5SUM/SHA256SUM files. (User-visible messages are in German.)

$version = shift() || die "Welche Version??\n";
# Only dotted decimal versions like "1.2.12" are accepted.
$version =~ m#^(\d+\.)+\d+$# || die "Version ungültig!!\n";

$tagdir = "fsvs-$version";

system("git tag '$tagdir'");
warn "Fehler $? beim Taggen!" if $?;
#print "Getaggt!! Warte auf Bestätigung.\n"; $_=<STDIN>;

srand();
$tempdir = "/tmp/" . $$ . ".tmp." . rand();
mkdir($tempdir) || die "mkdir($tempdir): $!";

# Cleanup helper; invoked from the __DIE__ handler below.
sub C { system("rm -rf '$tempdir'"); };
$SIG{"__DIE__"} = sub { print @_; C(); exit($! || 1); };

# BUGFIX: the tree-ish to export and tar's "-" (read archive from stdin)
# argument were missing, so nothing got extracted.
system("git archive --prefix='$tagdir/' '$tagdir' | tar -xf - -C '$tempdir'");
die "Fehler $?" if $?;

# BUGFIX: check chdir; continuing in the wrong directory would be fatal.
chdir($tempdir) || die "chdir($tempdir): $!";
system("cd $tagdir && autoconf");
if ($?)
{
	# Drop into an interactive shell so the problem can be inspected
	# in place before the cleanup handler removes everything.
	#die "Fehler $?" if $?;
	print "Fehler $?!!\n";
	system("/bin/bash");
}

# Historic CHANGES -> HTML conversion, kept for reference:
# open(CH, "< $tagdir/CHANGES") || die $!;
# open(CHHTML,"> CHANGES.html") || die $!;
# while(<CH>)
# {
#   chomp;
#   last if /^\s*$/;
#
#   print(CHHTML "$_\n\n"), next if (/^\w/);
#   s#^- #- #;
#   print CHHTML $_, "\n";
# }
# print CHHTML "\n";
# close CH; close CHHTML;

chdir($tempdir);
system("tar -cvf $tagdir.tar $tagdir");
die "Fehler $?" if $?;
# -k keeps the .tar around for the following gzip run.
system("bzip2 -v9k $tagdir.tar");
die "Fehler $?" if $?;
system("gzip -v9 $tagdir.tar");
die "Fehler $?" if $?;
system("md5sum *.tar.* > MD5SUM");
die "Fehler $?" if $?;
system("sha256sum *.tar.* > SHA256SUM");
die "Fehler $?" if $?;

print "ok\n\n  cd $tempdir\n\n";
#C();
exit(0);
fsvs-fsvs-1.2.12/src/dev/permutate-all-tests 0000775 0000000 0000000 00000020417 14536317137 0020762 0 ustar 00root root 0000000 0000000 #!/usr/bin/perl
# vim: sw=2 ts=2 expandtab
#
# Runs the tests in various configurations
# To be started from the src/ directory, to have matching paths
#
# If there's an environment variable MAKEFLAGS set, and it includes a
# -j parameter, the tests are run in parallel.
#
#
##########################################################################
# Copyright (C) 2005-2008 Philipp Marek.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
##########################################################################
use Encode qw(from_to);
use Fcntl qw(FD_CLOEXEC F_SETFD F_GETFD);
#
#############################################################################
# Detection and preparation
#############################################################################
# Detection and preparation: find usable locales, verify the test strings
# convert cleanly, set up the temporary test base and the CSV result file,
# and initialize the global counters used by the test loop below.
{
@locales=`locale -a`;
# look for UTF8
($utf8_locale)=grep(/\.utf-?8/i,@locales);
chomp $utf8_locale;
# look for non-utf8
($loc_locale)=grep(!/(POSIX|C|utf-?8$)/i, @locales);
chomp $loc_locale;
($cur_locale)=map { /LC_CTYPE="(.*)"/ ? ($1) : (); } `locale`;
@test_locales=($utf8_locale, $loc_locale);
# Normalize "utf-8" vs "utf8" spelling for the duplicate check below.
($cur_locale_norm = $cur_locale) =~ s#utf-8#utf8#i;
push @test_locales, $cur_locale
unless grep(lc($cur_locale_norm) eq lc($_), @test_locales);
# Test the locales.
($utf8, $loc)=`make -C ../tests locale_strings BINARY=/bin/true`;
# print $utf8,$loc;
# from_to() croaks if a test string can't be represented in the target
# encoding, so an unusable locale aborts the whole run early.
$target_enc="ISO-8859-1";
from_to($utf8, "utf-8", $target_enc, Encode::FB_CROAK);
from_to($loc, $target_enc, "utf-8", Encode::FB_CROAK);
# print $utf8,$loc; exit;
# Use special directories, so that normal system operation is not harmed.
$PTESTBASE="/tmp/fsvs-tests-permutated";
mkdir($PTESTBASE, 0777) || die $!
if !-d $PTESTBASE;
open(CSV, "> /tmp/fsvs-tests.csv") || die $!;
# Enable autoflush on CSV so results are visible while tests still run.
select((select(CSV), $|=1)[0]);
print CSV qq("Nr","Prot","LANG","priv","config","Result"\n);
# To get some meaningful test name outputted
$ENV{"CURRENT_TEST"}="ext-tests";
$start=time();
# Honour a "-j N" from MAKEFLAGS, or an explicit PARALLEL override.
$ENV{"MAKEFLAGS"} =~ /-j\s*(\d*)\b/;
$parallel=$ENV{"PARALLEL"} || ($1+0) || 1;
MSG("INFO", "Parallel found as $parallel") if $parallel;
# Used for status output
$fail=0;
# Used for counting
$sum=0;
# For parallel execution
$running=0;
# Wait for children
$SIG{"CHLD"}="DEFAULT";
%results=();
%pid_to_result=();
MSG("INFO", StartText($start));
# We don't want no cache.
$| =1;
}
#############################################################################
# Run the tests
#############################################################################
# Run the tests: one run per combination of configure flavour, protocol,
# privilege level (sudo or not) and locale; up to $parallel at a time.
{
# My default is debug - so do that last, to have a
# correctly configured environment :-)
# Furthermore the "unusual" configurations are done first.
# for $release ("--enable-debug")
# for $release ("--enable-release")
for $release ("--with-waa_md5=8", "", "--enable-release", "--enable-debug")
{
# make sure that the binary gets recompiled
$conf_cmd="( cd .. && ./configure $release ) && ".
"touch config.h && make -j$parallel";
system("( $conf_cmd ) > /tmp/fsvs-conf.txt 2>&1") &&
die "configure problem: $?";
# Start the slow, uncommon tasks first.
for $prot ("svn+ssh", "file://")
{
for $user ("sudo", "")
{
for $lang (@test_locales)
{
$sum++;
# We have to make the conf and waa directory depend on the
# user, so that root and normal user don't share the same base -
# the user would get some EPERM.
# Furthermore parallel tests shouldn't collide.
$PTESTBASE2="$PTESTBASE/u.$user" . ($parallel ? ".$sum" : "");
# Start the test asynchronous, and wait if limit reached.
$pid=StartTest();
$running++;
# Store a shared scalar (via reference) in both %results and
# %pid_to_result, so WaitForChilds() can fill in the exit status.
{
my($tmp);
$tmp="?";
$results{$lang}{$user}{$prot}{$release}=\$tmp;
$pid_to_result{$pid}=\$tmp;
}
WaitForChilds($parallel);
}
}
}
# As we reconfigure on the next run, we have to wait for *all* pending
# children.
WaitForChilds(1);
}
}
#############################################################################
# Summary
#############################################################################
# Summary: report total runtime and overall success/failure.
{
$end=time();
MSG("INFO", EndText($start, $end));
if ($fail)
{
MSG("ERROR","$fail of $sum tests failed.");
}
else
{
MSG("SUCCESS", "All $sum tests passed.");
}
close CSV;
}
# Collect coverage data over all the runs.
system qq(make gcov);
exit;
#############################################################################
# Functions
#############################################################################
# Emit a status line of the given type (INFO, ERROR, SUCCESS, ...) through
# the test suite's shell helper functions, so the output looks exactly
# like the suite's own messages.
sub MSG
{
	my ($type, @text) = @_;
	my $joined = join(" ", @text);
	# Source the helpers, then call the function named by $type.
	my $script = ". ../tests/test_functions\n\$$type '" . $joined . "'";
	Bash($script);
}
# Run one test configuration in a forked child.
# All parameters come from global variables ($lang, $user, $prot,
# $release, $sum, $PTESTBASE2, $conf_cmd).
# Returns the child's PID to the parent. The child itself runs
# "make run-tests" via a second fork, logs to /tmp/fsvs-test-$sum.log,
# appends a line to the CSV file, and exits 0 (ok) or 1 (failure).
sub StartTest
{
	$pid = fork();
	die $! unless defined($pid);
	return $pid if ($pid);

	# $x=(0.5 < rand())+0; print "$$: exit with $x\n"; exit($x);
	# This is the child ...
	# The pipe transports the list of failed test names back from make.
	pipe(FAILREAD, FAILWRITE) || die "pipe: $!";
	# sudo closes the filehandles above 2, and I found no way to get it to
	# keep them open.
	# So we have to give a path name to the children.
	$tl = $ENV{"TEST_LIST"};
	$parms = "LANG=$lang" .
		" LC_MESSAGES=C" .
		" 'TESTBASEx=$PTESTBASE2'" .
		" 'PROTOCOL=$prot'" .
		" RANDOM_ORDER=1" .
		($tl ? " 'TEST_LIST=$tl'" : "") .
		" TEST_FAIL_WRITE_HDL=/proc/$$/fd/" . fileno(FAILWRITE) .
		# And it can have our STDERR.
		" TEST_TTY_HDL=/proc/$$/fd/2";
	# To avoid getting N*N running tasks for a "-j N", we explicitly say 1.
	# Parallel execution within the tests is not done yet, but better safe
	# than sorry.
	$cmd = "$user make run-tests -j1 $parms";

	$start = time();
	# Output on STDOUT is short; the logfile says it all.
	print "#$sum ", StartText($start);
	open(LOG, "> /tmp/fsvs-test-$sum.log");
	select((select(LOG), $|=1)[0]);
	print LOG "Testing #$sum: (configure=$release) $parms\n",
		StartText($start),
		"\n$conf_cmd &&\n\t$cmd\n\n";

	# The sources are already configured; just the tests have to be run.
	$pid = fork();
	die $! unless defined($pid);
	if (!$pid)
	{
		# Grandchild: run make with all output redirected to the logfile.
		close FAILREAD;
		$ENV{"MAKEFLAGS"} = "";
		open(STDIN, "< /dev/null") || die $!;
		open(STDOUT, ">&LOG") || die $!;
		open(STDERR, ">&LOG") || die $!;
		system("make -C ../tests diag BINARY=true LC_ALL=$lang");
		# Clear close-on-exec so the exec'd make keeps the write side.
		$x = fcntl(FAILWRITE, F_GETFD, 0);
		fcntl(FAILWRITE, F_SETFD, $x & ~FD_CLOEXEC);
		# sudo removes some environment variables, so set all options via make.
		exec $cmd;
		die;
	}

	# Give the child some time to take the write side.
	# If we ever get more than 4/64 kB of failed tests this will hang.
	die $! if waitpid($pid, 0) == -1;
	$error = $?;
	# We have to close the write side of the pipe, so that on reading we'll
	# see an EOF.
	close FAILWRITE;
	# BUGFIX: the <FAILREAD> read was missing here.
	@failed = map { chomp; $_; } <FAILREAD>;
	close FAILREAD;

	$end = time();
	$t = EndText($start, $end);
	if ($error)
	{
		$status = "FAIL";
		# BUGFIX: the <F> read was missing here.
		open(F, "< /proc/loadavg") && print(LOG "LoadAvg: ", <F>) && close(F);
		MSG("WARN", "#$sum failed; $t");
	}
	else
	{
		$status = "OK";
		MSG("INFO", "#$sum done; $t");
		system("sudo rm -rf $PTESTBASE2");
	}

	print LOG "\n",
		"$t\n",
		"$status $error: $user $parms\n",
		"got failed as (", join(" ", @failed), ")\n",
		"\n",
		"$conf_cmd && $cmd\n";
	close LOG;

	$u = $user || "user";
	print CSV join(",", $sum, map { "'$_'"; }
			($prot, $lang, $u, $release, $status, sort(@failed))),
		"\n";
	close CSV;

	# We cannot return $error directly ... only the low 8bit would
	# be taken, and these are the signal the process exited with.
	# A normal error status would be discarded!
	exit($error ? 1 : 0);
}
# Reap finished test children until fewer than $allowed remain running.
# Each child's exit status is stored through the reference kept in
# %pid_to_result; the global $fail counter is bumped for every nonzero
# status, and $running is decremented per reaped child.
sub WaitForChilds
{
	my ($allowed) = @_;
	my ($child, $code);

	while ($running >= $allowed)
	{
		$child = wait();
		$code = $?;
		die $! if $child == -1;
		${$pid_to_result{$child}} = $code;
		$fail++ if $code;
		$running--;
	}
}
# Run a single command string explicitly under bash. Some constructs used
# in this script don't work with /bin/sh (which may be dash, as on recent
# Debian), so we must not rely on system()'s default shell.
sub Bash
{
	my @args = @_;
	die unless @args == 1;
	system('/bin/bash', '-c', $args[0]);
}
# The \n don't matter for the shell, and they help for direct output.
# Format a "Started at (<local time>).\n" line for logs and stdout.
sub StartText
{
	my ($when) = @_;
	my $stamp = localtime($when);
	return "Started at ($stamp).\n";
}
# Format a "Finished after N seconds (<local time>)." summary line from
# the given start and end epoch timestamps.
sub EndText
{
	my ($began, $ended) = @_;
	my $elapsed = $ended - $began;
	my $stamp = localtime($ended);
	return "Finished after $elapsed seconds ($stamp).";
}
fsvs-fsvs-1.2.12/src/diff.c 0000664 0000000 0000000 00000065446 14536317137 0015431 0 ustar 00root root 0000000 0000000 /************************************************************************
* Copyright (C) 2006-2009 Philipp Marek.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 3 as
* published by the Free Software Foundation.
************************************************************************/
#include
#include
#include
#include
#include
#include
#include
#include "global.h"
#include "revert.h"
#include "helper.h"
#include "interface.h"
#include "url.h"
#include "status.h"
#include "options.h"
#include "est_ops.h"
#include "ignore.h"
#include "waa.h"
#include "racallback.h"
#include "cp_mv.h"
#include "warnings.h"
#include "diff.h"
/** \file
* The \ref diff command source file.
*
* Currently only diffing single files is possible; recursive diffing
* of trees has to be done.
*
* For trees it might be better to fetch all files in a kind of
* update-scenario; then we'd avoid the many round-trips we'd have with
* single-file-fetching.
* Although an optimized file-fetching (rsync-like block transfers) would
* probably save a lot of bandwidth.
* */
/** \addtogroup cmds
*
* \section diff
*
* \code
* fsvs diff [-v] [-r rev[:rev2]] [-R] PATH [PATH...]
* \endcode
*
* This command gives you diffs between local and repository files.
*
* With \c -v the meta-data is additionally printed, and changes shown.
*
* If you don't give the revision arguments, you get a diff of the base
* revision in the repository (the last commit) against your current local file.
* With one revision, you diff this repository version against your local
* file. With both revisions given, the difference between these repository
* versions is calculated.
*
* You'll need the \c diff program, as the files are simply passed as
* parameters to it.
*
* The default is to do non-recursive diffs; so fsvs diff . will
* output the changes in all files in the current directory and
* below.
*
* The output for special files is the diff of the internal subversion
* storage, which includes the type of the special file, but no newline at
* the end of the line (which \c diff complains about).
*
* For entries marked as copy the diff against the (clean) source entry is
* printed.
*
* Please see also \ref o_diff and \ref o_colordiff.
*
* \todo Two revisions diff is buggy in that it (currently) always fetches
* the full trees from the repository; this is not only a performance
* degradation, but you'll see more changed entries than you want (like
* changes A to B to A). This will be fixed.
* */
int cdiff_pipe=STDOUT_FILENO;
pid_t cdiff_pid=0;
/** A number that cannot be a valid pointer. */
#define META_DIFF_DELIMITER (0xf44fee31)
/** How long may a meta-data diff string be? */
#define META_DIFF_MAXLEN (256)
/** Diff the given meta-data.
* The given \a format string is used with the va-args to generate two
* strings. If they are equal, one is printed (with space at front); else
* both are shown (with '-' and '+').
* The delimiter between the two argument lists is via \ref
* META_DIFF_DELIMITER. (NULL could be in the data, eg. as integer \c 0.)
*
* It would be faster to simply compare the values given to \c vsnprintf();
* that could even be done here, by using two \c va_list variables and
* comparing. But it's not a performance problem.
*/
/* Formats the varargs twice with the same format string - once up to the
 * META_DIFF_DELIMITER sentinel, once after it - and prints either a
 * single " value" line (unchanged) or a "-old/+new" pair (changed).
 * Returns the STOPIF-style status. */
int df___print_meta(char *format, ... )
{
int status;
va_list va;
char buf_old[META_DIFF_MAXLEN],
buf_new[META_DIFF_MAXLEN];
int l1, l2;
status=0;
va_start(va, format);
/* First half of the argument list: the "old" values. */
l1=vsnprintf(buf_old, META_DIFF_MAXLEN-1, format, va);
DEBUGP("meta-diff: %s", buf_old);
l2=0;
/* Skip forward to the sentinel separating the two argument groups. */
while (va_arg(va, int) != META_DIFF_DELIMITER)
{
l2++;
BUG_ON(l2>5, "Parameter list too long");
}
/* Second half: the "new" values. */
l2=vsnprintf(buf_new, META_DIFF_MAXLEN-1, format, va);
DEBUGP("meta-diff: %s", buf_new);
STOPIF_CODE_ERR( l1<0 || l2<0 ||
l1>=META_DIFF_MAXLEN || l2>=META_DIFF_MAXLEN, EINVAL,
"Printing meta-data strings format error");
/* Different */
STOPIF_CODE_EPIPE(
printf(
(l1 != l2 || strcmp(buf_new, buf_old) !=0) ?
"-%s\n+%s\n" : " %s\n",
buf_old, buf_new), NULL);
ex:
return status;
}
/** Get a file from the repository, and initiate a diff.
*
* Normally rev1 == root->repos_rev; to diff against
* the \e base revision of the file.
*
* If the user specified only a single revision (rev2 == 0),
* the local file is diffed against this; else against the
* other repository version.
*
* \a rev2_file is meaningful only if \a rev2 is 0; this file gets removed
* after printing the difference!
* */
int df__do_diff(struct estat *sts,
svn_revnum_t rev1,
svn_revnum_t rev2, char *rev2_file)
{
int status;
int ch_stat;
/* Static state: the previous invocation's diff child and its temporary
 * files are reaped/removed at the start of the next call (or on the
 * final sts==NULL cleanup call). */
static pid_t last_child=0;
static char *last_tmp_file=NULL;
static char *last_tmp_file2=NULL;
pid_t tmp_pid;
char *path, *disp_dest, *disp_source;
int len_d, len_s;
char *b1, *b2;
struct estat sts_r2;
char short_desc[10];
char *new_mtime_string, *other_mtime_string;
char *url_to_fetch, *other_url;
int is_copy;
int fdflags;
/* NOTE(review): props_r1/props_r2 receive the fetched properties but are
 * not read anywhere in this function - possibly required only as
 * out-parameters of rev__get_text_to_tmpfile(); verify. */
apr_hash_t *props_r1, *props_r2;
status=0;
/* Check whether we have an active child; wait for it. */
if (last_child)
{
/* Keep the race window small. */
tmp_pid=last_child;
last_child=0;
STOPIF_CODE_ERR( waitpid(tmp_pid, &ch_stat, 0) == -1, errno,
"Waiting for child gave an error");
DEBUGP("child %d exitcode %d - status 0x%04X",
tmp_pid, WEXITSTATUS(ch_stat), ch_stat);
STOPIF_CODE_ERR( !WIFEXITED(ch_stat), EIO,
"!Child %d terminated abnormally", tmp_pid);
/* diff(1) exits with 1 when the files differ - that's expected. */
if (WEXITSTATUS(ch_stat) == 1)
DEBUGP("exit code 1 - file has changed.");
else
{
STOPIF( wa__warn(WRN__DIFF_EXIT_STATUS, EIO,
"Child %d gave an exit status %d",
tmp_pid, WEXITSTATUS(ch_stat)),
NULL);
}
}
/* \a last_tmp_file should only be set when last_child is set;
* but who knows.
*
* This cleanup must be done \b after waiting for the child - else we
* might delete the file before it was opened!
* */
if (last_tmp_file)
{
STOPIF_CODE_ERR( unlink(last_tmp_file) == -1, errno,
"Cannot remove temporary file %s", last_tmp_file);
last_tmp_file=NULL;
}
if (last_tmp_file2)
{
STOPIF_CODE_ERR( unlink(last_tmp_file2) == -1, errno,
"Cannot remove temporary file %s", last_tmp_file2);
last_tmp_file2=NULL;
}
/* Just uninit? */
if (!sts) goto ex;
STOPIF( ops__build_path( &path, sts), NULL);
url_to_fetch=NULL;
/* If this entry is freshly copied, get it's source URL. */
is_copy=sts->flags & RF___IS_COPY;
if (is_copy)
{
/* Should we warn if any revisions are given? Can we allow one? */
STOPIF( cm__get_source(sts, NULL, &url_to_fetch, &rev1, 0), NULL);
/* \TODO: That doesn't work for unknown URLs - but that's needed as
* soon as we allow "fsvs cp URL path". */
STOPIF( url__find(url_to_fetch, &sts->url), NULL);
}
else
url_to_fetch=path+2;
current_url = sts->url;
/* We have to fetch a file and do the diff, so open a session. */
STOPIF( url__open_session(NULL, NULL), NULL);
/* The function rev__get_file() overwrites the data in \c *sts with
* the repository values - mtime, ctime, etc.
* We use this as an advantage and remember the current time - so that
* we can print both. */
/* \e From is always the "old" - base revision, or first given revision.
* \e To is the newer version - 2nd revision, or local file. */
/* TODO: use delta transfers for 2nd file. */
sts_r2=*sts;
if (rev2 != 0)
{
STOPIF( url__full_url(sts, &other_url), NULL);
STOPIF( url__canonical_rev(current_url, &rev2), NULL);
STOPIF( rev__get_text_to_tmpfile(other_url, rev2, DECODER_UNKNOWN,
NULL, &last_tmp_file2,
NULL, &sts_r2, &props_r2,
current_url->pool),
NULL);
}
else if (rev2_file)
{
DEBUGP("diff against %s", rev2_file);
/* Let it get removed. */
last_tmp_file2=rev2_file;
}
/* Now fetch the \e old version. */
STOPIF( url__canonical_rev(current_url, &rev1), NULL);
STOPIF( rev__get_text_to_tmpfile(url_to_fetch, rev1, DECODER_UNKNOWN,
NULL, &last_tmp_file,
NULL, sts, &props_r1,
current_url->pool), NULL);
/* If we didn't flush the stdio buffers here, we'd risk getting them
* printed a second time from the child. */
fflush(NULL);
last_child=fork();
STOPIF_CODE_ERR( last_child == -1, errno,
"Cannot fork diff program");
/* Child branch: print the diff header/meta-data, then replace ourselves
 * with the diff program via execlp() - this branch never returns. */
if (!last_child)
{
STOPIF( hlp__format_path(sts, path, &disp_dest), NULL);
/* Remove the ./ at the front */
setenv(FSVS_EXP_CURR_ENTRY, path+2, 1);
disp_source= is_copy ? url_to_fetch : disp_dest;
len_d=strlen(disp_dest);
len_s=strlen(disp_source);
if (cdiff_pipe != STDOUT_FILENO)
{
STOPIF_CODE_ERR( dup2(cdiff_pipe, STDOUT_FILENO) == -1, errno,
"Redirect output");
/* Problem with svn+ssh - see comment below. */
fdflags=fcntl(STDOUT_FILENO, F_GETFD);
fdflags &= ~FD_CLOEXEC;
/* Does this return errors? */
fcntl(STDOUT_FILENO, F_SETFD, fdflags);
}
/* We need not be nice with memory usage - we'll be replaced soon. */
/* 30 chars should be enough for everyone */
b1=malloc(len_s + 60 + 30);
b2=malloc(len_d + 60 + 30);
STOPIF( hlp__strdup( &new_mtime_string,
ctime(& sts_r2.st.mtim.tv_sec)), NULL);
STOPIF( hlp__strdup( &other_mtime_string,
ctime(&sts->st.mtim.tv_sec)), NULL);
sprintf(b1, "%s \tRev. %llu \t(%-24.24s)",
disp_source, (t_ull) rev1, other_mtime_string);
if (rev2 == 0)
{
sprintf(b2, "%s \tLocal version \t(%-24.24s)",
disp_dest, new_mtime_string);
strcpy(short_desc, "local");
}
else
{
sprintf(b2, "%s \tRev. %llu \t(%-24.24s)",
disp_dest, (t_ull) rev2, new_mtime_string);
sprintf(short_desc, "r%llu", (t_ull) rev2);
}
/* Print header line, just like a recursive diff does. */
STOPIF_CODE_EPIPE( printf("diff -u %s.r%llu %s.%s\n",
disp_source, (t_ull)rev1,
disp_dest, short_desc),
"Diff header");
if (opt__is_verbose() > 0) // TODO: && !symlink ...)
{
STOPIF( df___print_meta( "Mode: 0%03o",
sts->st.mode & 07777,
META_DIFF_DELIMITER,
sts_r2.st.mode & 07777),
NULL);
STOPIF( df___print_meta( "MTime: %.24s",
other_mtime_string,
META_DIFF_DELIMITER,
new_mtime_string),
NULL);
STOPIF( df___print_meta( "Owner: %d (%s)",
sts->st.uid, hlp__get_uname(sts->st.uid, "undefined"),
META_DIFF_DELIMITER,
sts_r2.st.uid, hlp__get_uname(sts_r2.st.uid, "undefined") ),
NULL);
STOPIF( df___print_meta( "Group: %d (%s)",
sts->st.gid, hlp__get_grname(sts->st.gid, "undefined"),
META_DIFF_DELIMITER,
sts_r2.st.gid, hlp__get_grname(sts_r2.st.gid, "undefined") ),
NULL);
}
fflush(NULL);
// TODO: if special_dev ...
/* Checking \b which return value we get is unnecessary ... On \b
* every error we get \c -1 .*/
execlp( opt__get_string(OPT__DIFF_PRG),
opt__get_string(OPT__DIFF_PRG),
opt__get_string(OPT__DIFF_OPT),
last_tmp_file,
"--label", b1,
(rev2 != 0 ? last_tmp_file2 :
rev2_file ? rev2_file : path),
"--label", b2,
opt__get_string(OPT__DIFF_EXTRA),
NULL);
STOPIF_CODE_ERR( 1, errno,
"Starting the diff program \"%s\" failed",
opt__get_string(OPT__DIFF_PRG));
}
ex:
return status;
}
/** Clean up leftovers of the diff machinery.
 * Closes the (possibly redirected) colordiff pipe, waits for a still
 * running colordiff child, and calls df__do_diff() with a NULL entry so
 * it reaps its last diff child and removes its temporary files.
 * Returns the STOPIF-style status. */
int df___cleanup(void)
{
	int status;
	int ret;

	if (cdiff_pipe != STDOUT_FILENO)
		STOPIF_CODE_ERR( close(cdiff_pipe) == -1, errno,
				"Cannot close colordiff pipe");

	if (cdiff_pid)
	{
		/* Should we kill colordiff? Let it stop itself? Wait for it?
		 * It should terminate itself, because STDIN gets no more data.
		 *
		 * But if we don't wait, it might get scheduled after the shell printed
		 * its prompt ... and that's not fine. But should we ignore the return
		 * code? */
		STOPIF_CODE_ERR( waitpid( cdiff_pid, &ret, 0) == -1, errno,
				"Can't wait");
		DEBUGP("child %d exitcode %d - status 0x%04X",
				cdiff_pid, WEXITSTATUS(ret), ret);
	}

	/* A NULL entry makes df__do_diff() do only its cleanup work.
	 * Fix: pass NULL (not the integer 0) for the char* parameter. */
	STOPIF( df__do_diff(NULL, 0, 0, NULL), NULL);

ex:
	return status;
}
/// FSVS GCOV MARK: df___signal should not be executed
/** Signal handler function.
* If the user wants us to quit, we remove the temporary files, and exit.
*
* Is there a better/cleaner way?
* */
/* Signal handler: remove temporary files via df___cleanup(), then exit.
 * (Marked above as never executed under gcov.) */
static void df___signal(int sig)
{
DEBUGP("signal %d arrived!", sig);
df___cleanup();
exit(0);
}
/** Does a diff of the local non-directory against the given revision.
* */
/* Dispatches the diff of a single non-directory entry by file type:
 * regular files are diffed directly; symlinks and device nodes are
 * rendered to their internal string representation, written to a
 * temporary file, and that file is diffed instead. */
int df___type_def_diff(struct estat *sts, svn_revnum_t rev,
apr_pool_t *pool)
{
int status;
char *special_stg, *fn;
apr_file_t *apr_f;
apr_size_t wr_len, exp_len;
status=0;
special_stg=NULL;
switch (sts->st.mode & S_IFMT)
{
case S_IFREG:
STOPIF( df__do_diff(sts, rev, 0, NULL), NULL);
break;
case S_IFCHR:
case S_IFBLK:
case S_IFANYSPECIAL:
special_stg=ops__dev_to_filedata(sts);
/* Fallthrough, ignore first statement. */
case S_IFLNK:
/* For a symlink special_stg is still NULL here; render the link. */
if (!special_stg)
STOPIF( ops__link_to_string(sts, NULL, &special_stg), NULL);
STOPIF( ops__build_path( &fn, sts), NULL);
STOPIF_CODE_EPIPE( printf("Special entry changed: %s\n", fn), NULL);
/* As "diff" cannot handle special files directly, we have to
* write the expected string into a file, and diff against
* that.
* The remote version is fetched into a temporary file anyway. */
STOPIF( waa__get_tmp_name(NULL, &fn, &apr_f, pool), NULL);
wr_len=exp_len=strlen(special_stg);
STOPIF( apr_file_write(apr_f, special_stg, &wr_len), NULL);
/* A short write means the filesystem is full. */
STOPIF_CODE_ERR( wr_len != exp_len, ENOSPC, NULL);
STOPIF( apr_file_close(apr_f), NULL);
STOPIF( df__do_diff(sts, rev, 0, fn), NULL);
break;
default:
BUG("type?");
}
ex:
return status;
}
/** -. */
int df___direct_diff(struct estat *sts)
{
int status;
svn_revnum_t rev1;
char *fn;
STOPIF( ops__build_path( &fn, sts), NULL);
status=0;
/* Directories are skipped entirely - see the else branch at the end. */
if (!S_ISDIR(sts->st.mode))
{
DEBUGP("doing %s", fn);
/* Has to be set per sts. */
rev1=sts->repos_rev;
/* Locally removed: nothing to diff against. */
if ( (sts->entry_status & FS_REMOVED))
{
STOPIF_CODE_EPIPE( printf("Only in repository: %s\n", fn), NULL);
goto ex;
}
if (sts->to_be_ignored) goto ex;
/* New entries have no repository counterpart - unless they were
 * copied, in which case the copy source serves as the "old" side. */
if ( (sts->entry_status & FS_NEW) || !sts->url)
{
if (sts->flags & RF___IS_COPY)
{
/* File was copied, we have a source */
}
else
{
if (opt__is_verbose() > 0)
STOPIF_CODE_EPIPE( printf("Only in local filesystem: %s\n",
fn), NULL);
goto ex;
}
}
/* Local files must have changed; for repos-only diffs do always. */
if (sts->entry_status || opt_target_revisions_given)
{
DEBUGP("doing diff rev1=%llu", (t_ull)rev1);
if (S_ISDIR(sts->st.mode))
{
/* TODO: meta-data diff? */
}
else
{
/* TODO: Some kind of pool handling in recursion. */
STOPIF( df___type_def_diff(sts, rev1, global_pool), NULL);
}
}
}
else
{
/* Nothing to do for directories? */
}
ex:
return status;
}
/** A cheap replacement for colordiff.
* Nothing more than a \c cat. */
int df___cheap_colordiff(void)
{
int status;
char *tmp;
const int tmp_size=16384;
status=0;
tmp=alloca(tmp_size);
/* Copy STDIN to STDOUT until EOF (read returns 0) or an error (-1)
 * from either side; status holds the last read/write result. */
while ( (status=read(STDIN_FILENO,tmp, tmp_size)) > 0 )
if ( (status=write(STDOUT_FILENO, tmp, status)) == -1)
break;
if (status == -1)
{
/* EPIPE just means the reader went away - not an error for us. */
STOPIF_CODE_ERR(errno != EPIPE, errno,
"Getting or pushing diff data");
status=0;
}
ex:
return status;
}
/** Tries to start colordiff.
* If colordiff can not be started, but the option says \c auto, we just
* forward the data. Sadly neither \c splice nor \c sendfile are available
* everywhere.
* */
int df___colordiff(int *handle, pid_t *cd_pid)
{
const char *program;
int status;
int pipes[2], fdflags, success[2];
status=0;
/* An explicitly configured program wins; otherwise try "colordiff". */
program=opt__get_int(OPT__COLORDIFF) ?
opt__get_string(OPT__COLORDIFF) :
"colordiff";
/* pipes[] carries the diff data to the child; success[] reports a
 * failed exec back to the parent (see the long comment below). */
STOPIF_CODE_ERR( pipe(pipes) == -1, errno,
"No more pipes");
STOPIF_CODE_ERR( pipe(success) == -1, errno,
"No more pipes, case 2");
/* There's a small problem if the parent gets scheduled before the child,
* and the child doesn't find the colordiff binary; then the parent might
* only find out when it tries to send the first data across the pipe.
*
* But the successfully spawned colordiff won't report success, so the
* parent would have to wait for a fail message - which delays execution
* unnecessary - or simply live with diff getting EPIPE.
*
* Trying to get it scheduled by sending it a signal (which will be
* ignored) doesn't work reliably, too.
*
* The only way I can think of is opening a second pipe in reverse
* direction; if there's nothing to be read but EOF, the program could be
* started - else we get a single byte, signifying an error. */
*cd_pid=fork();
STOPIF_CODE_ERR( *cd_pid == -1, errno,
"Cannot fork colordiff program");
if (!*cd_pid)
{
/* Child: mark the success pipe close-on-exec, so a successful exec
 * closes it silently and the parent just reads EOF. */
close(success[0]);
fdflags=fcntl(success[1], F_GETFD);
fdflags |= FD_CLOEXEC;
fcntl(success[1], F_SETFD, fdflags);
STOPIF_CODE_ERR(
( dup2(pipes[0], STDIN_FILENO) |
close(pipes[1]) |
close(pipes[0]) ) == -1, errno,
"Redirecting IO didn't work");
execlp( program, program, NULL);
/* "" as value means best effort, so no error; any other string should
* give an error. */
if (opt__get_int(OPT__COLORDIFF) != 0)
{
fdflags=errno;
if (!fdflags) fdflags=EINVAL;
/* Report an error to the parent. */
write(success[1], &fdflags, sizeof(fdflags));
STOPIF_CODE_ERR_GOTO(1, fdflags, quit,
"!Cannot start colordiff program \"%s\"", program);
}
close(success[1]);
/* Well ... do the best. */
/* We cannot use STOPIF() and similar, as that would return back up to
* main - and possibly cause problems somewhere else. */
status=df___cheap_colordiff();
quit:
exit(status ? 1 : 0);
}
close(pipes[0]);
close(success[1]);
/* A byte on the success pipe carries the child's errno; EOF (0 bytes)
 * means the exec succeeded. */
status=read(success[0], &fdflags, sizeof(fdflags));
close(success[0]);
STOPIF_CODE_ERR( status>0, fdflags,
"!The colordiff program \"%s\" doesn't accept any data.\n"
"Maybe it couldn't be started, or stopped unexpectedly?",
opt__get_string(OPT__COLORDIFF) );
/* For svn+ssh connections a ssh process is spawned off.
* If we don't set the CLOEXEC flag, it inherits the handle, and so the
* colordiff child will never terminate - it might get data from ssh, after
* all. */
fdflags=fcntl(pipes[1], F_GETFD);
fdflags |= FD_CLOEXEC;
/* Does this return errors? */
fcntl(pipes[1], F_SETFD, fdflags);
*handle=pipes[1];
DEBUGP("colordiff is %d", *cd_pid);
ex:
return status;
}
/** Prints diffs for all entries with estat::entry_status or
 * estat::remote_status set.
 *
 * Recurses depth-first into directories; a fresh APR subpool of \a pool
 * is created per call and destroyed before returning (also on the error
 * path via \c ex).
 * \return 0 for success, else an error code propagated by the STOPIF
 * macros. */
int df___diff_wc_remote(struct estat *entry, apr_pool_t *pool)
{
int status;
struct estat **sts;
int removed;
char *fn;
apr_pool_t *subpool;
status=0;
subpool=NULL;
STOPIF( apr_pool_create(&subpool, pool), NULL);
/* Existence bitmask: bit 0 (value 1) = removed in the repository;
 * bit 1 (value 2) = new in the repository or removed locally, ie.
 * present on only one side. */
removed =
( ((entry->remote_status & FS_REPLACED) == FS_REMOVED) ? 1 : 0 ) |
( ((entry->remote_status & FS_REPLACED) == FS_NEW) ? 2 : 0 ) |
( ((entry->entry_status & FS_REPLACED) == FS_REMOVED) ? 2 : 0 );
STOPIF( ops__build_path(&fn, entry), NULL);
DEBUGP_dump_estat(entry);
/* TODO: option to print the whole lot of removed and "new" lines for
 * files existing only at one point? */
switch (removed)
{
case 3:
/* Removed both locally and remote; no change to print. (?) */
break;
case 1:
/* Remotely removed. */
STOPIF_CODE_EPIPE( printf("Only locally: %s\n", fn), NULL);
break;
case 2:
/* Locally removed. */
STOPIF_CODE_EPIPE( printf("Only in the repository: %s\n", fn), NULL);
break;
case 0:
/* Exists on both; show (recursive) differences. */
if ((entry->local_mode_packed != entry->new_rev_mode_packed))
{
/* Another type, so a diff doesn't make much sense, does it? */
STOPIF_CODE_EPIPE( printf("Type changed from local %s to %s: %s\n",
st__type_string(PACKED_to_MODE_T(entry->local_mode_packed)),
st__type_string(PACKED_to_MODE_T(entry->new_rev_mode_packed)),
fn), NULL);
/* Should we print some message that sub-entries are available?
if (opt__is_verbose() > 0)
{
} */
}
else if (entry->entry_status || entry->remote_status)
{
/* Local changes, or changes to repository. */
if (S_ISDIR(entry->st.mode))
{
/* TODO: meta-data diff? */
if (entry->entry_count)
{
/* Walk the by_inode child array; terminated by a NULL pointer. */
sts=entry->by_inode;
while (*sts)
{
STOPIF( df___diff_wc_remote(*sts, subpool), NULL);
sts++;
}
}
}
else
STOPIF( df___type_def_diff(entry, entry->repos_rev, subpool), NULL);
}
break;
}
ex:
/* This is of type (void), so we don't have any status to check. */
if (subpool) apr_pool_destroy(subpool);
return status;
}
/** Marks \a sts as being at BASE, ie. carrying no remote changes, by
 * clearing all remote status flags.
 * Installed as action->repos_feedback callback in df__work(); always
 * succeeds and returns 0. */
int df___reset_remote_st(struct estat *sts)
{
	sts->remote_status = 0;
	return 0;
}
/** Does a repos/repos diff.
 * Currently works only for files.
 *
 * Prints a one-line message for completely replaced, new-only and
 * removed-only entries; for otherwise changed entries it recurses into
 * directories, announces changed special entries, and runs the real diff
 * (between opt_target_revision and opt_target_revision2) for regular
 * files.
 * \return 0 for success, else an error code. */
int df___repos_repos(struct estat *sts)
{
int status;
char *fullpath, *path;
struct estat **children;
STOPIF( ops__build_path( &fullpath, sts), NULL);
DEBUGP("%s: %s", fullpath, st__status_string_fromint(sts->remote_status));
STOPIF( hlp__format_path( sts, fullpath, &path), NULL);
if ((sts->remote_status & FS_REPLACED) == FS_REPLACED)
STOPIF_CODE_EPIPE(
printf("Completely replaced: %s\n", path), NULL);
else if (sts->remote_status & FS_NEW)
STOPIF_CODE_EPIPE(
printf("Only in r%llu: %s\n",
(t_ull)opt_target_revision2, path), NULL);
else if ((sts->remote_status & FS_REPLACED) == FS_REMOVED)
STOPIF_CODE_EPIPE(
printf("Only in r%llu: %s\n",
(t_ull)opt_target_revision, path), NULL);
else if (sts->remote_status)
/* Entry changed (but not added/removed); dispatch on its file type. */
switch (sts->st.mode & S_IFMT)
{
case S_IFDIR:
/* TODO: meta-data diff? */
if (sts->entry_count)
{
/* by_inode is NULL-terminated; recurse into every child. */
children=sts->by_inode;
while (*children)
STOPIF( df___repos_repos(*(children++)), NULL);
}
break;
/* Normally a repos-repos diff can only show symlinks changing -
 * all other types of special entries get *replaced*. */
case S_IFANYSPECIAL:
/* We don't know yet which special type it is. */
case S_IFLNK:
case S_IFBLK:
case S_IFCHR:
STOPIF_CODE_EPIPE( printf("Special entry changed: %s\n",
path), NULL);
/* Fallthrough */
case S_IFREG:
STOPIF( df__do_diff(sts,
opt_target_revision, opt_target_revision2, NULL),
NULL);
break;
default:
BUG("type?");
}
ex:
return status;
}
/** -.
 *
 * We get the WC status, fetch the named changed entries, and call
 * an external diff program for each.
 *
 * As a small performance optimization we do that kind of parallel -
 * while we're fetching a file, we run the diff.
 *
 * Dispatches on opt_target_revisions_given:
 *   0 = WC against BASE, 1 = WC against rX, 2 = rX against rY. */
int df__work(struct estat *root, int argc, char *argv[])
{
int status;
int i, deinit;
char **normalized;
svn_revnum_t rev, base;
char *norm_wcroot[2]= {".", NULL};
status=0;
deinit=1;
STOPIF( waa__find_common_base(argc, argv, &normalized), NULL);
STOPIF( url__load_nonempty_list(NULL, 0), NULL);
STOPIF(ign__load_list(NULL), NULL);
/* Install cleanup handlers for termination signals; SIGCHLD is reset
 * to the default disposition. */
signal(SIGINT, df___signal);
signal(SIGTERM, df___signal);
signal(SIGHUP, df___signal);
signal(SIGCHLD, SIG_DFL);
/* check for colordiff */
if (( opt__get_int(OPT__COLORDIFF)==0 ||
opt__doesnt_say_off(opt__get_string(OPT__COLORDIFF)) ) &&
(isatty(STDOUT_FILENO) ||
opt__get_prio(OPT__COLORDIFF) > PRIO_PRE_CMDLINE) )
{
DEBUGP("trying to use colordiff");
STOPIF( df___colordiff(&cdiff_pipe, &cdiff_pid), NULL);
}
/* TODO: If we get "-u X@4 Y@4:3 Z" we'd have to do different kinds of
 * diff for the URLs.
 * What about filenames? */
STOPIF( url__mark_todo(), NULL);
switch (opt_target_revisions_given)
{
case 0:
/* Diff WC against BASE. */
action->local_callback=df___direct_diff;
/* We know that we've got a wc base because of
 * waa__find_common_base() above. */
STOPIF( waa__read_or_build_tree(root, argc,
normalized, argv, NULL, 1), NULL);
break;
case 1:
/* WC against rX. */
/* Fetch local changes ... */
action->local_callback=st__progress;
action->local_uninit=st__progress_uninit;
STOPIF( waa__read_or_build_tree(root, argc, normalized, argv,
NULL, 1), NULL);
// Has to set FS_CHILD_CHANGED somewhere
/* Fetch remote changes ... */
while ( ! ( status=url__iterator(&rev) ) )
{
STOPIF( cb__record_changes(root, rev, current_url->pool), NULL);
}
/* url__iterator() ends the loop with EOF; anything else is an error. */
STOPIF_CODE_ERR( status != EOF, status, NULL);
STOPIF( df___diff_wc_remote(root, current_url->pool), NULL);
break;
case 2:
/* rX:Y.
 * This works in a single loop because the URLs are sorted in
 * descending priority, and an entry removed at a higher priority
 * could be replaced by one at a lower. */
/* TODO: 2 revisions per-URL. */
/* If no entries are given, do the whole working copy. */
if (!argc)
normalized=norm_wcroot;
while ( ! ( status=url__iterator(&rev) ) )
{
STOPIF( url__canonical_rev(current_url, &opt_target_revision), NULL);
STOPIF( url__canonical_rev(current_url, &opt_target_revision2), NULL);
/* Take the values at the first revision as base; say that we've
 * got nothing. */
current_url->current_rev=0;
action->repos_feedback=df___reset_remote_st;
STOPIF( cb__record_changes(root, opt_target_revision,
current_url->pool), NULL);
/* Now get changes. We cannot do diffs directly, because
 * we must not use the same connection for two requests
 * simultaneously. */
action->repos_feedback=NULL;
/* We say that the WC root is at the target revision, but that some
 * paths are not. */
base=current_url->current_rev;
current_url->current_rev=opt_target_revision2;
STOPIF( cb__record_changes_mixed(root, opt_target_revision2,
normalized, base, current_url->pool),
NULL);
}
STOPIF_CODE_ERR( status != EOF, status, NULL);
/* If we'd use the log functions to get a list of changed files
 * we'd be slow for large revision ranges; for the various
 * svn_ra_do_update, svn_ra_do_diff2 and similar functions we'd
 * need the (complete) working copy base to get deltas against (as
 * we don't know which entries are changed).
 *
 * This way seems to be the fastest, and certainly the easiest for
 * now. */
/* "time fsvs diff -r4:4" on "ssh+svn://localhost/..." for 8400
 * files gives a real time of 3.6sec.
 * "time fsvs diff > /dev/null" on "ssh+svn://localhost/..." for 840
 * of 8400 files changed takes 1.8sec.
 * */
/* A possible idea would be to have a special delta-editor that
 * accepts (not already known) directories as unchanged.
 * Then it should be possible [1] to ask for the *needed* parts
 * only, which should save a fair bit of bandwidth.
 *
 * Ad 1: Ignoring "does not exist" messages when we say "directory
 * 'not-needed' is already at revision 'target'" and this isn't
 * true. TODO: Test whether all ra layers make that possible. */
STOPIF( df___repos_repos(root), NULL);
status=0;
break;
default:
BUG("what?");
}
/* NOTE(review): final call with a NULL entry - presumably flushes any
 * still-pending diff; confirm against df__do_diff(). */
STOPIF( df__do_diff(NULL, 0, 0, 0), NULL);
ex:
/* Run the cleanup exactly once; keep the first error code if one is
 * already set. */
if (deinit)
{
deinit=0;
i=df___cleanup();
if (!status && i)
STOPIF(i, NULL);
}
return status;
}
fsvs-fsvs-1.2.12/src/diff.h 0000664 0000000 0000000 00000001074 14536317137 0015421 0 ustar 00root root 0000000 0000000 /************************************************************************
* Copyright (C) 2006-2008 Philipp Marek.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 3 as
* published by the Free Software Foundation.
************************************************************************/
#ifndef __DIFF_H__
#define __DIFF_H__
/** \file
* \ref diff action header file. */
#include "global.h"
#include "actions.h"
/** Diff command main function. */
work_t df__work;
#endif
fsvs-fsvs-1.2.12/src/direnum.c 0000664 0000000 0000000 00000040255 14536317137 0016153 0 ustar 00root root 0000000 0000000 /************************************************************************
* Copyright (C) 2005-2009 Philipp Marek.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 3 as
* published by the Free Software Foundation.
************************************************************************/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "est_ops.h"
#include "direnum.h"
#include "warnings.h"
#include "global.h"
#include "helper.h"
/** \file
* Directory enumerator functions. */
/** \defgroup getdents Directory reading
* \ingroup perf
* How to read a million inodes as fast as possible
*
* \section getdents_why Why?
* Why do we care for \a getdents64 instead of simply using the
* (portable) \a readdir()?
* - \a getdents64 gives 64bit inodes (which we need on big
* filesystems)
* - as \a getdents64 gives up to (currently) 4096 bytes of directory
* data, we save some amount of library and/or kernel calls -
* for 32 byte per directory entry (estimated, measured, averaged)
* we get a maximum of about 128 directory entries per call - which
* saves many syscalls and much time.
* Not counting the overhead of the apr- and libc-layers ... which we
* should (have to) use for eg. windows.
*
* \section getdents_how How?
* We have two kinds of directory reading codes.
* - A fast one with \a getdents64() (linux-specific)
* - A compatibility layer using \a opendir() / \a readdir() / \a closedir().
*
* Which one to use is defined by \c configure.
* */
/** @{ */
#undef HAVE_GETDENTS64
#ifdef HAVE_LINUX_TYPES_H
#ifdef HAVE_LINUX_UNISTD_H
/** If the system fulfills all necessary checks to use getdents(), this macro
* is set. */
#define HAVE_GETDENTS64 1
#endif
#endif
#ifdef HAVE_GETDENTS64
/* Fast linux version. */
#include
#include
/** The type of handle. */
typedef int dir__handle;
/** A compatibility structure.
* It has an inode; a name; and a record length in it, to get from one
* record to the next. */
typedef struct dirent64 fsvs_dirent;
/** Starts enumeration of the given \a path. The directory handle is returned
 * in \a *dh.
 * \return 0 for success, or an error code. */
int dir__start_enum(dir__handle *dh, char *path)
{
int status;
status=0;
*dh=open(path, O_RDONLY | O_DIRECTORY);
/* open(2) signals failure only with -1; any non-negative value (even 0,
 * if stdin happened to be closed) is a valid descriptor, so testing
 * "<= 0" would wrongly reject fd 0. */
STOPIF_CODE_ERR( *dh == -1, errno,
"open directory %s for reading", path);
ex:
return status;
}
/** The enumeration function.
 * \param dh The handle given by dir__start_enum.
 * \param dirp The space where data should be returned
 * \param count The maximum number of bytes in \a dirp.
 *
 * Thin wrapper around the getdents64 system call (invoked directly via
 * syscall(), as there is no glibc wrapper assumed here).
 * \return The number of bytes used in \a dirp; 0 at end of directory,
 * negative on error (see the i<0 check in dir__enumerator()). */
int dir__enum(dir__handle dh, fsvs_dirent *dirp, unsigned int count)
{
return syscall(__NR_getdents64, dh, dirp, count);
}
/** Simply closes the handle \a dh.
 * \return 0 for success, or the errno from close(2). */
int dir__close(dir__handle dh)
{
int status;
status=0;
STOPIF_CODE_ERR( close(dh) == -1, errno,
"closing dir-handle");
ex:
return status;
}
/** How to get the length of a directory (in bytes), from a handle \a dh,
 * into \a st->size.
 * Delegates to hlp__fstat() on the open descriptor.
 * \return 0 for success, else an error code. */
int dir__get_dir_size(dir__handle dh, struct sstat_t *st)
{
int status;
status=0;
STOPIF( hlp__fstat(dh, st), "Get directory size");
ex:
return status;
}
#else
/* We fake something compatible with what we need.
* That's not the finest way, but it works (TM). */
#include
#include
/* Fallback directory-entry record mimicking the getdents64 layout:
 * a 64bit inode, the record length, and a fixed-size name buffer.
 * Filled by the compat dir__enum() below from readdir() results. */
struct fsvs_dirent_t {
uint64_t d_ino;
int d_reclen;
char d_name[NAME_MAX+1];
};
typedef struct fsvs_dirent_t fsvs_dirent;
typedef DIR* dir__handle;
/* Compatibility version: opens \a path with opendir() and returns the
 * DIR* handle in \a *dh.
 * Returns 0 for success, else an error code. */
int dir__start_enum(dir__handle *dh, char *path)
{
int status;
status=0;
STOPIF_CODE_ERR( (*dh=opendir(path)) == NULL, errno,
"Error opening directory %s", path);
ex:
return status;
}
/* Impedance matching: fetch one entry via readdir() and repackage it in
 * the getdents-style fsvs_dirent layout.
 * Returns 0 at end-of-directory, else the number of bytes used (which is
 * exactly one record). The \a count limit is not needed here, since only
 * a single record is produced per call. */
int dir__enum(dir__handle dh, fsvs_dirent *dirp, unsigned int count)
{
	struct dirent *ent;

	ent = readdir(dh);
	/* NULL means end of directory. */
	if (!ent)
		return 0;

	dirp->d_ino = ent->d_ino;
	strcpy(dirp->d_name, ent->d_name);
	/* Record length = fixed part plus the name including its \0. */
	dirp->d_reclen = sizeof(*dirp) - sizeof(dirp->d_name)
		+ strlen(dirp->d_name) + 1;
	return dirp->d_reclen;
}
/* Compatibility version: closes the DIR* handle.
 * Returns 0 for success, else the errno from closedir(3). */
int dir__close(dir__handle dh)
{
int status;
status=0;
STOPIF_CODE_ERR( closedir(dh) == -1, errno,
"Error closing directory handle");
ex:
return status;
}
/* Compatibility version: stores the directory size in \a st->size.
 * Without dirfd() support the size simply stays 0. */
int dir__get_dir_size(dir__handle dh, struct sstat_t *st)
{
int status;
status=0;
st->size=0;
#ifdef HAVE_DIRFD
STOPIF( hlp__fstat(dirfd(dh), st),
"Get directory size()");
#endif
ex:
return status;
}
#endif
/** @} */
/** The amount of memory that should be allocated for directory reading.
* This value should be bigger (or at least equal) than the number of
* bytes returned by \a getdents().
* For the compatibility layer it's more or less the maximum filename length
* plus the inode and record length lengths.
*
* This many bytes \b more will also be allocated for the filenames in a
* directory; if we get this close to the end of the buffer,
* the memory area will be reallocated. */
#define FREE_SPACE (4096)
/** Compares two struct estat pointers by device/inode.
 * \return +2, +1, 0, -1, -2, suitable for \a qsort().
 *
 * The device number is the major sort key (+-2), the inode number the
 * minor one (+-1).
 *
 * That is now an inline function; but without force gcc doesn't inline it
 * on 32bit, because of the size (64bit compares, 0x6b bytes).
 * [ \c __attribute__((always_inline)) in declaration]. */
int dir___f_sort_by_inodePP(struct estat *a, struct estat *b)
{
	const struct sstat_t *st_a = &(a->st);
	const struct sstat_t *st_b = &(b->st);

	if (st_a->dev != st_b->dev)
		return st_a->dev > st_b->dev ? +2 : -2;
	if (st_a->ino != st_b->ino)
		return st_a->ino > st_b->ino ? +1 : -1;
	return 0;
}
/** Compares the data inside two struct estat pointers to pointers by
 * device/inode.
 * qsort()-compatible wrapper: dereferences once and delegates to
 * dir___f_sort_by_inodePP().
 * \return +2, +1, 0, -1, -2, suitable for \a qsort(). */
int dir___f_sort_by_inode(struct estat **a, struct estat **b)
{
return dir___f_sort_by_inodePP(*a, *b);
}
/** Compares two names/strings.
 * Used for type checking cleanliness.
 * 'C' as for 'Const'.
 * Uses strcoll(), so ordering follows the current locale's collation.
 * \return negative, zero or positive (strcoll() does not restrict the
 * magnitude), suitable for \a qsort(). */
int dir___f_sort_by_nameCC(const void *a, const void *b)
{
return strcoll(a,b);
}
/** Compares the data inside two struct estat pointers to pointers
 * by name.
 * qsort()-style comparison; delegates to dir___f_sort_by_nameCC() on
 * the entries' names. */
int dir___f_sort_by_name(const void *a, const void *b)
{
	const struct estat *const *left = a;
	const struct estat *const *right = b;

	return dir___f_sort_by_nameCC((*left)->name, (*right)->name);
}
/** Compares a pointer to name (string) with a struct estat pointer
 * to pointer.
 * \a a is the plain string, \a b the struct estat** whose entry name is
 * compared against it; delegates to dir___f_sort_by_nameCC(). */
int dir___f_sort_by_nameCS(const void *a, const void *b)
{
	const struct estat *const *entry = b;

	return dir___f_sort_by_nameCC(a, (*entry)->name);
}
/** -.
 * If it has no entries, an array with NULL is nonetheless allocated.
 *
 * (Re)builds estat::by_name as a copy of the estat::by_inode pointer
 * array, sorted by name via dir___f_sort_by_name(), and terminates it
 * with a NULL pointer.
 * \return 0 for success, else an error code. */
int dir__sortbyname(struct estat *sts)
{
int count, status;
// BUG_ON(!S_ISDIR(sts->st.mode));
/* One extra slot for the NULL terminator. */
count=sts->entry_count+1;
/* After copying we can release some space, as 64bit inodes
 * are smaller than 32bit pointers.
 * Or otherwise we may have to allocate space anyway - this
 * happens automatically on reallocating a NULL pointer. */
STOPIF( hlp__realloc( &sts->by_name, count*sizeof(*sts->by_name)), NULL);
if (sts->entry_count!=0)
{
/* Copy entry_count+1 pointers, then sort only the real entries. */
memcpy(sts->by_name, sts->by_inode, count*sizeof(*sts->by_name));
qsort(sts->by_name, sts->entry_count, sizeof(*sts->by_name), dir___f_sort_by_name);
}
sts->by_name[sts->entry_count]=NULL;
status=0;
ex:
return status;
}
/** -.
 * Sorts the existing estat::by_inode array in place by device/inode.
 * A non-empty directory must already have by_inode allocated (checked
 * via BUG_ON). Always returns 0. */
int dir__sortbyinode(struct estat *sts)
{
// BUG_ON(!S_ISDIR(sts->st.mode));
if (sts->entry_count)
{
BUG_ON(!sts->by_inode);
qsort(sts->by_inode, sts->entry_count, sizeof(*sts->by_inode),
(comparison_fn_t)dir___f_sort_by_inode);
}
return 0;
}
/** -.
 * The entries are sorted by inode number and stat()ed.
 *
 * \param this a pointer to this directory's stat - for estimating
 * the number of entries. Only this->st.st_size is used for that -
 * it may have to be zeroed before calling.
 * \param est_count is used to give an approximate number of entries, to
 * avoid many realloc()s.
 * \param give_by_name simply tells whether the ->by_name array should be
 * created, too.
 *
 * The result is written back into the sub-entry array in \a this.
 *
 * To avoid reallocating (and copying!) large amounts of memory,
 * this function fills some arrays from the directory, then allocates the
 * needed space, sorts the data (see note below) and adds all other data.
 * See \a sts_array, \a names and \a inode_numbers.
 *
 * \note Sorting by inode number brings about 30% faster lookup
 * times on my test environment (8 to 5 seconds) on an \b empty cache.
 * Once the cache is filled, it won't make a difference.
 *
 * \return 0 for success, else an errorcode.
 */
/* NOTE(review): this listing appears mangled by markup stripping - text
 * between '<' and '>' characters is missing in several places (e.g. the
 * loop heads "while (j= alloc_count)" and "for(i=0; iname=...", and the
 * earlier #include lines). Compare against the upstream direnum.c before
 * relying on the exact control flow shown here; the code bytes below are
 * preserved as found. */
int dir__enumerator(struct estat *this,
int est_count,
int give_by_name)
{
dir__handle dirhandle;
int size;
int count;
int i,j,l;
int sts_free;
int status;
/* Struct \a estat pointer for temporary use. */
struct estat *sts=NULL;
/* The estimated number of entries. */
int alloc_count;
/* Stores the index of the next free byte in \a strings. */
int mark;
/* Filename storage space. Gets stored in the directories \a ->strings
 * for memory management purposes. */
void *strings=NULL;
/* Array of filenames. As the data space potentially has to be
 * reallocated at first only the offsets into \a *strings is stored.
 * These entries must be of the same size as a pointer, because the array
 * is reused as \c sts_array[] .*/
long *names=NULL;
/* The buffer space, used as a struct \a fsvs_dirent */
char buffer[FREE_SPACE];
/* points into and walks over the \a buffer */
fsvs_dirent *p_de;
/* Array of the struct \a estat pointers. Reuses the storage space
 * of the \a names Array. */
struct estat **sts_array=NULL;
/* Array of inodes. */
ino_t *inode_numbers=NULL;
/* Enumerate the current working directory. */
STOPIF( dir__start_enum(&dirhandle, "."), NULL);
if (!this->st.size)
STOPIF( dir__get_dir_size(dirhandle, &(this->st)), NULL);
/* At least a long for the inode number, and 3 characters +
 * a \0 per entry. But assume an average of 11 characters + \0.
 * If that's incorrect, we'll have to do an realloc. Oh, well.
 *
 * Another estimate which this function gets is the number of files
 * last time this directory was traversed.
 *
 * Should maybe be tunable in the future.
 *
 * (On my system I have an average of 13.9 characters per entry,
 * without the \0) */
alloc_count=this->st.size/(sizeof(*p_de) - sizeof(p_de->d_name) +
ESTIMATED_ENTRY_LENGTH +1);
/* + ca. 20% */
est_count= (est_count*19)/16 +1;
if (alloc_count > est_count) est_count=alloc_count;
/* on /proc, which gets reported with 0 bytes,
 * only 1 entry is allocated. This entry multiplied with 19/16
 * is still 1 ... crash.
 * So all directories reported with 0 bytes are likely virtual
 * file systems, which can have _many_ entries ... */
if (est_count < 32) est_count=32;
size=FREE_SPACE + est_count*( ESTIMATED_ENTRY_LENGTH + 1 );
STOPIF( hlp__alloc( &strings, size), NULL);
mark=count=0;
inode_numbers=NULL;
names=NULL;
alloc_count=0;
/* read the directory and count entries */
while ( (i=dir__enum(dirhandle, (fsvs_dirent*)buffer, sizeof(buffer))) >0)
{
/* count entries, copy name and inode nr */
j=0;
while (j= alloc_count)
{
/* If we already started, put a bit more space here.
 * Should maybe be configurable. */
if (!alloc_count)
alloc_count=est_count;
else
alloc_count=alloc_count*19/16;
STOPIF( hlp__realloc( &names, alloc_count*sizeof(*names)), NULL);
/* temporarily we store the inode number in the *entries_by_inode
 * space; that changes when we've sorted them. */
STOPIF( hlp__realloc( &inode_numbers,
alloc_count*sizeof(*inode_numbers)), NULL);
}
p_de=(fsvs_dirent*)(buffer+j);
DEBUGP("found %llu %s", (t_ull)p_de->d_ino, p_de->d_name);
if (p_de->d_name[0] == '.' &&
((p_de->d_name[1] == '\0') ||
(p_de->d_name[1] == '.' &&
p_de->d_name[2] == '\0')) )
{
/* just ignore . and .. */
}
else
{
/* store inode for sorting */
inode_numbers[count] = p_de->d_ino;
/* Store pointer to name.
 * In case of a realloc all pointers to the strings would get
 * invalid. So don't store the addresses now - only offsets. */
names[count] = mark;
/* copy name, mark space as used */
l=strlen(p_de->d_name);
strcpy(strings+mark, p_de->d_name);
mark += l+1;
count++;
}
/* next */
j += p_de->d_reclen;
}
/* Check for free space.
 * We read at most FREE_SPACE bytes at once,
 * so it's enough to have FREE_SPACE bytes free.
 * Especially because there are some padding and pointer bytes
 * which get discarded. */
if (size-mark < FREE_SPACE)
{
/* Oh no. Have to reallocate.
 * But we can hope that this (big) chunk is on the top
 * of the heap, so that it won't be copied elsewhere.
 *
 * How much should we add? For now, just give about 30%. */
/* size*21: Let's hope that this won't overflow :-) */
size=(size*21)/16;
/* If +20% is not at least the buffer size (FREE_SPACE),
 * take at least that much memory. */
if (size < mark+FREE_SPACE) size=mark+FREE_SPACE;
STOPIF( hlp__realloc( &strings, size), NULL);
DEBUGP("strings realloc(%p, %d)", strings, size);
}
}
STOPIF_CODE_ERR(i<0, errno, "getdents64");
DEBUGP("after loop found %d entries, %d bytes string-space", count, mark);
this->entry_count=count;
/* Free allocated, but not used, memory. */
STOPIF( hlp__realloc( &strings, mark), NULL);
/* If a _down_-sizing ever gives an error, we're really botched.
 * But if it's an empty directory, a NULL pointer will be returned. */
BUG_ON(mark && !strings);
this->strings=strings;
/* Now this space is used - don't free. */
strings=NULL;
/* Same again. Should never be NULL, as the size is never 0. */
STOPIF( hlp__realloc( &inode_numbers,
(count+1)*sizeof(*inode_numbers)), NULL);
STOPIF( hlp__realloc( &names, (count+1)*sizeof(*names)), NULL);
/* Store end-of-array markers */
inode_numbers[count]=0;
names[count]=0;
/* Now we know exactly how many entries, we build the array for sorting.
 * We don't do that earlier, because resizing (and copying!)
 * is slow. Doesn't matter as much if it's just pointers,
 * but for bigger structs it's worth avoiding.
 * Most of the structures get filled only after sorting! */
/* We reuse the allocated array for names (int**) for storing
 * the (struct estat**). */
sts_array=(struct estat**)names;
sts_free=0;
for(i=0; iname=this->strings + names[i];
sts->st.ino=inode_numbers[i];
/* now the data is copied, we store the pointer. */
sts_array[i] = sts;
sts++;
sts_free--;
}
/* now names is no longer valid - space was taken by sts_array. */
names=NULL;
this->by_inode=sts_array;
/* Now the space is claimed otherwise - so don't free. */
sts_array=NULL;
/* See inodeSort */
STOPIF( dir__sortbyinode(this), NULL);
// for(i=0; id_ino, de[i]->d_name);
for(i=0; iby_inode[i];
sts->parent=this;
sts->repos_rev=SVN_INVALID_REVNUM;
status=hlp__lstat(sts->name, &(sts->st));
if (abs(status) == ENOENT)
{
DEBUGP("entry \"%s\" not interesting - maybe a fifo or socket?",
sts->name);
sts->to_be_ignored=1;
}
else
STOPIF( status, "lstat(%s)", sts->name);
/* New entries get that set, because they're "updated". */
sts->old_rev_mode_packed = sts->local_mode_packed=
MODE_T_to_PACKED(sts->st.mode);
}
/* Possibly return list sorted by name. */
if (give_by_name)
STOPIF(dir__sortbyname(this), NULL);
else
/* should not be needed - but it doesn't hurt, either. */
this->by_name=NULL;
status=0;
ex:
IF_FREE(strings);
IF_FREE(names);
IF_FREE(inode_numbers);
IF_FREE(sts_array);
if (dirhandle>=0) dir__close(dirhandle);
return status;
}
fsvs-fsvs-1.2.12/src/direnum.h 0000664 0000000 0000000 00000002656 14536317137 0016163 0 ustar 00root root 0000000 0000000 /************************************************************************
* Copyright (C) 2005-2008 Philipp Marek.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 3 as
* published by the Free Software Foundation.
************************************************************************/
#ifndef __DIRENUM_H__
#define __DIRENUM_H__
/** \file
* Directory enumerator header file. */
// for alphasort
#include
#include "global.h"
/** This function reads a directory into a self-allocated memory area. */
int dir__enumerator(struct estat *this,
int est_count,
int by_name) ;
/** Sorts the entries of the directory \a sts by name into the
* estat::by_name array, which is reallocated and NULL-terminated. */
int dir__sortbyname(struct estat *sts);
/** Sorts the existing estat::by_inode array afresh, by device/inode. */
int dir__sortbyinode(struct estat *sts);
int dir___f_sort_by_inode(struct estat **a, struct estat **b);
int dir___f_sort_by_inodePP(struct estat *a, struct estat *b);
int dir___f_sort_by_name(const void *a, const void *b);
int dir___f_sort_by_nameCC(const void *a, const void *b);
int dir___f_sort_by_nameCS(const void *a, const void *b);
/** How many bytes an average filename needs.
* Measured on a debian system:
* \code
* find / -printf "%f\n" | wc
* \endcode
* */
#define ESTIMATED_ENTRY_LENGTH (15)
#endif
fsvs-fsvs-1.2.12/src/doc.g-c 0000664 0000000 0000000 00000073406 14536317137 0015505 0 ustar 00root root 0000000 0000000 /* This file is generated, do not edit!
* Last done on Mon Nov 7 09:41:43 2022
* */
const char hlp_add[]=" fsvs add [-u URLNAME] PATH [PATH...]\n"
"\n"
" With this command you can explicitly define entries to be versioned,\n"
" even if they have a matching ignore pattern. They will be sent to the\n"
" repository on the next commit, just like other new entries, and will\n"
" therefore be reported as New .\n"
"\n"
" The -u option can be used if you're have more than one URL defined for\n"
" this working copy and want to have the entries pinned to the this URL.\n"
"\n";
const char hlp_unvers[]=" fsvs unversion PATH [PATH...]\n"
"\n"
" This command flags the given paths locally as removed. On the next\n"
" commit they will be deleted in the repository, and the local\n"
" information of them will be removed, but not the entries themselves. So\n"
" they will show up as New again, and you get another chance at ignoring\n"
" them.\n"
"\n";
const char hlp_build[]=" This is used mainly for debugging. It traverses the filesystem and\n"
" builds a new entries file. In production it should not be used; as\n"
" neither URLs nor the revision of the entries is known, information is\n"
" lost by calling this function!\n"
"\n"
" Look at sync-repos.\n"
"\n";
const char hlp_delay[]=" This command delays execution until time has passed at least to the\n"
" next second after writing the data files used by FSVS (dir and urls).\n"
"\n"
" This command is for use in scripts; where previously the delay option\n"
" was used, this can be substituted by the given command followed by the\n"
" delay command.\n"
"\n"
" The advantage against the delay option is that read-only commands can\n"
" be used in the meantime.\n"
"\n"
" An example:\n"
" fsvs commit /etc/X11 -m \"Backup of X11\"\n"
" ... read-only commands, like \"status\"\n"
" fsvs delay /etc/X11\n"
" ... read-write commands, like \"commit\"\n"
"\n"
" The optional path can point to any path in the WC.\n"
"\n"
" In the testing framework it is used to save a bit of time; in normal\n"
" operation, where FSVS commands are not so tightly packed, it is\n"
" normally preferable to use the delay option.\n"
"\n";
const char hlp_cat[]=" fsvs cat [-r rev] path\n"
"\n"
" Fetches a file repository, and outputs it to STDOUT. If no revision is\n"
" specified, it defaults to BASE, ie. the current local revision number\n"
" of the entry.\n"
"\n";
const char hlp_checko[]=" fsvs checkout [path] URL [URLs...]\n"
"\n"
" Sets one or more URLs for the current working directory (or the\n"
" directory path), and does an checkout of these URLs.\n"
"\n"
" Example:\n"
" fsvs checkout . http://svn/repos/installation/machine-1/trunk\n"
"\n"
" The distinction whether a directory is given or not is done based on\n"
" the\n"
" result of URL-parsing – if it looks like an URL, it is used as an URL.\n"
" Please mind that at most a single path is allowed; as soon as two\n"
" non-URLs are found an error message is printed.\n"
"\n"
" If no directory is given, \".\" is used; this differs from the usual\n"
" subversion usage, but might be better suited for usage as a recovery\n"
" tool (where versioning / is common). Opinions welcome.\n"
"\n"
" The given path must exist, and should be empty – FSVS will abort on\n"
" conflicts, ie. if files that should be created already exist.\n"
" If there's a need to create that directory, please say so; patches for\n"
" some parameter like -p are welcome.\n"
"\n"
" For a format definition of the URLs please see the chapter Format of\n"
" URLs and the urls and update commands.\n"
"\n"
" Furthermore you might be interested in Using an alternate root\n"
" directory and Recovery for a non-booting system.\n"
"\n";
const char hlp_commit[]=" fsvs commit [-m \"message\"|-F filename] [-v] [-C [-C]] [PATH [PATH ...]]\n"
" filename\n"
" static char * filename\n"
" Definition: update.c:90\n"
"\n"
" Commits (parts of) the current state of the working copy into the\n"
" repository.\n"
"\n";
const char hlp_cp[]=" fsvs cp [-r rev] SRC DEST\n"
" fsvs cp dump\n"
" fsvs cp load\n"
"\n"
" The copy command marks DEST as a copy of SRC at revision rev, so that\n"
" on the next commit of DEST the corresponding source path is sent as\n"
" copy source.\n"
"\n"
" The default value for rev is BASE, ie. the revision the SRC (locally)\n"
" is at.\n"
"\n"
" Please note that this command works always on a directory structure -\n"
" if you say to copy a directory, the whole structure is marked as copy.\n"
" That means that if some entries below the copy are missing, they are\n"
" reported as removed from the copy on the next commit.\n"
" (Of course it is possible to mark files as copied, too; non-recursive\n"
" copies are not possible, but can be emulated by having parts of the\n"
" destination tree removed.)\n"
"\n"
" Note\n"
" TODO: There will be differences in the exact usage - copy will\n"
" try to run the cp command, whereas copied will just remember the\n"
" relation.\n"
"\n"
" If this command are used without parameters, the currently defined\n"
" relations are printed; please keep in mind that the key is the\n"
" destination name, ie. the 2nd line of each pair!\n"
"\n"
" The input format for load is newline-separated - first a SRC line,\n"
" followed by a DEST line, then an line with just a dot ( \".\") as\n"
" delimiter. If you've got filenames with newlines or other special\n"
" characters, you have to give the paths as arguments.\n"
"\n"
" Internally the paths are stored relative to the working copy base\n"
" directory, and they're printed that way, too.\n"
"\n"
" Later definitions are appended to the internal database; to undo\n"
" mistakes, use the uncopy action.\n"
"\n"
" Note\n"
" Important: User-defined properties like fsvs:commit-pipe are not\n"
" copied to the destinations, because of space/time issues\n"
" (traversing through entire subtrees, copying a lot of\n"
" property-files) and because it's not sure that this is really\n"
" wanted. TODO: option for copying properties?\n"
"\n"
" Todo:\n"
" -0 like for xargs?\n"
"\n"
" Todo:\n"
" Are different revision numbers for load necessary? Should dump\n"
" print the source revision number?\n"
"\n"
" Todo:\n"
" Copying from URLs means update from there\n"
"\n"
" Note\n"
" As subversion currently treats a rename as copy+delete, the mv\n"
" command is an alias to cp.\n"
"\n"
" If you have a need to give the filenames dump or load as first\n"
" parameter for copyfrom relations, give some path, too, as in \"./dump\".\n"
"\n"
" Note\n"
" The source is internally stored as URL with revision number, so\n"
" that operations like these\n"
"\n"
" $ fsvs cp a b\n"
"\n"
" $ rm a/1\n"
"\n"
" $ fsvs ci a\n"
"\n"
" $ fsvs ci b\n"
" work - FSVS sends the old (too recent!) revision number as\n"
" source, and so the local filelist stays consistent with the\n"
" repository.\n"
" But it is not implemented (yet) to give an URL as copyfrom\n"
" source directly - we'd have to fetch a list of entries (and\n"
" possibly the data!) from the repository.\n"
"\n"
" Todo:\n"
" Filter for dump (patterns?).\n"
"\n";
const char hlp_copyfr[]=" fsvs copyfrom-detect [paths...]\n"
"\n"
" This command tells FSVS to look through the new entries, and see\n"
" whether it can find some that seem to be copied from others already\n"
" known.\n"
" It will output a list with source and destination path and why it could\n"
" match.\n"
"\n"
" This is just for information purposes and doesn't change any FSVS\n"
" state, (TODO: unless some option/parameter is set).\n"
"\n"
" The list format is on purpose incompatible with the load syntax, as the\n"
" best match normally has to be taken manually.\n"
"\n"
" Todo:\n"
" some parameter that just prints the \"best\" match, and outputs\n"
" the correct format.\n"
"\n"
" If verbose is used, an additional value giving the percentage of\n"
" matching blocks, and the count of possibly copied entries is printed.\n"
"\n"
" Example:\n"
" $ fsvs copyfrom-list -v\n"
" newfile1\n"
" md5:oldfileA\n"
" newfile2\n"
" md5:oldfileB\n"
" md5:oldfileC\n"
" md5:oldfileD\n"
" newfile3\n"
" inode:oldfileI\n"
" manber=82.6:oldfileF\n"
" manber=74.2:oldfileG\n"
" manber=53.3:oldfileH\n"
" ...\n"
" 3 copyfrom relations found.\n"
"\n"
" The abbreviations are:\n"
" md5\n"
"\n"
" The MD5 of the new file is identical to that of one or more already\n"
" committed files; there is no percentage.\n"
"\n"
" inode\n"
"\n"
" The device/inode number is identical to the given known entry; this\n"
" could mean that the old entry has been renamed or hardlinked. Note: Not\n"
" all filesystems have persistent inode numbers (eg. NFS) - so depending\n"
" on your filesystems this might not be a good indicator!\n"
"\n"
" name\n"
"\n"
" The entry has the same name as another entry.\n"
"\n"
" manber\n"
"\n"
" Analysing files of similar size shows some percentage of\n"
" (variable-sized) common blocks (ignoring the order of the blocks).\n"
"\n"
" dirlist\n"
"\n"
" The new directory has similar files to the old directory.\n"
" The percentage is (number_of_common_entries)/(files_in_dir1 +\n"
" files_in_dir2 - number_of_common_entries).\n"
"\n"
" Note\n"
" manber matching is not implemented yet.\n"
" If too many possible matches for an entry are found, not all are\n"
" printed; only an indicator ... is shown at the end.\n"
"\n";
const char hlp_uncp[]=" fsvs uncopy DEST [DEST ...]\n"
"\n"
" The uncopy command removes a copyfrom mark from the destination entry.\n"
" This will make the entry unknown again, and reported as New on the next\n"
" invocations.\n"
"\n"
" Only the base of a copy can be un-copied; if a directory structure was\n"
" copied, and the given entry is just implicitly copied, this command\n"
" will return an error.\n"
"\n"
" This is not folded in revert, because it's not clear whether revert on\n"
" copied, changed entries should restore the original copyfrom data or\n"
" remove the copy attribute; by using another command this is no longer\n"
" ambiguous.\n"
"\n"
" Example:\n"
" $ fsvs copy SourceFile DestFile\n"
" # Whoops, was wrong!\n"
" $ fsvs uncopy DestFile\n"
"\n";
const char hlp_diff[]=" fsvs diff [-v] [-r rev[:rev2]] [-R] PATH [PATH...]\n"
"\n"
" This command gives you diffs between local and repository files.\n"
"\n"
" With -v the meta-data is additionally printed, and changes shown.\n"
"\n"
" If you don't give the revision arguments, you get a diff of the base\n"
" revision in the repository (the last commit) against your current local\n"
" file. With one revision, you diff this repository version against your\n"
" local file. With both revisions given, the difference between these\n"
" repository versions is calculated.\n"
"\n"
" You'll need the diff program, as the files are simply passed as\n"
" parameters to it.\n"
"\n"
" The default is to do non-recursive diffs; so fsvs diff . will output\n"
" the changes in all files in the current directory and below.\n"
"\n"
" The output for special files is the diff of the internal subversion\n"
" storage, which includes the type of the special file, but no newline at\n"
" the end of the line (which diff complains about).\n"
"\n"
" For entries marked as copy the diff against the (clean) source entry is\n"
" printed.\n"
"\n"
" Please see also Options relating to the \"diff\" action and Using\n"
" colordiff.\n"
"\n"
" Todo:\n"
" Two revisions diff is buggy in that it (currently) always\n"
" fetches the full trees from the repository; this is not only a\n"
" performance degradation, but you'll see more changed entries\n"
" than you want (like changes A to B to A). This will be fixed.\n"
"\n";
const char hlp_export[]=" fsvs export REPOS_URL [-r rev]\n"
"\n"
" If you want to export a directory from your repository without storing\n"
" any FSVS-related data you can use this command.\n"
"\n"
" This restores all meta-data - owner, group, access mask and\n"
" modification time; its primary use is for data recovery.\n"
"\n"
" The data gets written (in the correct directory structure) below the\n"
" current working directory; if entries already exist, the export will\n"
" stop, so this should be an empty directory.\n"
"\n";
const char hlp_help[]=" help [command]\n"
"\n"
" This command shows general or specific help (for the given command). A\n"
" similar function is available by using -h or -? after a command.\n"
"\n";
const char hlp_groups[]=" fsvs groups dump|load\n"
" fsvs groups [prepend|append|at=n] group-definition [group-def ...]\n"
" fsvs ignore [prepend|append|at=n] pattern [pattern ...]\n"
" fsvs groups test [-v|-q] [pattern ...]\n"
"\n"
" This command adds patterns to the end of the pattern list, or, with\n"
" prepend, puts them at the beginning of the list. With at=x the patterns\n"
" are inserted at the position x , counting from 0.\n"
"\n"
" The difference between groups and ignore is that groups requires a\n"
" group name, whereas the latter just assumes the default group ignore.\n"
"\n"
" For the specification please see the related documentation .\n"
"\n"
" fsvs dump prints the patterns to STDOUT . If there are special\n"
" characters like CR or LF embedded in the pattern without encoding (like\n"
" \\r or \\n), the output will be garbled.\n"
"\n"
" The patterns may include * and ? as wildcards in one directory level,\n"
" or ** for arbitrary strings.\n"
"\n"
" These patterns are only matched against new (not yet known) files;\n"
" entries that are already versioned are not invalidated.\n"
" If the given path matches a new directory, entries below aren't found,\n"
" either; but if this directory or entries below are already versioned,\n"
" the pattern doesn't work, as the match is restricted to the directory.\n"
"\n"
" So:\n"
" fsvs ignore ./tmp\n"
"\n"
" ignores the directory tmp; but if it has already been committed,\n"
" existing entries would have to be unmarked with fsvs unversion.\n"
" Normally it's better to use\n"
" fsvs ignore ./tmp/**\n"
"\n"
" as that takes the directory itself (which might be needed after restore\n"
" as a mount point anyway), but ignore all entries below.\n"
" Currently this has the drawback that mtime changes will be reported and\n"
" committed; this is not the case if the whole directory is ignored.\n"
"\n"
" Examples:\n"
" fsvs group group:unreadable,mode:4:0\n"
" fsvs group 'group:secrets,/etc/*shadow'\n"
" fsvs ignore /proc\n"
" fsvs ignore /dev/pts\n"
" fsvs ignore './var/log/*-*'\n"
" fsvs ignore './**~'\n"
" fsvs ignore './**/*.bak'\n"
" fsvs ignore prepend 'take,./**.txt'\n"
" fsvs ignore append 'take,./**.svg'\n"
" fsvs ignore at=1 './**.tmp'\n"
" fsvs group dump\n"
" fsvs group dump -v\n"
" echo \"./**.doc\" | fsvs ignore load\n"
" # Replaces the whole list\n"
"\n"
" Note\n"
" Please take care that your wildcard patterns are not expanded by\n"
" the shell!\n"
"\n";
const char hlp_rign[]=" fsvs rel-ignore [prepend|append|at=n] path-spec [path-spec ...]\n"
" fsvs ri [prepend|append|at=n] path-spec [path-spec ...]\n"
"\n"
" If you keep the same repository data at more than one working copy on\n"
" the same machine, it will be stored in different paths - and that makes\n"
" absolute ignore patterns infeasible. But relative ignore patterns are\n"
" anchored at the beginning of the WC root - which is a bit tiring to\n"
" type if you're deep in your WC hierarchy and want to ignore some files.\n"
"\n"
" To make that easier you can use the rel-ignore (abbreviated as ri)\n"
" command; this converts all given path-specifications (which may include\n"
" wildcards as per the shell pattern specification above) to WC-relative\n"
" values before storing them.\n"
"\n"
" Example for /etc as working copy root:\n"
" fsvs rel-ignore '/etc/X11/xorg.conf.*'\n"
" cd /etc/X11\n"
" fsvs rel-ignore 'xorg.conf.*'\n"
"\n"
" Both commands would store the pattern \"./X11/xorg.conf.*\".\n"
"\n"
" Note\n"
" This works only for shell patterns.\n"
"\n"
" For more details about ignoring files please see the ignore command and\n"
" Specification of groups and patterns.\n"
"\n";
const char hlp_info[]=" fsvs info [-R [-R]] [PATH...]\n"
"\n"
" Use this command to show information regarding one or more entries in\n"
" your working copy.\n"
" You can use -v to obtain slightly more information.\n"
"\n"
" This may sometimes be helpful for locating bugs, or to obtain the URL\n"
" and revision a working copy is currently at.\n"
"\n"
" Example:\n"
" $ fsvs info\n"
" URL: file:\n"
" .... 200 .\n"
" Type: directory\n"
" Status: 0x0\n"
" Flags: 0x100000\n"
" Dev: 0\n"
" Inode: 24521\n"
" Mode: 040755\n"
" UID/GID: 1000/1000\n"
" MTime: Thu Aug 17 16:34:24 2006\n"
" CTime: Thu Aug 17 16:34:24 2006\n"
" Revision: 4\n"
" Size: 200\n"
"\n"
" The default is to print information about the given entry only. With a\n"
" single -R you'll get this data about all entries of a given directory;\n"
" with another -R you'll get the whole (sub-)tree.\n"
"\n";
const char hlp_log[]=" fsvs log [-v] [-r rev1[:rev2]] [-u name] [path]\n"
"\n"
" This command views the revision log information associated with the\n"
" given path at its topmost URL, or, if none is given, the highest\n"
" priority URL.\n"
"\n"
" The optional rev1 and rev2 can be used to restrict the revisions that\n"
" are shown; if no values are given, the logs are given starting from\n"
" HEAD downwards, and then a limit on the number of revisions is applied\n"
" (but see the limit option).\n"
"\n"
" If you use the -v -option, you get the files changed in each revision\n"
" printed, too.\n"
"\n"
" There is an option controlling the output format; see the log_output\n"
" option.\n"
"\n"
" Optionally the name of an URL can be given after -u; then the log of\n"
" this URL, instead of the topmost one, is shown.\n"
"\n"
" TODOs:\n"
" * --stop-on-copy\n"
" * Show revision for all URLs associated with a working copy? In which\n"
" order?\n"
"\n";
const char hlp_prop_g[]=" fsvs prop-get PROPERTY-NAME PATH...\n"
"\n"
" Prints the data of the given property to STDOUT.\n"
"\n"
" Note\n"
" Be careful! This command will dump the property as it is, ie.\n"
" with any special characters! If there are escape sequences or\n"
" binary data in the property, your terminal might get messed up!\n"
" If you want a safe way to look at the properties, use prop-list\n"
" with the -v parameter.\n"
"\n";
const char hlp_prop_s[]=" fsvs prop-set [-u URLNAME] PROPERTY-NAME VALUE PATH...\n"
"\n"
" This command sets an arbitrary property value for the given path(s).\n"
"\n"
" Note\n"
" Some property prefixes are reserved; currently everything\n"
" starting with svn: throws a (fatal) warning, and fsvs: is\n"
" already used, too. See Special property names.\n"
"\n"
" If you're using a multi-URL setup, and the entry you'd like to work on\n"
" should be pinned to a specific URL, you can use the -u parameter; this\n"
" is like the add command, see there for more details.\n"
"\n";
const char hlp_prop_d[]=" fsvs prop-del PROPERTY-NAME PATH...\n"
"\n"
" This command removes a property for the given path(s).\n"
"\n"
" See also prop-set.\n"
"\n";
const char hlp_prop_l[]=" fsvs prop-list [-v] PATH...\n"
"\n"
" Lists the names of all properties for the given entry.\n"
" With -v, the value is printed as well; special characters will be\n"
" translated, as arbitrary binary sequences could interfere with your\n"
" terminal settings.\n"
"\n"
" If you need raw output, post a patch for --raw, or write a loop with\n"
" prop-get.\n"
"\n";
const char hlp_remote[]=" fsvs remote-status PATH [-r rev]\n"
"\n"
" This command looks into the repository and tells you which files would\n"
" get changed on an update - it's a dry-run for update .\n"
"\n"
" Per default it compares to HEAD, but you can choose another revision\n"
" with the -r parameter.\n"
"\n"
" Please see the update documentation for details regarding multi-URL\n"
" usage.\n"
"\n";
const char hlp_resolv[]=" fsvs resolve PATH [PATH...]\n"
"\n"
" When FSVS tries to update local files which have been changed, a\n"
" conflict might occur. (For various ways of handling these please see\n"
" the conflict option.)\n"
"\n"
" This command lets you mark such conflicts as resolved.\n"
"\n";
/** Help text for the "revert" command (plain-text rendering of the doxygen docs).
 * Fix: grammar in the second bullet of the user-visible text -
 * "For a already versioned entry" -> "For an already versioned entry". */
const char hlp_revert[]=" fsvs revert [-rRev] [-R] PATH [PATH...]\n"
"\n"
" This command undoes local modifications:\n"
" * An entry that is marked to be unversioned gets this flag removed.\n"
" * For an already versioned entry (existing in the repository) the\n"
" local entry is replaced with its repository version, and its status\n"
" and flags are cleared.\n"
" * An entry that is a modified copy destination gets reverted to the\n"
" copy source data.\n"
" * Manually added entries are changed back to \"N\"ew.\n"
"\n"
" Please note that implicitly copied entries, ie. entries that are marked\n"
" as copied because some parent directory is the base of a copy, can not\n"
" be un-copied; they can only be reverted to their original (copied-from)\n"
" data, or removed.\n"
"\n"
" If you want to undo a copy operation, please see the uncopy command.\n"
"\n"
" See also HOWTO: Understand the entries' statii.\n"
"\n"
" If a directory is given on the command line all versioned entries in\n"
" this directory are reverted to the old state; this behaviour can be\n"
" modified with -R/-N, or see below.\n"
"\n"
" The reverted entries are printed, along with the status they had before\n"
" the revert (because the new status is per definition unchanged).\n"
"\n"
" If a revision is given, the entries' data is taken from this revision;\n"
" furthermore, the new status of that entry is shown.\n"
"\n"
" Note\n"
" Please note that mixed revision working copies are not (yet)\n"
" possible; the BASE revision is not changed, and a simple revert\n"
" without a revision arguments gives you that.\n"
" By giving a revision parameter you can just choose to get the\n"
" text from a different revision.\n"
"\n";
/** Help text for the "status" command (plain-text rendering of the doxygen docs).
 * The lines starting with "*" form a preformatted table of status columns. */
const char hlp_status[]=" fsvs status [-C [-C]] [-v] [-f filter] [PATHs...]\n"
"\n"
" This command shows the entries that have been changed locally since the\n"
" last commit.\n"
"\n"
" The most important output formats are:\n"
" * A status columns of four (or, with -v , six) characters. There are\n"
" either flags or a \".\" printed, so that it's easily parsed by\n"
" scripts – the number of columns is only changed by -q, -v –\n"
" verbose/quiet.\n"
" * The size of the entry, in bytes, or \"dir\" for a directory, or \"dev\"\n"
" for a device.\n"
" * The path and name of the entry, formatted by the path option.\n"
"\n"
" Normally only changed entries are printed; with -v all are printed, but\n"
" see the filter option for more details.\n"
"\n"
" The status column can show the following flags:\n"
" * 'D' and 'N' are used for deleted and new entries.\n"
" * 'd' and 'n' are used for entries which are to be unversioned or\n"
" added on the next commit; the characters were chosen as little\n"
" delete (only in the repository, not removed locally) and little new\n"
" (although ignored). See add and unversion.\n"
" If such an entry does not exist, it is marked with an \"!\" in the\n"
" last column – because it has been manually marked, and so the\n"
" removal is unexpected.\n"
" * A changed type (character device to symlink, file to directory\n"
" etc.) is given as 'R' (replaced), ie. as removed and newly added.\n"
" * If the entry has been modified, the change is shown as 'C'.\n"
" If the modification or status change timestamps (mtime, ctime) are\n"
" changed, but the size is still the same, the entry is marked as\n"
" possibly changed (a question mark '?' in the last column) - but see\n"
" change detection for details.\n"
" * A 'x' signifies a conflict.\n"
" * The meta-data flag 'm' shows meta-data changes like properties,\n"
" modification timestamp and/or the rights (owner, group, mode);\n"
" depending on the -v/-q command line parameters, it may be split\n"
" into 'P' (properties), 't' (time) and 'p' (permissions).\n"
" If 'P' is shown for the non-verbose case, it means only property\n"
" changes, ie. the entries filesystem meta-data is unchanged.\n"
" * A '+' is printed for files with a copy-from history; to see the URL\n"
" of the copyfrom source, see the verbose option.\n"
"\n"
" Here's a table with the characters and their positions:\n"
"* Without -v With -v\n"
"* .... ......\n"
"* NmC? NtpPC?\n"
"* DPx! D x!\n"
"* R + R +\n"
"* d d\n"
"* n n\n"
"*\n"
"\n"
" Furthermore please take a look at the stat_color option, and for more\n"
" information about displayed data the verbose option.\n"
"\n";
const char hlp_sync_r[]=" fsvs sync-repos [-r rev] [working copy base]\n"
"\n"
" This command loads the file list afresh from the repository.\n"
" A following commit will send all differences and make the repository\n"
" data identical to the local.\n"
"\n"
" This is normally not needed; the only use cases are\n"
" * debugging and\n"
" * recovering from data loss in the $FSVS_WAA area.\n"
"\n"
" It might be of use if you want to backup two similar machines. Then you\n"
" could commit one machine into a subdirectory of your repository, make a\n"
" copy of that directory for another machine, and sync this other\n"
" directory on the other machine.\n"
"\n"
" A commit then will transfer only changed files; so if the two machines\n"
" share 2GB of binaries ( /usr , /bin , /lib , ...) then these 2GB are\n"
" still shared in the repository, although over time they will deviate\n"
" (as both committing machines know nothing of the other path with\n"
" identical files).\n"
"\n"
" This kind of backup could be substituted by two or more levels of\n"
" repository paths, which get overlaid in a defined priority. So the base\n"
" directory, which all machines derive from, will be committed from one\n"
" machine, and it's no longer necessary for all machines to send\n"
" identical files into the repository.\n"
"\n"
" The revision argument should only ever be used for debugging; if you\n"
" fetch a filelist for a revision, and then commit against later\n"
" revisions, problems are bound to occur.\n"
"\n"
" Note\n"
" There's issue 2286 in subversion which describes sharing\n"
" identical files in the repository in unrelated paths. By using\n"
" this relaxes the storage needs; but the network transfers would\n"
" still be much larger than with the overlaid paths.\n"
"\n";
const char hlp_update[]=" fsvs update [-r rev] [working copy base]\n"
" fsvs update [-u url@rev ...] [working copy base]\n"
"\n"
" This command does an update on the current working copy; per default\n"
" for all defined URLs, but you can restrict that via -u.\n"
"\n"
" It first reads all filelist changes from the repositories, overlays\n"
" them (so that only the highest-priority entries are used), and then\n"
" fetches all necessary changes.\n"
"\n";
const char hlp_urls[]=" fsvs urls URL [URLs...]\n"
" fsvs urls dump\n"
" fsvs urls load\n"
"\n"
" Initializes a working copy administrative area and connects the current\n"
" working directory to REPOS_URL. All commits and updates will be done to\n"
" this directory and against the given URL.\n"
"\n"
" Example:\n"
" fsvs urls http://svn/repos/installation/machine-1/trunk\n"
"\n"
" For a format definition of the URLs please see the chapter Format of\n"
" URLs.\n"
"\n"
" Note\n"
" If there are already URLs defined, and you use that command\n"
" later again, please note that as of 1.0.18 the older URLs are\n"
" not overwritten as before, but that the new URLs are appended to\n"
" the given list! If you want to start afresh, use something like\n"
"\n"
" true | fsvs urls load\n"
"\n";
// vi: filetype=c
fsvs-fsvs-1.2.12/src/dox/ 0000775 0000000 0000000 00000000000 14536317137 0015130 5 ustar 00root root 0000000 0000000 fsvs-fsvs-1.2.12/src/dox/HOWTO-BACKUP.dox 0000664 0000000 0000000 00000015207 14536317137 0017514 0 ustar 00root root 0000000 0000000 /**
\defgroup howto A small collection of HOW-TOs
\ingroup userdoc
Here you see a small collection of HOW-TOs.
These aim to give you a small overview about common tasks.
The paths and examples are based on a current Debian/Testing, but should
be easily transferable to other Linux distributions or other UNIXes.
*/
/**
\defgroup howto_backup HOWTO: Backup
\ingroup howto
This document is a step-by-step explanation how to do backups using FSVS.
\section howto_backup_prep Preparation
If you're going to back up your system, you have to decide what you want
to have stored in your backup, and what should be left out.
Depending on your system usage and environment you first have to decide:
- Do you only want to backup your data in \c /home?
- Less storage requirements
- In case of hardware crash the OS must be set up again
- Do you want to keep track of your configuration in \c /etc?
- Very small storage overhead
- Not much use for backup/restore, but shows what has been changed
- Or do you want to backup your whole installation, from \c / on?
- Whole system versioned, restore is only a few commands
- Much more storage space needed - typically you'd need at least a few
GB free space.
The next few moments should be spent thinking about the storage space for
the repository - will it be on the system harddisk, a secondary or an
external harddisk, or even off-site?
\note If you just created a fresh repository, you probably should create
the "default" directory structure for subversion - \c trunk, \c branches,
\c tags; this layout might be useful for your backups.\n The URL you'd
use in fsvs would go to \c trunk.
Possibly you'll have to take the available bandwidth into your
considerations; a single home directory may be backed up on a 56k modem,
but a complete system installation would likely need at least some kind
of DSL or LAN.
\note If this is a production box with sparse, small changes, you could
take the initial backup on a local harddisk, transfer the directory with
some media to the target machine, and switch the URLs.
A fair bit of time should go to a small investigation of which file patterns
and paths you do \b not want to back up.
- Backup files like \c *.bak, \c *~, \c *.tmp, and similar
- History files: .sh-history and similar in the home-directories
- Cache directories: your favourite browser might store many MB of cached
data in your home-directories
- Virtual system directories, like \c /proc and \c /sys, \c /dev/shmfs.
\section howto_backup_first_steps Telling FSVS what to do
Given \c $WC as the working directory - the base of the data you'd
like backed up (\c /, \c /home), and \c $URL as a valid subversion URL to
your (already created) repository path.
Independent of all these details the first steps look like these:
\code
cd $WC
fsvs urls $URL
\endcode
Now you have to say what should be ignored - that'll differ depending on
your needs/wishes.
\code
fsvs ignore './§**~' './§**.tmp' './§**.bak'
fsvs ignore ./proc/ ./sys/ ./tmp/
fsvs ignore ./var/tmp/ ./var/spool/lpd/
fsvs ignore './var/log/§*.gz'
fsvs ignore ./var/run/ /dev/pts/
fsvs ignore './etc/*.dpkg-dist' './etc/*.dpkg-new'
fsvs ignore './etc/*.dpkg-old' './etc/*.dpkg-bak'
\endcode
\note \c /var/run is for transient files; I've heard reports that \ref revert
"reverting" files there can cause problems with running programs.\n
Similar for \c /dev/pts - if that's a \c devpts filesystem, you'll run into
problems on \ref update or \ref revert - as FSVS won't be allowed to create
entries in this directory.
Now you may find that you'd like to have some files encrypted in your
backup - like \c /etc/shadow, or your \c .ssh/id_* files. So you tell
fsvs to en/decrypt these files:
\code
fsvs propset fsvs:commit-pipe 'gpg -er {your backup key}' /etc/shadow /etc/gshadow
fsvs propset fsvs:update-pipe 'gpg -d' /etc/shadow /etc/gshadow
\endcode
\note These are just examples. You'll probably have to exclude some other
paths and patterns from your backup, and mark some others as
to-be-filtered.
\section howto_backup_first_commit The first backup
\code
fsvs commit -m "First commit."
\endcode
That's all there is to it!
\section howto_backup_usage Further use and maintenance
The further usage is more or less the \c commit command from the last
section. \n
When do you have to do some manual work?
- When ignore patterns change.
- New filesystems that should be ignored, or would be ignored but
shouldn't
- You find that your favorite word-processor leaves many *.segv files
behind, and similar things
- If you get an error message from fsvs, check the arguments and retry.
In desperate cases (or just because it's quicker than debugging
yourself) create a github issue.
\section howto_backup_restore Restoration in a working system
Depending on the circumstances you can take different ways to restore
data from your repository.
- "fsvs export" allows you to just dump some repository data
into your filesystem - eg. into a temporary directory to sort things
out.
- Using "fsvs revert" you can get older revisions of a
given file, directory or directory tree inplace. \n
- Or you can do a fresh checkout - set an URL in an (empty) directory,
and update to the needed revision.
- If everything else fails (no backup media with fsvs on it), you can use
subversion commands (eg. \c export) to restore needed parts, and update
the rest with fsvs.
\section howto_backup_recovery Recovery for a non-booting system
In case of a real emergency, when your harddisks crashed or your
filesystem was eaten and you have to re-partition or re-format, you
should get your system working again by
- booting from a knoppix or some other Live-CD (with FSVS on it),
- partition/format as needed,
- mount your harddisk partitions below eg. \c /mnt,
- and then recovering by
\code
$ cd /mnt
$ export FSVS_CONF=/etc/fsvs # if non-standard
$ export FSVS_WAA=/var/spool/fsvs # if non-standard
$ fsvs checkout -o softroot=/mnt
\endcode
If somebody asks really nice I'd possibly even create a \c recovery
command that deduces the \c softroot parameter from the current working
directory.
For more information please take a look at \ref o_softroot.
\section howto_backup_feedback Feedback
If you've got any questions, ideas, wishes or other feedback, please tell me.
Thank you!
*/
// vi: filetype=doxygen spell spelllang=en_us
fsvs-fsvs-1.2.12/src/dox/HOWTO-MASTER_LOCAL.dox 0000664 0000000 0000000 00000025005 14536317137 0020451 0 ustar 00root root 0000000 0000000 /**
\defgroup howto_master_local HOWTO: Master/Local repositories
\ingroup howto
This HOWTO describes how to use a single working copy with multiple
repositories.
Please read the \ref howto_backup first, to know about basic steps using
FSVS.
\section howto_masloc_ratio Rationale
If you manage a lot of machines with similar or identical software, you
might notice that it's a bit of work keeping them all up-to-date. Sure,
automating distribution via rsync or similar is easy; but then you get
identical machines, or you have to play with lots of exclude patterns to
keep the needed differences.
Here another way is presented; and even if you don't want to use FSVS for
distributing your files, the ideas presented here might help you keep
your machines under control.
\section howto_masloc_prep Preparation, repository layout
In this document the basic assumption is that there is a group of (more
or less identical) machines, that share most of their filesystems.
Some planning should be done beforehand; while the ideas presented here
might suffice for simple versioning, your setup can require a bit of
thinking ahead.
This example uses some distinct repositories, to achieve a bit more
clarity; of course these can simply be different paths in a single
repository (see \ref howto_masloc_single_repos for an example
configuration).
Repository in URL \c base:
\code
trunk/
bin/
ls
true
lib/
libc6.so
modules/
sbin/
mkfs
usr/
local/
bin/
sbin/
tags/
branches/
\endcode
Repository in URL \c machine1 (similar for machine2):
\code
trunk/
etc/
HOSTNAME
adjtime
network/
interfaces
passwd
resolv.conf
shadow
var/
log/
auth.log
messages
tags/
branches/
\endcode
\subsection howto_masloc_prep_user User data versioning
If you want to keep the user data versioned, too, an idea might be to start
a new working copy in \b every home directory; this way
- the system- and (several) user-commits can be run in parallel,
- the intermediate \c home directory in the repository is not needed, and
- you get a bit more isolation (against FSVS failures, out-of-space errors
and similar).
- Furthermore FSVS can work with smaller file sets, which helps performance
a bit (less dentries to cache at once, less memory used, etc.).
\code
A/
Andrew/
.bashrc
.ssh/
.kde/
Alexander/
.bashrc
.ssh/
.kde/
B/
Bertram/
\endcode
A cronjob could simply loop over the directories in \c /home, and
call fsvs for each one; giving a target URL name is not necessary if every
home-directory is its own working copy.
\note URL names can include a forward slash \c / in their name, so you
might give the URLs names like \c home/Andrew - although that should not
be needed, if every home directory is a distinct working copy.
\section howto_masloc_using Using master/local repositories
Imagine having 10 similar machines with the same base-installation.
Then you install one machine, commit that into the repository as
\c base/trunk, and make a copy as \c base/released.
The other machines get \c base/released as checkout source, and another
(overlaid) from eg. \c machine1/trunk. \n
Per-machine changes are always committed into the \c machineX/trunk of the
per-machine repository; this would be the host name, IP address, and similar
things.
On the development machine all changes are stored into \c base/trunk; if
you're satisfied with your changes, you merge them (see \ref
howto_masloc_branches) into \c base/released, whereupon all other machines
can update to this latest version.
So by looking at \c machine1/trunk you can see the history of the
machine-specific changes; and in \c base/released you can check out every
old version to verify problems and bugs.
\note You can take this system a bit further: optional software packages
could be stored in other subtrees. They should be of lower priority than
the base tree, so that in case of conflicts the base should always be
preferred (but see \ref howto_masloc_note_1).
Here is a small example; \c machine1 is the development machine, \c
machine2 is a \e client.
\code
machine1$ fsvs urls name:local,P:200,svn+ssh://lserver/per-machine/machine1/trunk
machine1$ fsvs urls name:base,P:100,http://bserver/base-install1/trunk
# Determine differences, and commit them
machine1$ fsvs ci -o commit_to=local /etc/HOSTNAME /etc/network/interfaces /var/log
machine1$ fsvs ci -o commit_to=base /
\endcode
Now you've got a base-install in your repository, and can use that on the
other machine:
\code
machine2$ fsvs urls name:local,P:200,svn+ssh://lserver/per-machine/machine2/trunk
machine2$ fsvs urls name:base,P:100,http://bserver/base-install1/trunk
machine2$ fsvs sync-repos
# Now you see differences of this machine's installation against the other:
machine2$ fsvs st
# You can see what is different:
machine2$ fsvs diff /etc/X11/xorg.conf
# You can take the base installations files:
machine2$ fsvs revert /bin/ls
# And put the files specific to this machine into its repository:
machine2$ fsvs ci -o commit_to=local /etc/HOSTNAME /etc/network/interfaces /var/log
\endcode
Now, if this machine has a harddisk failure or needs setup for any other
reason, you boot it (eg. via PXE, Knoppix or whatever), and do (\ref
howto_masloc_note_3)
\code
# Re-partition and create filesystems (if necessary)
machine2-knoppix$ fdisk ...
machine2-knoppix$ mkfs ...
# Mount everything below /mnt
machine2-knoppix$ mount /mnt/[...]
machine2-knoppix$ cd /mnt
# Do a checkout below /mnt
machine2-knoppix$ fsvs co -o softroot=/mnt
\endcode
\section howto_masloc_branches Branching, tagging, merging
Other names for your branches (instead of \c trunk, \c tags and \c
branches) could be \c unstable, \c testing, and \c stable; your production
machines would use \c stable, your testing environment \c testing, and in
\c unstable you'd commit all your daily changes.
\note Please note that there's no merging mechanism in FSVS; and as far as
I'm concerned, there won't be. Subversion just gets automated merging
mechanisms, and these should be fine for this usage too. (\ref
howto_masloc_note_4)
\subsection howto_masloc_branch_tags Thoughts about tagging
Tagging works just like normally; although you need to remember to tag more
than a single branch. Maybe FSVS should get some knowledge about the
subversion repository layout, so a fsvs tag would tag all
repositories at once? It would have to check for duplicate tag-names (eg. on
the \c base -branch), and just keep it if it had the same copyfrom-source.
But how would tags be used? Define them as source URL, and checkout? Would
be a possible case.
Or should fsvs tag do a \e merge into the repository, so that a
single URL contains all files currently checked out, with copyfrom-pointers
to the original locations? Would require using a single repository, as such
pointers cannot be across different repositories. If the committed data
includes the \c $FSVS_CONF/.../Urls file, the original layout would be known,
too - although to use it a \ref sync-repos would be necessary.
\section howto_masloc_single_repos Using a single repository
A single repository would have to be partitioned in the various branches
that are needed for bookkeeping; see these examples.
Depending on the number of machines it might make sense to put them in a 1-
or 2 level deep hierarchy; named by the first character, like
\code
machines/
A/
Axel/
Andreas/
B/
Berta/
G/
Gandalf/
\endcode
\subsection howto_masloc_single_simple Simple layout
Here only the base system gets branched and tagged; the machines simply
backup their specific/localized data into the repository.
\code
# For the base-system:
trunk/
bin/
usr/
sbin/
tags/
tag-1/
branches/
branch-1/
# For the machines:
machines/
machine1/
etc/
passwd
HOSTNAME
machine2/
etc/
passwd
HOSTNAME
\endcode
\subsection howto_masloc_single_per_area Per-area
Here every part gets its \c trunk, \c branches and \c tags:
\code
base/
trunk/
bin/
sbin/
usr/
tags/
tag-1/
branches/
branch-1/
machine1/
trunk/
etc/
passwd
HOSTNAME
tags/
tag-1/
branches/
machine2/
trunk/
etc/
passwd
HOSTNAME
tags/
branches/
\endcode
\subsection howto_masloc_single_common_ttb Common trunk, tags, and branches
Here the base-paths \c trunk, \c tags and \c branches are shared:
\code
trunk/
base/
bin/
sbin/
usr/
machine2/
etc/
passwd
HOSTNAME
machine1/
etc/
passwd
HOSTNAME
tags/
tag-1/
branches/
branch-1/
\endcode
\section howto_masloc_notes Other notes
\subsection howto_masloc_note_1 1
Conflicts should not be automatically merged.
If two or more trees bring the same file, the file from the \e highest
tree wins - this way you always know the file data on your machines.
It's better if a single piece of software doesn't work, compared to a machine
that no longer boots or is no longer accessible (eg. by SSH).
So keep your base installation at highest priority, and you've got good
chances that you won't lose control in case of conflicting files.
\subsection howto_masloc_note_2 2
If you don't know which files are different in your installs,
- install two machines,
- commit the first into fsvs,
- do a \ref sync-repos on the second,
- and look at the \ref status output.
\subsection howto_masloc_note_3 3
If Debian includes FSVS in the near future, it could be included on the
next KNOPPIX, too!
Until then you'd need a custom boot CD, or copy the absolute minimum of
files to the harddisk before recovery.
There's a utility \c svntar available; it allows you to take a snapshot of
a subversion repository directly into a \c .tar -file, which you can easily
export to the destination machine. (Yes, it knows about the meta-data
properties FSVS uses, and stores them into the archive.)
\subsection howto_masloc_note_4 4
Why no file merging? Because all real differences are in the
per-machine files -- the files that are in the \c base repository are
changed only on a single machine, and so there's an unidirectional flow.
BTW, how would you merge your binaries, eg. \c /bin/ls?
\section howto_masloc_feedback Feedback
If you've got any questions, ideas, wishes or other feedback, please tell me.
Thank you!
*/
// vi: filetype=doxygen spell spelllang=en_us
fsvs-fsvs-1.2.12/src/dox/TIPS_TRICKS.dox 0000664 0000000 0000000 00000002756 14536317137 0017514 0 ustar 00root root 0000000 0000000 /**
\defgroup tips Tips and tricks
\ingroup userdoc
This is a list of tips and tricks that you might find useful.
\section tip_verbose Seeing the verbose status, but only changed entries
Sometimes the status \ref status_meta_changed "meta-data changed"
is not enough - the differentiation between \c mtime and the permission
attributes is needed.
For that the command line option \ref glob_opt_verb "-v" is used; but
this \e verbose mode also prints all entries, not only the changed.
To solve that the \ref glob_opt_filter "filter option" gets set; with the
value \c none (to reset the mask), and then with the wanted mask - to
restore the default the string \c "text,meta" could be set.
Example:
\code
$ fsvs status -v -f none,text,meta
$ fsvs status -v -f none,text,meta /etc
$ fsvs status -v -f none,text,meta some/dir another_dir and_a_file
\endcode
\section tip_perf Performance points
Some effort has been taken to get FSVS as fast as possible.
With 1.1.17 the default for checking for changes on files was altered,
to do a MD5-check of files with a changed modification time but the same
size (to avoid printing a \c "?" \ref status_possibly "as status");
if that affects your use-case badly you can use the \ref o_chcheck
"option" to get the old (fast) behavior.
Please note that not the whole file has to be read - the first changed
manber block (with averaged 128kB) terminates the check.
*/
// vi: filetype=doxygen spell spelllang=en_us
fsvs-fsvs-1.2.12/src/dox/dev.dox 0000664 0000000 0000000 00000023012 14536317137 0016420 0 ustar 00root root 0000000 0000000 /**
\addtogroup dev
\section dev_welcome Dear developers/debuggers,
thank you for your interest in fsvs.
I highly appreciate any help, tips and tricks, and even if it's just
a bug report I want to know that.
I'm also looking forward to documentation updates, and notifying me about
mistakes will be politely answered, too.
*/
/**
\defgroup dev_debug What to do in case of errors
\ingroup dev
First, please read the documentation to rule out the possibility that
it's just a badly written sentence that caused misunderstanding.
If you can't figure it out yourself, don't hesitate to write a bug report.
Please include the version you're running (output of fsvs -V), the
command line you're calling fsvs with, and the output it gives.
Furthermore it might help diagnosing if you tried with the \ref
glob_opt_verb "-v" parameter, and/or with \ref glob_opt_deb "-d"; but
please mind that there might be data in the dump that you don't want to
make public!
Send these things along with a description of what you wanted to do to
me or, if you like that alternative better, just file
an issue. \n
(The bugs I find and the things on my \c TODO are not in the issue tracker,
as I can't access it while on the train - and that's where I spend the most
time working on fsvs).
Please be aware that I possibly need more details or some other tries to
find out what goes wrong.
\section dev_devs People that like to help
If you know C and want to help with fsvs, Just Do It (R) :-)
Look into the \c TODO file, pick your favorite point, and implement it.
If you don't know C, but another programming language (like perl, python,
or shell-programming), you can help, too -- help write test scripts. \n
I mostly checked the positive behavior (ie. that something should happen
given a set of predefined state and parameters), but testing for wrong and
unexpected input makes sense, too.
If you don't know any programming language, you can still look at the
documentation and point me to parts which need clarifying, write documents
yourself, or just fix mistakes.
All contributions should \b please be sent as a unified diff, along with
a description of the change, and there's a good chance to have it
integrated into the fsvs code-base.
\note How to generate such a diff? \n
If you're using svn or svk to track fsvs usage, the "svn diff" or
"svk diff" commands should do what you want.
If you downloaded a \c .tar.gz or \c .tar.bz2, keep a pristine version in
some directory and make your changes in another copy. \n
When you're finished making changes, run the command
\code
diff -ur \e original \e new > \e my-changes.patch
\endcode
and send me that file.
*/
/**
\defgroup dev_design The internal design
\ingroup dev
\section dev_design_terms Terms used in this document
\subsection dev_des_t_entry Entry
In subversion speak an entry is either a directory, a symlink or a file;
In FSVS it can additionally be a block or character device. \n
Sockets and pipes are currently ignored, as they're typically re-created by
the various applications.
\subsection dev_des_t_waa WAA, CONF
Please see \ref waa_file.
\section dev_des_memory_layout In-memory layout
In memory fsvs builds a complete tree of the needed entries (\c struct \c
estat). They are referenced with the \c parent pointers upwards to the
root, and the \c estat::by_inode and \c estat::by_name downwards.
\subsection dev_des_mem_alloc Storage and allocation
Every directory entry can have a string space allocated, ie. space needed
for the filenames in this directory (and possibly sub-directories, too.)
On loading of the list in \c waa__input_tree() two memory ranges are
allocated - one for the struct estats read, and one for the
filenames.
Because of this \c free()ing of part of the entries is not possible; a
freelist for the struct estats is used, but the string space is
more or less permanent.
\section dev_des_algo Algorithms and assumption in the code
Generally I tried to use fast and simple algorithms better than \c O(n);
but it's possible that I forgot something.
\subsection dev_des_algo_dsearch Searching for an entry
Searching for an entry in a directory (in memory) is \c O(log2(n)), as I
use \c bsearch().
\subsection dev_des_algo_output Writing the entry list
Determining the correct order for writing the entries (in \c
waa__output_tree()) is optimized by having all lists pre-sorted; about half
the time (tested) a single compare is enough to determine the next written
entry.
\note Of course, to keep the lists sorted, a lot of comparisons have to be
made before waa__output_tree().
\subsection dev_des_algo_by estat::by_inode and estat::by_name
The \c by_inode and \c by_name members are pointers to arrays of pointers
to entries (:-); they must reference the same entries, only the order may
differ.
\c by_inode must (nearly) always be valid; \c by_name is optional.
The flag \c estat::to_be_sorted tells \c waa__output_tree() that the
order of the \c by_inode array might be wrong, and has to be re-sorted
before use.
While scanning for changes we use a global \c by_inode ordering, as this
is \b much faster than naive traversal; the \c by_name array is used for
comparing directories, to determine whether there are any new entries.
Both arrays \b must include a \c NULL -pointer at the end of the array.
\subsection dev_des_algo_manber Manber-Hash and MD5
To quickly find whether a given file has changed, and to send only the
changed bytes over the wire, we take a running hash (a Manber-Hash), and
whenever we find a "magic" value we take that as buffer end.
We calculate the MD5 of each buffer, and store them along with their
start offset in the file.
So on commit we can find identical blocks and send only those, and while
comparing we can return "changed" as soon as we find a difference.
\section dev_des_errors Error checking and stopping
Return codes are checked everywhere.
The return value of functions in this program is normally (int);
0 means success, something else an error.
Either this error is expected (like ENOENT for some operations) and handled,
or it must be returned to the caller.
Most of this is already defined in macros.
Typical function layout is like this (taken from waa.c):
\code
int waa__make_info_link(char *directory, char *name, char *dest)
{
int status;
char *path, *eos;
STOPIF( waa___get_waa_directory(directory, &path, &eos), NULL);
strcpy(eos, name);
...
if (access(path, F_OK) != 0)
STOPIF_CODE_ERR( symlink(dest, path) == -1,
errno, "cannot create informational symlink '%s' -> '%s'",
path, dest);
ex:
return status;
}
\endcode
When a function gets called by subversion libraries, we have to use
their return type.
Here an example from \c commit.c:
\code
svn_error_t *ci___set_props(void *baton,
struct estat *sts,
change_any_prop_t function,
apr_pool_t *pool)
{
const char *ccp;
svn_string_t *str;
int status;
svn_error_t *status_svn;
status=0;
...
if (sts->entry_type != FT_SYMLINK)
{
...
str=svn_string_createf (pool, "%u %s",
sts->st.uid, hlp__get_uname(sts->st.uid, "") );
STOPIF_SVNERR( function, (baton, propname_owner, str, pool) );
...
}
ex:
RETURN_SVNERR(status);
}
\endcode
The various \c STOPIF() -macros automatically print an error message and,
depending on the debug- and verbosity-flags given on the command line,
a back trace too.
Another special case is output to \c STDOUT; if we get an error \c EPIPE
here, we pass it up to main() as \c -EPIPE (to avoid confusion with writing
some other data), where it gets ignored. To avoid printing an error message
this is hardcoded in the \c STOPIF() macros.
Assertions should be checked by \c BUG_ON(condition, format_string, ...).
This will cause a segmentation violation, which (for debug builds) will
automatically attach a debugger (\c gdb, only if present on the system).
\section dev_des_comments Comments and documentation
FSVS is normalized to use doxygen format for the documentation:
"/§** ... *§/".
For non-trivial things it's practical to document the thoughts, too;
such internal documentation uses the normal C-style comments
("/§* ... *§/").
\subsection dev_des_slash_star /§* in documentation
In cases where a slash \c / and a star \c * have to be used in the
documentation, there's a hack by putting a paragraph symbol (\c \\c2\\xa7 in
UTF-8) between them, so that it doesn't break the comment block.
There's a perl hack for documentation generation, where these get removed.
\note For C this would not be strictly necessary; There's always the way of
putting a # if 0 block around that comment block. Doxygen doesn't
allow this; even if using a shell script (with comments indicated by \c #)
doxygen doesn't allow /§* or *§/.
\section dev_tests About the tests
\subsection dev_tests_delay Delays after commit
There have been a lot of "sleep 1" commands in the tests,
to get directories' mtime to change for new entries.
Now they are mostly changed to a simple "-o delay=yes" on the commit
just above, which should give us about half a second on average.
\note If FSVS has to be run for the check, it must wait until the other
instance has finished - else the dir-list file and so on won't be written;
so parallel checking via \c & and \c wait doesn't really work.
Simply putting delay=yes in the FSVS configuration file more than
doubled the run time of the tests - this was unacceptable to me.
*/
// vi: filetype=doxygen spell spelllang=en_us
fsvs-fsvs-1.2.12/src/dox/options.dox 0000664 0000000 0000000 00000067162 14536317137 0017353 0 ustar 00root root 0000000 0000000 /**
\defgroup options Further options for FSVS.
\ingroup userdoc
List of settings that modify FSVS' behaviour.
FSVS understands some options that modify its behaviour in various small
ways.
\section oh_overview Overview
\subsection o__hlist This document
This document lists all available options in FSVS, in an \ref o__list
"full listing" and in \ref o__groups "groups".
Furthermore you can see their \ref o__prio "relative priorities" and some
\ref o__examples "examples".
\subsection o__groups Semantic groups
- \ref oh_display
- \ref oh_diff
- \ref oh_commit
- \ref oh_performance
- \ref oh_base
- \ref oh_debug
\subsection o__list Sorted list of options
FSVS currently knows:
- \c all_removed - \ref o_all_removed
- \c author - \ref o_author
- \c change_check - \ref o_chcheck
- \c colordiff - \ref o_colordiff
- \c commit_to - \ref o_commit_to
- \c conflict - \ref o_conflict
- \c conf - \ref o_conf.
- \c config_dir - \ref o_configdir.
- \c copyfrom_exp - \ref o_copyfrom_exp
- \c debug_output - \ref o_debug_output
- \c debug_buffer - \ref o_debug_buffer
- \c delay - \ref o_delay
- \c diff_prg, \c diff_opt, \c diff_extra - \ref o_diff
- \c dir_exclude_mtime - \ref o_dir_exclude_mtime
- \c dir_sort - \ref o_dir_sort
- \c empty_commit - \ref o_empty_commit
- \c empty_message - \ref o_empty_msg
- \c filter - \ref o_filter, but see \ref glob_opt_filter "-f".
- \c group_stats - \ref o_group_stats.
- \c limit - \ref o_logmax
- \c log_output - \ref o_logoutput
- \c merge_prg, \c merge_opt - \ref o_merge
- \c mkdir_base - \ref o_mkdir_base
- \c password - \ref o_passwd
- \c path - \ref o_opt_path
- \c softroot - \ref o_softroot
- \c stat_color - \ref o_status_color
- \c stop_change - \ref o_stop_change
- \c verbose - \ref o_verbose
- \c warning - \ref o_warnings, but see \ref glob_opt_warnings "-W".
- \c waa - \ref o_waa "waa".
\subsection o__prio Priorities for option setting
The priorities are
- Command line \e (highest)
- Environment variables. These are named as FSVS_{upper-case
option name}.
- $HOME/.fsvs/wc-dir/config
- $FSVS_CONF/wc-dir/config
- $HOME/.fsvs/config
- $FSVS_CONF/config
- Default value, compiled in \e (lowest)
\note The \c $HOME-dependent configuration files are not implemented
currently. Volunteers?
Furthermore there are "intelligent" run-time dependent settings, like
turning off colour output when the output is redirected. Their
priority is just below the command line - so they can always be
overridden if necessary.
\subsection o__examples Examples
Using the commandline:
\code
fsvs -o path=environment
fsvs -opath=environment
\endcode
Using environment variables:
\code
FSVS_PATH=absolute fsvs st
\endcode
A configuration file, from $FSVS_CONF/config or in a WC-specific
path below $FSVS_CONF:
\code
# FSVS configuration file
path=wcroot
\endcode
\section oh_display Output settings and entry filtering
\subsection o_all_removed Trimming the list of deleted entries
If you remove a directory, all entries below are implicitly known to be
deleted, too. To make the \ref status output shorter there's the \c
all_removed option which, if set to \c no, will cause children of removed
entries to be omitted.
Example for the config file:
\code
all_removed=no
\endcode
\subsection o_dir_exclude_mtime Ignore mtime-metadata changes for directories
When this option is enabled, directories where only the mtime changed are not reported on \ref status anymore.
This is useful in situations where temporary files are created in directories, eg. by text editors. (Example: \c VIM swapfiles when no \c directory option is configured).
Example for the config file:
\code
dir_exclude_mtime=yes
\endcode
\subsection o_dir_sort Directory sorting
If you'd like to have the output of \ref status sorted, you can use the
option \c dir_sort=yes.
FSVS will do a run through the tree, to read the status of the entries, and
then go through it again, but sorted by name.
\note If FSVS aborts with an error during \ref status output, you might
want to turn this option off again, to see where FSVS stops; the easiest
way is on the command line with \c -odir_sort=no.
\subsection o_filter Filtering entries
Please see the command line parameter for \ref glob_opt_filter "-f", which
is identical.
\code
fsvs -o filter=mtime
\endcode
\subsection o_logmax "fsvs log" revision limit
There are some defaults for the number of revisions that are shown on a
"fsvs log" command:
- 2 revisions given (-rX:Y): \c abs(X-Y)+1, ie. all revisions in
that range.
- 1 revision given: exactly that one.
- no revisions given: from \c HEAD to 1, with a maximum of 100.
As this option can only be used to set an upper limit of revisions, it
makes most sense for the no-revision-arguments case.
\subsection o_logoutput "fsvs log" output format
You can modify aspects of the \ref log "fsvs log" output format by setting
the \c log_output option to a combination of these flags:
- \c color: This uses color in the output, similar to \c cg-log (\c
cogito-log); the header and separator lines are highlighted.
\note This uses ANSI escape sequences, and tries to restore the default
color; if you know how to do that better (and more compatible), please
tell the developer mailing list.
- \c indent: Additionally you can shift the log message itself a space to
the right, to make the borders clearer.
Furthermore the value \c normal is available; this turns off all special
handling.
\note If you start such an option, the value is reset; so if you specify \c
log_output=color,indent in the global config file, and use \c
log_output=color on the commandline, only colors are used. This is
different to the \ref o_filter option, which is cumulative.
\subsection o_opt_path Displaying paths
You can specify how paths printed by FSVS should look like; this is used
for the entry status output of the various actions, and for the diff header
lines.
There are several possible settings, of which one can be chosen via the \c
path option.
- \anchor pd_wcroot \c wcroot \n
This is the old, traditional FSVS setting, where all paths are printed
relative to the working copy root.
- \anchor pd_parm \c parameter \n
With this setting FSVS works like most other programs - it uses the first
best-matching parameter given by the user, and appends the rest of the
path.\n
This is the new default.
\note Internally FSVS still first parses all arguments, and then does a
single run through the entries. So if some entry matches more than one
parameter, it is printed using the first match.
- \anchor pd_absolute \c absolute \n
All paths are printed in absolute form. This is useful if you want to
paste them into other consoles without worrying whether the current
directory matches, or for using them in pipelines.
The next two are nearly identical to \c absolute, but the beginning of
paths are substituted by environment variables. This makes sense if you
want the advantage of full paths, but have some of them abbreviated.
- \anchor pd_env \c environment \n
Match variables to directories after reading the known entries, and use
this cached information. This is faster, but might miss the best case if
new entries are found (which would not be checked against possible longer
hits). \n
Furthermore, as this works via associating environment variables to
entries, the environment variables must at least match the working copy
base - shorter paths won't be substituted.
- \c full-environment \n
Check for matches just before printing the path. \n
This is slower, but finds the best fit.
\note The string of the environment variables must match a directory name;
the filename is always printed literally, and partial string matches are
not allowed. Feedback wanted.
\note Only environment variables whose names start with \c WC are used for
substitution, to avoid using variables like \c $PWD, \c $OLDPWD, \c $HOME
and similar which might differ between sessions.
Maybe the allowed prefixes for the environment variables should be settable
in the configuration. Opinions to the users mailing list, please.
Example, with \c / as working copy base:
\code
$ cd /etc
$ fsvs -o path=wcroot st
.mC. 1001 ./etc/X11/xorg.conf
$ fsvs -o path=absolute st
.mC. 1001 /etc/X11/xorg.conf
$ fsvs -o path=parameters st
.mC. 1001 X11/xorg.conf
$ fsvs -o path=parameters st .
.mC. 1001 ./X11/xorg.conf
$ fsvs -o path=parameters st /
.mC. 1001 /etc/X11/xorg.conf
$ fsvs -o path=parameters st X11
.mC. 1001 X11/xorg.conf
$ fsvs -o path=parameters st ../dev/..
.mC. 1001 ../dev/../etc/X11/xorg.conf
$ fsvs -o path=parameters st X11 ../etc
.mC. 1001 X11/xorg.conf
$ fsvs -o path=parameters st ../etc X11
.mC. 1001 ../etc/X11/xorg.conf
$ fsvs -o path=environ st
.mC. 1001 ./etc/X11/xorg.conf
$ WCBAR=/etc fsvs -o path=environ st
.mC. 1001 $WCBAR/X11/xorg.conf
$ WCBAR=/etc fsvs -o path=environ st /
.mC. 1001 $WCBAR/X11/xorg.conf
$ WCBAR=/e fsvs -o path=environ st
.mC. 1001 /etc/X11/xorg.conf
$ WCBAR=/etc WCFOO=/etc/X11 fsvs -o path=environ st
.mC. 1001 $WCFOO/xorg.conf
$ touch /etc/X11/xinit/xinitrc
$ fsvs -o path=parameters st
.mC. 1001 X11/xorg.conf
.m.? 1001 X11/xinit/xinitrc
$ fsvs -o path=parameters st X11 /etc/X11/xinit
.mC. 1001 X11/xorg.conf
.m.? 1001 /etc/X11/xinit/xinitrc
\endcode
\note At least for the command line options the strings can be abbreviated,
as long as they're still identifiable.
Please use the full strings in the configuration file, to avoid having
problems in future versions when more options are available.
\subsection o_status_color Status output coloring
FSVS can colorize the output of the status lines; removed entries will
be printed in red, new ones in green, and otherwise changed in blue.
Unchanged (for \c -v) will be given in the default color.
For this you can set \c stat_color=yes; this is turned \c off per default.
As with the other colorizing options this gets turned \c off automatically
if the output is not on a tty; on the command line you can override this,
though.
\subsection o_stop_change Checking for changes in a script
If you want to use FSVS in scripts, you might simply want to know whether
anything was changed.
In this case use the \c stop_on_change option, possibly combined with \ref
o_filter; this gives you no output on \c STDOUT, but an error code on the
first change seen:
\code
fsvs -o stop_change=yes st /etc
if fsvs status -o stop_change=yes -o filter=text /etc/init.d
then
echo No change found ...
else
echo Changes seen.
fi
\endcode
\subsection o_verbose Verbosity flags
If you want a bit more control about the data you're getting you can use
some specific flags for the \c verbose options.
- \c none,veryquiet - reset the bitmask, don't display anything.
- \c quiet - only a few output lines.
- \c changes - the characters showing what has changed for an entry.
- \c size - the size for files, or the textual description (like \c
"dir").
- \c path - the path of the file, formatted according to \ref
o_opt_path "the path option".
- \c default - The default value, ie. \c changes, \c size and \c name.
- \c meta - One more than the default so it can be used via a single \c
"-v", it marks that the mtime and owner/group changes get reported as two
characters.
If \c "-v" is used to achieve that, even entries without changes are
reported, unless overridden by \ref o_filter.
- \c url - Displays the entries' top priority URL
- \c copyfrom - Displays the URL this entry has been copied from (see
\ref copy).
- \c group - The group this entry belongs to
- \c urls - Displays all known URLs of this entry
- \c stacktrace - Print the full stacktrace when reporting errors; useful
for debugging.
- \c all - Sets all flags. Mostly useful for debugging.
Please note that if you want to display \b fewer items than per default,
you'll have to clear the bitmask first, like this:
\code
fsvs status -o verbose=none,changes,path
\endcode
\section oh_diff Diffing and merging on update
\subsection o_diff Options relating to the "diff" action
The diff is not done internally in FSVS, but some other program is called,
to get the highest flexibility.
There are several option values:
- diff_prg: The executable name, default "diff".
- diff_opt: The default options, default "-pu".
- diff_extra: Extra options, no default.
The call is done as
\code
$diff_prg $diff_opt $file1 --label "$label1" $file2 --label "$label2" $diff_extra
\endcode
\note In \c diff_opt you should only use command line flags without
parameters; in \c diff_extra you can encode a single flag with parameter
(like "-U5").
If you need more flexibility, write a shell script and pass its name as \c
diff_prg.
Advanced users might be interested in \ref exp_env
"exported environment variables", too; with their help you can eg. start
different \c diff programs depending on the filename.
\subsection o_colordiff Using colordiff
If you have \c colordiff installed on your system, you might be interested
in the \c colordiff option.
It can take one of these values:
- \c no, \c off or \c false: Don't use \c colordiff.
- empty (default value): Try to use \c colordiff as executable, but don't
throw an error if it can't be started; just pipe the data as-is to \c
STDOUT. (\e Auto mode.)
- anything else: Pipe the output of the \c diff program (see \ref
o_diff) to the given executable.
Please note that if \c STDOUT is not a tty (eg. is redirected into a file),
this option must be given on the command line to take effect.
\subsection o_conflict How to resolve conflicts on update
If you start an update, but one of the entries that was changed in the
repository is changed locally too, you get a conflict.
There are some ways to resolve a conflict:
- \c local - Just take the local entry, ignore the repository.
- \c remote - Overwrite any local change with the remote version.
- \c both - Keep the local modifications in the file renamed to
filename.mine, and save the repository version as
filename.rXXX, ie. put the revision number after
the filename.
The conflict must be solved manually, and the solution made known to FSVS
via the \ref resolve command.
\note As there's no known \e good version after this renaming, a zero byte
file gets created. \n
Any \ref resolve "resolve" or \ref revert "revert" command would make that
current, and the changes that are kept in filename.mine
would be lost! \n
You should only \ref revert to the last repository version, ie. the data of
filename.rXXX.
- \c merge - Call the program \c merge with the common ancestor, the
local and the remote version.
If it is a clean merge, no further work is necessary; else you'll get the
(partly) merged file, and the two other versions just like with the \c both
variant, and (again) have to tell FSVS that the conflict is solved, by
using the \ref resolve command.
\note As in the subversion command line client \c svn the auxiliary files
are seen as new, although that might change in the future (so that they
automatically get ignored).
\subsection o_merge Options regarding the "merge" program
Like with \ref o_diff "diff", the \c merge operation is not done internally
in FSVS.
To have better control there are these options:
- merge_prg: The executable name, default "merge".
- merge_opt: The default options, default "-A".
The option \c "-p" is always used:
\code
$merge_prg $merge_opt -p $file1 $common $file2
\endcode
\section oh_commit Options for commit
\subsection o_author Author
You can specify an author to be used on commit.
This option has a special behaviour; if the first character of
the value is an \c '$', the value is replaced by the environment
variable named.
Empty strings are ignored; that allows an \c /etc/fsvs/config like this:
\code
author=unknown
author=$LOGNAME
author=$SUDO_USER
\endcode
where the last non-empty value is taken;
and if your \c .authorized_keys has lines like
\code
environment="FSVS_AUTHOR=some_user" ssh-rsa ...
\endcode
that would override the config values.
\note Your \c sshd_config needs the \c PermitUserEnvironment setting; you can
also take a look at the \c AcceptEnv and \c SendEnv documentation.
\subsection o_passwd Password
In some scenarios like ssl-client-key-authentication it is more comfortable to
use anonymous logins for checkout.
In case the commit needs authentication via a password, you can use the \c
password option.
Please note the possible risks - on the command line it's visible via \c ps,
and config files should at least be protected via \c chmod! There's no
encryption or obfuscation!
\code
password="pa55word"
\endcode
\subsection o_commit_to Destination URL for commit
If you defined multiple URLs for your working copy, FSVS needs to know
which URL to commit to.
For this you would set \c commit_to to the \b name of the URL; see this
example:
\code
fsvs urls N:master,P:10,http://... N:local,P:20,file:///...
fsvs ci /etc/passwd -m "New user defined" -ocommit_to=local
\endcode
\subsection o_empty_commit Doing empty commits
In the default settings FSVS will happily create empty commits, ie.
revisions without any changed entry. These just have a revision number, an
author and a timestamp; this is nice if FSVS is run via CRON, and you want
to see when FSVS gets run.
If you would like to avoid such revisions, set this option to \c no; then
such commits will be avoided.
Example:
\code
fsvs commit -o empty_commit=no -m "cron" /etc
\endcode
\subsection o_empty_msg Avoid commits without a commit message
If you don't like the behaviour that FSVS does commits with an empty
message received from \c $EDITOR (eg if you found out that you don't want
to commit after all), you can change this option to \c no; then FSVS won't
allow empty commit messages.
Example for the config file:
\code
empty_message=no
\endcode
\subsection o_mkdir_base Creating directories in the repository above the URL
If you want to keep some data versioned, the first commit is normally the
creation of the base directories \b above the given URL (to keep that data
separate from the other repository data).
Previously this had to be done manually, ie. with a svn mkdir $URL
--parents or similar command. \n
With the \c mkdir_base option you can tell FSVS to create directories as
needed; this is mostly useful on the first commit.
\code
fsvs urls ...
fsvs group 'group:ignore,./**'
fsvs ci -m "First post!" -o mkdir_base=yes
\endcode
\subsection o_delay Waiting for a time change after working copy operations
If you're using FSVS in automated systems, you might see that changes
that happen in the same second as a commit are not seen with \ref status
later; this is because the timestamp granularity of FSVS is 1 second.
For backward compatibility the default value is \c no (don't delay).
You can set it to any combination of
- \c commit,
- \c update,
- \c revert and/or
- \c checkout;
for \c yes all of these actions are delayed until the clock seconds change.
Example how to set that option via an environment variable:
\code
export FSVS_DELAY=commit,revert
\endcode
\section oh_performance Performance and tuning related options
\subsection o_chcheck Change detection
This options allows to specify the trade-off between speed and accuracy.
A file with a changed size can immediately be known as changed; but if only
the modification time is changed, this is not so easy. Per default FSVS
does a MD5 check on the file in this case; if you don't want that, or if
you want to do the checksum calculation for \b every file (in case a file
has changed, but its mtime not), you can use this option to change FSVS'
behaviour.
On the command line there's a shortcut for that: for every \c "-C" another
check in this option is chosen.
The recognized specifications are
none Resets the check bitmask to "no checks".
file_mtime Check files for modifications (via MD5) and
directories for new entries, if the mtime is different - default
dir Check all directories for new entries, regardless of
the timestamp.
allfiles Check \b all files with MD5 for changes (\c
tripwire -like operation).
full All available checks.
You can give multiple options; they're accumulated unless overridden by \c
none.
\code
fsvs -o change_check=allfiles status
\endcode
\note \a commit and \a update additionally set the \c dir option, to avoid
missing new files.
\subsection o_copyfrom_exp Avoiding expensive compares on \ref cpfd "copyfrom-detect"
If you've got big files that are seen as new, doing the MD5 comparison can
be time consuming. So there's the option \c copyfrom_exp (for \e
"expensive"), which takes the usual \c yes (default) and \c no arguments.
\code
fsvs copyfrom-detect -o copyfrom_exp=no some_directory
\endcode
\subsection o_group_stats Getting grouping/ignore statistics
If you need to ignore many entries of your working copy, you might find
that the ignore pattern matching takes some valuable time. \n
In order to optimize the order of your patterns you can specify this option
to print the number of tests and matches for each pattern.
\code
$ fsvs status -o group_stats=yes -q
Grouping statistics (tested, matched, groupname, pattern):
4705 80 ignore group:ignore,./**.bak
4625 40 ignore group:ignore,./**.tmp
\endcode
For optimizing you'll want to put often matching patterns at the front (to
make them match sooner, and avoid unnecessary tests); but if you are using
other groups than \c ignore (like \c take), you will have to take care to
keep the patterns within a group together.
Please note that the first line shows how many entries were tested, and
that the next lines differ by the number of matched entries for the current
line, as all entries being tested against some pattern get tested for the
next too, unless they match the current pattern.
This option is available for \ref status and the \ref ignore "ignore test"
commands.
\section oh_base Base configuration
\subsection o_conf Path definitions for the config and WAA area
\anchor o_waa
The paths given here are used to store the persistent configuration data
needed by FSVS; please see \ref waa_files and \ref o__prio for more
details, and the \ref o_softroot parameter as well as the \ref
howto_backup_recovery for further discussion.
\code
FSVS_CONF=/home/user/.fsvs-conf fsvs -o waa=/home/user/.fsvs-waa st
\endcode
\note Please note that these paths can be given \b only as environment
variables (\c $FSVS_CONF resp. \c $FSVS_WAA) or as command line parameter;
settings in config files are ignored.
\subsection o_configdir Configuration directory for the subversion libraries
This path specifies where the subversion libraries should take their
configuration data from; the most important aspect of that is authentication
data, especially for certificate authentication.
The default value is \c $FSVS_CONF/svn/.
\c /etc/fsvs/config could have eg.
\code
config_dir=/root/.subversion
\endcode
Please note that this directory can hold an \c auth directory, and the \c
servers and \c config files.
\subsection o_softroot Using an alternate root directory
This is a path that is prepended to \c $FSVS_WAA and \c $FSVS_CONF
(or their default values, see \ref waa_files), if they do not already
start with it, and it is cut off for the directory-name MD5 calculation.
When is that needed? Imagine that you've booted from some Live-CD like
Knoppix; if you want to setup or restore a non-working system, you'd have
to transfer all files needed by the FSVS binary to it, and then start in
some kind of \c chroot environment.
With this parameter you can tell FSVS that it should load its libraries
from the current filesystem, but use the given path as root directory for
its administrative data.
This is used for recovery; see the example in \ref howto_backup_recovery.
So how does this work?
- The internal data paths derived from \c $FSVS_WAA and \c $FSVS_CONF use
the value given for \c softroot as a base directory, if they do not already
start with it. \n
(If that creates a conflict for you, eg. in that you want to use \c /var as
the \c softroot, and your \c $FSVS_WAA should be \c /var/fsvs, you can make
the string comparison fail by using /./var for either path.)
- When a directory name for \c $FSVS_CONF or \c $FSVS_WAA is derived from
some file path, the part matching \c softroot is cut off, so that the
generated names match the situation after rebooting.
Previously you'd have to \ref export your data back to the filesystem and
call \ref urls "fsvs urls" and FSVS \ref sync-repos
"sync-repos" again, to get the WAA data back.
\note A plain \c chroot() would not work, as some needed programs (eg.
the decoder for update, see \ref s_p_n) would not be available.
\note The easy way to understand \c softroot is: If you want to do a
\c chroot() into the given directory (or boot with it as \c /), you'll want
this set.
\note As this value is used for finding the correct working copy root (by
trying to find a \ref o_conf "conf-path"), it cannot be set from a per-wc
config file. Only the environment, global configuration or command line
parameter make sense.
\section oh_debug Debugging and diagnosing
The next two options could be set in the global configuration file, to
automatically get the last debug messages when an error happens.
To provide an easy way to get on-line debugging again, \c debug_output and
\c debug_buffer are both reset to non-redirected, on-line output, if more
than a single \c -d is specified on the command line, like this:
\code
fsvs commit -m "..." -d -d filenames
\endcode
In this case you'll get a message telling you about that.
\subsection o_debug_output Destination for debug output
You can specify the debug output destination with the option \c
debug_output. This can be a simple filename (which gets truncated on open),
or, if it starts with a \c |, a command that the output gets piped into.
If the destination cannot be opened (or none is given), debug output goes
to \c STDOUT (for easier tracing via \c less).
Example:
\code
fsvs -o debug_output=/tmp/debug.out -d st /etc
\endcode
\note That string is taken only once - at the first debug output line.
So you have to use the correct order of parameters:
-o debug_output=... -d.
An example: writing the last 200 lines of debug output into a file.
\code
fsvs -o debug_output='| tail -200 > /tmp/debug.log' -d ....
\endcode
\subsection o_debug_buffer Using a debug buffer
With the \c debug_buffer option you can specify the size of a buffer (in
kB) that is used to capture the output, and which gets printed
automatically if an error occurs.
This must be done \b before debugging starts, like with the \ref
o_debug_output "debug_output" specification.
\code
fsvs -o debug_buffer=128 ...
\endcode
\note If this option is specified in the configuration file or via the
environment, only the buffer is allocated; if it is used on the command
line, debugging is automatically turned on, too.
\subsection o_warnings Setting warning behaviour
Please see the command line parameter \ref glob_opt_warnings "-W", which is
identical.
\code
fsvs -o warning=diff-status=ignore
\endcode
*/
// Use this for folding:
// g/^\\subsection/normal v/^\\s
kkzf
// vi: filetype=doxygen spell spelllang=en_gb formatoptions+=ta :
// vi: nowrapscan foldmethod=manual foldcolumn=3 :
fsvs-fsvs-1.2.12/src/dox/statii.dox 0000664 0000000 0000000 00000006325 14536317137 0017147 0 ustar 00root root 0000000 0000000 /**
\defgroup howto_entry_statii HOWTO: Understand the entries' statii
\ingroup howto
Transitions between the various statii.
Here is a small overview about the various entry-statii and their
change conditions.
If you find any mismatches between this graphic and FSVS behaviour,
don't hesitate to ask on the dev@ mailing list.
\dot
digraph {
// use tooltip?
// Note: the labelangle is manually optimized for the current
// ordering - which isn't stable, so it might go wrong.
edge [fontname=Arial,
fontsize=7,
labeldistance=0];
node [shape=box,
fontname=Arial,
fontsize=9];
subgraph cluster_2 {
color=white;
// --------------- Statii
{
rank=same;
node [style=bold];
New;
Not_existing [label="Not existing"];
}
Ignored;
Deleted;
{
rank=same;
Added;
CopiedU [label="Copied,\nunchanged"];
}
{
rank=same;
Changed;
Committed [color=green, style=bold];
}
Unversioned [label="To-be-\nunversioned"];
{
rank=same;
Conflicted;
CopiedC [label="Copied,\nchanged"];
}
// --------------- Commands
edge [color=brown];
New -> Added [label="add", URL="\ref add" ];
Ignored -> Added [label="add", URL="\ref add"];
Committed -> Unversioned [label="unversion", URL="\ref unversion"];
{
edge [ label="update", URL="\ref update"];
Committed -> Committed;
Changed -> Conflicted;
}
Conflicted -> Changed [label="resolve", URL="\ref resolve"];
{
edge [ color=green, URL="\ref commit",
tooltip="commit"];
Added -> Committed;
New -> Committed;
CopiedU -> Committed;
Changed -> Committed;
CopiedC -> Committed;
Unversioned -> New [
label="commit;\nremoved from\nrepository;\nlocally kept,\nbut forgotten."];
Deleted:w -> Not_existing [
label="commit;\nremoved from\nrepository\nand local data."];
}
New -> CopiedU [label="copy", URL="\ref cp"];
CopiedU -> New [label="uncopy", URL="\ref uncopy"];
{
edge [ color=blue, URL="\ref revert",
tooltip="revert"];
CopiedC -> CopiedU;
Changed -> Committed;
Deleted -> Committed;
Added -> New;
Unversioned -> Committed;
Conflicted -> Committed;
}
// Configuration
edge [color=black];
New -> Ignored [label="ignore\npattern\nmatches",
URL="\ref ignore"];
// External
edge [color=orange, style=dashed];
CopiedU -> CopiedC [label="edit"];
Committed -> Changed [label="edit"];
Committed -> Deleted [label="rm"];
Not_existing -> New [ label="Create"];
}
subgraph cluster_1 {
margin=0;
nodesep=0.2;
ranksep=0.2;
color=white;
node [shape=plaintext, width=0, height=0, label=""];
{
rank=same;
revert1 -> revert2 [color=blue, label="revert",
URL="\ref revert"];
}
{
rank=same;
commit1 -> commit2 [label="commit", color=green,
URL="\ref commit"];
}
{
rank=same;
other1 -> other2 [color=brown, label="other\ncommands"];
}
{
rank=same;
extern1 -> extern2 [color=orange, label="external\nchanges", style=dashed];
}
edge [ style=invis ];
revert1 -> commit1 -> other1 -> extern1;
}
}
\enddot
*/
// vi: filetype=doxygen spell spelllang=en_gb
fsvs-fsvs-1.2.12/src/doxygen-data/ 0000775 0000000 0000000 00000000000 14536317137 0016722 5 ustar 00root root 0000000 0000000 fsvs-fsvs-1.2.12/src/doxygen-data/Doxyfile 0000775 0000000 0000000 00000355204 14536317137 0020444 0 ustar 00root root 0000000 0000000 # Doxyfile 1.9.5
# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project.
#
# All text after a double hash (##) is considered a comment and is placed in
# front of the TAG it is preceding.
#
# All text after a single hash (#) is considered a comment and will be ignored.
# The format is:
# TAG = value [value, ...]
# For lists, items can also be appended using:
# TAG += value [value, ...]
# Values that contain spaces should be placed between quotes (\" \").
#
# Note:
#
# Use doxygen to compare the used configuration file with the template
# configuration file:
# doxygen -x [configFile]
# Use doxygen to compare the used configuration file with the template
# configuration file without replacing the environment variables or CMake type
# replacement variables:
# doxygen -x_noenv [configFile]
#---------------------------------------------------------------------------
# Project related configuration options
#---------------------------------------------------------------------------
# This tag specifies the encoding used for all characters in the configuration
# file that follow. The default is UTF-8 which is also the encoding used for all
# text before the first occurrence of this tag. Doxygen uses libiconv (or the
# iconv built into libc) for the transcoding. See
# https://www.gnu.org/software/libiconv/ for the list of possible encodings.
# The default value is: UTF-8.
DOXYFILE_ENCODING = UTF-8
# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
# double-quotes, unless you are using Doxywizard) that should identify the
# project for which the documentation is generated. This name is used in the
# title of most generated pages and in a few other places.
# The default value is: My Project.
PROJECT_NAME = fsvs
# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
# could be handy for archiving the generated documentation or if some version
# control system is used.
PROJECT_NUMBER =
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a
# quick idea about the purpose of the project. Keep the description short.
PROJECT_BRIEF =
# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
# the logo to the output directory.
PROJECT_LOGO =
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
# into which the generated documentation will be written. If a relative path is
# entered, it will be relative to the location where doxygen was started. If
# left blank the current directory will be used.
OUTPUT_DIRECTORY = ../doxygen/
# If the CREATE_SUBDIRS tag is set to YES then doxygen will create up to 4096
# sub-directories (in 2 levels) under the output directory of each output format
# and will distribute the generated files over these directories. Enabling this
# option can be useful when feeding doxygen a huge amount of source files, where
# putting all generated files in the same directory would otherwise cause
# performance problems for the file system. Adapt CREATE_SUBDIRS_LEVEL to
# control the number of sub-directories.
# The default value is: NO.
CREATE_SUBDIRS = NO
# Controls the number of sub-directories that will be created when
# CREATE_SUBDIRS tag is set to YES. Level 0 represents 16 directories, and every
# level increment doubles the number of directories, resulting in 4096
# directories at level 8 which is the default and also the maximum value. The
# sub-directories are organized in 2 levels, the first level always has a fixed
# number of 16 directories.
# Minimum value: 0, maximum value: 8, default value: 8.
# This tag requires that the tag CREATE_SUBDIRS is set to YES.
CREATE_SUBDIRS_LEVEL = 8
# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
# characters to appear in the names of generated files. If set to NO, non-ASCII
# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
# U+3044.
# The default value is: NO.
ALLOW_UNICODE_NAMES = NO
# The OUTPUT_LANGUAGE tag is used to specify the language in which all
# documentation generated by doxygen is written. Doxygen will use this
# information to generate all constant output in the proper language.
# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Bulgarian,
# Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, Dutch, English
# (United States), Esperanto, Farsi (Persian), Finnish, French, German, Greek,
# Hindi, Hungarian, Indonesian, Italian, Japanese, Japanese-en (Japanese with
# English messages), Korean, Korean-en (Korean with English messages), Latvian,
# Lithuanian, Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese,
# Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish,
# Swedish, Turkish, Ukrainian and Vietnamese.
# The default value is: English.
OUTPUT_LANGUAGE = English
# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
# descriptions after the members that are listed in the file and class
# documentation (similar to Javadoc). Set to NO to disable this.
# The default value is: YES.
BRIEF_MEMBER_DESC = YES
# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
# description of a member or function before the detailed description
#
# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
# brief descriptions will be completely suppressed.
# The default value is: YES.
REPEAT_BRIEF = YES
# This tag implements a quasi-intelligent brief description abbreviator that is
# used to form the text in various listings. Each string in this list, if found
# as the leading text of the brief description, will be stripped from the text
# and the result, after processing the whole list, is used as the annotated
# text. Otherwise, the brief description is used as-is. If left blank, the
# following values are used ($name is automatically replaced with the name of
# the entity):The $name class, The $name widget, The $name file, is, provides,
# specifies, contains, represents, a, an and the.
ABBREVIATE_BRIEF = "The $name class" \
"The $name widget" \
"The $name file" \
is \
provides \
specifies \
contains \
represents \
a \
an \
the
# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
# doxygen will generate a detailed section even if there is only a brief
# description.
# The default value is: NO.
ALWAYS_DETAILED_SEC = NO
# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
# inherited members of a class in the documentation of that class as if those
# members were ordinary class members. Constructors, destructors and assignment
# operators of the base classes will not be shown.
# The default value is: NO.
INLINE_INHERITED_MEMB = NO
# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
# before files name in the file list and in the header files. If set to NO the
# shortest path that makes the file name unique will be used
# The default value is: YES.
FULL_PATH_NAMES = NO
# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
# Stripping is only done if one of the specified strings matches the left-hand
# part of the path. The tag can be used to show relative paths in the file list.
# If left blank the directory from which doxygen is run is used as the path to
# strip.
#
# Note that you can specify absolute paths here, but also relative paths, which
# will be relative from the directory where doxygen is started.
# This tag requires that the tag FULL_PATH_NAMES is set to YES.
STRIP_FROM_PATH = ./
# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
# path mentioned in the documentation of a class, which tells the reader which
# header file to include in order to use a class. If left blank only the name of
# the header file containing the class definition is used. Otherwise one should
# specify the list of include paths that are normally passed to the compiler
# using the -I flag.
STRIP_FROM_INC_PATH = ./
# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
# less readable) file names. This can be useful if your file system doesn't
# support long names like on DOS, Mac, or CD-ROM.
# The default value is: NO.
SHORT_NAMES = NO
# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
# first line (until the first dot) of a Javadoc-style comment as the brief
# description. If set to NO, the Javadoc-style will behave just like regular Qt-
# style comments (thus requiring an explicit @brief command for a brief
# description.)
# The default value is: NO.
JAVADOC_AUTOBRIEF = YES
# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line
# such as
# /***************
# as being the beginning of a Javadoc-style comment "banner". If set to NO, the
# Javadoc-style will behave just like regular comments and it will not be
# interpreted by doxygen.
# The default value is: NO.
JAVADOC_BANNER = NO
# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
# line (until the first dot) of a Qt-style comment as the brief description. If
# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
# requiring an explicit \brief command for a brief description.)
# The default value is: NO.
QT_AUTOBRIEF = NO
# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
# a brief description. This used to be the default behavior. The new default is
# to treat a multi-line C++ comment block as a detailed description. Set this
# tag to YES if you prefer the old behavior instead.
#
# Note that setting this tag to YES also means that rational rose comments are
# not recognized any more.
# The default value is: NO.
MULTILINE_CPP_IS_BRIEF = NO
# By default Python docstrings are displayed as preformatted text and doxygen's
# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the
# doxygen's special commands can be used and the contents of the docstring
# documentation blocks is shown as doxygen documentation.
# The default value is: YES.
PYTHON_DOCSTRING = YES
# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
# documentation from any documented member that it re-implements.
# The default value is: YES.
INHERIT_DOCS = YES
# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
# page for each member. If set to NO, the documentation of a member will be part
# of the file/class/namespace that contains it.
# The default value is: NO.
SEPARATE_MEMBER_PAGES = NO
# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
# uses this value to replace tabs by spaces in code fragments.
# Minimum value: 1, maximum value: 16, default value: 4.
TAB_SIZE = 4
# This tag can be used to specify a number of aliases that act as commands in
# the documentation. An alias has the form:
# name=value
# For example adding
# "sideeffect=@par Side Effects:^^"
# will allow you to put the command \sideeffect (or @sideeffect) in the
# documentation, which will result in a user-defined paragraph with heading
# "Side Effects:". Note that you cannot put \n's in the value part of an alias
# to insert newlines (in the resulting output). You can put ^^ in the value part
# of an alias to insert a newline as if a physical newline was in the original
# file. When you need a literal { or } or , in the value part of an alias you
# have to escape them by means of a backslash (\), this can lead to conflicts
# with the commands \{ and \} for these it is advised to use the version @{ and
# @} or use a double escape (\\{ and \\})
ALIASES =
# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
# only. Doxygen will then generate output that is more tailored for C. For
# instance, some of the names that are used will be different. The list of all
# members will be omitted, etc.
# The default value is: NO.
OPTIMIZE_OUTPUT_FOR_C = YES
# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
# Python sources only. Doxygen will then generate output that is more tailored
# for that language. For instance, namespaces will be presented as packages,
# qualified scopes will look different, etc.
# The default value is: NO.
OPTIMIZE_OUTPUT_JAVA = NO
# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
# sources. Doxygen will then generate output that is tailored for Fortran.
# The default value is: NO.
OPTIMIZE_FOR_FORTRAN = NO
# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
# sources. Doxygen will then generate output that is tailored for VHDL.
# The default value is: NO.
OPTIMIZE_OUTPUT_VHDL = NO
# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice
# sources only. Doxygen will then generate output that is more tailored for that
# language. For instance, namespaces will be presented as modules, types will be
# separated into more groups, etc.
# The default value is: NO.
OPTIMIZE_OUTPUT_SLICE = NO
# Doxygen selects the parser to use depending on the extension of the files it
# parses. With this tag you can assign which parser to use for a given
# extension. Doxygen has a built-in mapping, but you can override or extend it
# using this tag. The format is ext=language, where ext is a file extension, and
# language is one of the parsers supported by doxygen: IDL, Java, JavaScript,
# Csharp (C#), C, C++, Lex, D, PHP, md (Markdown), Objective-C, Python, Slice,
# VHDL, Fortran (fixed format Fortran: FortranFixed, free formatted Fortran:
# FortranFree, unknown formatted Fortran: Fortran. In the latter case the parser
# tries to guess whether the code is fixed or free formatted code, this is the
# default for Fortran type files). For instance to make doxygen treat .inc files
# as Fortran files (default is PHP), and .f files as C (default is Fortran),
# use: inc=Fortran f=C.
#
# Note: For files without extension you can use no_extension as a placeholder.
#
# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
# the files are not read by doxygen. When specifying no_extension you should add
# * to the FILE_PATTERNS.
#
# Note see also the list of default file extension mappings.
EXTENSION_MAPPING =
# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
# according to the Markdown format, which allows for more readable
# documentation. See https://daringfireball.net/projects/markdown/ for details.
# The output of markdown processing is further processed by doxygen, so you can
# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
# case of backward compatibilities issues.
# The default value is: YES.
MARKDOWN_SUPPORT = YES
# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
# to that level are automatically included in the table of contents, even if
# they do not have an id attribute.
# Note: This feature currently applies only to Markdown headings.
# Minimum value: 0, maximum value: 99, default value: 5.
# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
TOC_INCLUDE_HEADINGS = 5
# When enabled doxygen tries to link words that correspond to documented
# classes, or namespaces to their corresponding documentation. Such a link can
# be prevented in individual cases by putting a % sign in front of the word or
# globally by setting AUTOLINK_SUPPORT to NO.
# The default value is: YES.
AUTOLINK_SUPPORT = YES
# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
# to include (a tag file for) the STL sources as input, then you should set this
# tag to YES in order to let doxygen match functions declarations and
# definitions whose arguments contain STL classes (e.g. func(std::string);
# versus func(std::string) {}). This also makes the inheritance and collaboration
# diagrams that involve STL classes more complete and accurate.
# The default value is: NO.
BUILTIN_STL_SUPPORT = NO
# If you use Microsoft's C++/CLI language, you should set this option to YES to
# enable parsing support.
# The default value is: NO.
CPP_CLI_SUPPORT = NO
# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen
# will parse them like normal C++ but will assume all classes use public instead
# of private inheritance when no explicit protection keyword is present.
# The default value is: NO.
SIP_SUPPORT = NO
# For Microsoft's IDL there are propget and propput attributes to indicate
# getter and setter methods for a property. Setting this option to YES will make
# doxygen to replace the get and set methods by a property in the documentation.
# This will only work if the methods are indeed getting or setting a simple
# type. If this is not the case, or you want to show the methods anyway, you
# should set this option to NO.
# The default value is: YES.
IDL_PROPERTY_SUPPORT = YES
# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
# tag is set to YES then doxygen will reuse the documentation of the first
# member in the group (if any) for the other members of the group. By default
# all members of a group must be documented explicitly.
# The default value is: NO.
DISTRIBUTE_GROUP_DOC = NO
# If one adds a struct or class to a group and this option is enabled, then also
# any nested class or struct is added to the same group. By default this option
# is disabled and one has to add nested compounds explicitly via \ingroup.
# The default value is: NO.
GROUP_NESTED_COMPOUNDS = NO
# Set the SUBGROUPING tag to YES to allow class member groups of the same type
# (for instance a group of public functions) to be put as a subgroup of that
# type (e.g. under the Public Functions section). Set it to NO to prevent
# subgrouping. Alternatively, this can be done per class using the
# \nosubgrouping command.
# The default value is: YES.
SUBGROUPING = YES
# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
# are shown inside the group in which they are included (e.g. using \ingroup)
# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
# and RTF).
#
# Note that this feature does not work in combination with
# SEPARATE_MEMBER_PAGES.
# The default value is: NO.
INLINE_GROUPED_CLASSES = NO
# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
# with only public data fields or simple typedef fields will be shown inline in
# the documentation of the scope in which they are defined (i.e. file,
# namespace, or group documentation), provided this scope is documented. If set
# to NO, structs, classes, and unions are shown on a separate page (for HTML and
# Man pages) or section (for LaTeX and RTF).
# The default value is: NO.
INLINE_SIMPLE_STRUCTS = NO
# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
# enum is documented as struct, union, or enum with the name of the typedef. So
# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
# with name TypeT. When disabled the typedef will appear as a member of a file,
# namespace, or class. And the struct will be named TypeS. This can typically be
# useful for C code in case the coding convention dictates that all compound
# types are typedef'ed and only the typedef is referenced, never the tag name.
# The default value is: NO.
TYPEDEF_HIDES_STRUCT = NO
# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
# cache is used to resolve symbols given their name and scope. Since this can be
# an expensive process and often the same symbol appears multiple times in the
# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
# doxygen will become slower. If the cache is too large, memory is wasted. The
# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
# symbols. At the end of a run doxygen will report the cache usage and suggest
# the optimal cache size from a speed point of view.
# Minimum value: 0, maximum value: 9, default value: 0.
LOOKUP_CACHE_SIZE = 0
# The NUM_PROC_THREADS specifies the number of threads doxygen is allowed to use
# during processing. When set to 0 doxygen will base this on the number of
# cores available in the system. You can set it explicitly to a value larger
# than 0 to get more control over the balance between CPU load and processing
# speed. At this moment only the input processing can be done using multiple
# threads. Since this is still an experimental feature the default is set to 1,
# which effectively disables parallel processing. Please report any issues you
# encounter. Generating dot graphs in parallel is controlled by the
# DOT_NUM_THREADS setting.
# Minimum value: 0, maximum value: 32, default value: 1.
NUM_PROC_THREADS = 1
#---------------------------------------------------------------------------
# Build related configuration options
#---------------------------------------------------------------------------
# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
# documentation are documented, even if no documentation was available. Private
# class members and static file members will be hidden unless the
# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
# Note: This will also disable the warnings about undocumented members that are
# normally produced when WARNINGS is set to YES.
# The default value is: NO.
EXTRACT_ALL = YES
# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
# be included in the documentation.
# The default value is: NO.
EXTRACT_PRIVATE = YES
# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual
# methods of a class will be included in the documentation.
# The default value is: NO.
EXTRACT_PRIV_VIRTUAL = NO
# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
# scope will be included in the documentation.
# The default value is: NO.
EXTRACT_PACKAGE = NO
# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
# included in the documentation.
# The default value is: NO.
EXTRACT_STATIC = YES
# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
# locally in source files will be included in the documentation. If set to NO,
# only classes defined in header files are included. Does not have any effect
# for Java sources.
# The default value is: YES.
EXTRACT_LOCAL_CLASSES = YES
# This flag is only useful for Objective-C code. If set to YES, local methods,
# which are defined in the implementation section but not in the interface are
# included in the documentation. If set to NO, only methods in the interface are
# included.
# The default value is: NO.
EXTRACT_LOCAL_METHODS = NO
# If this flag is set to YES, the members of anonymous namespaces will be
# extracted and appear in the documentation as a namespace called
# 'anonymous_namespace{file}', where file will be replaced with the base name of
# the file that contains the anonymous namespace. By default anonymous namespace
# are hidden.
# The default value is: NO.
EXTRACT_ANON_NSPACES = NO
# If this flag is set to YES, the name of an unnamed parameter in a declaration
# will be determined by the corresponding definition. By default unnamed
# parameters remain unnamed in the output.
# The default value is: YES.
RESOLVE_UNNAMED_PARAMS = YES
# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
# undocumented members inside documented classes or files. If set to NO these
# members will be included in the various overviews, but no documentation
# section is generated. This option has no effect if EXTRACT_ALL is enabled.
# The default value is: NO.
HIDE_UNDOC_MEMBERS = NO
# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
# undocumented classes that are normally visible in the class hierarchy. If set
# to NO, these classes will be included in the various overviews. This option
# has no effect if EXTRACT_ALL is enabled.
# The default value is: NO.
HIDE_UNDOC_CLASSES = NO
# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
# declarations. If set to NO, these declarations will be included in the
# documentation.
# The default value is: NO.
HIDE_FRIEND_COMPOUNDS = NO
# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
# documentation blocks found inside the body of a function. If set to NO, these
# blocks will be appended to the function's detailed documentation block.
# The default value is: NO.
HIDE_IN_BODY_DOCS = NO
# The INTERNAL_DOCS tag determines if documentation that is typed after a
# \internal command is included. If the tag is set to NO then the documentation
# will be excluded. Set it to YES to include the internal documentation.
# The default value is: NO.
INTERNAL_DOCS = NO
# With the correct setting of option CASE_SENSE_NAMES doxygen will better be
# able to match the capabilities of the underlying filesystem. In case the
# filesystem is case sensitive (i.e. it supports files in the same directory
# whose names only differ in casing), the option must be set to YES to properly
# deal with such files in case they appear in the input. For filesystems that
# are not case sensitive the option should be set to NO to properly deal with
# output files written for symbols that only differ in casing, such as for two
# classes, one named CLASS and the other named Class, and to also support
# references to files without having to specify the exact matching casing. On
# Windows (including Cygwin) and MacOS, users should typically set this option
# to NO, whereas on Linux or other Unix flavors it should typically be set to
# YES.
# Possible values are: SYSTEM, NO and YES.
# The default value is: SYSTEM.
CASE_SENSE_NAMES = YES
# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
# their full class and namespace scopes in the documentation. If set to YES, the
# scope will be hidden.
# The default value is: NO.
HIDE_SCOPE_NAMES = NO
# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
# append additional text to a page's title, such as Class Reference. If set to
# YES the compound reference will be hidden.
# The default value is: NO.
HIDE_COMPOUND_REFERENCE= NO
# If the SHOW_HEADERFILE tag is set to YES then the documentation for a class
# will show which file needs to be included to use the class.
# The default value is: YES.
SHOW_HEADERFILE = YES
# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
# the files that are included by a file in the documentation of that file.
# The default value is: YES.
SHOW_INCLUDE_FILES = YES
# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
# grouped member an include statement to the documentation, telling the reader
# which file to include in order to use the member.
# The default value is: NO.
SHOW_GROUPED_MEMB_INC = NO
# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
# files with double quotes in the documentation rather than with sharp brackets.
# The default value is: NO.
FORCE_LOCAL_INCLUDES = NO
# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
# documentation for inline members.
# The default value is: YES.
INLINE_INFO = YES
# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
# (detailed) documentation of file and class members alphabetically by member
# name. If set to NO, the members will appear in declaration order.
# The default value is: YES.
SORT_MEMBER_DOCS = YES
# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
# descriptions of file, namespace and class members alphabetically by member
# name. If set to NO, the members will appear in declaration order. Note that
# this will also influence the order of the classes in the class list.
# The default value is: NO.
SORT_BRIEF_DOCS = NO
# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
# (brief and detailed) documentation of class members so that constructors and
# destructors are listed first. If set to NO the constructors will appear in the
# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
# member documentation.
# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
# detailed member documentation.
# The default value is: NO.
SORT_MEMBERS_CTORS_1ST = NO
# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
# of group names into alphabetical order. If set to NO the group names will
# appear in their defined order.
# The default value is: NO.
SORT_GROUP_NAMES = NO
# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
# fully-qualified names, including namespaces. If set to NO, the class list will
# be sorted only by class name, not including the namespace part.
# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
# Note: This option applies only to the class list, not to the alphabetical
# list.
# The default value is: NO.
SORT_BY_SCOPE_NAME = NO
# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
# type resolution of all parameters of a function it will reject a match between
# the prototype and the implementation of a member function even if there is
# only one candidate or it is obvious which candidate to choose by doing a
# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
# accept a match between prototype and implementation in such cases.
# The default value is: NO.
STRICT_PROTO_MATCHING = NO
# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
# list. This list is created by putting \todo commands in the documentation.
# The default value is: YES.
GENERATE_TODOLIST = YES
# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
# list. This list is created by putting \test commands in the documentation.
# The default value is: YES.
GENERATE_TESTLIST = YES
# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
# list. This list is created by putting \bug commands in the documentation.
# The default value is: YES.
GENERATE_BUGLIST = YES
# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
# the deprecated list. This list is created by putting \deprecated commands in
# the documentation.
# The default value is: YES.
GENERATE_DEPRECATEDLIST= YES
# The ENABLED_SECTIONS tag can be used to enable conditional documentation
# sections, marked by \if ... \endif and \cond
# ... \endcond blocks.
ENABLED_SECTIONS = html
# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
# initial value of a variable or macro / define can have for it to appear in the
# documentation. If the initializer consists of more lines than specified here
# it will be hidden. Use a value of 0 to hide initializers completely. The
# appearance of the value of individual variables and macros / defines can be
# controlled using \showinitializer or \hideinitializer command in the
# documentation regardless of this setting.
# Minimum value: 0, maximum value: 10000, default value: 30.
MAX_INITIALIZER_LINES = 30
# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
# the bottom of the documentation of classes and structs. If set to YES, the
# list will mention the files that were used to generate the documentation.
# The default value is: YES.
SHOW_USED_FILES = YES
# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
# will remove the Files entry from the Quick Index and from the Folder Tree View
# (if specified).
# The default value is: YES.
SHOW_FILES = YES
# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
# page. This will remove the Namespaces entry from the Quick Index and from the
# Folder Tree View (if specified).
# The default value is: YES.
SHOW_NAMESPACES = YES
# The FILE_VERSION_FILTER tag can be used to specify a program or script that
# doxygen should invoke to get the current version for each file (typically from
# the version control system). Doxygen will invoke the program by executing (via
# popen()) the command <command> <input-file>, where <command> is the value of
# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
# by doxygen. Whatever the program writes to standard output is used as the file
# version. For an example see the documentation.
FILE_VERSION_FILTER =
# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
# by doxygen. The layout file controls the global structure of the generated
# output files in an output format independent way. To create the layout file
# that represents doxygen's defaults, run doxygen with the -l option. You can
# optionally specify a file name after the option, if omitted DoxygenLayout.xml
# will be used as the name of the layout file. See also section "Changing the
# layout of pages" for information.
#
# Note that if you run doxygen from a directory containing a file called
# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
# tag is left empty.
LAYOUT_FILE =
# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
# the reference definitions. This must be a list of .bib files. The .bib
# extension is automatically appended if omitted. This requires the bibtex tool
# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info.
# For LaTeX the style of the bibliography can be controlled using
# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
# search path. See also \cite for info how to create references.
CITE_BIB_FILES =
#---------------------------------------------------------------------------
# Configuration options related to warning and progress messages
#---------------------------------------------------------------------------
# The QUIET tag can be used to turn on/off the messages that are generated to
# standard output by doxygen. If QUIET is set to YES this implies that the
# messages are off.
# The default value is: NO.
QUIET = YES
# The WARNINGS tag can be used to turn on/off the warning messages that are
# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
# this implies that the warnings are on.
#
# Tip: Turn warnings on while writing the documentation.
# The default value is: YES.
WARNINGS = YES
# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
# will automatically be disabled.
# The default value is: YES.
WARN_IF_UNDOCUMENTED = YES
# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
# potential errors in the documentation, such as documenting some parameters in
# a documented function twice, or documenting parameters that don't exist or
# using markup commands wrongly.
# The default value is: YES.
WARN_IF_DOC_ERROR = YES
# If WARN_IF_INCOMPLETE_DOC is set to YES, doxygen will warn about incomplete
# function parameter documentation. If set to NO, doxygen will accept that some
# parameters have no documentation without warning.
# The default value is: YES.
WARN_IF_INCOMPLETE_DOC = NO
# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
# are documented, but have no documentation for their parameters or return
# value. If set to NO, doxygen will only warn about wrong parameter
# documentation, but not about the absence of documentation. If EXTRACT_ALL is
# set to YES then this flag will automatically be disabled. See also
# WARN_IF_INCOMPLETE_DOC
# The default value is: NO.
WARN_NO_PARAMDOC = NO
# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when
# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS
# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but
# at the end of the doxygen process doxygen will return with a non-zero status.
# Possible values are: NO, YES and FAIL_ON_WARNINGS.
# The default value is: NO.
WARN_AS_ERROR = NO
# The WARN_FORMAT tag determines the format of the warning messages that doxygen
# can produce. The string should contain the $file, $line, and $text tags, which
# will be replaced by the file and line number from which the warning originated
# and the warning text. Optionally the format may contain $version, which will
# be replaced by the version of the file (if it could be obtained via
# FILE_VERSION_FILTER)
# See also: WARN_LINE_FORMAT
# The default value is: $file:$line: $text.
WARN_FORMAT = "$file:$line: $text"
# In the $text part of the WARN_FORMAT command it is possible that a reference
# to a more specific place is given. To make it easier to jump to this place
# (outside of doxygen) the user can define a custom "cut" / "paste" string.
# Example:
# WARN_LINE_FORMAT = "'vi $file +$line'"
# See also: WARN_FORMAT
# The default value is: at line $line of file $file.
WARN_LINE_FORMAT = "at line $line of file $file"
# The WARN_LOGFILE tag can be used to specify a file to which warning and error
# messages should be written. If left blank the output is written to standard
# error (stderr). In case the file specified cannot be opened for writing the
# warning and error messages are written to standard error. When a file - is
# specified the warning and error messages are written to standard output
# (stdout).
WARN_LOGFILE =
#---------------------------------------------------------------------------
# Configuration options related to the input files
#---------------------------------------------------------------------------
# The INPUT tag is used to specify the files and/or directories that contain
# documented source files. You may enter file names like myfile.cpp or
# directories like /usr/src/myproject. Separate the files or directories with
# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
# Note: If this tag is empty the current directory is searched.
INPUT = . \
dox \
tools
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
# documentation (see:
# https://www.gnu.org/software/libiconv/) for the list of possible encodings.
# See also: INPUT_FILE_ENCODING
# The default value is: UTF-8.
INPUT_ENCODING = UTF-8
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. The INPUT_FILE_ENCODING tag can be used to specify
# character encoding on a per file pattern basis. Doxygen will compare the file
# name with each pattern and apply the encoding instead of the default
# INPUT_ENCODING if there is a match. The character encodings are a list of the
# form: pattern=encoding (like *.php=ISO-8859-1). See INPUT_ENCODING for
# further information on supported encodings.
INPUT_FILE_ENCODING =
# If the value of the INPUT tag contains directories, you can use the
# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
# *.h) to filter out the source-files in the directories.
#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# read by doxygen.
#
# Note the list of default checked file patterns might differ from the list of
# default file extension mappings.
#
# If left blank the following patterns are tested: *.c, *.cc, *.cxx, *.cpp,
# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
# *.hh, *.hxx, *.hpp, *.h++, *.l, *.cs, *.d, *.php, *.php4, *.php5, *.phtml,
# *.inc, *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C
# comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd,
# *.vhdl, *.ucf, *.qsf and *.ice.
FILE_PATTERNS = *.c \
*.h \
*.dox
# The RECURSIVE tag can be used to specify whether or not subdirectories should
# be searched for input files as well.
# The default value is: NO.
RECURSIVE = NO
# The EXCLUDE tag can be used to specify files and/or directories that should be
# excluded from the INPUT source files. This way you can easily exclude a
# subdirectory from a directory tree whose root is specified with the INPUT tag.
#
# Note that relative paths are relative to the directory from which doxygen is
# run.
EXCLUDE =
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded
# from the input.
# The default value is: NO.
EXCLUDE_SYMLINKS = NO
# If the value of the INPUT tag contains directories, you can use the
# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
# certain files from those directories.
#
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories for example use the pattern */test/*
EXCLUDE_PATTERNS =
# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
# (namespaces, classes, functions, etc.) that should be excluded from the
# output. The symbol name can be a fully qualified name, a word, or if the
# wildcard * is used, a substring. Examples: ANamespace, AClass,
# ANamespace::AClass, ANamespace::*Test
#
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories use the pattern */test/*
EXCLUDE_SYMBOLS =
# The EXAMPLE_PATH tag can be used to specify one or more files or directories
# that contain example code fragments that are included (see the \include
# command).
EXAMPLE_PATH =
# If the value of the EXAMPLE_PATH tag contains directories, you can use the
# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
# *.h) to filter out the source-files in the directories. If left blank all
# files are included.
EXAMPLE_PATTERNS = *
# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
# searched for input files to be used with the \include or \dontinclude commands
# irrespective of the value of the RECURSIVE tag.
# The default value is: NO.
EXAMPLE_RECURSIVE = NO
# The IMAGE_PATH tag can be used to specify one or more files or directories
# that contain images that are to be included in the documentation (see the
# \image command).
IMAGE_PATH =
# The INPUT_FILTER tag can be used to specify a program that doxygen should
# invoke to filter for each input file. Doxygen will invoke the filter program
# by executing (via popen()) the command:
#
#   <filter> <input-file>
#
# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
# name of an input file. Doxygen will then use the output that the filter
# program writes to standard output. If FILTER_PATTERNS is specified, this tag
# will be ignored.
#
# Note that the filter must not add or remove lines; it is applied before the
# code is scanned, but not when the output code is generated. If lines are added
# or removed, the anchors will not be placed correctly.
#
# Note that doxygen will use the data processed and written to standard output
# for further processing, therefore nothing else, like debug statements or used
# commands (so in case of a Windows batch file always use @echo OFF), should be
# written to standard output.
#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# properly processed by doxygen.
INPUT_FILTER =
# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
# basis. Doxygen will compare the file name with each pattern and apply the
# filter if there is a match. The filters are a list of the form: pattern=filter
# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
# patterns match the file name, INPUT_FILTER is applied.
#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# properly processed by doxygen.
FILTER_PATTERNS =
# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
# INPUT_FILTER) will also be used to filter the input files that are used for
# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
# The default value is: NO.
FILTER_SOURCE_FILES = NO
# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
# it is also possible to disable source filtering for a specific pattern using
# *.ext= (so without naming a filter).
# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
FILTER_SOURCE_PATTERNS =
# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
# is part of the input, its contents will be placed on the main page
# (index.html). This can be useful if you have a project on for instance GitHub
# and want to reuse the introduction page also for the doxygen output.
USE_MDFILE_AS_MAINPAGE =
# The Fortran standard specifies that for fixed formatted Fortran code all
# characters from position 72 are to be considered as comment. A common
# extension is to allow longer lines before the automatic comment starts. The
# setting FORTRAN_COMMENT_AFTER will also make it possible that longer lines can
# be processed before the automatic comment starts.
# Minimum value: 7, maximum value: 10000, default value: 72.
FORTRAN_COMMENT_AFTER = 72
#---------------------------------------------------------------------------
# Configuration options related to source browsing
#---------------------------------------------------------------------------
# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
# generated. Documented entities will be cross-referenced with these sources.
#
# Note: To get rid of all source code in the generated output, make sure that
# also VERBATIM_HEADERS is set to NO.
# The default value is: NO.
SOURCE_BROWSER = YES
# Setting the INLINE_SOURCES tag to YES will include the body of functions,
# classes and enums directly into the documentation.
# The default value is: NO.
INLINE_SOURCES = NO
# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
# special comment blocks from generated source code fragments. Normal C, C++ and
# Fortran comments will always remain visible.
# The default value is: YES.
STRIP_CODE_COMMENTS = YES
# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
# entity all documented functions referencing it will be listed.
# The default value is: NO.
REFERENCED_BY_RELATION = YES
# If the REFERENCES_RELATION tag is set to YES then for each documented function
# all documented entities called/used by that function will be listed.
# The default value is: NO.
REFERENCES_RELATION = YES
# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
# to YES then the hyperlinks from functions in REFERENCES_RELATION and
# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
# link to the documentation.
# The default value is: YES.
REFERENCES_LINK_SOURCE = YES
# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
# source code will show a tooltip with additional information such as prototype,
# brief description and links to the definition and documentation. Since this
# will make the HTML file larger and loading of large files a bit slower, you
# can opt to disable this feature.
# The default value is: YES.
# This tag requires that the tag SOURCE_BROWSER is set to YES.
SOURCE_TOOLTIPS = YES
# If the USE_HTAGS tag is set to YES then the references to source code will
# point to the HTML generated by the htags(1) tool instead of doxygen built-in
# source browser. The htags tool is part of GNU's global source tagging system
# (see https://www.gnu.org/software/global/global.html). You will need version
# 4.8.6 or higher.
#
# To use it do the following:
# - Install the latest version of global
# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file
# - Make sure the INPUT points to the root of the source tree
# - Run doxygen as normal
#
# Doxygen will invoke htags (and that will in turn invoke gtags), so these
# tools must be available from the command line (i.e. in the search path).
#
# The result: instead of the source browser generated by doxygen, the links to
# source code will now point to the output of htags.
# The default value is: NO.
# This tag requires that the tag SOURCE_BROWSER is set to YES.
USE_HTAGS = NO
# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
# verbatim copy of the header file for each class for which an include is
# specified. Set to NO to disable this.
# See also: Section \class.
# The default value is: YES.
VERBATIM_HEADERS = YES
# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the
# clang parser (see:
# http://clang.llvm.org/) for more accurate parsing at the cost of reduced
# performance. This can be particularly helpful with template rich C++ code for
# which doxygen's built-in parser lacks the necessary type information.
# Note: The availability of this option depends on whether or not doxygen was
# generated with the -Duse_libclang=ON option for CMake.
# The default value is: NO.
CLANG_ASSISTED_PARSING = NO
# If the CLANG_ASSISTED_PARSING tag is set to YES and the CLANG_ADD_INC_PATHS
# tag is set to YES then doxygen will add the directory of each input to the
# include path.
# The default value is: YES.
# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
CLANG_ADD_INC_PATHS = YES
# If clang assisted parsing is enabled you can provide the compiler with command
# line options that you would normally use when invoking the compiler. Note that
# the include paths will already be set by doxygen for the files and directories
# specified with INPUT and INCLUDE_PATH.
# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
CLANG_OPTIONS =
# If clang assisted parsing is enabled you can provide the clang parser with the
# path to the directory containing a file called compile_commands.json. This
# file is the compilation database (see:
# http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html) containing the
# options used when the source files were built. This is equivalent to
# specifying the -p option to a clang tool, such as clang-check. These options
# will then be passed to the parser. Any options specified with CLANG_OPTIONS
# will be added as well.
# Note: The availability of this option depends on whether or not doxygen was
# generated with the -Duse_libclang=ON option for CMake.
CLANG_DATABASE_PATH =
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
# compounds will be generated. Enable this if the project contains a lot of
# classes, structs, unions or interfaces.
# The default value is: YES.
ALPHABETICAL_INDEX = YES
# In case all classes in a project start with a common prefix, all classes will
# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
# can be used to specify a prefix (or a list of prefixes) that should be ignored
# while generating the index headers.
# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
IGNORE_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the HTML output
#---------------------------------------------------------------------------
# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
# The default value is: YES.
GENERATE_HTML = YES
# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
# it.
# The default directory is: html.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_OUTPUT = html
# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
# generated HTML page (for example: .htm, .php, .asp).
# The default value is: .html.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FILE_EXTENSION = .html
# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
# each generated HTML page. If the tag is left blank doxygen will generate a
# standard header.
#
# To get valid HTML, the header file must include any scripts and style sheets
# that doxygen needs, which depend on the configuration options used (e.g. the
# setting GENERATE_TREEVIEW). It is highly recommended to start with a
# default header using
# doxygen -w html new_header.html new_footer.html new_stylesheet.css
# YourConfigFile
# and then modify the file new_header.html. See also section "Doxygen usage"
# for information on how to generate the default header that doxygen normally
# uses.
# Note: The header is subject to change so you typically have to regenerate the
# default header when upgrading to a newer version of doxygen. For a description
# of the possible markers and block names see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
##HTML_HEADER = doxygen-data/head.html
HTML_HEADER =
# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
# generated HTML page. If the tag is left blank doxygen will generate a standard
# footer. See HTML_HEADER for more information on how to generate a default
# footer and what special commands can be used inside the footer. See also
# section "Doxygen usage" for information on how to generate the default footer
# that doxygen normally uses.
# This tag requires that the tag GENERATE_HTML is set to YES.
##HTML_FOOTER = doxygen-data/foot.html
HTML_FOOTER =
# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
# sheet that is used by each HTML page. It can be used to fine-tune the look of
# the HTML output. If left blank doxygen will generate a default style sheet.
# See also section "Doxygen usage" for information on how to generate the style
# sheet that doxygen normally uses.
# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
# it is more robust and this tag (HTML_STYLESHEET) will in the future become
# obsolete.
# This tag requires that the tag GENERATE_HTML is set to YES.
##HTML_STYLESHEET = doxygen-data/doxygen.css
HTML_STYLESHEET =
# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
# cascading style sheets that are included after the standard style sheets
# created by doxygen. Using this option one can overrule certain style aspects.
# This is preferred over using HTML_STYLESHEET since it does not replace the
# standard style sheet and is therefore more robust against future updates.
# Doxygen will copy the style sheet files to the output directory.
# Note: The order of the extra style sheet files is of importance (e.g. the last
# style sheet in the list overrules the setting of the previous ones in the
# list). For an example see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_EXTRA_STYLESHEET =
# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the HTML output directory. Note
# that these files will be copied to the base HTML output directory. Use the
# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
# files will be copied as-is; there are no commands or markers available.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_EXTRA_FILES =
# The HTML_COLORSTYLE tag can be used to specify if the generated HTML output
# should be rendered with a dark or light theme. Default setting AUTO_LIGHT
# enables light output unless the user preference is dark output. Other options
# are DARK to always use dark mode, LIGHT to always use light mode, AUTO_DARK to
# default to dark mode unless the user prefers light mode, and TOGGLE to let the
# user toggle between dark and light mode via a button.
# Possible values are: LIGHT Always generate light output., DARK Always generate
# dark output., AUTO_LIGHT Automatically set the mode according to the user
# preference, use light mode if no preference is set (the default)., AUTO_DARK
# Automatically set the mode according to the user preference, use dark mode if
# no preference is set. and TOGGLE Allow the user to switch between light and
# dark mode via a button.
# The default value is: AUTO_LIGHT.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE = AUTO_LIGHT
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
# will adjust the colors in the style sheet and background images according to
# this color. Hue is specified as an angle on a color-wheel, see
# https://en.wikipedia.org/wiki/Hue for more information. For instance the value
# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
# purple, and 360 is red again.
# Minimum value: 0, maximum value: 359, default value: 220.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_HUE = 220
# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
# in the HTML output. For a value of 0 the output will use gray-scales only. A
# value of 255 will produce the most vivid colors.
# Minimum value: 0, maximum value: 255, default value: 100.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_SAT = 100
# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
# luminance component of the colors in the HTML output. Values below 100
# gradually make the output lighter, whereas values above 100 make the output
# darker. The value divided by 100 is the actual gamma applied, so 80 represents
# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
# change the gamma.
# Minimum value: 40, maximum value: 240, default value: 80.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_GAMMA = 80
# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
# page will contain the date and time when the page was generated. Setting this
# to YES can help to show when doxygen was last run and thus if the
# documentation is up to date.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_TIMESTAMP = NO
# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML
# documentation will contain a main index with vertical navigation menus that
# are dynamically created via JavaScript. If disabled, the navigation index will
# consists of multiple levels of tabs that are statically embedded in every HTML
# page. Disable this option to support browsers that do not have JavaScript,
# like the Qt help browser.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_DYNAMIC_MENUS = YES
# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
# documentation will contain sections that can be hidden and shown after the
# page has loaded.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_DYNAMIC_SECTIONS = NO
# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
# shown in the various tree structured indices initially; the user can expand
# and collapse entries dynamically later on. Doxygen will expand the tree to
# such a level that at most the specified number of entries are visible (unless
# a fully collapsed tree already exceeds this amount). So setting the number of
# entries 1 will produce a full collapsed tree by default. 0 is a special value
# representing an infinite number of entries and will result in a full expanded
# tree by default.
# Minimum value: 0, maximum value: 9999, default value: 100.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_INDEX_NUM_ENTRIES = 100
# If the GENERATE_DOCSET tag is set to YES, additional index files will be
# generated that can be used as input for Apple's Xcode 3 integrated development
# environment (see:
# https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To
# create a documentation set, doxygen will generate a Makefile in the HTML
# output directory. Running make will produce the docset in that directory and
# running make install will install the docset in
# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy
# genXcode/_index.html for more information.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_DOCSET = NO
# This tag determines the name of the docset feed. A documentation feed provides
# an umbrella under which multiple documentation sets from a single provider
# (such as a company or product suite) can be grouped.
# The default value is: Doxygen generated docs.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_FEEDNAME = "Doxygen generated docs"
# This tag determines the URL of the docset feed. A documentation feed provides
# an umbrella under which multiple documentation sets from a single provider
# (such as a company or product suite) can be grouped.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_FEEDURL =
# This tag specifies a string that should uniquely identify the documentation
# set bundle. This should be a reverse domain-name style string, e.g.
# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_BUNDLE_ID = org.doxygen.Project
# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
# the documentation publisher. This should be a reverse domain-name style
# string, e.g. com.mycompany.MyDocSet.documentation.
# The default value is: org.doxygen.Publisher.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_PUBLISHER_ID = org.doxygen.Publisher
# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
# The default value is: Publisher.
# This tag requires that the tag GENERATE_DOCSET is set to YES.
DOCSET_PUBLISHER_NAME = Publisher
# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
# on Windows. In the beginning of 2021 Microsoft took the original page, with
# a.o. the download links, offline (the HTML help workshop was already many
# years in maintenance mode). You can download the HTML help workshop from the web
# archives at Installation executable (see:
# http://web.archive.org/web/20160201063255/http://download.microsoft.com/downlo
# ad/0/A/9/0A939EF6-E31C-430F-A3DF-DFAE7960D564/htmlhelp.exe).
#
# The HTML Help Workshop contains a compiler that can convert all HTML output
# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
# files are now used as the Windows 98 help format, and will replace the old
# Windows help format (.hlp) on all Windows platforms in the future. Compressed
# HTML files also contain an index, a table of contents, and you can search for
# words in the documentation. The HTML workshop also contains a viewer for
# compressed HTML files.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_HTMLHELP = NO
# The CHM_FILE tag can be used to specify the file name of the resulting .chm
# file. You can add a path in front of the file if the result should not be
# written to the html output directory.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
CHM_FILE =
# The HHC_LOCATION tag can be used to specify the location (absolute path
# including file name) of the HTML help compiler (hhc.exe). If non-empty,
# doxygen will try to run the HTML help compiler on the generated index.hhp.
# The file has to be specified with full path.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
HHC_LOCATION =
# The GENERATE_CHI flag controls if a separate .chi index file is generated
# (YES) or that it should be included in the main .chm file (NO).
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
GENERATE_CHI = NO
# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
# and project file content.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
CHM_INDEX_ENCODING =
# The BINARY_TOC flag controls whether a binary table of contents is generated
# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
# enables the Previous and Next buttons.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
BINARY_TOC = NO
# The TOC_EXPAND flag can be set to YES to add extra items for group members to
# the table of contents of the HTML help documentation and to the tree view.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
TOC_EXPAND = NO
# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
# (.qch) of the generated HTML documentation.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_QHP = NO
# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
# the file name of the resulting .qch file. The path specified is relative to
# the HTML output folder.
# This tag requires that the tag GENERATE_QHP is set to YES.
QCH_FILE =
# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
# Project output. For more information please see Qt Help Project / Namespace
# (see:
# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace).
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_NAMESPACE =
# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
# Help Project output. For more information please see Qt Help Project / Virtual
# Folders (see:
# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders).
# The default value is: doc.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_VIRTUAL_FOLDER = doc
# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
# filter to add. For more information please see Qt Help Project / Custom
# Filters (see:
# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_CUST_FILTER_NAME =
# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
# custom filter to add. For more information please see Qt Help Project / Custom
# Filters (see:
# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_CUST_FILTER_ATTRS =
# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
# project's filter section matches. Qt Help Project / Filter Attributes (see:
# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_SECT_FILTER_ATTRS =
# The QHG_LOCATION tag can be used to specify the location (absolute path
# including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to
# run qhelpgenerator on the generated .qhp file.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHG_LOCATION =
# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
# generated, together with the HTML files, they form an Eclipse help plugin. To
# install this plugin and make it available under the help contents menu in
# Eclipse, the contents of the directory containing the HTML and XML files needs
# to be copied into the plugins directory of eclipse. The name of the directory
# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
# After copying Eclipse needs to be restarted before the help appears.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_ECLIPSEHELP = NO
# A unique identifier for the Eclipse help plugin. When installing the plugin
# the directory name containing the HTML and XML files should also have this
# name. Each documentation set should have its own identifier.
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
ECLIPSE_DOC_ID = org.doxygen.Project
# If you want full control over the layout of the generated HTML pages it might
# be necessary to disable the index and replace it with your own. The
# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
# of each HTML page. A value of NO enables the index and the value YES disables
# it. Since the tabs in the index contain the same information as the navigation
# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
DISABLE_INDEX = NO
# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
# structure should be generated to display hierarchical information. If the tag
# value is set to YES, a side panel will be generated containing a tree-like
# index structure (just like the one that is generated for HTML Help). For this
# to work a browser that supports JavaScript, DHTML, CSS and frames is required
# (i.e. any modern browser). Windows users are probably better off using the
# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
# further fine tune the look of the index (see "Fine-tuning the output"). As an
# example, the default style sheet generated by doxygen has an example that
# shows how to put an image at the root of the tree instead of the PROJECT_NAME.
# Since the tree basically has the same information as the tab index, you could
# consider setting DISABLE_INDEX to YES when enabling this option.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_TREEVIEW = YES
# When both GENERATE_TREEVIEW and DISABLE_INDEX are set to YES, then the
# FULL_SIDEBAR option determines if the side bar is limited to only the treeview
# area (value NO) or if it should extend to the full height of the window (value
# YES). Setting this to YES gives a layout similar to
# https://docs.readthedocs.io with more room for contents, but less room for the
# project logo, title, and description. If either GENERATE_TREEVIEW or
# DISABLE_INDEX is set to NO, this option has no effect.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
FULL_SIDEBAR = NO
# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
# doxygen will group on one line in the generated HTML documentation.
#
# Note that a value of 0 will completely suppress the enum values from appearing
# in the overview section.
# Minimum value: 0, maximum value: 20, default value: 4.
# This tag requires that the tag GENERATE_HTML is set to YES.
ENUM_VALUES_PER_LINE = 4
# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
# to set the initial width (in pixels) of the frame in which the tree is shown.
# Minimum value: 0, maximum value: 1500, default value: 250.
# This tag requires that the tag GENERATE_HTML is set to YES.
TREEVIEW_WIDTH = 250
# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
# external symbols imported via tag files in a separate window.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
EXT_LINKS_IN_WINDOW = NO
# If the OBFUSCATE_EMAILS tag is set to YES, doxygen will obfuscate email
# addresses.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
OBFUSCATE_EMAILS = YES
# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg
# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see
# https://inkscape.org) to generate formulas as SVG images instead of PNGs for
# the HTML output. These images will generally look nicer at scaled resolutions.
# Possible values are: png (the default) and svg (looks nicer but requires the
# pdf2svg or inkscape tool).
# The default value is: png.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FORMULA_FORMAT = png
# Use this tag to change the font size of LaTeX formulas included as images in
# the HTML documentation. When you change the font size after a successful
# doxygen run you need to manually remove any form_*.png images from the HTML
# output directory to force them to be regenerated.
# Minimum value: 8, maximum value: 50, default value: 10.
# This tag requires that the tag GENERATE_HTML is set to YES.
FORMULA_FONTSIZE = 10
# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands
# to create new LaTeX commands to be used in formulas as building blocks. See
# the section "Including formulas" for details.
FORMULA_MACROFILE =
# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
# https://www.mathjax.org) which uses client side JavaScript for the rendering
# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
# installed or if you want the formulas to look prettier in the HTML output. When
# enabled you may also need to install MathJax separately and configure the path
# to it using the MATHJAX_RELPATH option.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
USE_MATHJAX = NO
# With MATHJAX_VERSION it is possible to specify the MathJax version to be used.
# Note that the different versions of MathJax have different requirements with
# regards to the different settings, so it is possible that also other MathJax
# settings have to be changed when switching between the different MathJax
# versions.
# Possible values are: MathJax_2 and MathJax_3.
# The default value is: MathJax_2.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_VERSION = MathJax_2
# When MathJax is enabled you can set the default output format to be used for
# the MathJax output. For more details about the output format see MathJax
# version 2 (see:
# http://docs.mathjax.org/en/v2.7-latest/output.html) and MathJax version 3
# (see:
# http://docs.mathjax.org/en/latest/web/components/output.html).
# Possible values are: HTML-CSS (which is slower, but has the best
# compatibility. This is the name for Mathjax version 2, for MathJax version 3
# this will be translated into chtml), NativeMML (i.e. MathML. Only supported
# for MathJax 2. For MathJax version 3 chtml will be used instead.), chtml (This
# is the name for Mathjax version 3, for MathJax version 2 this will be
# translated into HTML-CSS) and SVG.
# The default value is: HTML-CSS.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_FORMAT = HTML-CSS
# When MathJax is enabled you need to specify the location relative to the HTML
# output directory using the MATHJAX_RELPATH option. The destination directory
# should contain the MathJax.js script. For instance, if the mathjax directory
# is located at the same level as the HTML output directory, then
# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
# Content Delivery Network so you can quickly see the result without installing
# MathJax. However, it is strongly recommended to install a local copy of
# MathJax from https://www.mathjax.org before deployment. The default value is:
# - in case of MathJax version 2: https://cdn.jsdelivr.net/npm/mathjax@2
# - in case of MathJax version 3: https://cdn.jsdelivr.net/npm/mathjax@3
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_RELPATH =
# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
# extension names that should be enabled during MathJax rendering. For example
# for MathJax version 2 (see
# https://docs.mathjax.org/en/v2.7-latest/tex.html#tex-and-latex-extensions):
# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
# For example for MathJax version 3 (see
# http://docs.mathjax.org/en/latest/input/tex/extensions/index.html):
# MATHJAX_EXTENSIONS = ams
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_EXTENSIONS =
# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
# of code that will be used on startup of the MathJax code. See the MathJax site
# (see:
# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an
# example see the documentation.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_CODEFILE =
# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
# the HTML output. The underlying search engine uses javascript and DHTML and
# should work on any modern browser. Note that when using HTML help
# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
# there is already a search function so this one should typically be disabled.
# For large projects the javascript based search engine can be slow, then
# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
# search using the keyboard; to jump to the search box use <access key> + S
# (what the <access key> is depends on the OS and browser, but it is typically
# , /