cgit-0.11.2/0000755000175000017500000000000012510746113012771 5ustar formorerformorercgit-0.11.2/gen-version.sh0000755000175000017500000000066212500600243015560 0ustar formorerformorer#!/bin/sh # Get version-info specified in Makefile V=$1 # Use `git describe` to get current version if we're inside a git repo if test "$(git rev-parse --git-dir 2>/dev/null)" = '.git' then V=$(git describe --abbrev=4 HEAD 2>/dev/null) fi new="CGIT_VERSION = $V" old=$(cat VERSION 2>/dev/null) # Exit if VERSION is uptodate test "$old" = "$new" && exit 0 # Update VERSION with new version-info echo "$new" > VERSION cat VERSION cgit-0.11.2/ui-repolist.c0000644000175000017500000002010712500600243015401 0ustar formorerformorer/* ui-repolist.c: functions for generating the repolist page * * Copyright (C) 2006-2014 cgit Development Team * * Licensed under GNU General Public License v2 * (see COPYING for full license text) */ #include "cgit.h" #include "ui-repolist.h" #include "html.h" #include "ui-shared.h" #include static time_t read_agefile(char *path) { time_t result; size_t size; char *buf; struct strbuf date_buf = STRBUF_INIT; if (readfile(path, &buf, &size)) return -1; if (parse_date(buf, &date_buf) == 0) result = strtoul(date_buf.buf, NULL, 10); else result = 0; free(buf); strbuf_release(&date_buf); return result; } static int get_repo_modtime(const struct cgit_repo *repo, time_t *mtime) { struct strbuf path = STRBUF_INIT; struct stat s; struct cgit_repo *r = (struct cgit_repo *)repo; if (repo->mtime != -1) { *mtime = repo->mtime; return 1; } strbuf_addf(&path, "%s/%s", repo->path, ctx.cfg.agefile); if (stat(path.buf, &s) == 0) { *mtime = read_agefile(path.buf); if (*mtime) { r->mtime = *mtime; goto end; } } strbuf_reset(&path); strbuf_addf(&path, "%s/refs/heads/%s", repo->path, repo->defbranch ? 
repo->defbranch : "master"); if (stat(path.buf, &s) == 0) { *mtime = s.st_mtime; r->mtime = *mtime; goto end; } strbuf_reset(&path); strbuf_addf(&path, "%s/%s", repo->path, "packed-refs"); if (stat(path.buf, &s) == 0) { *mtime = s.st_mtime; r->mtime = *mtime; goto end; } *mtime = 0; r->mtime = *mtime; end: strbuf_release(&path); return (r->mtime != 0); } static void print_modtime(struct cgit_repo *repo) { time_t t; if (get_repo_modtime(repo, &t)) cgit_print_age(t, -1, NULL); } static int is_match(struct cgit_repo *repo) { if (!ctx.qry.search) return 1; if (repo->url && strcasestr(repo->url, ctx.qry.search)) return 1; if (repo->name && strcasestr(repo->name, ctx.qry.search)) return 1; if (repo->desc && strcasestr(repo->desc, ctx.qry.search)) return 1; if (repo->owner && strcasestr(repo->owner, ctx.qry.search)) return 1; return 0; } static int is_in_url(struct cgit_repo *repo) { if (!ctx.qry.url) return 1; if (repo->url && starts_with(repo->url, ctx.qry.url)) return 1; return 0; } static void print_sort_header(const char *title, const char *sort) { html("%s", title); } static void print_header(void) { html(""); print_sort_header("Name", "name"); print_sort_header("Description", "desc"); if (ctx.cfg.enable_index_owner) print_sort_header("Owner", "owner"); print_sort_header("Idle", "idle"); if (ctx.cfg.enable_index_links) html("Links"); html("\n"); } static void print_pager(int items, int pagelen, char *search, char *sort) { int i, ofs; char *class = NULL; html(""); } static int cmp(const char *s1, const char *s2) { if (s1 && s2) { if (ctx.cfg.case_sensitive_sort) return strcmp(s1, s2); else return strcasecmp(s1, s2); } if (s1 && !s2) return -1; if (s2 && !s1) return 1; return 0; } static int sort_section(const void *a, const void *b) { const struct cgit_repo *r1 = a; const struct cgit_repo *r2 = b; int result; time_t t; result = cmp(r1->section, r2->section); if (!result) { if (!strcmp(ctx.cfg.repository_sort, "age")) { // get_repo_modtime caches the value in 
r->mtime, so we don't // have to worry about inefficiencies here. if (get_repo_modtime(r1, &t) && get_repo_modtime(r2, &t)) result = r2->mtime - r1->mtime; } if (!result) result = cmp(r1->name, r2->name); } return result; } static int sort_name(const void *a, const void *b) { const struct cgit_repo *r1 = a; const struct cgit_repo *r2 = b; return cmp(r1->name, r2->name); } static int sort_desc(const void *a, const void *b) { const struct cgit_repo *r1 = a; const struct cgit_repo *r2 = b; return cmp(r1->desc, r2->desc); } static int sort_owner(const void *a, const void *b) { const struct cgit_repo *r1 = a; const struct cgit_repo *r2 = b; return cmp(r1->owner, r2->owner); } static int sort_idle(const void *a, const void *b) { const struct cgit_repo *r1 = a; const struct cgit_repo *r2 = b; time_t t1, t2; t1 = t2 = 0; get_repo_modtime(r1, &t1); get_repo_modtime(r2, &t2); return t2 - t1; } struct sortcolumn { const char *name; int (*fn)(const void *a, const void *b); }; static const struct sortcolumn sortcolumn[] = { {"section", sort_section}, {"name", sort_name}, {"desc", sort_desc}, {"owner", sort_owner}, {"idle", sort_idle}, {NULL, NULL} }; static int sort_repolist(char *field) { const struct sortcolumn *column; for (column = &sortcolumn[0]; column->name; column++) { if (strcmp(field, column->name)) continue; qsort(cgit_repolist.repos, cgit_repolist.count, sizeof(struct cgit_repo), column->fn); return 1; } return 0; } void cgit_print_repolist(void) { int i, columns = 3, hits = 0, header = 0; char *last_section = NULL; char *section; int sorted = 0; if (ctx.cfg.enable_index_links) ++columns; if (ctx.cfg.enable_index_owner) ++columns; ctx.page.title = ctx.cfg.root_title; cgit_print_http_headers(); cgit_print_docstart(); cgit_print_pageheader(); if (ctx.cfg.index_header) html_include(ctx.cfg.index_header); if (ctx.qry.sort) sorted = sort_repolist(ctx.qry.sort); else if (ctx.cfg.section_sort) sort_repolist("section"); html(""); for (i = 0; i < cgit_repolist.count; i++) { 
ctx.repo = &cgit_repolist.repos[i]; if (ctx.repo->hide || ctx.repo->ignore) continue; if (!(is_match(ctx.repo) && is_in_url(ctx.repo))) continue; hits++; if (hits <= ctx.qry.ofs) continue; if (hits > ctx.qry.ofs + ctx.cfg.max_repo_count) continue; if (!header++) print_header(); section = ctx.repo->section; if (section && !strcmp(section, "")) section = NULL; if (!sorted && ((last_section == NULL && section != NULL) || (last_section != NULL && section == NULL) || (last_section != NULL && section != NULL && strcmp(section, last_section)))) { htmlf(""); last_section = section; } htmlf(""); if (ctx.cfg.enable_index_links) { html(""); } html("\n"); } html("
", columns); html_txt(section); html("
", !sorted && section ? "sublevel-repo" : "toplevel-repo"); cgit_summary_link(ctx.repo->name, ctx.repo->name, NULL, NULL); html(""); html_link_open(cgit_repourl(ctx.repo->url), NULL, NULL); html_ntxt(ctx.cfg.max_repodesc_len, ctx.repo->desc); html_link_close(); html(""); if (ctx.cfg.enable_index_owner) { if (ctx.repo->owner_filter) { cgit_open_filter(ctx.repo->owner_filter); html_txt(ctx.repo->owner); cgit_close_filter(ctx.repo->owner_filter); } else { html(""); html_txt(ctx.repo->owner); html(""); } html(""); } print_modtime(ctx.repo); html(""); cgit_summary_link("summary", NULL, "button", NULL); cgit_log_link("log", NULL, "button", NULL, NULL, NULL, 0, NULL, NULL, ctx.qry.showmsg); cgit_tree_link("tree", NULL, "button", NULL, NULL, NULL); html("
"); if (!hits) cgit_print_error("No repositories found"); else if (hits > ctx.cfg.max_repo_count) print_pager(hits, ctx.cfg.max_repo_count, ctx.qry.search, ctx.qry.sort); cgit_print_docend(); } void cgit_print_site_readme(void) { if (!ctx.cfg.root_readme) return; cgit_open_filter(ctx.cfg.about_filter, ctx.cfg.root_readme); html_include(ctx.cfg.root_readme); cgit_close_filter(ctx.cfg.about_filter); } cgit-0.11.2/ui-tree.c0000644000175000017500000001613212500600243014502 0ustar formorerformorer/* ui-tree.c: functions for tree output * * Copyright (C) 2006-2014 cgit Development Team * * Licensed under GNU General Public License v2 * (see COPYING for full license text) */ #include #include "cgit.h" #include "ui-tree.h" #include "html.h" #include "ui-shared.h" struct walk_tree_context { char *curr_rev; char *match_path; int state; }; static void print_text_buffer(const char *name, char *buf, unsigned long size) { unsigned long lineno, idx; const char *numberfmt = "%1$d\n"; html("\n"); if (ctx.cfg.enable_tree_linenumbers) { html("\n"); } else { html("\n"); } if (ctx.repo->source_filter) { char *filter_arg = xstrdup(name); html("
");
		idx = 0;
		lineno = 0;

		if (size) {
			htmlf(numberfmt, ++lineno);
			while (idx < size - 1) { // skip absolute last newline
				if (buf[idx] == '\n')
					htmlf(numberfmt, ++lineno);
				idx++;
			}
		}
		html("
");
		cgit_open_filter(ctx.repo->source_filter, filter_arg);
		html_raw(buf, size);
		cgit_close_filter(ctx.repo->source_filter);
		free(filter_arg);
		html("
\n"); return; } html("
");
	html_txt(buf);
	html("
\n"); } #define ROWLEN 32 static void print_binary_buffer(char *buf, unsigned long size) { unsigned long ofs, idx; static char ascii[ROWLEN + 1]; html("\n"); html(""); for (ofs = 0; ofs < size; ofs += ROWLEN, buf += ROWLEN) { htmlf("\n"); } html("
ofshex dumpascii
%04lx", ofs); for (idx = 0; idx < ROWLEN && ofs + idx < size; idx++) htmlf("%*s%02x", idx == 16 ? 4 : 1, "", buf[idx] & 0xff); html(" "); for (idx = 0; idx < ROWLEN && ofs + idx < size; idx++) ascii[idx] = isgraph(buf[idx]) ? buf[idx] : '.'; ascii[idx] = '\0'; html_txt(ascii); html("
\n"); } static void print_object(const unsigned char *sha1, char *path, const char *basename, const char *rev) { enum object_type type; char *buf; unsigned long size; type = sha1_object_info(sha1, &size); if (type == OBJ_BAD) { cgit_print_error("Bad object name: %s", sha1_to_hex(sha1)); return; } buf = read_sha1_file(sha1, &type, &size); if (!buf) { cgit_print_error("Error reading object %s", sha1_to_hex(sha1)); return; } htmlf("blob: %s (", sha1_to_hex(sha1)); cgit_plain_link("plain", NULL, NULL, ctx.qry.head, rev, path); html(")\n"); if (ctx.cfg.max_blob_size && size / 1024 > ctx.cfg.max_blob_size) { htmlf("
blob size (%ldKB) exceeds display size limit (%dKB).
", size / 1024, ctx.cfg.max_blob_size); return; } if (buffer_is_binary(buf, size)) print_binary_buffer(buf, size); else print_text_buffer(basename, buf, size); } static int ls_item(const unsigned char *sha1, struct strbuf *base, const char *pathname, unsigned mode, int stage, void *cbdata) { struct walk_tree_context *walk_tree_ctx = cbdata; char *name; struct strbuf fullpath = STRBUF_INIT; struct strbuf class = STRBUF_INIT; enum object_type type; unsigned long size = 0; name = xstrdup(pathname); strbuf_addf(&fullpath, "%s%s%s", ctx.qry.path ? ctx.qry.path : "", ctx.qry.path ? "/" : "", name); if (!S_ISGITLINK(mode)) { type = sha1_object_info(sha1, &size); if (type == OBJ_BAD) { htmlf("Bad object: %s %s", name, sha1_to_hex(sha1)); return 0; } } html(""); cgit_print_filemode(mode); html(""); if (S_ISGITLINK(mode)) { cgit_submodule_link("ls-mod", fullpath.buf, sha1_to_hex(sha1)); } else if (S_ISDIR(mode)) { cgit_tree_link(name, NULL, "ls-dir", ctx.qry.head, walk_tree_ctx->curr_rev, fullpath.buf); } else { char *ext = strrchr(name, '.'); strbuf_addstr(&class, "ls-blob"); if (ext) strbuf_addf(&class, " %s", ext + 1); cgit_tree_link(name, NULL, class.buf, ctx.qry.head, walk_tree_ctx->curr_rev, fullpath.buf); } htmlf("%li", size); html(""); cgit_log_link("log", NULL, "button", ctx.qry.head, walk_tree_ctx->curr_rev, fullpath.buf, 0, NULL, NULL, ctx.qry.showmsg); if (ctx.repo->max_stats) cgit_stats_link("stats", NULL, "button", ctx.qry.head, fullpath.buf); if (!S_ISGITLINK(mode)) cgit_plain_link("plain", NULL, "button", ctx.qry.head, walk_tree_ctx->curr_rev, fullpath.buf); html("\n"); free(name); strbuf_release(&fullpath); strbuf_release(&class); return 0; } static void ls_head(void) { html("\n"); html(""); html(""); html(""); html(""); html("\n"); } static void ls_tail(void) { html("
ModeNameSize"); html("
\n"); } static void ls_tree(const unsigned char *sha1, char *path, struct walk_tree_context *walk_tree_ctx) { struct tree *tree; struct pathspec paths = { .nr = 0 }; tree = parse_tree_indirect(sha1); if (!tree) { cgit_print_error("Not a tree object: %s", sha1_to_hex(sha1)); return; } ls_head(); read_tree_recursive(tree, "", 0, 1, &paths, ls_item, walk_tree_ctx); ls_tail(); } static int walk_tree(const unsigned char *sha1, struct strbuf *base, const char *pathname, unsigned mode, int stage, void *cbdata) { struct walk_tree_context *walk_tree_ctx = cbdata; static char buffer[PATH_MAX]; if (walk_tree_ctx->state == 0) { memcpy(buffer, base->buf, base->len); strcpy(buffer + base->len, pathname); if (strcmp(walk_tree_ctx->match_path, buffer)) return READ_TREE_RECURSIVE; if (S_ISDIR(mode)) { walk_tree_ctx->state = 1; ls_head(); return READ_TREE_RECURSIVE; } else { print_object(sha1, buffer, pathname, walk_tree_ctx->curr_rev); return 0; } } ls_item(sha1, base, pathname, mode, stage, walk_tree_ctx); return 0; } /* * Show a tree or a blob * rev: the commit pointing at the root tree object * path: path to tree or blob */ void cgit_print_tree(const char *rev, char *path) { unsigned char sha1[20]; struct commit *commit; struct pathspec_item path_items = { .match = path, .len = path ? strlen(path) : 0 }; struct pathspec paths = { .nr = path ? 
1 : 0, .items = &path_items }; struct walk_tree_context walk_tree_ctx = { .match_path = path, .state = 0 }; if (!rev) rev = ctx.qry.head; if (get_sha1(rev, sha1)) { cgit_print_error("Invalid revision name: %s", rev); return; } commit = lookup_commit_reference(sha1); if (!commit || parse_commit(commit)) { cgit_print_error("Invalid commit reference: %s", rev); return; } walk_tree_ctx.curr_rev = xstrdup(rev); if (path == NULL) { ls_tree(commit->tree->object.sha1, NULL, &walk_tree_ctx); goto cleanup; } read_tree_recursive(commit->tree, "", 0, 0, &paths, walk_tree, &walk_tree_ctx); if (walk_tree_ctx.state == 1) ls_tail(); cleanup: free(walk_tree_ctx.curr_rev); } cgit-0.11.2/html.c0000644000175000017500000002001512500600243014067 0ustar formorerformorer/* html.c: helper functions for html output * * Copyright (C) 2006-2014 cgit Development Team * * Licensed under GNU General Public License v2 * (see COPYING for full license text) */ #include "cgit.h" #include "html.h" #include #include #include #include #include #include /* Percent-encoding of each character, except: a-zA-Z0-9!$()*,./:;@- */ static const char* url_escape_table[256] = { "%00", "%01", "%02", "%03", "%04", "%05", "%06", "%07", "%08", "%09", "%0a", "%0b", "%0c", "%0d", "%0e", "%0f", "%10", "%11", "%12", "%13", "%14", "%15", "%16", "%17", "%18", "%19", "%1a", "%1b", "%1c", "%1d", "%1e", "%1f", "%20", NULL, "%22", "%23", NULL, "%25", "%26", "%27", NULL, NULL, NULL, "%2b", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "%3c", "%3d", "%3e", "%3f", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "%5c", NULL, "%5e", NULL, "%60", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "%7b", "%7c", "%7d", NULL, "%7f", "%80", "%81", "%82", "%83", 
"%84", "%85", "%86", "%87", "%88", "%89", "%8a", "%8b", "%8c", "%8d", "%8e", "%8f", "%90", "%91", "%92", "%93", "%94", "%95", "%96", "%97", "%98", "%99", "%9a", "%9b", "%9c", "%9d", "%9e", "%9f", "%a0", "%a1", "%a2", "%a3", "%a4", "%a5", "%a6", "%a7", "%a8", "%a9", "%aa", "%ab", "%ac", "%ad", "%ae", "%af", "%b0", "%b1", "%b2", "%b3", "%b4", "%b5", "%b6", "%b7", "%b8", "%b9", "%ba", "%bb", "%bc", "%bd", "%be", "%bf", "%c0", "%c1", "%c2", "%c3", "%c4", "%c5", "%c6", "%c7", "%c8", "%c9", "%ca", "%cb", "%cc", "%cd", "%ce", "%cf", "%d0", "%d1", "%d2", "%d3", "%d4", "%d5", "%d6", "%d7", "%d8", "%d9", "%da", "%db", "%dc", "%dd", "%de", "%df", "%e0", "%e1", "%e2", "%e3", "%e4", "%e5", "%e6", "%e7", "%e8", "%e9", "%ea", "%eb", "%ec", "%ed", "%ee", "%ef", "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7", "%f8", "%f9", "%fa", "%fb", "%fc", "%fd", "%fe", "%ff" }; char *fmt(const char *format, ...) { static char buf[8][1024]; static int bufidx; int len; va_list args; bufidx++; bufidx &= 7; va_start(args, format); len = vsnprintf(buf[bufidx], sizeof(buf[bufidx]), format, args); va_end(args); if (len > sizeof(buf[bufidx])) { fprintf(stderr, "[html.c] string truncated: %s\n", format); exit(1); } return buf[bufidx]; } char *fmtalloc(const char *format, ...) { struct strbuf sb = STRBUF_INIT; va_list args; va_start(args, format); strbuf_vaddf(&sb, format, args); va_end(args); return strbuf_detach(&sb, NULL); } void html_raw(const char *data, size_t size) { if (write(STDOUT_FILENO, data, size) != size) die_errno("write error on html output"); } void html(const char *txt) { html_raw(txt, strlen(txt)); } void htmlf(const char *format, ...) { va_list args; struct strbuf buf = STRBUF_INIT; va_start(args, format); strbuf_vaddf(&buf, format, args); va_end(args); html(buf.buf); strbuf_release(&buf); } void html_txtf(const char *format, ...) 
{ va_list args; va_start(args, format); html_vtxtf(format, args); va_end(args); } void html_vtxtf(const char *format, va_list ap) { va_list cp; struct strbuf buf = STRBUF_INIT; va_copy(cp, ap); strbuf_vaddf(&buf, format, cp); va_end(cp); html_txt(buf.buf); strbuf_release(&buf); } void html_status(int code, const char *msg, int more_headers) { htmlf("Status: %d %s\n", code, msg); if (!more_headers) html("\n"); } void html_txt(const char *txt) { const char *t = txt; while (t && *t) { int c = *t; if (c == '<' || c == '>' || c == '&') { html_raw(txt, t - txt); if (c == '>') html(">"); else if (c == '<') html("<"); else if (c == '&') html("&"); txt = t + 1; } t++; } if (t != txt) html(txt); } void html_ntxt(int len, const char *txt) { const char *t = txt; while (t && *t && len--) { int c = *t; if (c == '<' || c == '>' || c == '&') { html_raw(txt, t - txt); if (c == '>') html(">"); else if (c == '<') html("<"); else if (c == '&') html("&"); txt = t + 1; } t++; } if (t != txt) html_raw(txt, t - txt); if (len < 0) html("..."); } void html_attrf(const char *fmt, ...) 
{ va_list ap; struct strbuf sb = STRBUF_INIT; va_start(ap, fmt); strbuf_vaddf(&sb, fmt, ap); va_end(ap); html_attr(sb.buf); strbuf_release(&sb); } void html_attr(const char *txt) { const char *t = txt; while (t && *t) { int c = *t; if (c == '<' || c == '>' || c == '\'' || c == '\"' || c == '&') { html_raw(txt, t - txt); if (c == '>') html(">"); else if (c == '<') html("<"); else if (c == '\'') html("'"); else if (c == '"') html("""); else if (c == '&') html("&"); txt = t + 1; } t++; } if (t != txt) html(txt); } void html_url_path(const char *txt) { const char *t = txt; while (t && *t) { unsigned char c = *t; const char *e = url_escape_table[c]; if (e && c != '+' && c != '&') { html_raw(txt, t - txt); html(e); txt = t + 1; } t++; } if (t != txt) html(txt); } void html_url_arg(const char *txt) { const char *t = txt; while (t && *t) { unsigned char c = *t; const char *e = url_escape_table[c]; if (c == ' ') e = "+"; if (e) { html_raw(txt, t - txt); html(e); txt = t + 1; } t++; } if (t != txt) html(txt); } void html_hidden(const char *name, const char *value) { html(""); } void html_option(const char *value, const char *text, const char *selected_value) { html("\n"); } void html_intoption(int value, const char *text, int selected_value) { htmlf(""); } void html_link_open(const char *url, const char *title, const char *class) { html(""); } void html_link_close(void) { html(""); } void html_fileperm(unsigned short mode) { htmlf("%c%c%c", (mode & 4 ? 'r' : '-'), (mode & 2 ? 'w' : '-'), (mode & 1 ? 
'x' : '-')); } int html_include(const char *filename) { FILE *f; char buf[4096]; size_t len; if (!(f = fopen(filename, "r"))) { fprintf(stderr, "[cgit] Failed to include file %s: %s (%d).\n", filename, strerror(errno), errno); return -1; } while ((len = fread(buf, 1, 4096, f)) > 0) html_raw(buf, len); fclose(f); return 0; } static int hextoint(char c) { if (c >= 'a' && c <= 'f') return 10 + c - 'a'; else if (c >= 'A' && c <= 'F') return 10 + c - 'A'; else if (c >= '0' && c <= '9') return c - '0'; else return -1; } static char *convert_query_hexchar(char *txt) { int d1, d2, n; n = strlen(txt); if (n < 3) { *txt = '\0'; return txt-1; } d1 = hextoint(*(txt + 1)); d2 = hextoint(*(txt + 2)); if (d1 < 0 || d2 < 0) { memmove(txt, txt + 3, n - 2); return txt-1; } else { *txt = d1 * 16 + d2; memmove(txt + 1, txt + 3, n - 2); return txt; } } int http_parse_querystring(const char *txt_, void (*fn)(const char *name, const char *value)) { char *o, *t, *txt, *value = NULL, c; if (!txt_) return 0; o = t = txt = xstrdup(txt_); while ((c=*t) != '\0') { if (c == '=') { *t = '\0'; value = t + 1; } else if (c == '+') { *t = ' '; } else if (c == '%') { t = convert_query_hexchar(t); } else if (c == '&') { *t = '\0'; (*fn)(txt, value); txt = t + 1; value = NULL; } t++; } if (t != txt) (*fn)(txt, value); free(o); return 0; } cgit-0.11.2/COPYING0000644000175000017500000004313112500600243014016 0ustar formorerformorer GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. 
This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. 
We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. 
You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. 
But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. 
For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. 
Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. 
Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. 
Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. 
Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. 
If this is what you want to do, use the GNU Library General Public License instead of this License. cgit-0.11.2/ui-commit.c0000644000175000017500000001017212500600243015031 0ustar formorerformorer/* ui-commit.c: generate commit view * * Copyright (C) 2006-2014 cgit Development Team * * Licensed under GNU General Public License v2 * (see COPYING for full license text) */ #include "cgit.h" #include "ui-commit.h" #include "html.h" #include "ui-shared.h" #include "ui-diff.h" #include "ui-log.h" void cgit_print_commit(char *hex, const char *prefix) { struct commit *commit, *parent; struct commitinfo *info, *parent_info; struct commit_list *p; struct strbuf notes = STRBUF_INIT; unsigned char sha1[20]; char *tmp, *tmp2; int parents = 0; if (!hex) hex = ctx.qry.head; if (get_sha1(hex, sha1)) { cgit_print_error("Bad object id: %s", hex); return; } commit = lookup_commit_reference(sha1); if (!commit) { cgit_print_error("Bad commit reference: %s", hex); return; } info = cgit_parse_commit(commit); format_display_notes(sha1, ¬es, PAGE_ENCODING, 0); load_ref_decorations(DECORATE_FULL_REFS); cgit_print_diff_ctrls(); html("\n"); html("\n"); html("\n"); html("\n"); html("\n"); for (p = commit->parents; p; p = p->next) { parent = lookup_commit_reference(p->item->object.sha1); if (!parent) { html(""); continue; } html("" ""); parents++; } if (ctx.repo->snapshots) { html(""); } html("
author"); cgit_open_filter(ctx.repo->email_filter, info->author_email, "commit"); html_txt(info->author); if (!ctx.cfg.noplainemail) { html(" "); html_txt(info->author_email); } cgit_close_filter(ctx.repo->email_filter); html(""); cgit_print_date(info->author_date, FMT_LONGDATE, ctx.cfg.local_time); html("
committer"); cgit_open_filter(ctx.repo->email_filter, info->committer_email, "commit"); html_txt(info->committer); if (!ctx.cfg.noplainemail) { html(" "); html_txt(info->committer_email); } cgit_close_filter(ctx.repo->email_filter); html(""); cgit_print_date(info->committer_date, FMT_LONGDATE, ctx.cfg.local_time); html("
commit"); tmp = sha1_to_hex(commit->object.sha1); cgit_commit_link(tmp, NULL, NULL, ctx.qry.head, tmp, prefix); html(" ("); cgit_patch_link("patch", NULL, NULL, NULL, tmp, prefix); html(")
tree"); tmp = xstrdup(hex); cgit_tree_link(sha1_to_hex(commit->tree->object.sha1), NULL, NULL, ctx.qry.head, tmp, NULL); if (prefix) { html(" /"); cgit_tree_link(prefix, NULL, NULL, ctx.qry.head, tmp, prefix); } free(tmp); html("
"); cgit_print_error("Error reading parent commit"); html("
parent"); tmp = tmp2 = sha1_to_hex(p->item->object.sha1); if (ctx.repo->enable_subject_links) { parent_info = cgit_parse_commit(parent); tmp2 = parent_info->subject; } cgit_commit_link(tmp2, NULL, NULL, ctx.qry.head, tmp, prefix); html(" ("); cgit_diff_link("diff", NULL, NULL, ctx.qry.head, hex, sha1_to_hex(p->item->object.sha1), prefix); html(")
download"); cgit_print_snapshot_links(ctx.qry.repo, ctx.qry.head, hex, ctx.repo->snapshots); html("
\n"); html("
"); cgit_open_filter(ctx.repo->commit_filter); html_txt(info->subject); cgit_close_filter(ctx.repo->commit_filter); show_commit_decorations(commit); html("
"); html("
"); cgit_open_filter(ctx.repo->commit_filter); html_txt(info->msg); cgit_close_filter(ctx.repo->commit_filter); html("
"); if (notes.len != 0) { html("
Notes
"); html("
"); cgit_open_filter(ctx.repo->commit_filter); html_txt(notes.buf); cgit_close_filter(ctx.repo->commit_filter); html("
"); html(""); } if (parents < 3) { if (parents) tmp = sha1_to_hex(commit->parents->item->object.sha1); else tmp = NULL; cgit_print_diff(ctx.qry.sha1, tmp, prefix, 0, 0); } strbuf_release(¬es); cgit_free_commitinfo(info); } cgit-0.11.2/filter.c0000644000175000017500000002621212500600243014415 0ustar formorerformorer/* filter.c: filter framework functions * * Copyright (C) 2006-2014 cgit Development Team * * Licensed under GNU General Public License v2 * (see COPYING for full license text) */ #include "cgit.h" #include "html.h" #include #include #include #include #include #include #include #ifndef NO_LUA #include #include #include #endif static ssize_t (*libc_write)(int fd, const void *buf, size_t count); static ssize_t (*filter_write)(struct cgit_filter *base, const void *buf, size_t count) = NULL; static struct cgit_filter *current_write_filter = NULL; static inline void reap_filter(struct cgit_filter *filter) { if (filter && filter->cleanup) filter->cleanup(filter); } void cgit_cleanup_filters(void) { int i; reap_filter(ctx.cfg.about_filter); reap_filter(ctx.cfg.commit_filter); reap_filter(ctx.cfg.source_filter); reap_filter(ctx.cfg.email_filter); reap_filter(ctx.cfg.owner_filter); reap_filter(ctx.cfg.auth_filter); for (i = 0; i < cgit_repolist.count; ++i) { reap_filter(cgit_repolist.repos[i].about_filter); reap_filter(cgit_repolist.repos[i].commit_filter); reap_filter(cgit_repolist.repos[i].source_filter); reap_filter(cgit_repolist.repos[i].email_filter); reap_filter(cgit_repolist.repos[i].owner_filter); } } void cgit_init_filters(void) { libc_write = dlsym(RTLD_NEXT, "write"); if (!libc_write) die("Could not locate libc's write function"); } ssize_t write(int fd, const void *buf, size_t count) { if (fd != STDOUT_FILENO || !filter_write) return libc_write(fd, buf, count); return filter_write(current_write_filter, buf, count); } static inline void hook_write(struct cgit_filter *filter, ssize_t (*new_write)(struct cgit_filter *base, const void *buf, size_t count)) 
{ /* We want to avoid buggy nested patterns. */ assert(filter_write == NULL); assert(current_write_filter == NULL); current_write_filter = filter; filter_write = new_write; } static inline void unhook_write(void) { assert(filter_write != NULL); assert(current_write_filter != NULL); filter_write = NULL; current_write_filter = NULL; } static int open_exec_filter(struct cgit_filter *base, va_list ap) { struct cgit_exec_filter *filter = (struct cgit_exec_filter *)base; int i; for (i = 0; i < filter->base.argument_count; i++) filter->argv[i + 1] = va_arg(ap, char *); filter->old_stdout = chk_positive(dup(STDOUT_FILENO), "Unable to duplicate STDOUT"); chk_zero(pipe(filter->pipe_fh), "Unable to create pipe to subprocess"); filter->pid = chk_non_negative(fork(), "Unable to create subprocess"); if (filter->pid == 0) { close(filter->pipe_fh[1]); chk_non_negative(dup2(filter->pipe_fh[0], STDIN_FILENO), "Unable to use pipe as STDIN"); execvp(filter->cmd, filter->argv); die_errno("Unable to exec subprocess %s", filter->cmd); } close(filter->pipe_fh[0]); chk_non_negative(dup2(filter->pipe_fh[1], STDOUT_FILENO), "Unable to use pipe as STDOUT"); close(filter->pipe_fh[1]); return 0; } static int close_exec_filter(struct cgit_filter *base) { struct cgit_exec_filter *filter = (struct cgit_exec_filter *)base; int i, exit_status = 0; chk_non_negative(dup2(filter->old_stdout, STDOUT_FILENO), "Unable to restore STDOUT"); close(filter->old_stdout); if (filter->pid < 0) goto done; waitpid(filter->pid, &exit_status, 0); if (WIFEXITED(exit_status)) goto done; die("Subprocess %s exited abnormally", filter->cmd); done: for (i = 0; i < filter->base.argument_count; i++) filter->argv[i + 1] = NULL; return WEXITSTATUS(exit_status); } static void fprintf_exec_filter(struct cgit_filter *base, FILE *f, const char *prefix) { struct cgit_exec_filter *filter = (struct cgit_exec_filter *)base; fprintf(f, "%sexec:%s\n", prefix, filter->cmd); } static void cleanup_exec_filter(struct cgit_filter *base) { 
struct cgit_exec_filter *filter = (struct cgit_exec_filter *)base; if (filter->argv) { free(filter->argv); filter->argv = NULL; } if (filter->cmd) { free(filter->cmd); filter->cmd = NULL; } } static struct cgit_filter *new_exec_filter(const char *cmd, int argument_count) { struct cgit_exec_filter *f; int args_size = 0; f = xmalloc(sizeof(*f)); /* We leave argv for now and assign it below. */ cgit_exec_filter_init(f, xstrdup(cmd), NULL); f->base.argument_count = argument_count; args_size = (2 + argument_count) * sizeof(char *); f->argv = xmalloc(args_size); memset(f->argv, 0, args_size); f->argv[0] = f->cmd; return &f->base; } void cgit_exec_filter_init(struct cgit_exec_filter *filter, char *cmd, char **argv) { memset(filter, 0, sizeof(*filter)); filter->base.open = open_exec_filter; filter->base.close = close_exec_filter; filter->base.fprintf = fprintf_exec_filter; filter->base.cleanup = cleanup_exec_filter; filter->cmd = cmd; filter->argv = argv; /* The argument count for open_filter is zero by default, unless called from new_filter, above. 
*/ filter->base.argument_count = 0; } #ifndef NO_LUA struct lua_filter { struct cgit_filter base; char *script_file; lua_State *lua_state; }; static void error_lua_filter(struct lua_filter *filter) { die("Lua error in %s: %s", filter->script_file, lua_tostring(filter->lua_state, -1)); lua_pop(filter->lua_state, 1); } static ssize_t write_lua_filter(struct cgit_filter *base, const void *buf, size_t count) { struct lua_filter *filter = (struct lua_filter *)base; lua_getglobal(filter->lua_state, "filter_write"); lua_pushlstring(filter->lua_state, buf, count); if (lua_pcall(filter->lua_state, 1, 0, 0)) { error_lua_filter(filter); errno = EIO; return -1; } return count; } static inline int hook_lua_filter(lua_State *lua_state, void (*fn)(const char *txt)) { const char *str; ssize_t (*save_filter_write)(struct cgit_filter *base, const void *buf, size_t count); struct cgit_filter *save_filter; str = lua_tostring(lua_state, 1); if (!str) return 0; save_filter_write = filter_write; save_filter = current_write_filter; unhook_write(); fn(str); hook_write(save_filter, save_filter_write); return 0; } static int html_lua_filter(lua_State *lua_state) { return hook_lua_filter(lua_state, html); } static int html_txt_lua_filter(lua_State *lua_state) { return hook_lua_filter(lua_state, html_txt); } static int html_attr_lua_filter(lua_State *lua_state) { return hook_lua_filter(lua_state, html_attr); } static int html_url_path_lua_filter(lua_State *lua_state) { return hook_lua_filter(lua_state, html_url_path); } static int html_url_arg_lua_filter(lua_State *lua_state) { return hook_lua_filter(lua_state, html_url_arg); } static int html_include_lua_filter(lua_State *lua_state) { return hook_lua_filter(lua_state, (void (*)(const char *))html_include); } static void cleanup_lua_filter(struct cgit_filter *base) { struct lua_filter *filter = (struct lua_filter *)base; if (!filter->lua_state) return; lua_close(filter->lua_state); filter->lua_state = NULL; if (filter->script_file) { 
free(filter->script_file); filter->script_file = NULL; } } static int init_lua_filter(struct lua_filter *filter) { if (filter->lua_state) return 0; if (!(filter->lua_state = luaL_newstate())) return 1; luaL_openlibs(filter->lua_state); lua_pushcfunction(filter->lua_state, html_lua_filter); lua_setglobal(filter->lua_state, "html"); lua_pushcfunction(filter->lua_state, html_txt_lua_filter); lua_setglobal(filter->lua_state, "html_txt"); lua_pushcfunction(filter->lua_state, html_attr_lua_filter); lua_setglobal(filter->lua_state, "html_attr"); lua_pushcfunction(filter->lua_state, html_url_path_lua_filter); lua_setglobal(filter->lua_state, "html_url_path"); lua_pushcfunction(filter->lua_state, html_url_arg_lua_filter); lua_setglobal(filter->lua_state, "html_url_arg"); lua_pushcfunction(filter->lua_state, html_include_lua_filter); lua_setglobal(filter->lua_state, "html_include"); if (luaL_dofile(filter->lua_state, filter->script_file)) { error_lua_filter(filter); lua_close(filter->lua_state); filter->lua_state = NULL; return 1; } return 0; } static int open_lua_filter(struct cgit_filter *base, va_list ap) { struct lua_filter *filter = (struct lua_filter *)base; int i; if (init_lua_filter(filter)) return 1; hook_write(base, write_lua_filter); lua_getglobal(filter->lua_state, "filter_open"); for (i = 0; i < filter->base.argument_count; ++i) lua_pushstring(filter->lua_state, va_arg(ap, char *)); if (lua_pcall(filter->lua_state, filter->base.argument_count, 0, 0)) { error_lua_filter(filter); return 1; } return 0; } static int close_lua_filter(struct cgit_filter *base) { struct lua_filter *filter = (struct lua_filter *)base; int ret = 0; lua_getglobal(filter->lua_state, "filter_close"); if (lua_pcall(filter->lua_state, 0, 1, 0)) { error_lua_filter(filter); ret = -1; } else { ret = lua_tonumber(filter->lua_state, -1); lua_pop(filter->lua_state, 1); } unhook_write(); return ret; } static void fprintf_lua_filter(struct cgit_filter *base, FILE *f, const char *prefix) { struct 
lua_filter *filter = (struct lua_filter *)base; fprintf(f, "%slua:%s\n", prefix, filter->script_file); } static struct cgit_filter *new_lua_filter(const char *cmd, int argument_count) { struct lua_filter *filter; filter = xmalloc(sizeof(*filter)); memset(filter, 0, sizeof(*filter)); filter->base.open = open_lua_filter; filter->base.close = close_lua_filter; filter->base.fprintf = fprintf_lua_filter; filter->base.cleanup = cleanup_lua_filter; filter->base.argument_count = argument_count; filter->script_file = xstrdup(cmd); return &filter->base; } #endif int cgit_open_filter(struct cgit_filter *filter, ...) { int result; va_list ap; if (!filter) return 0; va_start(ap, filter); result = filter->open(filter, ap); va_end(ap); return result; } int cgit_close_filter(struct cgit_filter *filter) { if (!filter) return 0; return filter->close(filter); } void cgit_fprintf_filter(struct cgit_filter *filter, FILE *f, const char *prefix) { filter->fprintf(filter, f, prefix); } static const struct { const char *prefix; struct cgit_filter *(*ctor)(const char *cmd, int argument_count); } filter_specs[] = { { "exec", new_exec_filter }, #ifndef NO_LUA { "lua", new_lua_filter }, #endif }; struct cgit_filter *cgit_new_filter(const char *cmd, filter_type filtertype) { char *colon; int i; size_t len; int argument_count; if (!cmd || !cmd[0]) return NULL; colon = strchr(cmd, ':'); len = colon - cmd; /* * In case we're running on Windows, don't allow a single letter before * the colon. */ if (len == 1) colon = NULL; switch (filtertype) { case AUTH: argument_count = 12; break; case EMAIL: argument_count = 2; break; case OWNER: argument_count = 0; break; case SOURCE: case ABOUT: argument_count = 1; break; case COMMIT: default: argument_count = 0; break; } /* If no prefix is given, exec filter is the default. 
*/ if (!colon) return new_exec_filter(cmd, argument_count); for (i = 0; i < ARRAY_SIZE(filter_specs); i++) { if (len == strlen(filter_specs[i].prefix) && !strncmp(filter_specs[i].prefix, cmd, len)) return filter_specs[i].ctor(colon + 1, argument_count); } die("Invalid filter type: %.*s", (int) len, cmd); } cgit-0.11.2/ui-shared.h0000644000175000017500000000634412500600243015022 0ustar formorerformorer#ifndef UI_SHARED_H #define UI_SHARED_H extern const char *cgit_httpscheme(); extern const char *cgit_hosturl(); extern const char *cgit_rooturl(); extern const char *cgit_currenturl(); extern const char *cgit_loginurl(); extern char *cgit_repourl(const char *reponame); extern char *cgit_fileurl(const char *reponame, const char *pagename, const char *filename, const char *query); extern char *cgit_pageurl(const char *reponame, const char *pagename, const char *query); extern void cgit_add_clone_urls(void (*fn)(const char *)); extern void cgit_index_link(const char *name, const char *title, const char *class, const char *pattern, const char *sort, int ofs, int always_root); extern void cgit_summary_link(const char *name, const char *title, const char *class, const char *head); extern void cgit_tag_link(const char *name, const char *title, const char *class, const char *tag); extern void cgit_tree_link(const char *name, const char *title, const char *class, const char *head, const char *rev, const char *path); extern void cgit_plain_link(const char *name, const char *title, const char *class, const char *head, const char *rev, const char *path); extern void cgit_log_link(const char *name, const char *title, const char *class, const char *head, const char *rev, const char *path, int ofs, const char *grep, const char *pattern, int showmsg); extern void cgit_commit_link(char *name, const char *title, const char *class, const char *head, const char *rev, const char *path); extern void cgit_patch_link(const char *name, const char *title, const char *class, const char *head, 
const char *rev, const char *path); extern void cgit_refs_link(const char *name, const char *title, const char *class, const char *head, const char *rev, const char *path); extern void cgit_snapshot_link(const char *name, const char *title, const char *class, const char *head, const char *rev, const char *archivename); extern void cgit_diff_link(const char *name, const char *title, const char *class, const char *head, const char *new_rev, const char *old_rev, const char *path); extern void cgit_stats_link(const char *name, const char *title, const char *class, const char *head, const char *path); extern void cgit_object_link(struct object *obj); extern void cgit_submodule_link(const char *class, char *path, const char *rev); __attribute__((format (printf,1,2))) extern void cgit_print_error(const char *fmt, ...); __attribute__((format (printf,1,0))) extern void cgit_vprint_error(const char *fmt, va_list ap); extern void cgit_print_date(time_t secs, const char *format, int local_time); extern void cgit_print_age(time_t t, time_t max_relative, const char *format); extern void cgit_print_http_headers(void); extern void cgit_print_docstart(void); extern void cgit_print_docend(); extern void cgit_print_pageheader(void); extern void cgit_print_filemode(unsigned short mode); extern void cgit_print_snapshot_links(const char *repo, const char *head, const char *hex, int snapshots); extern void cgit_add_hidden_formfields(int incl_head, int incl_search, const char *page); #endif /* UI_SHARED_H */ cgit-0.11.2/ui-snapshot.c0000644000175000017500000001422512500600243015403 0ustar formorerformorer/* ui-snapshot.c: generate snapshot of a commit * * Copyright (C) 2006-2014 cgit Development Team * * Licensed under GNU General Public License v2 * (see COPYING for full license text) */ #include "cgit.h" #include "ui-snapshot.h" #include "html.h" #include "ui-shared.h" static int write_archive_type(const char *format, const char *hex, const char *prefix) { struct argv_array argv = 
ARGV_ARRAY_INIT; const char **nargv; int result; argv_array_push(&argv, "snapshot"); argv_array_push(&argv, format); if (prefix) { struct strbuf buf = STRBUF_INIT; strbuf_addstr(&buf, prefix); strbuf_addch(&buf, '/'); argv_array_push(&argv, "--prefix"); argv_array_push(&argv, buf.buf); strbuf_release(&buf); } argv_array_push(&argv, hex); /* * Now we need to copy the pointers to arguments into a new * structure because write_archive will rearrange its arguments * which may result in duplicated/missing entries causing leaks * or double-frees in argv_array_clear. */ nargv = xmalloc(sizeof(char *) * (argv.argc + 1)); /* argv_array guarantees a trailing NULL entry. */ memcpy(nargv, argv.argv, sizeof(char *) * (argv.argc + 1)); result = write_archive(argv.argc, nargv, NULL, 1, NULL, 0); argv_array_clear(&argv); free(nargv); return result; } static int write_tar_archive(const char *hex, const char *prefix) { return write_archive_type("--format=tar", hex, prefix); } static int write_zip_archive(const char *hex, const char *prefix) { return write_archive_type("--format=zip", hex, prefix); } static int write_compressed_tar_archive(const char *hex, const char *prefix, char *filter_argv[]) { int rv; struct cgit_exec_filter f; cgit_exec_filter_init(&f, filter_argv[0], filter_argv); cgit_open_filter(&f.base); rv = write_tar_archive(hex, prefix); cgit_close_filter(&f.base); return rv; } static int write_tar_gzip_archive(const char *hex, const char *prefix) { char *argv[] = { "gzip", "-n", NULL }; return write_compressed_tar_archive(hex, prefix, argv); } static int write_tar_bzip2_archive(const char *hex, const char *prefix) { char *argv[] = { "bzip2", NULL }; return write_compressed_tar_archive(hex, prefix, argv); } static int write_tar_xz_archive(const char *hex, const char *prefix) { char *argv[] = { "xz", NULL }; return write_compressed_tar_archive(hex, prefix, argv); } const struct cgit_snapshot_format cgit_snapshot_formats[] = { { ".zip", "application/x-zip", 
write_zip_archive, 0x01 }, { ".tar.gz", "application/x-gzip", write_tar_gzip_archive, 0x02 }, { ".tar.bz2", "application/x-bzip2", write_tar_bzip2_archive, 0x04 }, { ".tar", "application/x-tar", write_tar_archive, 0x08 }, { ".tar.xz", "application/x-xz", write_tar_xz_archive, 0x10 }, { NULL } }; static const struct cgit_snapshot_format *get_format(const char *filename) { const struct cgit_snapshot_format *fmt; for (fmt = cgit_snapshot_formats; fmt->suffix; fmt++) { if (ends_with(filename, fmt->suffix)) return fmt; } return NULL; } static int make_snapshot(const struct cgit_snapshot_format *format, const char *hex, const char *prefix, const char *filename) { unsigned char sha1[20]; if (get_sha1(hex, sha1)) { cgit_print_error("Bad object id: %s", hex); return 1; } if (!lookup_commit_reference(sha1)) { cgit_print_error("Not a commit reference: %s", hex); return 1; } ctx.page.etag = sha1_to_hex(sha1); ctx.page.mimetype = xstrdup(format->mimetype); ctx.page.filename = xstrdup(filename); cgit_print_http_headers(); format->write_func(hex, prefix); return 0; } /* Try to guess the requested revision from the requested snapshot name. * First the format extension is stripped, e.g. "cgit-0.7.2.tar.gz" become * "cgit-0.7.2". If this is a valid commit object name we've got a winner. * Otherwise, if the snapshot name has a prefix matching the result from * repo_basename(), we strip the basename and any following '-' and '_' * characters ("cgit-0.7.2" -> "0.7.2") and check the resulting name once * more. If this still isn't a valid commit object name, we check if pre- * pending a 'v' or a 'V' to the remaining snapshot name ("0.7.2" -> * "v0.7.2") gives us something valid. 
*/ static const char *get_ref_from_filename(const char *url, const char *filename, const struct cgit_snapshot_format *format) { const char *reponame; unsigned char sha1[20]; struct strbuf snapshot = STRBUF_INIT; int result = 1; strbuf_addstr(&snapshot, filename); strbuf_setlen(&snapshot, snapshot.len - strlen(format->suffix)); if (get_sha1(snapshot.buf, sha1) == 0) goto out; reponame = cgit_repobasename(url); if (starts_with(snapshot.buf, reponame)) { const char *new_start = snapshot.buf; new_start += strlen(reponame); while (new_start && (*new_start == '-' || *new_start == '_')) new_start++; strbuf_splice(&snapshot, 0, new_start - snapshot.buf, "", 0); } if (get_sha1(snapshot.buf, sha1) == 0) goto out; strbuf_insert(&snapshot, 0, "v", 1); if (get_sha1(snapshot.buf, sha1) == 0) goto out; strbuf_splice(&snapshot, 0, 1, "V", 1); if (get_sha1(snapshot.buf, sha1) == 0) goto out; result = 0; strbuf_release(&snapshot); out: return result ? strbuf_detach(&snapshot, NULL) : NULL; } __attribute__((format (printf, 1, 2))) static void show_error(char *fmt, ...) 
{ va_list ap; ctx.page.mimetype = "text/html"; cgit_print_http_headers(); cgit_print_docstart(); cgit_print_pageheader(); va_start(ap, fmt); cgit_vprint_error(fmt, ap); va_end(ap); cgit_print_docend(); } void cgit_print_snapshot(const char *head, const char *hex, const char *filename, int dwim) { const struct cgit_snapshot_format* f; char *prefix = NULL; if (!filename) { show_error("No snapshot name specified"); return; } f = get_format(filename); if (!f) { show_error("Unsupported snapshot format: %s", filename); return; } if (!hex && dwim) { hex = get_ref_from_filename(ctx.repo->url, filename, f); if (hex == NULL) { html_status(404, "Not found", 0); return; } prefix = xstrdup(filename); prefix[strlen(filename) - strlen(f->suffix)] = '\0'; } if (!hex) hex = head; if (!prefix) prefix = xstrdup(cgit_repobasename(ctx.repo->url)); make_snapshot(f, hex, prefix, filename); free(prefix); } cgit-0.11.2/git/0000755000175000017500000000000012476431550013563 5ustar formorerformorercgit-0.11.2/git/version.c0000644000175000017500000000121312476431550015411 0ustar formorerformorer#include "git-compat-util.h" #include "version.h" #include "strbuf.h" const char git_version_string[] = GIT_VERSION; const char *git_user_agent(void) { static const char *agent = NULL; if (!agent) { agent = getenv("GIT_USER_AGENT"); if (!agent) agent = GIT_USER_AGENT; } return agent; } const char *git_user_agent_sanitized(void) { static const char *agent = NULL; if (!agent) { struct strbuf buf = STRBUF_INIT; int i; strbuf_addstr(&buf, git_user_agent()); strbuf_trim(&buf); for (i = 0; i < buf.len; i++) { if (buf.buf[i] <= 32 || buf.buf[i] >= 127) buf.buf[i] = '.'; } agent = buf.buf; } return agent; } cgit-0.11.2/git/unpack-trees.h0000644000175000017500000000436312476431550016343 0ustar formorerformorer#ifndef UNPACK_TREES_H #define UNPACK_TREES_H #include "string-list.h" #define MAX_UNPACK_TREES 8 struct unpack_trees_options; struct exclude_list; typedef int (*merge_fn_t)(const struct cache_entry * const 
*src, struct unpack_trees_options *options); enum unpack_trees_error_types { ERROR_WOULD_OVERWRITE = 0, ERROR_NOT_UPTODATE_FILE, ERROR_NOT_UPTODATE_DIR, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, ERROR_BIND_OVERLAP, ERROR_SPARSE_NOT_UPTODATE_FILE, ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN, ERROR_WOULD_LOSE_ORPHANED_REMOVED, NB_UNPACK_TREES_ERROR_TYPES }; /* * Sets the list of user-friendly error messages to be used by the * command "cmd" (either merge or checkout), and show_all_errors to 1. */ void setup_unpack_trees_porcelain(struct unpack_trees_options *opts, const char *cmd); struct unpack_trees_options { unsigned int reset, merge, update, index_only, nontrivial_merge, trivial_merges_only, verbose_update, aggressive, skip_unmerged, initial_checkout, diff_index_cached, debug_unpack, skip_sparse_checkout, gently, exiting_early, show_all_errors, dry_run; const char *prefix; int cache_bottom; struct dir_struct *dir; struct pathspec *pathspec; merge_fn_t fn; const char *msgs[NB_UNPACK_TREES_ERROR_TYPES]; /* * Store error messages in an array, each case * corresponding to a error message type */ struct string_list unpack_rejects[NB_UNPACK_TREES_ERROR_TYPES]; int head_idx; int merge_size; struct cache_entry *df_conflict_entry; void *unpack_data; struct index_state *dst_index; struct index_state *src_index; struct index_state result; struct exclude_list *el; /* for internal use */ }; extern int unpack_trees(unsigned n, struct tree_desc *t, struct unpack_trees_options *options); int threeway_merge(const struct cache_entry * const *stages, struct unpack_trees_options *o); int twoway_merge(const struct cache_entry * const *src, struct unpack_trees_options *o); int bind_merge(const struct cache_entry * const *src, struct unpack_trees_options *o); int oneway_merge(const struct cache_entry * const *src, struct unpack_trees_options *o); #endif cgit-0.11.2/git/read-cache.c0000644000175000017500000017205412476431550015714 0ustar formorerformorer/* * 
GIT - The information manager from hell * * Copyright (C) Linus Torvalds, 2005 */ #define NO_THE_INDEX_COMPATIBILITY_MACROS #include "cache.h" #include "lockfile.h" #include "cache-tree.h" #include "refs.h" #include "dir.h" #include "tree.h" #include "commit.h" #include "blob.h" #include "resolve-undo.h" #include "strbuf.h" #include "varint.h" #include "split-index.h" #include "sigchain.h" #include "utf8.h" static struct cache_entry *refresh_cache_entry(struct cache_entry *ce, unsigned int options); /* Mask for the name length in ce_flags in the on-disk index */ #define CE_NAMEMASK (0x0fff) /* Index extensions. * * The first letter should be 'A'..'Z' for extensions that are not * necessary for a correct operation (i.e. optimization data). * When new extensions are added that _needs_ to be understood in * order to correctly interpret the index file, pick character that * is outside the range, to cause the reader to abort. */ #define CACHE_EXT(s) ( (s[0]<<24)|(s[1]<<16)|(s[2]<<8)|(s[3]) ) #define CACHE_EXT_TREE 0x54524545 /* "TREE" */ #define CACHE_EXT_RESOLVE_UNDO 0x52455543 /* "REUC" */ #define CACHE_EXT_LINK 0x6c696e6b /* "link" */ /* changes that can be kept in $GIT_DIR/index (basically all extensions) */ #define EXTMASK (RESOLVE_UNDO_CHANGED | CACHE_TREE_CHANGED | \ CE_ENTRY_ADDED | CE_ENTRY_REMOVED | CE_ENTRY_CHANGED | \ SPLIT_INDEX_ORDERED) struct index_state the_index; static const char *alternate_index_output; static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce) { istate->cache[nr] = ce; add_name_hash(istate, ce); } static void replace_index_entry(struct index_state *istate, int nr, struct cache_entry *ce) { struct cache_entry *old = istate->cache[nr]; replace_index_entry_in_base(istate, old, ce); remove_name_hash(istate, old); free(old); set_index_entry(istate, nr, ce); ce->ce_flags |= CE_UPDATE_IN_BASE; istate->cache_changed |= CE_ENTRY_CHANGED; } void rename_index_entry_at(struct index_state *istate, int nr, const char 
*new_name) { struct cache_entry *old = istate->cache[nr], *new; int namelen = strlen(new_name); new = xmalloc(cache_entry_size(namelen)); copy_cache_entry(new, old); new->ce_flags &= ~CE_HASHED; new->ce_namelen = namelen; new->index = 0; memcpy(new->name, new_name, namelen + 1); cache_tree_invalidate_path(istate, old->name); remove_index_entry_at(istate, nr); add_index_entry(istate, new, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE); } void fill_stat_data(struct stat_data *sd, struct stat *st) { sd->sd_ctime.sec = (unsigned int)st->st_ctime; sd->sd_mtime.sec = (unsigned int)st->st_mtime; sd->sd_ctime.nsec = ST_CTIME_NSEC(*st); sd->sd_mtime.nsec = ST_MTIME_NSEC(*st); sd->sd_dev = st->st_dev; sd->sd_ino = st->st_ino; sd->sd_uid = st->st_uid; sd->sd_gid = st->st_gid; sd->sd_size = st->st_size; } int match_stat_data(const struct stat_data *sd, struct stat *st) { int changed = 0; if (sd->sd_mtime.sec != (unsigned int)st->st_mtime) changed |= MTIME_CHANGED; if (trust_ctime && check_stat && sd->sd_ctime.sec != (unsigned int)st->st_ctime) changed |= CTIME_CHANGED; #ifdef USE_NSEC if (check_stat && sd->sd_mtime.nsec != ST_MTIME_NSEC(*st)) changed |= MTIME_CHANGED; if (trust_ctime && check_stat && sd->sd_ctime.nsec != ST_CTIME_NSEC(*st)) changed |= CTIME_CHANGED; #endif if (check_stat) { if (sd->sd_uid != (unsigned int) st->st_uid || sd->sd_gid != (unsigned int) st->st_gid) changed |= OWNER_CHANGED; if (sd->sd_ino != (unsigned int) st->st_ino) changed |= INODE_CHANGED; } #ifdef USE_STDEV /* * st_dev breaks on network filesystems where different * clients will have different views of what "device" * the filesystem is on */ if (check_stat && sd->sd_dev != (unsigned int) st->st_dev) changed |= INODE_CHANGED; #endif if (sd->sd_size != (unsigned int) st->st_size) changed |= DATA_CHANGED; return changed; } /* * This only updates the "non-critical" parts of the directory * cache, ie the parts that aren't tracked by GIT, and only used * to validate the cache. 
 */
void fill_stat_cache_info(struct cache_entry *ce, struct stat *st)
{
	fill_stat_data(&ce->ce_stat_data, st);

	/* Under core.ignorestat, mark entries as always valid. */
	if (assume_unchanged)
		ce->ce_flags |= CE_VALID;

	if (S_ISREG(st->st_mode))
		ce_mark_uptodate(ce);
}

/*
 * Re-hash the working-tree file and compare against the sha1 recorded
 * in the index entry.  Returns 0 on a match, non-zero on mismatch, and
 * -1 when the file could not be opened or hashed.
 */
static int ce_compare_data(const struct cache_entry *ce, struct stat *st)
{
	int match = -1;
	int fd = open(ce->name, O_RDONLY);

	if (fd >= 0) {
		unsigned char sha1[20];
		if (!index_fd(sha1, fd, st, OBJ_BLOB, ce->name, 0))
			match = hashcmp(sha1, ce->sha1);
		/* index_fd() closed the file descriptor already */
	}
	return match;
}

/*
 * Compare a symlink's target in the working tree against the blob
 * recorded in the index entry.  Returns 0 on a match, non-zero on
 * mismatch or when the link cannot be read.
 */
static int ce_compare_link(const struct cache_entry *ce, size_t expected_size)
{
	int match = -1;
	void *buffer;
	unsigned long size;
	enum object_type type;
	struct strbuf sb = STRBUF_INIT;

	if (strbuf_readlink(&sb, ce->name, expected_size))
		return -1;

	buffer = read_sha1_file(ce->sha1, &type, &size);
	if (buffer) {
		if (size == sb.len)
			match = memcmp(buffer, sb.buf, size);
		free(buffer);
	}
	strbuf_release(&sb);
	return match;
}

/*
 * Compare a gitlink (submodule) entry against the HEAD of the checked
 * out sub-repository, if any.
 */
static int ce_compare_gitlink(const struct cache_entry *ce)
{
	unsigned char sha1[20];

	/*
	 * We don't actually require that the .git directory
	 * under GITLINK directory be a valid git directory. It
	 * might even be missing (in case nobody populated that
	 * sub-project).
	 *
	 * If so, we consider it always to match.
	 */
	if (resolve_gitlink_ref(ce->name, "HEAD", sha1) < 0)
		return 0;
	return hashcmp(sha1, ce->sha1);
}

/*
 * Go to the filesystem and compare actual content against the index
 * entry, dispatching on the working-tree file type.  Returns a
 * DATA_CHANGED/TYPE_CHANGED bitmask, or 0 when unchanged.
 */
static int ce_modified_check_fs(const struct cache_entry *ce, struct stat *st)
{
	switch (st->st_mode & S_IFMT) {
	case S_IFREG:
		if (ce_compare_data(ce, st))
			return DATA_CHANGED;
		break;
	case S_IFLNK:
		if (ce_compare_link(ce, xsize_t(st->st_size)))
			return DATA_CHANGED;
		break;
	case S_IFDIR:
		if (S_ISGITLINK(ce->ce_mode))
			return ce_compare_gitlink(ce) ?
DATA_CHANGED : 0; default: return TYPE_CHANGED; } return 0; } static int ce_match_stat_basic(const struct cache_entry *ce, struct stat *st) { unsigned int changed = 0; if (ce->ce_flags & CE_REMOVE) return MODE_CHANGED | DATA_CHANGED | TYPE_CHANGED; switch (ce->ce_mode & S_IFMT) { case S_IFREG: changed |= !S_ISREG(st->st_mode) ? TYPE_CHANGED : 0; /* We consider only the owner x bit to be relevant for * "mode changes" */ if (trust_executable_bit && (0100 & (ce->ce_mode ^ st->st_mode))) changed |= MODE_CHANGED; break; case S_IFLNK: if (!S_ISLNK(st->st_mode) && (has_symlinks || !S_ISREG(st->st_mode))) changed |= TYPE_CHANGED; break; case S_IFGITLINK: /* We ignore most of the st_xxx fields for gitlinks */ if (!S_ISDIR(st->st_mode)) changed |= TYPE_CHANGED; else if (ce_compare_gitlink(ce)) changed |= DATA_CHANGED; return changed; default: die("internal error: ce_mode is %o", ce->ce_mode); } changed |= match_stat_data(&ce->ce_stat_data, st); /* Racily smudged entry? */ if (!ce->ce_stat_data.sd_size) { if (!is_empty_blob_sha1(ce->sha1)) changed |= DATA_CHANGED; } return changed; } static int is_racy_timestamp(const struct index_state *istate, const struct cache_entry *ce) { return (!S_ISGITLINK(ce->ce_mode) && istate->timestamp.sec && #ifdef USE_NSEC /* nanosecond timestamped files can also be racy! */ (istate->timestamp.sec < ce->ce_stat_data.sd_mtime.sec || (istate->timestamp.sec == ce->ce_stat_data.sd_mtime.sec && istate->timestamp.nsec <= ce->ce_stat_data.sd_mtime.nsec)) #else istate->timestamp.sec <= ce->ce_stat_data.sd_mtime.sec #endif ); } int ie_match_stat(const struct index_state *istate, const struct cache_entry *ce, struct stat *st, unsigned int options) { unsigned int changed; int ignore_valid = options & CE_MATCH_IGNORE_VALID; int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE; int assume_racy_is_modified = options & CE_MATCH_RACY_IS_DIRTY; /* * If it's marked as always valid in the index, it's * valid whatever the checked-out copy says. 
* * skip-worktree has the same effect with higher precedence */ if (!ignore_skip_worktree && ce_skip_worktree(ce)) return 0; if (!ignore_valid && (ce->ce_flags & CE_VALID)) return 0; /* * Intent-to-add entries have not been added, so the index entry * by definition never matches what is in the work tree until it * actually gets added. */ if (ce->ce_flags & CE_INTENT_TO_ADD) return DATA_CHANGED | TYPE_CHANGED | MODE_CHANGED; changed = ce_match_stat_basic(ce, st); /* * Within 1 second of this sequence: * echo xyzzy >file && git-update-index --add file * running this command: * echo frotz >file * would give a falsely clean cache entry. The mtime and * length match the cache, and other stat fields do not change. * * We could detect this at update-index time (the cache entry * being registered/updated records the same time as "now") * and delay the return from git-update-index, but that would * effectively mean we can make at most one commit per second, * which is not acceptable. Instead, we check cache entries * whose mtime are the same as the index file timestamp more * carefully than others. */ if (!changed && is_racy_timestamp(istate, ce)) { if (assume_racy_is_modified) changed |= DATA_CHANGED; else changed |= ce_modified_check_fs(ce, st); } return changed; } int ie_modified(const struct index_state *istate, const struct cache_entry *ce, struct stat *st, unsigned int options) { int changed, changed_fs; changed = ie_match_stat(istate, ce, st, options); if (!changed) return 0; /* * If the mode or type has changed, there's no point in trying * to refresh the entry - it's not going to match */ if (changed & (MODE_CHANGED | TYPE_CHANGED)) return changed; /* * Immediately after read-tree or update-index --cacheinfo, * the length field is zero, as we have never even read the * lstat(2) information once, and we cannot trust DATA_CHANGED * returned by ie_match_stat() which in turn was returned by * ce_match_stat_basic() to signal that the filesize of the * blob changed. 
We have to actually go to the filesystem to * see if the contents match, and if so, should answer "unchanged". * * The logic does not apply to gitlinks, as ce_match_stat_basic() * already has checked the actual HEAD from the filesystem in the * subproject. If ie_match_stat() already said it is different, * then we know it is. */ if ((changed & DATA_CHANGED) && (S_ISGITLINK(ce->ce_mode) || ce->ce_stat_data.sd_size != 0)) return changed; changed_fs = ce_modified_check_fs(ce, st); if (changed_fs) return changed | changed_fs; return 0; } int base_name_compare(const char *name1, int len1, int mode1, const char *name2, int len2, int mode2) { unsigned char c1, c2; int len = len1 < len2 ? len1 : len2; int cmp; cmp = memcmp(name1, name2, len); if (cmp) return cmp; c1 = name1[len]; c2 = name2[len]; if (!c1 && S_ISDIR(mode1)) c1 = '/'; if (!c2 && S_ISDIR(mode2)) c2 = '/'; return (c1 < c2) ? -1 : (c1 > c2) ? 1 : 0; } /* * df_name_compare() is identical to base_name_compare(), except it * compares conflicting directory/file entries as equal. Note that * while a directory name compares as equal to a regular file, they * then individually compare _differently_ to a filename that has * a dot after the basename (because '\0' < '.' < '/'). * * This is used by routines that want to traverse the git namespace * but then handle conflicting entries together when possible. */ int df_name_compare(const char *name1, int len1, int mode1, const char *name2, int len2, int mode2) { int len = len1 < len2 ? len1 : len2, cmp; unsigned char c1, c2; cmp = memcmp(name1, name2, len); if (cmp) return cmp; /* Directories and files compare equal (same length, same name) */ if (len1 == len2) return 0; c1 = name1[len]; if (!c1 && S_ISDIR(mode1)) c1 = '/'; c2 = name2[len]; if (!c2 && S_ISDIR(mode2)) c2 = '/'; if (c1 == '/' && !c2) return 0; if (c2 == '/' && !c1) return 0; return c1 - c2; } int name_compare(const char *name1, size_t len1, const char *name2, size_t len2) { size_t min_len = (len1 < len2) ? 
len1 : len2;
	int cmp = memcmp(name1, name2, min_len);
	if (cmp)
		return cmp;
	/* Common prefix matches: the shorter name sorts first. */
	if (len1 < len2)
		return -1;
	if (len1 > len2)
		return 1;
	return 0;
}

/*
 * Order two index entries by name first, then by merge stage.  This is
 * the ordering the in-core index array is kept sorted in.
 */
int cache_name_stage_compare(const char *name1, int len1, int stage1, const char *name2, int len2, int stage2)
{
	int cmp;

	cmp = name_compare(name1, len1, name2, len2);
	if (cmp)
		return cmp;

	if (stage1 < stage2)
		return -1;
	if (stage1 > stage2)
		return 1;
	return 0;
}

/*
 * Binary-search the sorted index for (name, stage).  Returns the entry's
 * position when found; otherwise returns -insertion_pos - 1 (always
 * negative) so callers can recover where the entry would be inserted.
 */
static int index_name_stage_pos(const struct index_state *istate, const char *name, int namelen, int stage)
{
	int first, last;

	first = 0;
	last = istate->cache_nr;
	while (last > first) {
		int next = (last + first) >> 1;
		struct cache_entry *ce = istate->cache[next];
		int cmp = cache_name_stage_compare(name, namelen, stage, ce->name, ce_namelen(ce), ce_stage(ce));
		if (!cmp)
			return next;
		if (cmp < 0) {
			last = next;
			continue;
		}
		first = next+1;
	}
	return -first-1;
}

/* Look up a stage-0 (merged) entry by name; see index_name_stage_pos(). */
int index_name_pos(const struct index_state *istate, const char *name, int namelen)
{
	return index_name_stage_pos(istate, name, namelen, 0);
}

/* Remove entry, return true if there are more entries to go.. */
int remove_index_entry_at(struct index_state *istate, int pos)
{
	struct cache_entry *ce = istate->cache[pos];

	record_resolve_undo(istate, ce);
	remove_name_hash(istate, ce);
	save_or_free_index_entry(istate, ce);
	istate->cache_changed |= CE_ENTRY_REMOVED;
	istate->cache_nr--;
	if (pos >= istate->cache_nr)
		return 0;
	/* Close the gap left by the removed entry. */
	memmove(istate->cache + pos,
		istate->cache + pos + 1,
		(istate->cache_nr - pos) * sizeof(struct cache_entry *));
	return 1;
}

/*
 * Remove all cache entries marked for removal, that is where
 * CE_REMOVE is set in ce_flags. This is much more effective than
 * calling remove_index_entry_at() for each entry to be removed.
*/ void remove_marked_cache_entries(struct index_state *istate) { struct cache_entry **ce_array = istate->cache; unsigned int i, j; for (i = j = 0; i < istate->cache_nr; i++) { if (ce_array[i]->ce_flags & CE_REMOVE) { remove_name_hash(istate, ce_array[i]); save_or_free_index_entry(istate, ce_array[i]); } else ce_array[j++] = ce_array[i]; } if (j == istate->cache_nr) return; istate->cache_changed |= CE_ENTRY_REMOVED; istate->cache_nr = j; } int remove_file_from_index(struct index_state *istate, const char *path) { int pos = index_name_pos(istate, path, strlen(path)); if (pos < 0) pos = -pos-1; cache_tree_invalidate_path(istate, path); while (pos < istate->cache_nr && !strcmp(istate->cache[pos]->name, path)) remove_index_entry_at(istate, pos); return 0; } static int compare_name(struct cache_entry *ce, const char *path, int namelen) { return namelen != ce_namelen(ce) || memcmp(path, ce->name, namelen); } static int index_name_pos_also_unmerged(struct index_state *istate, const char *path, int namelen) { int pos = index_name_pos(istate, path, namelen); struct cache_entry *ce; if (pos >= 0) return pos; /* maybe unmerged? */ pos = -1 - pos; if (pos >= istate->cache_nr || compare_name((ce = istate->cache[pos]), path, namelen)) return -1; /* order of preference: stage 2, 1, 3 */ if (ce_stage(ce) == 1 && pos + 1 < istate->cache_nr && ce_stage((ce = istate->cache[pos + 1])) == 2 && !compare_name(ce, path, namelen)) pos++; return pos; } static int different_name(struct cache_entry *ce, struct cache_entry *alias) { int len = ce_namelen(ce); return ce_namelen(alias) != len || memcmp(ce->name, alias->name, len); } /* * If we add a filename that aliases in the cache, we will use the * name that we already have - but we don't want to update the same * alias twice, because that implies that there were actually two * different files with aliasing names! 
* * So we use the CE_ADDED flag to verify that the alias was an old * one before we accept it as */ static struct cache_entry *create_alias_ce(struct index_state *istate, struct cache_entry *ce, struct cache_entry *alias) { int len; struct cache_entry *new; if (alias->ce_flags & CE_ADDED) die("Will not add file alias '%s' ('%s' already exists in index)", ce->name, alias->name); /* Ok, create the new entry using the name of the existing alias */ len = ce_namelen(alias); new = xcalloc(1, cache_entry_size(len)); memcpy(new->name, alias->name, len); copy_cache_entry(new, ce); save_or_free_index_entry(istate, ce); return new; } void set_object_name_for_intent_to_add_entry(struct cache_entry *ce) { unsigned char sha1[20]; if (write_sha1_file("", 0, blob_type, sha1)) die("cannot create an empty blob in the object database"); hashcpy(ce->sha1, sha1); } int add_to_index(struct index_state *istate, const char *path, struct stat *st, int flags) { int size, namelen, was_same; mode_t st_mode = st->st_mode; struct cache_entry *ce, *alias; unsigned ce_option = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE|CE_MATCH_RACY_IS_DIRTY; int verbose = flags & (ADD_CACHE_VERBOSE | ADD_CACHE_PRETEND); int pretend = flags & ADD_CACHE_PRETEND; int intent_only = flags & ADD_CACHE_INTENT; int add_option = (ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE| (intent_only ? 
ADD_CACHE_NEW_ONLY : 0)); if (!S_ISREG(st_mode) && !S_ISLNK(st_mode) && !S_ISDIR(st_mode)) return error("%s: can only add regular files, symbolic links or git-directories", path); namelen = strlen(path); if (S_ISDIR(st_mode)) { while (namelen && path[namelen-1] == '/') namelen--; } size = cache_entry_size(namelen); ce = xcalloc(1, size); memcpy(ce->name, path, namelen); ce->ce_namelen = namelen; if (!intent_only) fill_stat_cache_info(ce, st); else ce->ce_flags |= CE_INTENT_TO_ADD; if (trust_executable_bit && has_symlinks) ce->ce_mode = create_ce_mode(st_mode); else { /* If there is an existing entry, pick the mode bits and type * from it, otherwise assume unexecutable regular file. */ struct cache_entry *ent; int pos = index_name_pos_also_unmerged(istate, path, namelen); ent = (0 <= pos) ? istate->cache[pos] : NULL; ce->ce_mode = ce_mode_from_stat(ent, st_mode); } /* When core.ignorecase=true, determine if a directory of the same name but differing * case already exists within the Git repository. If it does, ensure the directory * case of the file being added to the repository matches (is folded into) the existing * entry's directory case. 
*/ if (ignore_case) { const char *startPtr = ce->name; const char *ptr = startPtr; while (*ptr) { while (*ptr && *ptr != '/') ++ptr; if (*ptr == '/') { struct cache_entry *foundce; ++ptr; foundce = index_dir_exists(istate, ce->name, ptr - ce->name - 1); if (foundce) { memcpy((void *)startPtr, foundce->name + (startPtr - ce->name), ptr - startPtr); startPtr = ptr; } } } } alias = index_file_exists(istate, ce->name, ce_namelen(ce), ignore_case); if (alias && !ce_stage(alias) && !ie_match_stat(istate, alias, st, ce_option)) { /* Nothing changed, really */ free(ce); if (!S_ISGITLINK(alias->ce_mode)) ce_mark_uptodate(alias); alias->ce_flags |= CE_ADDED; return 0; } if (!intent_only) { if (index_path(ce->sha1, path, st, HASH_WRITE_OBJECT)) return error("unable to index file %s", path); } else set_object_name_for_intent_to_add_entry(ce); if (ignore_case && alias && different_name(ce, alias)) ce = create_alias_ce(istate, ce, alias); ce->ce_flags |= CE_ADDED; /* It was suspected to be racily clean, but it turns out to be Ok */ was_same = (alias && !ce_stage(alias) && !hashcmp(alias->sha1, ce->sha1) && ce->ce_mode == alias->ce_mode); if (pretend) ; else if (add_index_entry(istate, ce, add_option)) return error("unable to add %s to index",path); if (verbose && !was_same) printf("add '%s'\n", path); return 0; } int add_file_to_index(struct index_state *istate, const char *path, int flags) { struct stat st; if (lstat(path, &st)) die_errno("unable to stat '%s'", path); return add_to_index(istate, path, &st, flags); } struct cache_entry *make_cache_entry(unsigned int mode, const unsigned char *sha1, const char *path, int stage, unsigned int refresh_options) { int size, len; struct cache_entry *ce, *ret; if (!verify_path(path)) { error("Invalid path '%s'", path); return NULL; } len = strlen(path); size = cache_entry_size(len); ce = xcalloc(1, size); hashcpy(ce->sha1, sha1); memcpy(ce->name, path, len); ce->ce_flags = create_ce_flags(stage); ce->ce_namelen = len; ce->ce_mode = 
 create_ce_mode(mode);

	/* Refresh may return the same entry, a new one, or NULL on failure. */
	ret = refresh_cache_entry(ce, refresh_options);
	if (!ret) {
		free(ce);
		return NULL;
	} else {
		return ret;
	}
}

/* Do two cache entries carry exactly the same path name? */
int ce_same_name(const struct cache_entry *a, const struct cache_entry *b)
{
	int len = ce_namelen(a);
	return ce_namelen(b) == len && !memcmp(a->name, b->name, len);
}

/*
 * We fundamentally don't like some paths: we don't want
 * dot or dot-dot anywhere, and for obvious reasons don't
 * want to recurse into ".git" either.
 *
 * Also, we don't want double slashes or slashes at the
 * end that can make pathnames ambiguous.
 */
static int verify_dotfile(const char *rest)
{
	/*
	 * The first character was '.', but that
	 * has already been discarded, we now test
	 * the rest.
	 */

	/* "." is not allowed */
	if (*rest == '\0' || is_dir_sep(*rest))
		return 0;

	switch (*rest) {
	/*
	 * ".git" followed by  NUL or slash is bad. This
	 * shares the path end test with the ".." case.
	 */
	case 'g':
	case 'G':
		if (rest[1] != 'i' && rest[1] != 'I')
			break;
		if (rest[2] != 't' && rest[2] != 'T')
			break;
		rest += 2;
	/* fallthrough */
	case '.':
		if (rest[1] == '\0' || is_dir_sep(rest[1]))
			return 0;
	}
	return 1;
}

/*
 * Returns 1 if the path is acceptable for the index, 0 otherwise.
 * Rejects DOS drive prefixes, ".", "..", ".git" components (including
 * HFS/NTFS look-alikes when protect_hfs/protect_ntfs are set), double
 * slashes and trailing slashes.
 */
int verify_path(const char *path)
{
	char c;

	if (has_dos_drive_prefix(path))
		return 0;

	/*
	 * Jump straight into the per-component check so the first
	 * component is validated just like one following a '/'; the
	 * "if (!c)" test is only reached after c has been assigned
	 * inside the loop.
	 */
	goto inside;
	for (;;) {
		if (!c)
			return 1;
		if (is_dir_sep(c)) {
inside:
			if (protect_hfs && is_hfs_dotgit(path))
				return 0;
			if (protect_ntfs && is_ntfs_dotgit(path))
				return 0;
			c = *path++;
			if ((c == '.' && !verify_dotfile(path)) ||
			    is_dir_sep(c) || c == '\0')
				return 0;
		}
		c = *path++;
	}
}

/*
 * Do we have another file that has the beginning components being a
 * proper superset of the name we're trying to add?
 */
static int has_file_name(struct index_state *istate, const struct cache_entry *ce, int pos, int ok_to_replace)
{
	int retval = 0;
	int len = ce_namelen(ce);
	int stage = ce_stage(ce);
	const char *name = ce->name;

	/* Scan forward: entries sharing our name as a prefix sort right after us. */
	while (pos < istate->cache_nr) {
		struct cache_entry *p = istate->cache[pos++];

		if (len >= ce_namelen(p))
			break;
		if (memcmp(name, p->name, len))
			break;
		if (ce_stage(p) != stage)
			continue;
		/* Only "name/..." entries conflict, not e.g. "name.txt". */
		if (p->name[len] != '/')
			continue;
		if (p->ce_flags & CE_REMOVE)
			continue;
		retval = -1;
		if (!ok_to_replace)
			break;
		remove_index_entry_at(istate, --pos);
	}
	return retval;
}

/*
 * Do we have another file with a pathname that is a proper
 * subset of the name we're trying to add?
 */
static int has_dir_name(struct index_state *istate, const struct cache_entry *ce, int pos, int ok_to_replace)
{
	int retval = 0;
	int stage = ce_stage(ce);
	const char *name = ce->name;
	const char *slash = name + ce_namelen(ce);

	/* Walk each leading directory of our name, longest first. */
	for (;;) {
		int len;

		for (;;) {
			if (*--slash == '/')
				break;
			if (slash <= ce->name)
				return retval;
		}
		len = slash - name;

		pos = index_name_stage_pos(istate, name, len, stage);
		if (pos >= 0) {
			/*
			 * Found one, but not so fast. This could
			 * be a marker that says "I was here, but
			 * I am being removed". Such an entry is
			 * not a part of the resulting tree, and
			 * it is Ok to have a directory at the same
			 * path.
			 */
			if (!(istate->cache[pos]->ce_flags & CE_REMOVE)) {
				retval = -1;
				if (!ok_to_replace)
					break;
				remove_index_entry_at(istate, pos);
				continue;
			}
		}
		else
			pos = -pos-1;

		/*
		 * Trivial optimization: if we find an entry that
		 * already matches the sub-directory, then we know
		 * we're ok, and we can exit.
*/ while (pos < istate->cache_nr) { struct cache_entry *p = istate->cache[pos]; if ((ce_namelen(p) <= len) || (p->name[len] != '/') || memcmp(p->name, name, len)) break; /* not our subdirectory */ if (ce_stage(p) == stage && !(p->ce_flags & CE_REMOVE)) /* * p is at the same stage as our entry, and * is a subdirectory of what we are looking * at, so we cannot have conflicts at our * level or anything shorter. */ return retval; pos++; } } return retval; } /* We may be in a situation where we already have path/file and path * is being added, or we already have path and path/file is being * added. Either one would result in a nonsense tree that has path * twice when git-write-tree tries to write it out. Prevent it. * * If ok-to-replace is specified, we remove the conflicting entries * from the cache so the caller should recompute the insert position. * When this happens, we return non-zero. */ static int check_file_directory_conflict(struct index_state *istate, const struct cache_entry *ce, int pos, int ok_to_replace) { int retval; /* * When ce is an "I am going away" entry, we allow it to be added */ if (ce->ce_flags & CE_REMOVE) return 0; /* * We check if the path is a sub-path of a subsequent pathname * first, since removing those will not change the position * in the array. */ retval = has_file_name(istate, ce, pos, ok_to_replace); /* * Then check if the path might have a clashing sub-directory * before it. */ return retval + has_dir_name(istate, ce, pos, ok_to_replace); } static int add_index_entry_with_check(struct index_state *istate, struct cache_entry *ce, int option) { int pos; int ok_to_add = option & ADD_CACHE_OK_TO_ADD; int ok_to_replace = option & ADD_CACHE_OK_TO_REPLACE; int skip_df_check = option & ADD_CACHE_SKIP_DFCHECK; int new_only = option & ADD_CACHE_NEW_ONLY; if (!(option & ADD_CACHE_KEEP_CACHE_TREE)) cache_tree_invalidate_path(istate, ce->name); pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce)); /* existing match? 
Just replace it. */ if (pos >= 0) { if (!new_only) replace_index_entry(istate, pos, ce); return 0; } pos = -pos-1; /* * Inserting a merged entry ("stage 0") into the index * will always replace all non-merged entries.. */ if (pos < istate->cache_nr && ce_stage(ce) == 0) { while (ce_same_name(istate->cache[pos], ce)) { ok_to_add = 1; if (!remove_index_entry_at(istate, pos)) break; } } if (!ok_to_add) return -1; if (!verify_path(ce->name)) return error("Invalid path '%s'", ce->name); if (!skip_df_check && check_file_directory_conflict(istate, ce, pos, ok_to_replace)) { if (!ok_to_replace) return error("'%s' appears as both a file and as a directory", ce->name); pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce)); pos = -pos-1; } return pos + 1; } int add_index_entry(struct index_state *istate, struct cache_entry *ce, int option) { int pos; if (option & ADD_CACHE_JUST_APPEND) pos = istate->cache_nr; else { int ret; ret = add_index_entry_with_check(istate, ce, option); if (ret <= 0) return ret; pos = ret - 1; } /* Make sure the array is big enough .. */ ALLOC_GROW(istate->cache, istate->cache_nr + 1, istate->cache_alloc); /* Add it in.. */ istate->cache_nr++; if (istate->cache_nr > pos + 1) memmove(istate->cache + pos + 1, istate->cache + pos, (istate->cache_nr - pos - 1) * sizeof(ce)); set_index_entry(istate, pos, ce); istate->cache_changed |= CE_ENTRY_ADDED; return 0; } /* * "refresh" does not calculate a new sha1 file or bring the * cache up-to-date for mode/content changes. But what it * _does_ do is to "re-match" the stat information of a file * with the cache, so that you can refresh the cache for a * file that hasn't been changed but where the stat entry is * out of date. * * For example, you'd want to do this after doing a "git-read-tree", * to link up the stat cache details with the proper files. 
*/ static struct cache_entry *refresh_cache_ent(struct index_state *istate, struct cache_entry *ce, unsigned int options, int *err, int *changed_ret) { struct stat st; struct cache_entry *updated; int changed, size; int refresh = options & CE_MATCH_REFRESH; int ignore_valid = options & CE_MATCH_IGNORE_VALID; int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE; int ignore_missing = options & CE_MATCH_IGNORE_MISSING; if (!refresh || ce_uptodate(ce)) return ce; /* * CE_VALID or CE_SKIP_WORKTREE means the user promised us * that the change to the work tree does not matter and told * us not to worry. */ if (!ignore_skip_worktree && ce_skip_worktree(ce)) { ce_mark_uptodate(ce); return ce; } if (!ignore_valid && (ce->ce_flags & CE_VALID)) { ce_mark_uptodate(ce); return ce; } if (has_symlink_leading_path(ce->name, ce_namelen(ce))) { if (ignore_missing) return ce; if (err) *err = ENOENT; return NULL; } if (lstat(ce->name, &st) < 0) { if (ignore_missing && errno == ENOENT) return ce; if (err) *err = errno; return NULL; } changed = ie_match_stat(istate, ce, &st, options); if (changed_ret) *changed_ret = changed; if (!changed) { /* * The path is unchanged. If we were told to ignore * valid bit, then we did the actual stat check and * found that the entry is unmodified. If the entry * is not marked VALID, this is the place to mark it * valid again, under "assume unchanged" mode. */ if (ignore_valid && assume_unchanged && !(ce->ce_flags & CE_VALID)) ; /* mark this one VALID again */ else { /* * We do not mark the index itself "modified" * because CE_UPTODATE flag is in-core only; * we are not going to write this change out. */ if (!S_ISGITLINK(ce->ce_mode)) ce_mark_uptodate(ce); return ce; } } if (ie_modified(istate, ce, &st, options)) { if (err) *err = EINVAL; return NULL; } size = ce_size(ce); updated = xmalloc(size); memcpy(updated, ce, size); fill_stat_cache_info(updated, &st); /* * If ignore_valid is not set, we should leave CE_VALID bit * alone. 
Otherwise, paths marked with --no-assume-unchanged * (i.e. things to be edited) will reacquire CE_VALID bit * automatically, which is not really what we want. */ if (!ignore_valid && assume_unchanged && !(ce->ce_flags & CE_VALID)) updated->ce_flags &= ~CE_VALID; /* istate->cache_changed is updated in the caller */ return updated; } static void show_file(const char * fmt, const char * name, int in_porcelain, int * first, const char *header_msg) { if (in_porcelain && *first && header_msg) { printf("%s\n", header_msg); *first = 0; } printf(fmt, name); } int refresh_index(struct index_state *istate, unsigned int flags, const struct pathspec *pathspec, char *seen, const char *header_msg) { int i; int has_errors = 0; int really = (flags & REFRESH_REALLY) != 0; int allow_unmerged = (flags & REFRESH_UNMERGED) != 0; int quiet = (flags & REFRESH_QUIET) != 0; int not_new = (flags & REFRESH_IGNORE_MISSING) != 0; int ignore_submodules = (flags & REFRESH_IGNORE_SUBMODULES) != 0; int first = 1; int in_porcelain = (flags & REFRESH_IN_PORCELAIN); unsigned int options = (CE_MATCH_REFRESH | (really ? CE_MATCH_IGNORE_VALID : 0) | (not_new ? CE_MATCH_IGNORE_MISSING : 0)); const char *modified_fmt; const char *deleted_fmt; const char *typechange_fmt; const char *added_fmt; const char *unmerged_fmt; modified_fmt = (in_porcelain ? "M\t%s\n" : "%s: needs update\n"); deleted_fmt = (in_porcelain ? "D\t%s\n" : "%s: needs update\n"); typechange_fmt = (in_porcelain ? "T\t%s\n" : "%s needs update\n"); added_fmt = (in_porcelain ? "A\t%s\n" : "%s needs update\n"); unmerged_fmt = (in_porcelain ? "U\t%s\n" : "%s: needs merge\n"); for (i = 0; i < istate->cache_nr; i++) { struct cache_entry *ce, *new; int cache_errno = 0; int changed = 0; int filtered = 0; ce = istate->cache[i]; if (ignore_submodules && S_ISGITLINK(ce->ce_mode)) continue; if (pathspec && !ce_path_match(ce, pathspec, seen)) filtered = 1; if (ce_stage(ce)) { while ((i < istate->cache_nr) && ! 
strcmp(istate->cache[i]->name, ce->name)) i++; i--; if (allow_unmerged) continue; if (!filtered) show_file(unmerged_fmt, ce->name, in_porcelain, &first, header_msg); has_errors = 1; continue; } if (filtered) continue; new = refresh_cache_ent(istate, ce, options, &cache_errno, &changed); if (new == ce) continue; if (!new) { const char *fmt; if (really && cache_errno == EINVAL) { /* If we are doing --really-refresh that * means the index is not valid anymore. */ ce->ce_flags &= ~CE_VALID; ce->ce_flags |= CE_UPDATE_IN_BASE; istate->cache_changed |= CE_ENTRY_CHANGED; } if (quiet) continue; if (cache_errno == ENOENT) fmt = deleted_fmt; else if (ce->ce_flags & CE_INTENT_TO_ADD) fmt = added_fmt; /* must be before other checks */ else if (changed & TYPE_CHANGED) fmt = typechange_fmt; else fmt = modified_fmt; show_file(fmt, ce->name, in_porcelain, &first, header_msg); has_errors = 1; continue; } replace_index_entry(istate, i, new); } return has_errors; } static struct cache_entry *refresh_cache_entry(struct cache_entry *ce, unsigned int options) { return refresh_cache_ent(&the_index, ce, options, NULL, NULL); } /***************************************************************** * Index File I/O *****************************************************************/ #define INDEX_FORMAT_DEFAULT 3 static unsigned int get_index_format_default(void) { char *envversion = getenv("GIT_INDEX_VERSION"); char *endp; int value; unsigned int version = INDEX_FORMAT_DEFAULT; if (!envversion) { if (!git_config_get_int("index.version", &value)) version = value; if (version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < version) { warning(_("index.version set, but the value is invalid.\n" "Using version %i"), INDEX_FORMAT_DEFAULT); return INDEX_FORMAT_DEFAULT; } return version; } version = strtoul(envversion, &endp, 10); if (*endp || version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < version) { warning(_("GIT_INDEX_VERSION set, but the value is invalid.\n" "Using version %i"), INDEX_FORMAT_DEFAULT); version 
= INDEX_FORMAT_DEFAULT; } return version; } /* * dev/ino/uid/gid/size are also just tracked to the low 32 bits * Again - this is just a (very strong in practice) heuristic that * the inode hasn't changed. * * We save the fields in big-endian order to allow using the * index file over NFS transparently. */ struct ondisk_cache_entry { struct cache_time ctime; struct cache_time mtime; uint32_t dev; uint32_t ino; uint32_t mode; uint32_t uid; uint32_t gid; uint32_t size; unsigned char sha1[20]; uint16_t flags; char name[FLEX_ARRAY]; /* more */ }; /* * This struct is used when CE_EXTENDED bit is 1 * The struct must match ondisk_cache_entry exactly from * ctime till flags */ struct ondisk_cache_entry_extended { struct cache_time ctime; struct cache_time mtime; uint32_t dev; uint32_t ino; uint32_t mode; uint32_t uid; uint32_t gid; uint32_t size; unsigned char sha1[20]; uint16_t flags; uint16_t flags2; char name[FLEX_ARRAY]; /* more */ }; /* These are only used for v3 or lower */ #define align_flex_name(STRUCT,len) ((offsetof(struct STRUCT,name) + (len) + 8) & ~7) #define ondisk_cache_entry_size(len) align_flex_name(ondisk_cache_entry,len) #define ondisk_cache_entry_extended_size(len) align_flex_name(ondisk_cache_entry_extended,len) #define ondisk_ce_size(ce) (((ce)->ce_flags & CE_EXTENDED) ? 
\ ondisk_cache_entry_extended_size(ce_namelen(ce)) : \ ondisk_cache_entry_size(ce_namelen(ce))) static int verify_hdr(struct cache_header *hdr, unsigned long size) { git_SHA_CTX c; unsigned char sha1[20]; int hdr_version; if (hdr->hdr_signature != htonl(CACHE_SIGNATURE)) return error("bad signature"); hdr_version = ntohl(hdr->hdr_version); if (hdr_version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < hdr_version) return error("bad index version %d", hdr_version); git_SHA1_Init(&c); git_SHA1_Update(&c, hdr, size - 20); git_SHA1_Final(sha1, &c); if (hashcmp(sha1, (unsigned char *)hdr + size - 20)) return error("bad index file sha1 signature"); return 0; } static int read_index_extension(struct index_state *istate, const char *ext, void *data, unsigned long sz) { switch (CACHE_EXT(ext)) { case CACHE_EXT_TREE: istate->cache_tree = cache_tree_read(data, sz); break; case CACHE_EXT_RESOLVE_UNDO: istate->resolve_undo = resolve_undo_read(data, sz); break; case CACHE_EXT_LINK: if (read_link_extension(istate, data, sz)) return -1; break; default: if (*ext < 'A' || 'Z' < *ext) return error("index uses %.4s extension, which we do not understand", ext); fprintf(stderr, "ignoring %.4s extension\n", ext); break; } return 0; } int hold_locked_index(struct lock_file *lk, int die_on_error) { return hold_lock_file_for_update(lk, get_index_file(), die_on_error ? 
LOCK_DIE_ON_ERROR : 0); } int read_index(struct index_state *istate) { return read_index_from(istate, get_index_file()); } static struct cache_entry *cache_entry_from_ondisk(struct ondisk_cache_entry *ondisk, unsigned int flags, const char *name, size_t len) { struct cache_entry *ce = xmalloc(cache_entry_size(len)); ce->ce_stat_data.sd_ctime.sec = get_be32(&ondisk->ctime.sec); ce->ce_stat_data.sd_mtime.sec = get_be32(&ondisk->mtime.sec); ce->ce_stat_data.sd_ctime.nsec = get_be32(&ondisk->ctime.nsec); ce->ce_stat_data.sd_mtime.nsec = get_be32(&ondisk->mtime.nsec); ce->ce_stat_data.sd_dev = get_be32(&ondisk->dev); ce->ce_stat_data.sd_ino = get_be32(&ondisk->ino); ce->ce_mode = get_be32(&ondisk->mode); ce->ce_stat_data.sd_uid = get_be32(&ondisk->uid); ce->ce_stat_data.sd_gid = get_be32(&ondisk->gid); ce->ce_stat_data.sd_size = get_be32(&ondisk->size); ce->ce_flags = flags & ~CE_NAMEMASK; ce->ce_namelen = len; ce->index = 0; hashcpy(ce->sha1, ondisk->sha1); memcpy(ce->name, name, len); ce->name[len] = '\0'; return ce; } /* * Adjacent cache entries tend to share the leading paths, so it makes * sense to only store the differences in later entries. In the v4 * on-disk format of the index, each on-disk cache entry stores the * number of bytes to be stripped from the end of the previous name, * and the bytes to append to the result, to come up with its name. 
*/ static unsigned long expand_name_field(struct strbuf *name, const char *cp_) { const unsigned char *ep, *cp = (const unsigned char *)cp_; size_t len = decode_varint(&cp); if (name->len < len) die("malformed name field in the index"); strbuf_remove(name, name->len - len, len); for (ep = cp; *ep; ep++) ; /* find the end */ strbuf_add(name, cp, ep - cp); return (const char *)ep + 1 - cp_; } static struct cache_entry *create_from_disk(struct ondisk_cache_entry *ondisk, unsigned long *ent_size, struct strbuf *previous_name) { struct cache_entry *ce; size_t len; const char *name; unsigned int flags; /* On-disk flags are just 16 bits */ flags = get_be16(&ondisk->flags); len = flags & CE_NAMEMASK; if (flags & CE_EXTENDED) { struct ondisk_cache_entry_extended *ondisk2; int extended_flags; ondisk2 = (struct ondisk_cache_entry_extended *)ondisk; extended_flags = get_be16(&ondisk2->flags2) << 16; /* We do not yet understand any bit out of CE_EXTENDED_FLAGS */ if (extended_flags & ~CE_EXTENDED_FLAGS) die("Unknown index entry format %08x", extended_flags); flags |= extended_flags; name = ondisk2->name; } else name = ondisk->name; if (!previous_name) { /* v3 and earlier */ if (len == CE_NAMEMASK) len = strlen(name); ce = cache_entry_from_ondisk(ondisk, flags, name, len); *ent_size = ondisk_ce_size(ce); } else { unsigned long consumed; consumed = expand_name_field(previous_name, name); ce = cache_entry_from_ondisk(ondisk, flags, previous_name->buf, previous_name->len); *ent_size = (name - ((char *)ondisk)) + consumed; } return ce; } static void check_ce_order(struct cache_entry *ce, struct cache_entry *next_ce) { int name_compare = strcmp(ce->name, next_ce->name); if (0 < name_compare) die("unordered stage entries in index"); if (!name_compare) { if (!ce_stage(ce)) die("multiple stage entries for merged file '%s'", ce->name); if (ce_stage(ce) > ce_stage(next_ce)) die("unordered stage entries for '%s'", ce->name); } } /* remember to discard_cache() before reading a different 
cache! */ int do_read_index(struct index_state *istate, const char *path, int must_exist) { int fd, i; struct stat st; unsigned long src_offset; struct cache_header *hdr; void *mmap; size_t mmap_size; struct strbuf previous_name_buf = STRBUF_INIT, *previous_name; if (istate->initialized) return istate->cache_nr; istate->timestamp.sec = 0; istate->timestamp.nsec = 0; fd = open(path, O_RDONLY); if (fd < 0) { if (!must_exist && errno == ENOENT) return 0; die_errno("%s: index file open failed", path); } if (fstat(fd, &st)) die_errno("cannot stat the open index"); mmap_size = xsize_t(st.st_size); if (mmap_size < sizeof(struct cache_header) + 20) die("index file smaller than expected"); mmap = xmmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); if (mmap == MAP_FAILED) die_errno("unable to map index file"); close(fd); hdr = mmap; if (verify_hdr(hdr, mmap_size) < 0) goto unmap; hashcpy(istate->sha1, (const unsigned char *)hdr + mmap_size - 20); istate->version = ntohl(hdr->hdr_version); istate->cache_nr = ntohl(hdr->hdr_entries); istate->cache_alloc = alloc_nr(istate->cache_nr); istate->cache = xcalloc(istate->cache_alloc, sizeof(*istate->cache)); istate->initialized = 1; if (istate->version == 4) previous_name = &previous_name_buf; else previous_name = NULL; src_offset = sizeof(*hdr); for (i = 0; i < istate->cache_nr; i++) { struct ondisk_cache_entry *disk_ce; struct cache_entry *ce; unsigned long consumed; disk_ce = (struct ondisk_cache_entry *)((char *)mmap + src_offset); ce = create_from_disk(disk_ce, &consumed, previous_name); set_index_entry(istate, i, ce); if (i > 0) check_ce_order(istate->cache[i - 1], ce); src_offset += consumed; } strbuf_release(&previous_name_buf); istate->timestamp.sec = st.st_mtime; istate->timestamp.nsec = ST_MTIME_NSEC(st); while (src_offset <= mmap_size - 20 - 8) { /* After an array of active_nr index entries, * there can be arbitrary number of extended * sections, each of which is prefixed with * extension name (4-byte) and 
section length * in 4-byte network byte order. */ uint32_t extsize; memcpy(&extsize, (char *)mmap + src_offset + 4, 4); extsize = ntohl(extsize); if (read_index_extension(istate, (const char *) mmap + src_offset, (char *) mmap + src_offset + 8, extsize) < 0) goto unmap; src_offset += 8; src_offset += extsize; } munmap(mmap, mmap_size); return istate->cache_nr; unmap: munmap(mmap, mmap_size); die("index file corrupt"); } int read_index_from(struct index_state *istate, const char *path) { struct split_index *split_index; int ret; /* istate->initialized covers both .git/index and .git/sharedindex.xxx */ if (istate->initialized) return istate->cache_nr; ret = do_read_index(istate, path, 0); split_index = istate->split_index; if (!split_index) return ret; if (is_null_sha1(split_index->base_sha1)) return ret; if (split_index->base) discard_index(split_index->base); else split_index->base = xcalloc(1, sizeof(*split_index->base)); ret = do_read_index(split_index->base, git_path("sharedindex.%s", sha1_to_hex(split_index->base_sha1)), 1); if (hashcmp(split_index->base_sha1, split_index->base->sha1)) die("broken index, expect %s in %s, got %s", sha1_to_hex(split_index->base_sha1), git_path("sharedindex.%s", sha1_to_hex(split_index->base_sha1)), sha1_to_hex(split_index->base->sha1)); merge_base_index(istate); return ret; } int is_index_unborn(struct index_state *istate) { return (!istate->cache_nr && !istate->timestamp.sec); } int discard_index(struct index_state *istate) { int i; for (i = 0; i < istate->cache_nr; i++) { if (istate->cache[i]->index && istate->split_index && istate->split_index->base && istate->cache[i]->index <= istate->split_index->base->cache_nr && istate->cache[i] == istate->split_index->base->cache[istate->cache[i]->index - 1]) continue; free(istate->cache[i]); } resolve_undo_clear_index(istate); istate->cache_nr = 0; istate->cache_changed = 0; istate->timestamp.sec = 0; istate->timestamp.nsec = 0; free_name_hash(istate); 
cache_tree_free(&(istate->cache_tree)); istate->initialized = 0; free(istate->cache); istate->cache = NULL; istate->cache_alloc = 0; discard_split_index(istate); return 0; } int unmerged_index(const struct index_state *istate) { int i; for (i = 0; i < istate->cache_nr; i++) { if (ce_stage(istate->cache[i])) return 1; } return 0; } #define WRITE_BUFFER_SIZE 8192 static unsigned char write_buffer[WRITE_BUFFER_SIZE]; static unsigned long write_buffer_len; static int ce_write_flush(git_SHA_CTX *context, int fd) { unsigned int buffered = write_buffer_len; if (buffered) { git_SHA1_Update(context, write_buffer, buffered); if (write_in_full(fd, write_buffer, buffered) != buffered) return -1; write_buffer_len = 0; } return 0; } static int ce_write(git_SHA_CTX *context, int fd, void *data, unsigned int len) { while (len) { unsigned int buffered = write_buffer_len; unsigned int partial = WRITE_BUFFER_SIZE - buffered; if (partial > len) partial = len; memcpy(write_buffer + buffered, data, partial); buffered += partial; if (buffered == WRITE_BUFFER_SIZE) { write_buffer_len = buffered; if (ce_write_flush(context, fd)) return -1; buffered = 0; } write_buffer_len = buffered; len -= partial; data = (char *) data + partial; } return 0; } static int write_index_ext_header(git_SHA_CTX *context, int fd, unsigned int ext, unsigned int sz) { ext = htonl(ext); sz = htonl(sz); return ((ce_write(context, fd, &ext, 4) < 0) || (ce_write(context, fd, &sz, 4) < 0)) ? 
-1 : 0; } static int ce_flush(git_SHA_CTX *context, int fd, unsigned char *sha1) { unsigned int left = write_buffer_len; if (left) { write_buffer_len = 0; git_SHA1_Update(context, write_buffer, left); } /* Flush first if not enough space for SHA1 signature */ if (left + 20 > WRITE_BUFFER_SIZE) { if (write_in_full(fd, write_buffer, left) != left) return -1; left = 0; } /* Append the SHA1 signature at the end */ git_SHA1_Final(write_buffer + left, context); hashcpy(sha1, write_buffer + left); left += 20; return (write_in_full(fd, write_buffer, left) != left) ? -1 : 0; } static void ce_smudge_racily_clean_entry(struct cache_entry *ce) { /* * The only thing we care about in this function is to smudge the * falsely clean entry due to touch-update-touch race, so we leave * everything else as they are. We are called for entries whose * ce_stat_data.sd_mtime match the index file mtime. * * Note that this actually does not do much for gitlinks, for * which ce_match_stat_basic() always goes to the actual * contents. The caller checks with is_racy_timestamp() which * always says "no" for gitlinks, so we are not called for them ;-) */ struct stat st; if (lstat(ce->name, &st) < 0) return; if (ce_match_stat_basic(ce, &st)) return; if (ce_modified_check_fs(ce, &st)) { /* This is "racily clean"; smudge it. Note that this * is a tricky code. At first glance, it may appear * that it can break with this sequence: * * $ echo xyzzy >frotz * $ git-update-index --add frotz * $ : >frotz * $ sleep 3 * $ echo filfre >nitfol * $ git-update-index --add nitfol * * but it does not. When the second update-index runs, * it notices that the entry "frotz" has the same timestamp * as index, and if we were to smudge it by resetting its * size to zero here, then the object name recorded * in index is the 6-byte file but the cached stat information * becomes zero --- which would then match what we would * obtain from the filesystem next time we stat("frotz"). 
* * However, the second update-index, before calling * this function, notices that the cached size is 6 * bytes and what is on the filesystem is an empty * file, and never calls us, so the cached size information * for "frotz" stays 6 which does not match the filesystem. */ ce->ce_stat_data.sd_size = 0; } } /* Copy miscellaneous fields but not the name */ static char *copy_cache_entry_to_ondisk(struct ondisk_cache_entry *ondisk, struct cache_entry *ce) { short flags; ondisk->ctime.sec = htonl(ce->ce_stat_data.sd_ctime.sec); ondisk->mtime.sec = htonl(ce->ce_stat_data.sd_mtime.sec); ondisk->ctime.nsec = htonl(ce->ce_stat_data.sd_ctime.nsec); ondisk->mtime.nsec = htonl(ce->ce_stat_data.sd_mtime.nsec); ondisk->dev = htonl(ce->ce_stat_data.sd_dev); ondisk->ino = htonl(ce->ce_stat_data.sd_ino); ondisk->mode = htonl(ce->ce_mode); ondisk->uid = htonl(ce->ce_stat_data.sd_uid); ondisk->gid = htonl(ce->ce_stat_data.sd_gid); ondisk->size = htonl(ce->ce_stat_data.sd_size); hashcpy(ondisk->sha1, ce->sha1); flags = ce->ce_flags & ~CE_NAMEMASK; flags |= (ce_namelen(ce) >= CE_NAMEMASK ? 
CE_NAMEMASK : ce_namelen(ce)); ondisk->flags = htons(flags); if (ce->ce_flags & CE_EXTENDED) { struct ondisk_cache_entry_extended *ondisk2; ondisk2 = (struct ondisk_cache_entry_extended *)ondisk; ondisk2->flags2 = htons((ce->ce_flags & CE_EXTENDED_FLAGS) >> 16); return ondisk2->name; } else { return ondisk->name; } } static int ce_write_entry(git_SHA_CTX *c, int fd, struct cache_entry *ce, struct strbuf *previous_name) { int size; struct ondisk_cache_entry *ondisk; int saved_namelen = saved_namelen; /* compiler workaround */ char *name; int result; if (ce->ce_flags & CE_STRIP_NAME) { saved_namelen = ce_namelen(ce); ce->ce_namelen = 0; } if (!previous_name) { size = ondisk_ce_size(ce); ondisk = xcalloc(1, size); name = copy_cache_entry_to_ondisk(ondisk, ce); memcpy(name, ce->name, ce_namelen(ce)); } else { int common, to_remove, prefix_size; unsigned char to_remove_vi[16]; for (common = 0; (ce->name[common] && common < previous_name->len && ce->name[common] == previous_name->buf[common]); common++) ; /* still matching */ to_remove = previous_name->len - common; prefix_size = encode_varint(to_remove, to_remove_vi); if (ce->ce_flags & CE_EXTENDED) size = offsetof(struct ondisk_cache_entry_extended, name); else size = offsetof(struct ondisk_cache_entry, name); size += prefix_size + (ce_namelen(ce) - common + 1); ondisk = xcalloc(1, size); name = copy_cache_entry_to_ondisk(ondisk, ce); memcpy(name, to_remove_vi, prefix_size); memcpy(name + prefix_size, ce->name + common, ce_namelen(ce) - common); strbuf_splice(previous_name, common, to_remove, ce->name + common, ce_namelen(ce) - common); } if (ce->ce_flags & CE_STRIP_NAME) { ce->ce_namelen = saved_namelen; ce->ce_flags &= ~CE_STRIP_NAME; } result = ce_write(c, fd, ondisk, size); free(ondisk); return result; } /* * This function verifies if index_state has the correct sha1 of the * index file. Don't die if we have any other failure, just return 0. 
*/ static int verify_index_from(const struct index_state *istate, const char *path) { int fd; ssize_t n; struct stat st; unsigned char sha1[20]; if (!istate->initialized) return 0; fd = open(path, O_RDONLY); if (fd < 0) return 0; if (fstat(fd, &st)) goto out; if (st.st_size < sizeof(struct cache_header) + 20) goto out; n = pread_in_full(fd, sha1, 20, st.st_size - 20); if (n != 20) goto out; if (hashcmp(istate->sha1, sha1)) goto out; close(fd); return 1; out: close(fd); return 0; } static int verify_index(const struct index_state *istate) { return verify_index_from(istate, get_index_file()); } static int has_racy_timestamp(struct index_state *istate) { int entries = istate->cache_nr; int i; for (i = 0; i < entries; i++) { struct cache_entry *ce = istate->cache[i]; if (is_racy_timestamp(istate, ce)) return 1; } return 0; } /* * Opportunistically update the index but do not complain if we can't */ void update_index_if_able(struct index_state *istate, struct lock_file *lockfile) { if ((istate->cache_changed || has_racy_timestamp(istate)) && verify_index(istate) && write_locked_index(istate, lockfile, COMMIT_LOCK)) rollback_lock_file(lockfile); } static int do_write_index(struct index_state *istate, int newfd, int strip_extensions) { git_SHA_CTX c; struct cache_header hdr; int i, err, removed, extended, hdr_version; struct cache_entry **cache = istate->cache; int entries = istate->cache_nr; struct stat st; struct strbuf previous_name_buf = STRBUF_INIT, *previous_name; for (i = removed = extended = 0; i < entries; i++) { if (cache[i]->ce_flags & CE_REMOVE) removed++; /* reduce extended entries if possible */ cache[i]->ce_flags &= ~CE_EXTENDED; if (cache[i]->ce_flags & CE_EXTENDED_FLAGS) { extended++; cache[i]->ce_flags |= CE_EXTENDED; } } if (!istate->version) { istate->version = get_index_format_default(); if (getenv("GIT_TEST_SPLIT_INDEX")) init_split_index(istate); } /* demote version 3 to version 2 when the latter suffices */ if (istate->version == 3 || 
istate->version == 2) istate->version = extended ? 3 : 2; hdr_version = istate->version; hdr.hdr_signature = htonl(CACHE_SIGNATURE); hdr.hdr_version = htonl(hdr_version); hdr.hdr_entries = htonl(entries - removed); git_SHA1_Init(&c); if (ce_write(&c, newfd, &hdr, sizeof(hdr)) < 0) return -1; previous_name = (hdr_version == 4) ? &previous_name_buf : NULL; for (i = 0; i < entries; i++) { struct cache_entry *ce = cache[i]; if (ce->ce_flags & CE_REMOVE) continue; if (!ce_uptodate(ce) && is_racy_timestamp(istate, ce)) ce_smudge_racily_clean_entry(ce); if (is_null_sha1(ce->sha1)) { static const char msg[] = "cache entry has null sha1: %s"; static int allow = -1; if (allow < 0) allow = git_env_bool("GIT_ALLOW_NULL_SHA1", 0); if (allow) warning(msg, ce->name); else return error(msg, ce->name); } if (ce_write_entry(&c, newfd, ce, previous_name) < 0) return -1; } strbuf_release(&previous_name_buf); /* Write extension data here */ if (!strip_extensions && istate->split_index) { struct strbuf sb = STRBUF_INIT; err = write_link_extension(&sb, istate) < 0 || write_index_ext_header(&c, newfd, CACHE_EXT_LINK, sb.len) < 0 || ce_write(&c, newfd, sb.buf, sb.len) < 0; strbuf_release(&sb); if (err) return -1; } if (!strip_extensions && istate->cache_tree) { struct strbuf sb = STRBUF_INIT; cache_tree_write(&sb, istate->cache_tree); err = write_index_ext_header(&c, newfd, CACHE_EXT_TREE, sb.len) < 0 || ce_write(&c, newfd, sb.buf, sb.len) < 0; strbuf_release(&sb); if (err) return -1; } if (!strip_extensions && istate->resolve_undo) { struct strbuf sb = STRBUF_INIT; resolve_undo_write(&sb, istate->resolve_undo); err = write_index_ext_header(&c, newfd, CACHE_EXT_RESOLVE_UNDO, sb.len) < 0 || ce_write(&c, newfd, sb.buf, sb.len) < 0; strbuf_release(&sb); if (err) return -1; } if (ce_flush(&c, newfd, istate->sha1) || fstat(newfd, &st)) return -1; istate->timestamp.sec = (unsigned int)st.st_mtime; istate->timestamp.nsec = ST_MTIME_NSEC(st); return 0; } void set_alternate_index_output(const char 
*name) { alternate_index_output = name; } static int commit_locked_index(struct lock_file *lk) { if (alternate_index_output) return commit_lock_file_to(lk, alternate_index_output); else return commit_lock_file(lk); } static int do_write_locked_index(struct index_state *istate, struct lock_file *lock, unsigned flags) { int ret = do_write_index(istate, lock->fd, 0); if (ret) return ret; assert((flags & (COMMIT_LOCK | CLOSE_LOCK)) != (COMMIT_LOCK | CLOSE_LOCK)); if (flags & COMMIT_LOCK) return commit_locked_index(lock); else if (flags & CLOSE_LOCK) return close_lock_file(lock); else return ret; } static int write_split_index(struct index_state *istate, struct lock_file *lock, unsigned flags) { int ret; prepare_to_write_split_index(istate); ret = do_write_locked_index(istate, lock, flags); finish_writing_split_index(istate); return ret; } static char *temporary_sharedindex; static void remove_temporary_sharedindex(void) { if (temporary_sharedindex) { unlink_or_warn(temporary_sharedindex); free(temporary_sharedindex); temporary_sharedindex = NULL; } } static void remove_temporary_sharedindex_on_signal(int signo) { remove_temporary_sharedindex(); sigchain_pop(signo); raise(signo); } static int write_shared_index(struct index_state *istate, struct lock_file *lock, unsigned flags) { struct split_index *si = istate->split_index; static int installed_handler; int fd, ret; temporary_sharedindex = git_pathdup("sharedindex_XXXXXX"); fd = mkstemp(temporary_sharedindex); if (fd < 0) { free(temporary_sharedindex); temporary_sharedindex = NULL; hashclr(si->base_sha1); return do_write_locked_index(istate, lock, flags); } if (!installed_handler) { atexit(remove_temporary_sharedindex); sigchain_push_common(remove_temporary_sharedindex_on_signal); } move_cache_to_base_index(istate); ret = do_write_index(si->base, fd, 1); close(fd); if (ret) { remove_temporary_sharedindex(); return ret; } ret = rename(temporary_sharedindex, git_path("sharedindex.%s", sha1_to_hex(si->base->sha1))); 
free(temporary_sharedindex); temporary_sharedindex = NULL; if (!ret) hashcpy(si->base_sha1, si->base->sha1); return ret; } int write_locked_index(struct index_state *istate, struct lock_file *lock, unsigned flags) { struct split_index *si = istate->split_index; if (!si || alternate_index_output || (istate->cache_changed & ~EXTMASK)) { if (si) hashclr(si->base_sha1); return do_write_locked_index(istate, lock, flags); } if (getenv("GIT_TEST_SPLIT_INDEX")) { int v = si->base_sha1[0]; if ((v & 15) < 6) istate->cache_changed |= SPLIT_INDEX_ORDERED; } if (istate->cache_changed & SPLIT_INDEX_ORDERED) { int ret = write_shared_index(istate, lock, flags); if (ret) return ret; } return write_split_index(istate, lock, flags); } /* * Read the index file that is potentially unmerged into given * index_state, dropping any unmerged entries. Returns true if * the index is unmerged. Callers who want to refuse to work * from an unmerged state can call this and check its return value, * instead of calling read_cache(). */ int read_index_unmerged(struct index_state *istate) { int i; int unmerged = 0; read_index(istate); for (i = 0; i < istate->cache_nr; i++) { struct cache_entry *ce = istate->cache[i]; struct cache_entry *new_ce; int size, len; if (!ce_stage(ce)) continue; unmerged = 1; len = ce_namelen(ce); size = cache_entry_size(len); new_ce = xcalloc(1, size); memcpy(new_ce->name, ce->name, len); new_ce->ce_flags = create_ce_flags(0) | CE_CONFLICTED; new_ce->ce_namelen = len; new_ce->ce_mode = ce->ce_mode; if (add_index_entry(istate, new_ce, 0)) return error("%s: cannot drop to stage #0", new_ce->name); } return unmerged; } /* * Returns 1 if the path is an "other" path with respect to * the index; that is, the path is not mentioned in the index at all, * either as a file, a directory with some files in the index, * or as an unmerged entry. * * We helpfully remove a trailing "/" from directories so that * the output of read_directory can be used as-is. 
*/ int index_name_is_other(const struct index_state *istate, const char *name, int namelen) { int pos; if (namelen && name[namelen - 1] == '/') namelen--; pos = index_name_pos(istate, name, namelen); if (0 <= pos) return 0; /* exact match */ pos = -pos - 1; if (pos < istate->cache_nr) { struct cache_entry *ce = istate->cache[pos]; if (ce_namelen(ce) == namelen && !memcmp(ce->name, name, namelen)) return 0; /* Yup, this one exists unmerged */ } return 1; } void *read_blob_data_from_index(struct index_state *istate, const char *path, unsigned long *size) { int pos, len; unsigned long sz; enum object_type type; void *data; len = strlen(path); pos = index_name_pos(istate, path, len); if (pos < 0) { /* * We might be in the middle of a merge, in which * case we would read stage #2 (ours). */ int i; for (i = -pos - 1; (pos < 0 && i < istate->cache_nr && !strcmp(istate->cache[i]->name, path)); i++) if (ce_stage(istate->cache[i]) == 2) pos = i; } if (pos < 0) return NULL; data = read_sha1_file(istate->cache[pos]->sha1, &type, &sz); if (!data || type != OBJ_BLOB) { free(data); return NULL; } if (size) *size = sz; return data; } void stat_validity_clear(struct stat_validity *sv) { free(sv->sd); sv->sd = NULL; } int stat_validity_check(struct stat_validity *sv, const char *path) { struct stat st; if (stat(path, &st) < 0) return sv->sd == NULL; if (!sv->sd) return 0; return S_ISREG(st.st_mode) && !match_stat_data(sv->sd, &st); } void stat_validity_update(struct stat_validity *sv, int fd) { struct stat st; if (fstat(fd, &st) < 0 || !S_ISREG(st.st_mode)) stat_validity_clear(sv); else { if (!sv->sd) sv->sd = xcalloc(1, sizeof(struct stat_data)); fill_stat_data(sv->sd, &st); } } cgit-0.11.2/git/dir.h0000644000175000017500000001566012476431550014522 0ustar formorerformorer#ifndef DIR_H #define DIR_H /* See Documentation/technical/api-directory-listing.txt */ #include "strbuf.h" struct dir_entry { unsigned int len; char name[FLEX_ARRAY]; /* more */ }; #define EXC_FLAG_NODIR 1 #define 
EXC_FLAG_ENDSWITH 4 #define EXC_FLAG_MUSTBEDIR 8 #define EXC_FLAG_NEGATIVE 16 struct exclude { /* * This allows callers of last_exclude_matching() etc. * to determine the origin of the matching pattern. */ struct exclude_list *el; const char *pattern; int patternlen; int nowildcardlen; const char *base; int baselen; int flags; /* * Counting starts from 1 for line numbers in ignore files, * and from -1 decrementing for patterns from CLI args. */ int srcpos; }; /* * Each excludes file will be parsed into a fresh exclude_list which * is appended to the relevant exclude_list_group (either EXC_DIRS or * EXC_FILE). An exclude_list within the EXC_CMDL exclude_list_group * can also be used to represent the list of --exclude values passed * via CLI args. */ struct exclude_list { int nr; int alloc; /* remember pointer to exclude file contents so we can free() */ char *filebuf; /* origin of list, e.g. path to filename, or descriptive string */ const char *src; struct exclude **excludes; }; /* * The contents of the per-directory exclude files are lazily read on * demand and then cached in memory, one per exclude_stack struct, in * order to avoid opening and parsing each one every time that * directory is traversed. 
*/ struct exclude_stack { struct exclude_stack *prev; /* the struct exclude_stack for the parent directory */ int baselen; int exclude_ix; /* index of exclude_list within EXC_DIRS exclude_list_group */ }; struct exclude_list_group { int nr, alloc; struct exclude_list *el; }; struct dir_struct { int nr, alloc; int ignored_nr, ignored_alloc; enum { DIR_SHOW_IGNORED = 1<<0, DIR_SHOW_OTHER_DIRECTORIES = 1<<1, DIR_HIDE_EMPTY_DIRECTORIES = 1<<2, DIR_NO_GITLINKS = 1<<3, DIR_COLLECT_IGNORED = 1<<4, DIR_SHOW_IGNORED_TOO = 1<<5, DIR_COLLECT_KILLED_ONLY = 1<<6 } flags; struct dir_entry **entries; struct dir_entry **ignored; /* Exclude info */ const char *exclude_per_dir; /* * We maintain three groups of exclude pattern lists: * * EXC_CMDL lists patterns explicitly given on the command line. * EXC_DIRS lists patterns obtained from per-directory ignore files. * EXC_FILE lists patterns from fallback ignore files, e.g. * - .git/info/exclude * - core.excludesfile * * Each group contains multiple exclude lists, a single list * per source. */ #define EXC_CMDL 0 #define EXC_DIRS 1 #define EXC_FILE 2 struct exclude_list_group exclude_list_group[3]; /* * Temporary variables which are used during loading of the * per-directory exclude lists. * * exclude_stack points to the top of the exclude_stack, and * basebuf contains the full path to the current * (sub)directory in the traversal. Exclude points to the * matching exclude struct if the directory is excluded. */ struct exclude_stack *exclude_stack; struct exclude *exclude; struct strbuf basebuf; }; /* * The ordering of these constants is significant, with * higher-numbered match types signifying "closer" (i.e. more * specific) matches which will override lower-numbered match types * when populating the seen[] array. 
*/ #define MATCHED_RECURSIVELY 1 #define MATCHED_FNMATCH 2 #define MATCHED_EXACTLY 3 extern int simple_length(const char *match); extern int no_wildcard(const char *string); extern char *common_prefix(const struct pathspec *pathspec); extern int match_pathspec(const struct pathspec *pathspec, const char *name, int namelen, int prefix, char *seen, int is_dir); extern int within_depth(const char *name, int namelen, int depth, int max_depth); extern int fill_directory(struct dir_struct *dir, const struct pathspec *pathspec); extern int read_directory(struct dir_struct *, const char *path, int len, const struct pathspec *pathspec); extern int is_excluded_from_list(const char *pathname, int pathlen, const char *basename, int *dtype, struct exclude_list *el); struct dir_entry *dir_add_ignored(struct dir_struct *dir, const char *pathname, int len); /* * these implement the matching logic for dir.c:excluded_from_list and * attr.c:path_matches() */ extern int match_basename(const char *, int, const char *, int, int, int); extern int match_pathname(const char *, int, const char *, int, const char *, int, int, int); extern struct exclude *last_exclude_matching(struct dir_struct *dir, const char *name, int *dtype); extern int is_excluded(struct dir_struct *dir, const char *name, int *dtype); extern struct exclude_list *add_exclude_list(struct dir_struct *dir, int group_type, const char *src); extern int add_excludes_from_file_to_list(const char *fname, const char *base, int baselen, struct exclude_list *el, int check_index); extern void add_excludes_from_file(struct dir_struct *, const char *fname); extern void parse_exclude_pattern(const char **string, int *patternlen, int *flags, int *nowildcardlen); extern void add_exclude(const char *string, const char *base, int baselen, struct exclude_list *el, int srcpos); extern void clear_exclude_list(struct exclude_list *el); extern void clear_directory(struct dir_struct *dir); extern int file_exists(const char *); extern int 
is_inside_dir(const char *dir); extern int dir_inside_of(const char *subdir, const char *dir); static inline int is_dot_or_dotdot(const char *name) { return (name[0] == '.' && (name[1] == '\0' || (name[1] == '.' && name[2] == '\0'))); } extern int is_empty_dir(const char *dir); extern void setup_standard_excludes(struct dir_struct *dir); #define REMOVE_DIR_EMPTY_ONLY 01 #define REMOVE_DIR_KEEP_NESTED_GIT 02 #define REMOVE_DIR_KEEP_TOPLEVEL 04 extern int remove_dir_recursively(struct strbuf *path, int flag); /* tries to remove the path with empty directories along it, ignores ENOENT */ extern int remove_path(const char *path); extern int strcmp_icase(const char *a, const char *b); extern int strncmp_icase(const char *a, const char *b, size_t count); extern int fnmatch_icase(const char *pattern, const char *string, int flags); /* * The prefix part of pattern must not contains wildcards. */ struct pathspec_item; extern int git_fnmatch(const struct pathspec_item *item, const char *pattern, const char *string, int prefix); static inline int ce_path_match(const struct cache_entry *ce, const struct pathspec *pathspec, char *seen) { return match_pathspec(pathspec, ce->name, ce_namelen(ce), 0, seen, S_ISDIR(ce->ce_mode) || S_ISGITLINK(ce->ce_mode)); } static inline int dir_path_match(const struct dir_entry *ent, const struct pathspec *pathspec, int prefix, char *seen) { int has_trailing_dir = ent->len && ent->name[ent->len - 1] == '/'; int len = has_trailing_dir ? 
ent->len - 1 : ent->len; return match_pathspec(pathspec, ent->name, len, prefix, seen, has_trailing_dir); } #endif cgit-0.11.2/git/branch.c0000644000175000017500000001756012476431550015175 0ustar formorerformorer#include "git-compat-util.h" #include "cache.h" #include "branch.h" #include "refs.h" #include "remote.h" #include "commit.h" struct tracking { struct refspec spec; char *src; const char *remote; int matches; }; static int find_tracked_branch(struct remote *remote, void *priv) { struct tracking *tracking = priv; if (!remote_find_tracking(remote, &tracking->spec)) { if (++tracking->matches == 1) { tracking->src = tracking->spec.src; tracking->remote = remote->name; } else { free(tracking->spec.src); if (tracking->src) { free(tracking->src); tracking->src = NULL; } } tracking->spec.src = NULL; } return 0; } static int should_setup_rebase(const char *origin) { switch (autorebase) { case AUTOREBASE_NEVER: return 0; case AUTOREBASE_LOCAL: return origin == NULL; case AUTOREBASE_REMOTE: return origin != NULL; case AUTOREBASE_ALWAYS: return 1; } return 0; } void install_branch_config(int flag, const char *local, const char *origin, const char *remote) { const char *shortname = NULL; struct strbuf key = STRBUF_INIT; int rebasing = should_setup_rebase(origin); if (skip_prefix(remote, "refs/heads/", &shortname) && !strcmp(local, shortname) && !origin) { warning(_("Not setting branch %s as its own upstream."), local); return; } strbuf_addf(&key, "branch.%s.remote", local); git_config_set(key.buf, origin ? origin : "."); strbuf_reset(&key); strbuf_addf(&key, "branch.%s.merge", local); git_config_set(key.buf, remote); if (rebasing) { strbuf_reset(&key); strbuf_addf(&key, "branch.%s.rebase", local); git_config_set(key.buf, "true"); } strbuf_release(&key); if (flag & BRANCH_CONFIG_VERBOSE) { if (shortname) { if (origin) printf_ln(rebasing ? 
_("Branch %s set up to track remote branch %s from %s by rebasing.") : _("Branch %s set up to track remote branch %s from %s."), local, shortname, origin); else printf_ln(rebasing ? _("Branch %s set up to track local branch %s by rebasing.") : _("Branch %s set up to track local branch %s."), local, shortname); } else { if (origin) printf_ln(rebasing ? _("Branch %s set up to track remote ref %s by rebasing.") : _("Branch %s set up to track remote ref %s."), local, remote); else printf_ln(rebasing ? _("Branch %s set up to track local ref %s by rebasing.") : _("Branch %s set up to track local ref %s."), local, remote); } } } /* * This is called when new_ref is branched off of orig_ref, and tries * to infer the settings for branch..{remote,merge} from the * config. */ static int setup_tracking(const char *new_ref, const char *orig_ref, enum branch_track track, int quiet) { struct tracking tracking; int config_flags = quiet ? 0 : BRANCH_CONFIG_VERBOSE; memset(&tracking, 0, sizeof(tracking)); tracking.spec.dst = (char *)orig_ref; if (for_each_remote(find_tracked_branch, &tracking)) return 1; if (!tracking.matches) switch (track) { case BRANCH_TRACK_ALWAYS: case BRANCH_TRACK_EXPLICIT: case BRANCH_TRACK_OVERRIDE: break; default: return 1; } if (tracking.matches > 1) return error(_("Not tracking: ambiguous information for ref %s"), orig_ref); install_branch_config(config_flags, new_ref, tracking.remote, tracking.src ? 
tracking.src : orig_ref); free(tracking.src); return 0; } int read_branch_desc(struct strbuf *buf, const char *branch_name) { char *v = NULL; struct strbuf name = STRBUF_INIT; strbuf_addf(&name, "branch.%s.description", branch_name); if (git_config_get_string(name.buf, &v)) { strbuf_release(&name); return -1; } strbuf_addstr(buf, v); free(v); strbuf_release(&name); return 0; } int validate_new_branchname(const char *name, struct strbuf *ref, int force, int attr_only) { if (strbuf_check_branch_ref(ref, name)) die(_("'%s' is not a valid branch name."), name); if (!ref_exists(ref->buf)) return 0; else if (!force && !attr_only) die(_("A branch named '%s' already exists."), ref->buf + strlen("refs/heads/")); if (!attr_only) { const char *head; unsigned char sha1[20]; head = resolve_ref_unsafe("HEAD", 0, sha1, NULL); if (!is_bare_repository() && head && !strcmp(head, ref->buf)) die(_("Cannot force update the current branch.")); } return 1; } static int check_tracking_branch(struct remote *remote, void *cb_data) { char *tracking_branch = cb_data; struct refspec query; memset(&query, 0, sizeof(struct refspec)); query.dst = tracking_branch; return !remote_find_tracking(remote, &query); } static int validate_remote_tracking_branch(char *ref) { return !for_each_remote(check_tracking_branch, ref); } static const char upstream_not_branch[] = N_("Cannot setup tracking information; starting point '%s' is not a branch."); static const char upstream_missing[] = N_("the requested upstream branch '%s' does not exist"); static const char upstream_advice[] = N_("\n" "If you are planning on basing your work on an upstream\n" "branch that already exists at the remote, you may need to\n" "run \"git fetch\" to retrieve it.\n" "\n" "If you are planning to push out a new local branch that\n" "will track its remote counterpart, you may want to use\n" "\"git push -u\" to set the upstream config as you push."); void create_branch(const char *head, const char *name, const char *start_name, int 
force, int reflog, int clobber_head, int quiet, enum branch_track track) { struct commit *commit; unsigned char sha1[20]; char *real_ref, msg[PATH_MAX + 20]; struct strbuf ref = STRBUF_INIT; int forcing = 0; int dont_change_ref = 0; int explicit_tracking = 0; if (track == BRANCH_TRACK_EXPLICIT || track == BRANCH_TRACK_OVERRIDE) explicit_tracking = 1; if (validate_new_branchname(name, &ref, force, track == BRANCH_TRACK_OVERRIDE || clobber_head)) { if (!force) dont_change_ref = 1; else forcing = 1; } real_ref = NULL; if (get_sha1(start_name, sha1)) { if (explicit_tracking) { if (advice_set_upstream_failure) { error(_(upstream_missing), start_name); advise(_(upstream_advice)); exit(1); } die(_(upstream_missing), start_name); } die(_("Not a valid object name: '%s'."), start_name); } switch (dwim_ref(start_name, strlen(start_name), sha1, &real_ref)) { case 0: /* Not branching from any existing branch */ if (explicit_tracking) die(_(upstream_not_branch), start_name); break; case 1: /* Unique completion -- good, only if it is a real branch */ if (!starts_with(real_ref, "refs/heads/") && validate_remote_tracking_branch(real_ref)) { if (explicit_tracking) die(_(upstream_not_branch), start_name); else real_ref = NULL; } break; default: die(_("Ambiguous object name: '%s'."), start_name); break; } if ((commit = lookup_commit_reference(sha1)) == NULL) die(_("Not a valid branch point: '%s'."), start_name); hashcpy(sha1, commit->object.sha1); if (forcing) snprintf(msg, sizeof msg, "branch: Reset to %s", start_name); else if (!dont_change_ref) snprintf(msg, sizeof msg, "branch: Created from %s", start_name); if (reflog) log_all_ref_updates = 1; if (!dont_change_ref) { struct ref_transaction *transaction; struct strbuf err = STRBUF_INIT; transaction = ref_transaction_begin(&err); if (!transaction || ref_transaction_update(transaction, ref.buf, sha1, null_sha1, 0, !forcing, msg, &err) || ref_transaction_commit(transaction, &err)) die("%s", err.buf); 
ref_transaction_free(transaction); strbuf_release(&err); } if (real_ref && track) setup_tracking(ref.buf + 11, real_ref, track, quiet); strbuf_release(&ref); free(real_ref); } void remove_branch_state(void) { unlink(git_path("CHERRY_PICK_HEAD")); unlink(git_path("REVERT_HEAD")); unlink(git_path("MERGE_HEAD")); unlink(git_path("MERGE_RR")); unlink(git_path("MERGE_MSG")); unlink(git_path("MERGE_MODE")); unlink(git_path("SQUASH_MSG")); } cgit-0.11.2/git/update_unicode.sh0000755000175000017500000000205512476431550017114 0ustar formorerformorer#!/bin/sh #See http://www.unicode.org/reports/tr44/ # #Me Enclosing_Mark an enclosing combining mark #Mn Nonspacing_Mark a nonspacing combining mark (zero advance width) #Cf Format a format control character # UNICODEWIDTH_H=../unicode_width.h if ! test -d unicode; then mkdir unicode fi && ( cd unicode && if ! test -f UnicodeData.txt; then wget http://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt fi && if ! test -f EastAsianWidth.txt; then wget http://www.unicode.org/Public/UCD/latest/ucd/EastAsianWidth.txt fi && if ! test -d uniset; then git clone https://github.com/depp/uniset.git fi && ( cd uniset && if ! test -x uniset; then autoreconf -i && ./configure --enable-warnings=-Werror CFLAGS='-O0 -ggdb' fi && make ) && UNICODE_DIR=. 
&& export UNICODE_DIR && cat >$UNICODEWIDTH_H <<-EOF static const struct interval zero_width[] = { $(uniset/uniset --32 cat:Me,Mn,Cf + U+1160..U+11FF - U+00AD | grep -v plane) }; static const struct interval double_width[] = { $(uniset/uniset --32 eaw:F,W) }; EOF ) cgit-0.11.2/git/version.h0000644000175000017500000000026412476431550015423 0ustar formorerformorer#ifndef VERSION_H #define VERSION_H extern const char git_version_string[]; const char *git_user_agent(void); const char *git_user_agent_sanitized(void); #endif /* VERSION_H */ cgit-0.11.2/git/exec_cmd.h0000644000175000017500000000076712476431550015515 0ustar formorerformorer#ifndef GIT_EXEC_CMD_H #define GIT_EXEC_CMD_H extern void git_set_argv_exec_path(const char *exec_path); extern const char *git_extract_argv0_path(const char *path); extern const char *git_exec_path(void); extern void setup_path(void); extern const char **prepare_git_cmd(const char **argv); extern int execv_git_cmd(const char **argv); /* NULL terminated */ LAST_ARG_MUST_BE_NULL extern int execl_git_cmd(const char *cmd, ...); extern char *system_path(const char *path); #endif /* GIT_EXEC_CMD_H */ cgit-0.11.2/git/credential-cache--daemon.c0000644000175000017500000001445112476431550020425 0ustar formorerformorer#include "cache.h" #include "credential.h" #include "unix-socket.h" #include "sigchain.h" #include "parse-options.h" static const char *socket_path; static void cleanup_socket(void) { if (socket_path) unlink(socket_path); } static void cleanup_socket_on_signal(int sig) { cleanup_socket(); sigchain_pop(sig); raise(sig); } struct credential_cache_entry { struct credential item; unsigned long expiration; }; static struct credential_cache_entry *entries; static int entries_nr; static int entries_alloc; static void cache_credential(struct credential *c, int timeout) { struct credential_cache_entry *e; ALLOC_GROW(entries, entries_nr + 1, entries_alloc); e = &entries[entries_nr++]; /* take ownership of pointers */ memcpy(&e->item, c, 
sizeof(*c)); memset(c, 0, sizeof(*c)); e->expiration = time(NULL) + timeout; } static struct credential_cache_entry *lookup_credential(const struct credential *c) { int i; for (i = 0; i < entries_nr; i++) { struct credential *e = &entries[i].item; if (credential_match(c, e)) return &entries[i]; } return NULL; } static void remove_credential(const struct credential *c) { struct credential_cache_entry *e; e = lookup_credential(c); if (e) e->expiration = 0; } static int check_expirations(void) { static unsigned long wait_for_entry_until; int i = 0; unsigned long now = time(NULL); unsigned long next = (unsigned long)-1; /* * Initially give the client 30 seconds to actually contact us * and store a credential before we decide there's no point in * keeping the daemon around. */ if (!wait_for_entry_until) wait_for_entry_until = now + 30; while (i < entries_nr) { if (entries[i].expiration <= now) { entries_nr--; credential_clear(&entries[i].item); if (i != entries_nr) memcpy(&entries[i], &entries[entries_nr], sizeof(*entries)); /* * Stick around 30 seconds in case a new credential * shows up (e.g., because we just removed a failed * one, and we will soon get the correct one). 
*/ wait_for_entry_until = now + 30; } else { if (entries[i].expiration < next) next = entries[i].expiration; i++; } } if (!entries_nr) { if (wait_for_entry_until <= now) return 0; next = wait_for_entry_until; } return next - now; } static int read_request(FILE *fh, struct credential *c, struct strbuf *action, int *timeout) { static struct strbuf item = STRBUF_INIT; const char *p; strbuf_getline(&item, fh, '\n'); if (!skip_prefix(item.buf, "action=", &p)) return error("client sent bogus action line: %s", item.buf); strbuf_addstr(action, p); strbuf_getline(&item, fh, '\n'); if (!skip_prefix(item.buf, "timeout=", &p)) return error("client sent bogus timeout line: %s", item.buf); *timeout = atoi(p); if (credential_read(c, fh) < 0) return -1; return 0; } static void serve_one_client(FILE *in, FILE *out) { struct credential c = CREDENTIAL_INIT; struct strbuf action = STRBUF_INIT; int timeout = -1; if (read_request(in, &c, &action, &timeout) < 0) /* ignore error */ ; else if (!strcmp(action.buf, "get")) { struct credential_cache_entry *e = lookup_credential(&c); if (e) { fprintf(out, "username=%s\n", e->item.username); fprintf(out, "password=%s\n", e->item.password); } } else if (!strcmp(action.buf, "exit")) exit(0); else if (!strcmp(action.buf, "erase")) remove_credential(&c); else if (!strcmp(action.buf, "store")) { if (timeout < 0) warning("cache client didn't specify a timeout"); else if (!c.username || !c.password) warning("cache client gave us a partial credential"); else { remove_credential(&c); cache_credential(&c, timeout); } } else warning("cache client sent unknown action: %s", action.buf); credential_clear(&c); strbuf_release(&action); } static int serve_cache_loop(int fd) { struct pollfd pfd; unsigned long wakeup; wakeup = check_expirations(); if (!wakeup) return 0; pfd.fd = fd; pfd.events = POLLIN; if (poll(&pfd, 1, 1000 * wakeup) < 0) { if (errno != EINTR) die_errno("poll failed"); return 1; } if (pfd.revents & POLLIN) { int client, client2; FILE *in, *out; 
client = accept(fd, NULL, NULL); if (client < 0) { warning("accept failed: %s", strerror(errno)); return 1; } client2 = dup(client); if (client2 < 0) { warning("dup failed: %s", strerror(errno)); close(client); return 1; } in = xfdopen(client, "r"); out = xfdopen(client2, "w"); serve_one_client(in, out); fclose(in); fclose(out); } return 1; } static void serve_cache(const char *socket_path, int debug) { int fd; fd = unix_stream_listen(socket_path); if (fd < 0) die_errno("unable to bind to '%s'", socket_path); printf("ok\n"); fclose(stdout); if (!debug) { if (!freopen("/dev/null", "w", stderr)) die_errno("unable to point stderr to /dev/null"); } while (serve_cache_loop(fd)) ; /* nothing */ close(fd); unlink(socket_path); } static const char permissions_advice[] = "The permissions on your socket directory are too loose; other\n" "users may be able to read your cached credentials. Consider running:\n" "\n" " chmod 0700 %s"; static void check_socket_directory(const char *path) { struct stat st; char *path_copy = xstrdup(path); char *dir = dirname(path_copy); if (!stat(dir, &st)) { if (st.st_mode & 077) die(permissions_advice, dir); free(path_copy); return; } /* * We must be sure to create the directory with the correct mode, * not just chmod it after the fact; otherwise, there is a race * condition in which somebody can chdir to it, sleep, then try to open * our protected socket. 
*/ if (safe_create_leading_directories_const(dir) < 0) die_errno("unable to create directories for '%s'", dir); if (mkdir(dir, 0700) < 0) die_errno("unable to mkdir '%s'", dir); free(path_copy); } int main(int argc, const char **argv) { static const char *usage[] = { "git-credential-cache--daemon [opts] ", NULL }; int debug = 0; const struct option options[] = { OPT_BOOL(0, "debug", &debug, N_("print debugging messages to stderr")), OPT_END() }; argc = parse_options(argc, argv, NULL, options, usage, 0); socket_path = argv[0]; if (!socket_path) usage_with_options(usage, options); check_socket_directory(socket_path); atexit(cleanup_socket); sigchain_push_common(cleanup_socket_on_signal); serve_cache(socket_path, debug); return 0; } cgit-0.11.2/git/git-merge-one-file.sh0000755000175000017500000000663212476431550017505 0ustar formorerformorer#!/bin/sh # # Copyright (c) Linus Torvalds, 2005 # # This is the git per-file merge script, called with # # $1 - original file SHA1 (or empty) # $2 - file in branch1 SHA1 (or empty) # $3 - file in branch2 SHA1 (or empty) # $4 - pathname in repository # $5 - original file mode (or empty) # $6 - file in branch1 mode (or empty) # $7 - file in branch2 mode (or empty) # # Handle some trivial cases.. The _really_ trivial cases have # been handled already by git read-tree, but that one doesn't # do any merges that might change the tree layout. USAGE=' ' USAGE="$USAGE " LONG_USAGE="usage: git merge-one-file $USAGE Blob ids and modes should be empty for missing files." SUBDIRECTORY_OK=Yes . git-sh-setup cd_to_toplevel require_work_tree if test $# != 7 then echo "$LONG_USAGE" exit 1 fi case "${1:-.}${2:-.}${3:-.}" in # # Deleted in both or deleted in one and unchanged in the other # "$1.." | "$1.$1" | "$1$1.") if test -n "$2" then echo "Removing $4" else # read-tree checked that index matches HEAD already, # so we know we do not have this path tracked. 
# there may be an unrelated working tree file here, # which we should just leave unmolested. Make sure # we do not have it in the index, though. exec git update-index --remove -- "$4" fi if test -f "$4" then rm -f -- "$4" && rmdir -p "$(expr "z$4" : 'z\(.*\)/')" 2>/dev/null || : fi && exec git update-index --remove -- "$4" ;; # # Added in one. # ".$2.") # the other side did not add and we added so there is nothing # to be done, except making the path merged. exec git update-index --add --cacheinfo "$6" "$2" "$4" ;; "..$3") echo "Adding $4" if test -f "$4" then echo "ERROR: untracked $4 is overwritten by the merge." >&2 exit 1 fi git update-index --add --cacheinfo "$7" "$3" "$4" && exec git checkout-index -u -f -- "$4" ;; # # Added in both, identically (check for same permissions). # ".$3$2") if test "$6" != "$7" then echo "ERROR: File $4 added identically in both branches," >&2 echo "ERROR: but permissions conflict $6->$7." >&2 exit 1 fi echo "Adding $4" git update-index --add --cacheinfo "$6" "$2" "$4" && exec git checkout-index -u -f -- "$4" ;; # # Modified in both, but differently. # "$1$2$3" | ".$2$3") case ",$6,$7," in *,120000,*) echo "ERROR: $4: Not merging symbolic link changes." >&2 exit 1 ;; *,160000,*) echo "ERROR: $4: Not merging conflicting submodule changes." >&2 exit 1 ;; esac src1=$(git-unpack-file $2) src2=$(git-unpack-file $3) case "$1" in '') echo "Added $4 in both, but differently." orig=$(git-unpack-file $2) create_virtual_base "$orig" "$src2" ;; *) echo "Auto-merging $4" orig=$(git-unpack-file $1) ;; esac git merge-file "$src1" "$orig" "$src2" ret=$? msg= if test $ret != 0 || test -z "$1" then msg='content conflict' ret=1 fi # Create the working tree file, using "our tree" version from the # index, and then store the result of the merge. 
git checkout-index -f --stage=2 -- "$4" && cat "$src1" >"$4" || exit 1 rm -f -- "$orig" "$src1" "$src2" if test "$6" != "$7" then if test -n "$msg" then msg="$msg, " fi msg="${msg}permissions conflict: $5->$6,$7" ret=1 fi if test $ret != 0 then echo "ERROR: $msg in $4" >&2 exit 1 fi exec git update-index -- "$4" ;; *) echo "ERROR: $4: Not handling case $1 -> $2 -> $3" >&2 ;; esac exit 1 cgit-0.11.2/git/test-date.c0000644000175000017500000000273412476431550015627 0ustar formorerformorer#include "cache.h" static const char *usage_msg = "\n" " test-date show [time_t]...\n" " test-date parse [date]...\n" " test-date approxidate [date]...\n"; static void show_dates(char **argv, struct timeval *now) { struct strbuf buf = STRBUF_INIT; for (; *argv; argv++) { time_t t = atoi(*argv); show_date_relative(t, 0, now, &buf); printf("%s -> %s\n", *argv, buf.buf); } strbuf_release(&buf); } static void parse_dates(char **argv, struct timeval *now) { struct strbuf result = STRBUF_INIT; for (; *argv; argv++) { unsigned long t; int tz; strbuf_reset(&result); parse_date(*argv, &result); if (sscanf(result.buf, "%lu %d", &t, &tz) == 2) printf("%s -> %s\n", *argv, show_date(t, tz, DATE_ISO8601)); else printf("%s -> bad\n", *argv); } strbuf_release(&result); } static void parse_approxidate(char **argv, struct timeval *now) { for (; *argv; argv++) { time_t t; t = approxidate_relative(*argv, now); printf("%s -> %s\n", *argv, show_date(t, 0, DATE_ISO8601)); } } int main(int argc, char **argv) { struct timeval now; const char *x; x = getenv("TEST_DATE_NOW"); if (x) { now.tv_sec = atoi(x); now.tv_usec = 0; } else gettimeofday(&now, NULL); argv++; if (!*argv) usage(usage_msg); if (!strcmp(*argv, "show")) show_dates(argv+1, &now); else if (!strcmp(*argv, "parse")) parse_dates(argv+1, &now); else if (!strcmp(*argv, "approxidate")) parse_approxidate(argv+1, &now); else usage(usage_msg); return 0; } cgit-0.11.2/git/diffcore-break.c0000644000175000017500000002200412476431550016570 0ustar 
formorerformorer/* * Copyright (C) 2005 Junio C Hamano */ #include "cache.h" #include "diff.h" #include "diffcore.h" static int should_break(struct diff_filespec *src, struct diff_filespec *dst, int break_score, int *merge_score_p) { /* dst is recorded as a modification of src. Are they so * different that we are better off recording this as a pair * of delete and create? * * There are two criteria used in this algorithm. For the * purposes of helping later rename/copy, we take both delete * and insert into account and estimate the amount of "edit". * If the edit is very large, we break this pair so that * rename/copy can pick the pieces up to match with other * files. * * On the other hand, we would want to ignore inserts for the * pure "complete rewrite" detection. As long as most of the * existing contents were removed from the file, it is a * complete rewrite, and if sizable chunk from the original * still remains in the result, it is not a rewrite. It does * not matter how much or how little new material is added to * the file. * * The score we leave for such a broken filepair uses the * latter definition so that later clean-up stage can find the * pieces that should not have been broken according to the * latter definition after rename/copy runs, and merge the * broken pair that have a score lower than given criteria * back together. The break operation itself happens * according to the former definition. * * The minimum_edit parameter tells us when to break (the * amount of "edit" required for us to consider breaking the * pair). We leave the amount of deletion in *merge_score_p * when we return. * * The value we return is 1 if we want the pair to be broken, * or 0 if we do not. */ unsigned long delta_size, max_size; unsigned long src_copied, literal_added, src_removed; *merge_score_p = 0; /* assume no deletion --- "do not break" * is the default. 
*/ if (S_ISREG(src->mode) != S_ISREG(dst->mode)) { *merge_score_p = (int)MAX_SCORE; return 1; /* even their types are different */ } if (src->sha1_valid && dst->sha1_valid && !hashcmp(src->sha1, dst->sha1)) return 0; /* they are the same */ if (diff_populate_filespec(src, 0) || diff_populate_filespec(dst, 0)) return 0; /* error but caught downstream */ max_size = ((src->size > dst->size) ? src->size : dst->size); if (max_size < MINIMUM_BREAK_SIZE) return 0; /* we do not break too small filepair */ if (!src->size) return 0; /* we do not let empty files get renamed */ if (diffcore_count_changes(src, dst, &src->cnt_data, &dst->cnt_data, 0, &src_copied, &literal_added)) return 0; /* sanity */ if (src->size < src_copied) src_copied = src->size; if (dst->size < literal_added + src_copied) { if (src_copied < dst->size) literal_added = dst->size - src_copied; else literal_added = 0; } src_removed = src->size - src_copied; /* Compute merge-score, which is "how much is removed * from the source material". The clean-up stage will * merge the surviving pair together if the score is * less than the minimum, after rename/copy runs. */ *merge_score_p = (int)(src_removed * MAX_SCORE / src->size); if (*merge_score_p > break_score) return 1; /* Extent of damage, which counts both inserts and * deletes. */ delta_size = src_removed + literal_added; if (delta_size * MAX_SCORE / max_size < break_score) return 0; /* If you removed a lot without adding new material, that is * not really a rewrite. */ if ((src->size * break_score < src_removed * MAX_SCORE) && (literal_added * 20 < src_removed) && (literal_added * 20 < src_copied)) return 0; return 1; } void diffcore_break(int break_score) { struct diff_queue_struct *q = &diff_queued_diff; struct diff_queue_struct outq; /* When the filepair has this much edit (insert and delete), * it is first considered to be a rewrite and broken into a * create and delete filepair. 
This is to help breaking a * file that had too much new stuff added, possibly from * moving contents from another file, so that rename/copy can * match it with the other file. * * int break_score; we reuse incoming parameter for this. */ /* After a pair is broken according to break_score and * subjected to rename/copy, both of them may survive intact, * due to lack of suitable rename/copy peer. Or, the caller * may be calling us without using rename/copy. When that * happens, we merge the broken pieces back into one * modification together if the pair did not have more than * this much delete. For this computation, we do not take * insert into account at all. If you start from a 100-line * file and delete 97 lines of it, it does not matter if you * add 27 lines to it to make a new 30-line file or if you add * 997 lines to it to make a 1000-line file. Either way what * you did was a rewrite of 97%. On the other hand, if you * delete 3 lines, keeping 97 lines intact, it does not matter * if you add 3 lines to it to make a new 100-line file or if * you add 903 lines to it to make a new 1000-line file. * Either way you did a lot of additions and not a rewrite. * This merge happens to catch the latter case. A merge_score * of 80% would be a good default value (a broken pair that * has score lower than merge_score will be merged back * together). */ int merge_score; int i; /* See comment on DEFAULT_BREAK_SCORE and * DEFAULT_MERGE_SCORE in diffcore.h */ merge_score = (break_score >> 16) & 0xFFFF; break_score = (break_score & 0xFFFF); if (!break_score) break_score = DEFAULT_BREAK_SCORE; if (!merge_score) merge_score = DEFAULT_MERGE_SCORE; DIFF_QUEUE_CLEAR(&outq); for (i = 0; i < q->nr; i++) { struct diff_filepair *p = q->queue[i]; int score; /* * We deal only with in-place edit of blobs. * We do not break anything else. 
*/ if (DIFF_FILE_VALID(p->one) && DIFF_FILE_VALID(p->two) && object_type(p->one->mode) == OBJ_BLOB && object_type(p->two->mode) == OBJ_BLOB && !strcmp(p->one->path, p->two->path)) { if (should_break(p->one, p->two, break_score, &score)) { /* Split this into delete and create */ struct diff_filespec *null_one, *null_two; struct diff_filepair *dp; /* Set score to 0 for the pair that * needs to be merged back together * should they survive rename/copy. * Also we do not want to break very * small files. */ if (score < merge_score) score = 0; /* deletion of one */ null_one = alloc_filespec(p->one->path); dp = diff_queue(&outq, p->one, null_one); dp->score = score; dp->broken_pair = 1; /* creation of two */ null_two = alloc_filespec(p->two->path); dp = diff_queue(&outq, null_two, p->two); dp->score = score; dp->broken_pair = 1; diff_free_filespec_blob(p->one); diff_free_filespec_blob(p->two); free(p); /* not diff_free_filepair(), we are * reusing one and two here. */ continue; } } diff_free_filespec_data(p->one); diff_free_filespec_data(p->two); diff_q(&outq, p); } free(q->queue); *q = outq; return; } static void merge_broken(struct diff_filepair *p, struct diff_filepair *pp, struct diff_queue_struct *outq) { /* p and pp are broken pairs we want to merge */ struct diff_filepair *c = p, *d = pp, *dp; if (DIFF_FILE_VALID(p->one)) { /* this must be a delete half */ d = p; c = pp; } /* Sanity check */ if (!DIFF_FILE_VALID(d->one)) die("internal error in merge #1"); if (DIFF_FILE_VALID(d->two)) die("internal error in merge #2"); if (DIFF_FILE_VALID(c->one)) die("internal error in merge #3"); if (!DIFF_FILE_VALID(c->two)) die("internal error in merge #4"); dp = diff_queue(outq, d->one, c->two); dp->score = p->score; /* * We will be one extra user of the same src side of the * broken pair, if it was used as the rename source for other * paths elsewhere. Increment to mark that the path stays * in the resulting tree. 
*/ d->one->rename_used++; diff_free_filespec_data(d->two); diff_free_filespec_data(c->one); free(d); free(c); } void diffcore_merge_broken(void) { struct diff_queue_struct *q = &diff_queued_diff; struct diff_queue_struct outq; int i, j; DIFF_QUEUE_CLEAR(&outq); for (i = 0; i < q->nr; i++) { struct diff_filepair *p = q->queue[i]; if (!p) /* we already merged this with its peer */ continue; else if (p->broken_pair && !strcmp(p->one->path, p->two->path)) { /* If the peer also survived rename/copy, then * we merge them back together. */ for (j = i + 1; j < q->nr; j++) { struct diff_filepair *pp = q->queue[j]; if (pp->broken_pair && !strcmp(pp->one->path, pp->two->path) && !strcmp(p->one->path, pp->two->path)) { /* Peer survived. Merge them */ merge_broken(p, pp, &outq); q->queue[j] = NULL; break; } } if (q->nr <= j) /* The peer did not survive, so we keep * it in the output. */ diff_q(&outq, p); } else diff_q(&outq, p); } free(q->queue); *q = outq; return; } cgit-0.11.2/git/streaming.h0000644000175000017500000000077012476431550015731 0ustar formorerformorer/* * Copyright (c) 2011, Google Inc. 
*/ #ifndef STREAMING_H #define STREAMING_H 1 #include "cache.h" /* opaque */ struct git_istream; extern struct git_istream *open_istream(const unsigned char *, enum object_type *, unsigned long *, struct stream_filter *); extern int close_istream(struct git_istream *); extern ssize_t read_istream(struct git_istream *, void *, size_t); extern int stream_blob_to_fd(int fd, const unsigned char *, struct stream_filter *, int can_seek); #endif /* STREAMING_H */ cgit-0.11.2/git/credential.c0000644000175000017500000001735612476431550016055 0ustar formorerformorer#include "cache.h" #include "credential.h" #include "string-list.h" #include "run-command.h" #include "url.h" #include "prompt.h" void credential_init(struct credential *c) { memset(c, 0, sizeof(*c)); c->helpers.strdup_strings = 1; } void credential_clear(struct credential *c) { free(c->protocol); free(c->host); free(c->path); free(c->username); free(c->password); string_list_clear(&c->helpers, 0); credential_init(c); } int credential_match(const struct credential *want, const struct credential *have) { #define CHECK(x) (!want->x || (have->x && !strcmp(want->x, have->x))) return CHECK(protocol) && CHECK(host) && CHECK(path) && CHECK(username); #undef CHECK } static int credential_config_callback(const char *var, const char *value, void *data) { struct credential *c = data; const char *key, *dot; if (!skip_prefix(var, "credential.", &key)) return 0; if (!value) return config_error_nonbool(var); dot = strrchr(key, '.'); if (dot) { struct credential want = CREDENTIAL_INIT; char *url = xmemdupz(key, dot - key); int matched; credential_from_url(&want, url); matched = credential_match(&want, c); credential_clear(&want); free(url); if (!matched) return 0; key = dot + 1; } if (!strcmp(key, "helper")) string_list_append(&c->helpers, value); else if (!strcmp(key, "username")) { if (!c->username) c->username = xstrdup(value); } else if (!strcmp(key, "usehttppath")) c->use_http_path = git_config_bool(var, value); return 0; } 
static int proto_is_http(const char *s) { if (!s) return 0; return !strcmp(s, "https") || !strcmp(s, "http"); } static void credential_apply_config(struct credential *c) { if (c->configured) return; git_config(credential_config_callback, c); c->configured = 1; if (!c->use_http_path && proto_is_http(c->protocol)) { free(c->path); c->path = NULL; } } static void credential_describe(struct credential *c, struct strbuf *out) { if (!c->protocol) return; strbuf_addf(out, "%s://", c->protocol); if (c->username && *c->username) strbuf_addf(out, "%s@", c->username); if (c->host) strbuf_addstr(out, c->host); if (c->path) strbuf_addf(out, "/%s", c->path); } static char *credential_ask_one(const char *what, struct credential *c, int flags) { struct strbuf desc = STRBUF_INIT; struct strbuf prompt = STRBUF_INIT; char *r; credential_describe(c, &desc); if (desc.len) strbuf_addf(&prompt, "%s for '%s': ", what, desc.buf); else strbuf_addf(&prompt, "%s: ", what); r = git_prompt(prompt.buf, flags); strbuf_release(&desc); strbuf_release(&prompt); return xstrdup(r); } static void credential_getpass(struct credential *c) { if (!c->username) c->username = credential_ask_one("Username", c, PROMPT_ASKPASS|PROMPT_ECHO); if (!c->password) c->password = credential_ask_one("Password", c, PROMPT_ASKPASS); } int credential_read(struct credential *c, FILE *fp) { struct strbuf line = STRBUF_INIT; while (strbuf_getline(&line, fp, '\n') != EOF) { char *key = line.buf; char *value = strchr(key, '='); if (!line.len) break; if (!value) { warning("invalid credential line: %s", key); strbuf_release(&line); return -1; } *value++ = '\0'; if (!strcmp(key, "username")) { free(c->username); c->username = xstrdup(value); } else if (!strcmp(key, "password")) { free(c->password); c->password = xstrdup(value); } else if (!strcmp(key, "protocol")) { free(c->protocol); c->protocol = xstrdup(value); } else if (!strcmp(key, "host")) { free(c->host); c->host = xstrdup(value); } else if (!strcmp(key, "path")) { 
free(c->path); c->path = xstrdup(value); } else if (!strcmp(key, "url")) { credential_from_url(c, value); } else if (!strcmp(key, "quit")) { c->quit = !!git_config_bool("quit", value); } /* * Ignore other lines; we don't know what they mean, but * this future-proofs us when later versions of git do * learn new lines, and the helpers are updated to match. */ } strbuf_release(&line); return 0; } static void credential_write_item(FILE *fp, const char *key, const char *value) { if (!value) return; fprintf(fp, "%s=%s\n", key, value); } void credential_write(const struct credential *c, FILE *fp) { credential_write_item(fp, "protocol", c->protocol); credential_write_item(fp, "host", c->host); credential_write_item(fp, "path", c->path); credential_write_item(fp, "username", c->username); credential_write_item(fp, "password", c->password); } static int run_credential_helper(struct credential *c, const char *cmd, int want_output) { struct child_process helper = CHILD_PROCESS_INIT; const char *argv[] = { NULL, NULL }; FILE *fp; argv[0] = cmd; helper.argv = argv; helper.use_shell = 1; helper.in = -1; if (want_output) helper.out = -1; else helper.no_stdout = 1; if (start_command(&helper) < 0) return -1; fp = xfdopen(helper.in, "w"); credential_write(c, fp); fclose(fp); if (want_output) { int r; fp = xfdopen(helper.out, "r"); r = credential_read(c, fp); fclose(fp); if (r < 0) { finish_command(&helper); return -1; } } if (finish_command(&helper)) return -1; return 0; } static int credential_do(struct credential *c, const char *helper, const char *operation) { struct strbuf cmd = STRBUF_INIT; int r; if (helper[0] == '!') strbuf_addstr(&cmd, helper + 1); else if (is_absolute_path(helper)) strbuf_addstr(&cmd, helper); else strbuf_addf(&cmd, "git credential-%s", helper); strbuf_addf(&cmd, " %s", operation); r = run_credential_helper(c, cmd.buf, !strcmp(operation, "get")); strbuf_release(&cmd); return r; } void credential_fill(struct credential *c) { int i; if (c->username && 
c->password) return; credential_apply_config(c); for (i = 0; i < c->helpers.nr; i++) { credential_do(c, c->helpers.items[i].string, "get"); if (c->username && c->password) return; if (c->quit) die("credential helper '%s' told us to quit", c->helpers.items[i].string); } credential_getpass(c); if (!c->username && !c->password) die("unable to get password from user"); } void credential_approve(struct credential *c) { int i; if (c->approved) return; if (!c->username || !c->password) return; credential_apply_config(c); for (i = 0; i < c->helpers.nr; i++) credential_do(c, c->helpers.items[i].string, "store"); c->approved = 1; } void credential_reject(struct credential *c) { int i; credential_apply_config(c); for (i = 0; i < c->helpers.nr; i++) credential_do(c, c->helpers.items[i].string, "erase"); free(c->username); c->username = NULL; free(c->password); c->password = NULL; c->approved = 0; } void credential_from_url(struct credential *c, const char *url) { const char *at, *colon, *cp, *slash, *host, *proto_end; credential_clear(c); /* * Match one of: * (1) proto:///... * (2) proto://@/... * (3) proto://:@/... 
*/ proto_end = strstr(url, "://"); if (!proto_end) return; cp = proto_end + 3; at = strchr(cp, '@'); colon = strchr(cp, ':'); slash = strchrnul(cp, '/'); if (!at || slash <= at) { /* Case (1) */ host = cp; } else if (!colon || at <= colon) { /* Case (2) */ c->username = url_decode_mem(cp, at - cp); host = at + 1; } else { /* Case (3) */ c->username = url_decode_mem(cp, colon - cp); c->password = url_decode_mem(colon + 1, at - (colon + 1)); host = at + 1; } if (proto_end - url > 0) c->protocol = xmemdupz(url, proto_end - url); if (slash - host > 0) c->host = url_decode_mem(host, slash - host); /* Trim leading and trailing slashes from path */ while (*slash == '/') slash++; if (*slash) { char *p; c->path = url_decode(slash); p = c->path + strlen(c->path) - 1; while (p > c->path && *p == '/') *p-- = '\0'; } } cgit-0.11.2/git/string-list.h0000644000175000017500000001221712476431550016216 0ustar formorerformorer#ifndef STRING_LIST_H #define STRING_LIST_H struct string_list_item { char *string; void *util; }; typedef int (*compare_strings_fn)(const char *, const char *); struct string_list { struct string_list_item *items; unsigned int nr, alloc; unsigned int strdup_strings:1; compare_strings_fn cmp; /* NULL uses strcmp() */ }; #define STRING_LIST_INIT_NODUP { NULL, 0, 0, 0, NULL } #define STRING_LIST_INIT_DUP { NULL, 0, 0, 1, NULL } void string_list_init(struct string_list *list, int strdup_strings); void print_string_list(const struct string_list *p, const char *text); void string_list_clear(struct string_list *list, int free_util); /* Use this function to call a custom clear function on each util pointer */ /* The string associated with the util pointer is passed as the second argument */ typedef void (*string_list_clear_func_t)(void *p, const char *str); void string_list_clear_func(struct string_list *list, string_list_clear_func_t clearfunc); /* Use this function or the macro below to iterate over each item */ typedef int (*string_list_each_func_t)(struct 
string_list_item *, void *); int for_each_string_list(struct string_list *list, string_list_each_func_t, void *cb_data); #define for_each_string_list_item(item,list) \ for (item = (list)->items; item < (list)->items + (list)->nr; ++item) /* * Apply want to each item in list, retaining only the ones for which * the function returns true. If free_util is true, call free() on * the util members of any items that have to be deleted. Preserve * the order of the items that are retained. */ void filter_string_list(struct string_list *list, int free_util, string_list_each_func_t want, void *cb_data); /* * Remove any empty strings from the list. If free_util is true, call * free() on the util members of any items that have to be deleted. * Preserve the order of the items that are retained. */ void string_list_remove_empty_items(struct string_list *list, int free_util); /* Use these functions only on sorted lists: */ int string_list_has_string(const struct string_list *list, const char *string); int string_list_find_insert_index(const struct string_list *list, const char *string, int negative_existing_index); /* * Inserts the given string into the sorted list. * If the string already exists, the list is not altered. * Returns the string_list_item, the string is part of. */ struct string_list_item *string_list_insert(struct string_list *list, const char *string); /* * Checks if the given string is part of a sorted list. If it is part of the list, * return the coresponding string_list_item, NULL otherwise. */ struct string_list_item *string_list_lookup(struct string_list *list, const char *string); /* * Remove all but the first of consecutive entries with the same * string value. If free_util is true, call free() on the util * members of any items that have to be deleted. */ void string_list_remove_duplicates(struct string_list *sorted_list, int free_util); /* Use these functions only on unsorted lists: */ /* * Add string to the end of list. 
If list->strdup_string is set, then * string is copied; otherwise the new string_list_entry refers to the * input string. */ struct string_list_item *string_list_append(struct string_list *list, const char *string); /* * Like string_list_append(), except string is never copied. When * list->strdup_strings is set, this function can be used to hand * ownership of a malloc()ed string to list without making an extra * copy. */ struct string_list_item *string_list_append_nodup(struct string_list *list, char *string); void string_list_sort(struct string_list *list); int unsorted_string_list_has_string(struct string_list *list, const char *string); struct string_list_item *unsorted_string_list_lookup(struct string_list *list, const char *string); void unsorted_string_list_delete_item(struct string_list *list, int i, int free_util); /* * Split string into substrings on character delim and append the * substrings to list. The input string is not modified. * list->strdup_strings must be set, as new memory needs to be * allocated to hold the substrings. If maxsplit is non-negative, * then split at most maxsplit times. Return the number of substrings * appended to list. * * Examples: * string_list_split(l, "foo:bar:baz", ':', -1) -> ["foo", "bar", "baz"] * string_list_split(l, "foo:bar:baz", ':', 0) -> ["foo:bar:baz"] * string_list_split(l, "foo:bar:baz", ':', 1) -> ["foo", "bar:baz"] * string_list_split(l, "foo:bar:", ':', -1) -> ["foo", "bar", ""] * string_list_split(l, "", ':', -1) -> [""] * string_list_split(l, ":", ':', -1) -> ["", ""] */ int string_list_split(struct string_list *list, const char *string, int delim, int maxsplit); /* * Like string_list_split(), except that string is split in-place: the * delimiter characters in string are overwritten with NULs, and the * new string_list_items point into string (which therefore must not * be modified or freed while the string_list is in use). * list->strdup_strings must *not* be set. 
*/ int string_list_split_in_place(struct string_list *list, char *string, int delim, int maxsplit); #endif /* STRING_LIST_H */ cgit-0.11.2/git/test-sha1.c0000644000175000017500000000165512476431550015547 0ustar formorerformorer#include "cache.h" int main(int ac, char **av) { git_SHA_CTX ctx; unsigned char sha1[20]; unsigned bufsz = 8192; int binary = 0; char *buffer; if (ac == 2) { if (!strcmp(av[1], "-b")) binary = 1; else bufsz = strtoul(av[1], NULL, 10) * 1024 * 1024; } if (!bufsz) bufsz = 8192; while ((buffer = malloc(bufsz)) == NULL) { fprintf(stderr, "bufsz %u is too big, halving...\n", bufsz); bufsz /= 2; if (bufsz < 1024) die("OOPS"); } git_SHA1_Init(&ctx); while (1) { ssize_t sz, this_sz; char *cp = buffer; unsigned room = bufsz; this_sz = 0; while (room) { sz = xread(0, cp, room); if (sz == 0) break; if (sz < 0) die_errno("test-sha1"); this_sz += sz; cp += sz; room -= sz; } if (this_sz == 0) break; git_SHA1_Update(&ctx, buffer, this_sz); } git_SHA1_Final(sha1, &ctx); if (binary) fwrite(sha1, 1, 20, stdout); else puts(sha1_to_hex(sha1)); exit(0); } cgit-0.11.2/git/show-index.c0000644000175000017500000000435312476431550016021 0ustar formorerformorer#include "cache.h" #include "pack.h" static const char show_index_usage[] = "git show-index < "; int main(int argc, char **argv) { int i; unsigned nr; unsigned int version; static unsigned int top_index[256]; git_setup_gettext(); if (argc != 1) usage(show_index_usage); if (fread(top_index, 2 * 4, 1, stdin) != 1) die("unable to read header"); if (top_index[0] == htonl(PACK_IDX_SIGNATURE)) { version = ntohl(top_index[1]); if (version < 2 || version > 2) die("unknown index version"); if (fread(top_index, 256 * 4, 1, stdin) != 1) die("unable to read index"); } else { version = 1; if (fread(&top_index[2], 254 * 4, 1, stdin) != 1) die("unable to read index"); } nr = 0; for (i = 0; i < 256; i++) { unsigned n = ntohl(top_index[i]); if (n < nr) die("corrupt index file"); nr = n; } if (version == 1) { for (i = 0; i < nr; 
i++) { unsigned int offset, entry[6]; if (fread(entry, 4 + 20, 1, stdin) != 1) die("unable to read entry %u/%u", i, nr); offset = ntohl(entry[0]); printf("%u %s\n", offset, sha1_to_hex((void *)(entry+1))); } } else { unsigned off64_nr = 0; struct { unsigned char sha1[20]; uint32_t crc; uint32_t off; } *entries = xmalloc(nr * sizeof(entries[0])); for (i = 0; i < nr; i++) if (fread(entries[i].sha1, 20, 1, stdin) != 1) die("unable to read sha1 %u/%u", i, nr); for (i = 0; i < nr; i++) if (fread(&entries[i].crc, 4, 1, stdin) != 1) die("unable to read crc %u/%u", i, nr); for (i = 0; i < nr; i++) if (fread(&entries[i].off, 4, 1, stdin) != 1) die("unable to read 32b offset %u/%u", i, nr); for (i = 0; i < nr; i++) { uint64_t offset; uint32_t off = ntohl(entries[i].off); if (!(off & 0x80000000)) { offset = off; } else { uint32_t off64[2]; if ((off & 0x7fffffff) != off64_nr) die("inconsistent 64b offset index"); if (fread(off64, 8, 1, stdin) != 1) die("unable to read 64b offset %u", off64_nr); offset = (((uint64_t)ntohl(off64[0])) << 32) | ntohl(off64[1]); off64_nr++; } printf("%" PRIuMAX " %s (%08"PRIx32")\n", (uintmax_t) offset, sha1_to_hex(entries[i].sha1), ntohl(entries[i].crc)); } free(entries); } return 0; } cgit-0.11.2/git/advice.c0000644000175000017500000000632212476431550015165 0ustar formorerformorer#include "cache.h" int advice_push_update_rejected = 1; int advice_push_non_ff_current = 1; int advice_push_non_ff_matching = 1; int advice_push_already_exists = 1; int advice_push_fetch_first = 1; int advice_push_needs_force = 1; int advice_status_hints = 1; int advice_status_u_option = 1; int advice_commit_before_merge = 1; int advice_resolve_conflict = 1; int advice_implicit_identity = 1; int advice_detached_head = 1; int advice_set_upstream_failure = 1; int advice_object_name_warning = 1; int advice_rm_hints = 1; static struct { const char *name; int *preference; } advice_config[] = { { "pushupdaterejected", &advice_push_update_rejected }, { "pushnonffcurrent", 
&advice_push_non_ff_current }, { "pushnonffmatching", &advice_push_non_ff_matching }, { "pushalreadyexists", &advice_push_already_exists }, { "pushfetchfirst", &advice_push_fetch_first }, { "pushneedsforce", &advice_push_needs_force }, { "statushints", &advice_status_hints }, { "statusuoption", &advice_status_u_option }, { "commitbeforemerge", &advice_commit_before_merge }, { "resolveconflict", &advice_resolve_conflict }, { "implicitidentity", &advice_implicit_identity }, { "detachedhead", &advice_detached_head }, { "setupstreamfailure", &advice_set_upstream_failure }, { "objectnamewarning", &advice_object_name_warning }, { "rmhints", &advice_rm_hints }, /* make this an alias for backward compatibility */ { "pushnonfastforward", &advice_push_update_rejected } }; void advise(const char *advice, ...) { struct strbuf buf = STRBUF_INIT; va_list params; const char *cp, *np; va_start(params, advice); strbuf_vaddf(&buf, advice, params); va_end(params); for (cp = buf.buf; *cp; cp = np) { np = strchrnul(cp, '\n'); fprintf(stderr, _("hint: %.*s\n"), (int)(np - cp), cp); if (*np) np++; } strbuf_release(&buf); } int git_default_advice_config(const char *var, const char *value) { const char *k; int i; if (!skip_prefix(var, "advice.", &k)) return 0; for (i = 0; i < ARRAY_SIZE(advice_config); i++) { if (strcmp(k, advice_config[i].name)) continue; *advice_config[i].preference = git_config_bool(var, value); return 0; } return 0; } int error_resolve_conflict(const char *me) { error("%s is not possible because you have unmerged files.", me); if (advice_resolve_conflict) /* * Message used both when 'git commit' fails and when * other commands doing a merge do. 
*/ advise(_("Fix them up in the work tree, and then use 'git add/rm '\n" "as appropriate to mark resolution and make a commit.")); return -1; } void NORETURN die_resolve_conflict(const char *me) { error_resolve_conflict(me); die("Exiting because of an unresolved conflict."); } void detach_advice(const char *new_name) { const char fmt[] = "Note: checking out '%s'.\n\n" "You are in 'detached HEAD' state. You can look around, make experimental\n" "changes and commit them, and you can discard any commits you make in this\n" "state without impacting any branches by performing another checkout.\n\n" "If you want to create a new branch to retain commits you create, you may\n" "do so (now or later) by using -b with the checkout command again. Example:\n\n" " git checkout -b new_branch_name\n\n"; fprintf(stderr, fmt, new_name); } cgit-0.11.2/git/resolve-undo.h0000644000175000017500000000114412476431550016356 0ustar formorerformorer#ifndef RESOLVE_UNDO_H #define RESOLVE_UNDO_H struct resolve_undo_info { unsigned int mode[3]; unsigned char sha1[3][20]; }; extern void record_resolve_undo(struct index_state *, struct cache_entry *); extern void resolve_undo_write(struct strbuf *, struct string_list *); extern struct string_list *resolve_undo_read(const char *, unsigned long); extern void resolve_undo_clear_index(struct index_state *); extern int unmerge_index_entry_at(struct index_state *, int); extern void unmerge_index(struct index_state *, const struct pathspec *); extern void unmerge_marked_index(struct index_state *); #endif cgit-0.11.2/git/git-cvsexportcommit.perl0000755000175000017500000003107612476431550020510 0ustar formorerformorer#!/usr/bin/perl use 5.008; use strict; use warnings; use Getopt::Std; use File::Temp qw(tempdir); use Data::Dumper; use File::Basename qw(basename dirname); use File::Spec; use Git; our ($opt_h, $opt_P, $opt_p, $opt_v, $opt_c, $opt_f, $opt_a, $opt_m, $opt_d, $opt_u, $opt_w, $opt_W, $opt_k); getopts('uhPpvcfkam:d:w:W'); $opt_h && usage(); die 
"Need at least one commit identifier!" unless @ARGV; # Get git-config settings my $repo = Git->repository(); $opt_w = $repo->config('cvsexportcommit.cvsdir') unless defined $opt_w; if ($opt_w || $opt_W) { # Remember where GIT_DIR is before changing to CVS checkout unless ($ENV{GIT_DIR}) { # No GIT_DIR set. Figure it out for ourselves my $gd =`git-rev-parse --git-dir`; chomp($gd); $ENV{GIT_DIR} = $gd; } # On MSYS, convert a Windows-style path to an MSYS-style path # so that rel2abs() below works correctly. if ($^O eq 'msys') { $ENV{GIT_DIR} =~ s#^([[:alpha:]]):/#/$1/#; } # Make sure GIT_DIR is absolute $ENV{GIT_DIR} = File::Spec->rel2abs($ENV{GIT_DIR}); } if ($opt_w) { if (! -d $opt_w."/CVS" ) { die "$opt_w is not a CVS checkout"; } chdir $opt_w or die "Cannot change to CVS checkout at $opt_w"; } unless ($ENV{GIT_DIR} && -r $ENV{GIT_DIR}){ die "GIT_DIR is not defined or is unreadable"; } my @cvs; if ($opt_d) { @cvs = ('cvs', '-d', $opt_d); } else { @cvs = ('cvs'); } # resolve target commit my $commit; $commit = pop @ARGV; $commit = safe_pipe_capture('git-rev-parse', '--verify', "$commit^0"); chomp $commit; if ($?) { die "The commit reference $commit did not resolve!"; } # resolve what parent we want my $parent; if (@ARGV) { $parent = pop @ARGV; $parent = safe_pipe_capture('git-rev-parse', '--verify', "$parent^0"); chomp $parent; if ($?) { die "The parent reference did not resolve!"; } } # find parents from the commit itself my @commit = safe_pipe_capture('git-cat-file', 'commit', $commit); my @parents; my $committer; my $author; my $stage = 'headers'; # headers, msg my $title; my $msg = ''; foreach my $line (@commit) { chomp $line; if ($stage eq 'headers' && $line eq '') { $stage = 'msg'; next; } if ($stage eq 'headers') { if ($line =~ m/^parent (\w{40})$/) { # found a parent push @parents, $1; } elsif ($line =~ m/^author (.+) \d+ [-+]\d+$/) { $author = $1; } elsif ($line =~ m/^committer (.+) \d+ [-+]\d+$/) { $committer = $1; } } else { $msg .= $line . 
"\n"; unless ($title) { $title = $line; } } } my $noparent = "0000000000000000000000000000000000000000"; if ($parent) { my $found; # double check that it's a valid parent foreach my $p (@parents) { if ($p eq $parent) { $found = 1; last; }; # found it } die "Did not find $parent in the parents for this commit!" if !$found and !$opt_P; } else { # we don't have a parent from the cmdline... if (@parents == 1) { # it's safe to get it from the commit $parent = $parents[0]; } elsif (@parents == 0) { # there is no parent $parent = $noparent; } else { # cannot choose automatically from multiple parents die "This commit has more than one parent -- please name the parent you want to use explicitly"; } } my $go_back_to = 0; if ($opt_W) { $opt_v && print "Resetting to $parent\n"; $go_back_to = `git symbolic-ref HEAD 2> /dev/null || git rev-parse HEAD` || die "Could not determine current branch"; system("git checkout -q $parent^0") && die "Could not check out $parent^0"; } $opt_v && print "Applying to CVS commit $commit from parent $parent\n"; # grab the commit message open(MSG, ">.msg") or die "Cannot open .msg for writing"; if ($opt_m) { print MSG $opt_m; } print MSG $msg; if ($opt_a) { print MSG "\n\nAuthor: $author\n"; if ($author ne $committer) { print MSG "Committer: $committer\n"; } } close MSG; if ($parent eq $noparent) { `git-diff-tree --binary -p --root $commit >.cvsexportcommit.diff`;# || die "Cannot diff"; } else { `git-diff-tree --binary -p $parent $commit >.cvsexportcommit.diff`;# || die "Cannot diff"; } ## apply non-binary changes # In pedantic mode require all lines of context to match. In normal # mode, be compatible with diff/patch: assume 3 lines of context and # require at least one line match, i.e. ignore at most 2 lines of # context, like diff/patch do by default. my $context = $opt_p ? 
'' : '-C1'; print "Checking if patch will apply\n"; my @stat; open APPLY, "GIT_DIR= git-apply $context --summary --numstat<.cvsexportcommit.diff|" || die "cannot patch"; @stat=; close APPLY || die "Cannot patch"; my (@bfiles,@files,@afiles,@dfiles); chomp @stat; foreach (@stat) { push (@bfiles,$1) if m/^-\t-\t(.*)$/; push (@files, $1) if m/^-\t-\t(.*)$/; push (@files, $1) if m/^\d+\t\d+\t(.*)$/; push (@afiles,$1) if m/^ create mode [0-7]+ (.*)$/; push (@dfiles,$1) if m/^ delete mode [0-7]+ (.*)$/; } map { s/^"(.*)"$/$1/g } @bfiles,@files; map { s/\\([0-7]{3})/sprintf('%c',oct $1)/eg } @bfiles,@files; # check that the files are clean and up to date according to cvs my $dirty; my @dirs; foreach my $p (@afiles) { my $path = dirname $p; while (!-d $path and ! grep { $_ eq $path } @dirs) { unshift @dirs, $path; $path = dirname $path; } } # ... check dirs, foreach my $d (@dirs) { if (-e $d) { $dirty = 1; warn "$d exists and is not a directory!\n"; } } # ... query status of all files that we have a directory for and parse output of 'cvs status' to %cvsstat. my @canstatusfiles; foreach my $f (@files) { my $path = dirname $f; next if (grep { $_ eq $path } @dirs); push @canstatusfiles, $f; } my %cvsstat; if (@canstatusfiles) { if ($opt_u) { my @updated = xargs_safe_pipe_capture([@cvs, 'update'], @canstatusfiles); print @updated; } # "cvs status" reorders the parameters, notably when there are multiple # arguments with the same basename. So be precise here. my %added = map { $_ => 1 } @afiles; my %todo = map { $_ => 1 } @canstatusfiles; while (%todo) { my @canstatusfiles2 = (); my %fullname = (); foreach my $name (keys %todo) { my $basename = basename($name); # CVS reports files that don't exist in the current revision as # "no file $basename" in its "status" output, so we should # anticipate that. Totally unknown files will have a status # "Unknown". 
However, if they exist in the Attic, their status # will be "Up-to-date" (this means they were added once but have # been removed). $basename = "no file $basename" if $added{$basename}; $basename =~ s/^\s+//; $basename =~ s/\s+$//; if (!exists($fullname{$basename})) { $fullname{$basename} = $name; push (@canstatusfiles2, $name); delete($todo{$name}); } } my @cvsoutput; @cvsoutput = xargs_safe_pipe_capture([@cvs, 'status'], @canstatusfiles2); foreach my $l (@cvsoutput) { chomp $l; next unless my ($file, $status) = $l =~ /^File:\s+(.*\S)\s+Status: (.*)$/; my $fullname = $fullname{$file}; print STDERR "Huh? Status '$status' reported for unexpected file '$file'\n" unless defined $fullname; # This response means the file does not exist except in # CVS's attic, so set the status accordingly $status = "In-attic" if $file =~ /^no file / && $status eq 'Up-to-date'; $cvsstat{$fullname{$file}} = $status if defined $fullname{$file}; } } } # ... Validate that new files have the correct status foreach my $f (@afiles) { next unless defined(my $stat = $cvsstat{$f}); # This means the file has never been seen before next if $stat eq 'Unknown'; # This means the file has been seen before but was removed next if $stat eq 'In-attic'; $dirty = 1; warn "File $f is already known in your CVS checkout -- perhaps it has been added by another user. Or this may indicate that it exists on a different branch. If this is the case, use -f to force the merge.\n"; warn "Status was: $cvsstat{$f}\n"; } # ... validate known files. foreach my $f (@files) { next if grep { $_ eq $f } @afiles; # TODO:we need to handle removed in cvs unless (defined ($cvsstat{$f}) and $cvsstat{$f} eq "Up-to-date") { $dirty = 1; warn "File $f not up to date but has status '$cvsstat{$f}' in your CVS checkout!\n"; } # Depending on how your GIT tree got imported from CVS you may # have a conflict between expanded keywords in your CVS tree and # unexpanded keywords in the patch about to be applied. 
if ($opt_k) { my $orig_file ="$f.orig"; rename $f, $orig_file; open(FILTER_IN, "<$orig_file") or die "Cannot open $orig_file\n"; open(FILTER_OUT, ">$f") or die "Cannot open $f\n"; while () { my $line = $_; $line =~ s/\$([A-Z][a-z]+):[^\$]+\$/\$$1\$/g; print FILTER_OUT $line; } close FILTER_IN; close FILTER_OUT; } } if ($dirty) { if ($opt_f) { warn "The tree is not clean -- forced merge\n"; $dirty = 0; } else { die "Exiting: your CVS tree is not clean for this merge."; } } print "Applying\n"; if ($opt_W) { system("git checkout -q $commit^0") && die "cannot patch"; } else { `GIT_DIR= git-apply $context --summary --numstat --apply <.cvsexportcommit.diff` || die "cannot patch"; } print "Patch applied successfully. Adding new files and directories to CVS\n"; my $dirtypatch = 0; # # We have to add the directories in order otherwise we will have # problems when we try and add the sub-directory of a directory we # have not added yet. # # Luckily this is easy to deal with by sorting the directories and # dealing with the shortest ones first. # @dirs = sort { length $a <=> length $b} @dirs; foreach my $d (@dirs) { if (system(@cvs,'add',$d)) { $dirtypatch = 1; warn "Failed to cvs add directory $d -- you may need to do it manually"; } } foreach my $f (@afiles) { if (grep { $_ eq $f } @bfiles) { system(@cvs, 'add','-kb',$f); } else { system(@cvs, 'add', $f); } if ($?) { $dirtypatch = 1; warn "Failed to cvs add $f -- you may need to do it manually"; } } foreach my $f (@dfiles) { system(@cvs, 'rm', '-f', $f); if ($?) { $dirtypatch = 1; warn "Failed to cvs rm -f $f -- you may need to do it manually"; } } print "Commit to CVS\n"; print "Patch title (first comment line): $title\n"; my @commitfiles = map { unless (m/\s/) { '\''.$_.'\''; } else { $_; }; } (@files); my $cmd = join(' ', @cvs)." 
commit -F .msg @commitfiles"; if ($dirtypatch) { print "NOTE: One or more hunks failed to apply cleanly.\n"; print "You'll need to apply the patch in .cvsexportcommit.diff manually\n"; print "using a patch program. After applying the patch and resolving the\n"; print "problems you may commit using:"; print "\n cd \"$opt_w\"" if $opt_w; print "\n $cmd\n"; print "\n git checkout $go_back_to\n" if $go_back_to; print "\n"; exit(1); } if ($opt_c) { print "Autocommit\n $cmd\n"; print xargs_safe_pipe_capture([@cvs, 'commit', '-F', '.msg'], @files); if ($?) { die "Exiting: The commit did not succeed"; } print "Committed successfully to CVS\n"; # clean up unlink(".msg"); } else { print "Ready for you to commit, just run:\n\n $cmd\n"; } # clean up unlink(".cvsexportcommit.diff"); if ($opt_W) { system("git checkout $go_back_to") && die "cannot move back to $go_back_to"; if (!($go_back_to =~ /^[0-9a-fA-F]{40}$/)) { system("git symbolic-ref HEAD $go_back_to") && die "cannot move back to $go_back_to"; } } # CVS version 1.11.x and 1.12.x sleeps the wrong way to ensure the timestamp # used by CVS and the one set by subsequence file modifications are different. # If they are not different CVS will not detect changes. sleep(1); sub usage { print STDERR <); close $child or die join(' ',@_).": $! $?"; } else { exec(@_) or die "$! $?"; # exec() can fail the executable can't be found } return wantarray ? @output : join('',@output); } sub xargs_safe_pipe_capture { my $MAX_ARG_LENGTH = 65536; my $cmd = shift; my @output; my $output; while(@_) { my @args; my $length = 0; while(@_ && $length < $MAX_ARG_LENGTH) { push @args, shift; $length += length($args[$#args]); } if (wantarray) { push @output, safe_pipe_capture(@$cmd, @args); } else { $output .= safe_pipe_capture(@$cmd, @args); } } return wantarray ? 
@output : $output; } cgit-0.11.2/git/git-cvsserver.perl0000755000175000017500000047506712476431550017300 0ustar formorerformorer#!/usr/bin/perl #### #### This application is a CVS emulation layer for git. #### It is intended for clients to connect over SSH. #### See the documentation for more details. #### #### Copyright The Open University UK - 2006. #### #### Authors: Martyn Smith #### Martin Langhoff #### #### #### Released under the GNU Public License, version 2. #### #### use 5.008; use strict; use warnings; use bytes; use Fcntl; use File::Temp qw/tempdir tempfile/; use File::Path qw/rmtree/; use File::Basename; use Getopt::Long qw(:config require_order no_ignore_case); my $VERSION = '@@GIT_VERSION@@'; my $log = GITCVS::log->new(); my $cfg; my $DATE_LIST = { Jan => "01", Feb => "02", Mar => "03", Apr => "04", May => "05", Jun => "06", Jul => "07", Aug => "08", Sep => "09", Oct => "10", Nov => "11", Dec => "12", }; # Enable autoflush for STDOUT (otherwise the whole thing falls apart) $| = 1; #### Definition and mappings of functions #### # NOTE: Despite the existence of req_CATCHALL and req_EMPTY unimplemented # requests, this list is incomplete. It is missing many rarer/optional # requests. Perhaps some clients require a claim of support for # these specific requests for main functionality to work? 
my $methods = { 'Root' => \&req_Root, 'Valid-responses' => \&req_Validresponses, 'valid-requests' => \&req_validrequests, 'Directory' => \&req_Directory, 'Sticky' => \&req_Sticky, 'Entry' => \&req_Entry, 'Modified' => \&req_Modified, 'Unchanged' => \&req_Unchanged, 'Questionable' => \&req_Questionable, 'Argument' => \&req_Argument, 'Argumentx' => \&req_Argument, 'expand-modules' => \&req_expandmodules, 'add' => \&req_add, 'remove' => \&req_remove, 'co' => \&req_co, 'update' => \&req_update, 'ci' => \&req_ci, 'diff' => \&req_diff, 'log' => \&req_log, 'rlog' => \&req_log, 'tag' => \&req_CATCHALL, 'status' => \&req_status, 'admin' => \&req_CATCHALL, 'history' => \&req_CATCHALL, 'watchers' => \&req_EMPTY, 'editors' => \&req_EMPTY, 'noop' => \&req_EMPTY, 'annotate' => \&req_annotate, 'Global_option' => \&req_Globaloption, }; ############################################## # $state holds all the bits of information the clients sends us that could # potentially be useful when it comes to actually _doing_ something. my $state = { prependdir => '' }; # Work is for managing temporary working directory my $work = { state => undef, # undef, 1 (empty), 2 (with stuff) workDir => undef, index => undef, emptyDir => undef, tmpDir => undef }; $log->info("--------------- STARTING -----------------"); my $usage = "usage: git cvsserver [options] [pserver|server] [ ...]\n". " --base-path : Prepend to requested CVSROOT\n". " Can be read from GIT_CVSSERVER_BASE_PATH\n". " --strict-paths : Don't allow recursing into subdirectories\n". " --export-all : Don't check for gitcvs.enabled in config\n". " --version, -V : Print version information and exit\n". " -h, -H : Print usage information and exit\n". "\n". " ... is a list of allowed directories. If no directories\n". "are given, all are allowed. This is an additional restriction, gitcvs\n". "access still needs to be enabled by the gitcvs.enabled config option.\n". 
"Alternately, one directory may be specified in GIT_CVSSERVER_ROOT.\n"; my @opts = ( 'h|H', 'version|V', 'base-path=s', 'strict-paths', 'export-all' ); GetOptions( $state, @opts ) or die $usage; if ($state->{version}) { print "git-cvsserver version $VERSION\n"; exit; } if ($state->{help}) { print $usage; exit; } my $TEMP_DIR = tempdir( CLEANUP => 1 ); $log->debug("Temporary directory is '$TEMP_DIR'"); $state->{method} = 'ext'; if (@ARGV) { if ($ARGV[0] eq 'pserver') { $state->{method} = 'pserver'; shift @ARGV; } elsif ($ARGV[0] eq 'server') { shift @ARGV; } } # everything else is a directory $state->{allowed_roots} = [ @ARGV ]; # don't export the whole system unless the users requests it if ($state->{'export-all'} && !@{$state->{allowed_roots}}) { die "--export-all can only be used together with an explicit whitelist\n"; } # Environment handling for running under git-shell if (exists $ENV{GIT_CVSSERVER_BASE_PATH}) { if ($state->{'base-path'}) { die "Cannot specify base path both ways.\n"; } my $base_path = $ENV{GIT_CVSSERVER_BASE_PATH}; $state->{'base-path'} = $base_path; $log->debug("Picked up base path '$base_path' from environment.\n"); } if (exists $ENV{GIT_CVSSERVER_ROOT}) { if (@{$state->{allowed_roots}}) { die "Cannot specify roots both ways: @ARGV\n"; } my $allowed_root = $ENV{GIT_CVSSERVER_ROOT}; $state->{allowed_roots} = [ $allowed_root ]; $log->debug("Picked up allowed root '$allowed_root' from environment.\n"); } # if we are called with a pserver argument, # deal with the authentication cat before entering the # main loop if ($state->{method} eq 'pserver') { my $line = ; chomp $line; unless( $line =~ /^BEGIN (AUTH|VERIFICATION) REQUEST$/) { die "E Do not understand $line - expecting BEGIN AUTH REQUEST\n"; } my $request = $1; $line = ; chomp $line; unless (req_Root('root', $line)) { # reuse Root print "E Invalid root $line \n"; exit 1; } $line = ; chomp $line; my $user = $line; $line = ; chomp $line; my $password = $line; if ($user eq 'anonymous') { # 
"A" will be 1 byte, use length instead in case the # encryption method ever changes (yeah, right!) if (length($password) > 1 ) { print "E Don't supply a password for the `anonymous' user\n"; print "I HATE YOU\n"; exit 1; } # Fall through to LOVE } else { # Trying to authenticate a user if (not exists $cfg->{gitcvs}->{authdb}) { print "E the repo config file needs a [gitcvs] section with an 'authdb' parameter set to the filename of the authentication database\n"; print "I HATE YOU\n"; exit 1; } my $authdb = $cfg->{gitcvs}->{authdb}; unless (-e $authdb) { print "E The authentication database specified in [gitcvs.authdb] does not exist\n"; print "I HATE YOU\n"; exit 1; } my $auth_ok; open my $passwd, "<", $authdb or die $!; while (<$passwd>) { if (m{^\Q$user\E:(.*)}) { if (crypt($user, descramble($password)) eq $1) { $auth_ok = 1; } }; } close $passwd; unless ($auth_ok) { print "I HATE YOU\n"; exit 1; } # Fall through to LOVE } # For checking whether the user is anonymous on commit $state->{user} = $user; $line = ; chomp $line; unless ($line eq "END $request REQUEST") { die "E Do not understand $line -- expecting END $request REQUEST\n"; } print "I LOVE YOU\n"; exit if $request eq 'VERIFICATION'; # cvs login # and now back to our regular programme... } # Keep going until the client closes the connection while () { chomp; # Check to see if we've seen this method, and call appropriate function. if ( /^([\w-]+)(?:\s+(.*))?$/ and defined($methods->{$1}) ) { # use the $methods hash to call the appropriate sub for this command #$log->info("Method : $1"); &{$methods->{$1}}($1,$2); } else { # log fatal because we don't understand this function. If this happens # we're fairly screwed because we don't know if the client is expecting # a response. If it is, the client will hang, we'll hang, and the whole # thing will be custard. $log->fatal("Don't understand command $_\n"); die("Unknown command $_"); } } $log->debug("Processing time : user=" . (times)[0] . " system=" . 
(times)[1]); $log->info("--------------- FINISH -----------------"); chdir '/'; exit 0; # Magic catchall method. # This is the method that will handle all commands we haven't yet # implemented. It simply sends a warning to the log file indicating a # command that hasn't been implemented has been invoked. sub req_CATCHALL { my ( $cmd, $data ) = @_; $log->warn("Unhandled command : req_$cmd : $data"); } # This method invariably succeeds with an empty response. sub req_EMPTY { print "ok\n"; } # Root pathname \n # Response expected: no. Tell the server which CVSROOT to use. Note that # pathname is a local directory and not a fully qualified CVSROOT variable. # pathname must already exist; if creating a new root, use the init # request, not Root. pathname does not include the hostname of the server, # how to access the server, etc.; by the time the CVS protocol is in use, # connection, authentication, etc., are already taken care of. The Root # request must be sent only once, and it must be sent before any requests # other than Valid-responses, valid-requests, UseUnchanged, Set or init. sub req_Root { my ( $cmd, $data ) = @_; $log->debug("req_Root : $data"); unless ($data =~ m#^/#) { print "error 1 Root must be an absolute pathname\n"; return 0; } my $cvsroot = $state->{'base-path'} || ''; $cvsroot =~ s#/+$##; $cvsroot .= $data; if ($state->{CVSROOT} && ($state->{CVSROOT} ne $cvsroot)) { print "error 1 Conflicting roots specified\n"; return 0; } $state->{CVSROOT} = $cvsroot; $ENV{GIT_DIR} = $state->{CVSROOT} . 
"/"; if (@{$state->{allowed_roots}}) { my $allowed = 0; foreach my $dir (@{$state->{allowed_roots}}) { next unless $dir =~ m#^/#; $dir =~ s#/+$##; if ($state->{'strict-paths'}) { if ($ENV{GIT_DIR} =~ m#^\Q$dir\E/?$#) { $allowed = 1; last; } } elsif ($ENV{GIT_DIR} =~ m#^\Q$dir\E(/?$|/)#) { $allowed = 1; last; } } unless ($allowed) { print "E $ENV{GIT_DIR} does not seem to be a valid GIT repository\n"; print "E \n"; print "error 1 $ENV{GIT_DIR} is not a valid repository\n"; return 0; } } unless (-d $ENV{GIT_DIR} && -e $ENV{GIT_DIR}.'HEAD') { print "E $ENV{GIT_DIR} does not seem to be a valid GIT repository\n"; print "E \n"; print "error 1 $ENV{GIT_DIR} is not a valid repository\n"; return 0; } my @gitvars = `git config -l`; if ($?) { print "E problems executing git-config on the server -- this is not a git repository or the PATH is not set correctly.\n"; print "E \n"; print "error 1 - problem executing git-config\n"; return 0; } foreach my $line ( @gitvars ) { next unless ( $line =~ /^(gitcvs)\.(?:(ext|pserver)\.)?([\w-]+)=(.*)$/ ); unless ($2) { $cfg->{$1}{$3} = $4; } else { $cfg->{$1}{$2}{$3} = $4; } } my $enabled = ($cfg->{gitcvs}{$state->{method}}{enabled} || $cfg->{gitcvs}{enabled}); unless ($state->{'export-all'} || ($enabled && $enabled =~ /^\s*(1|true|yes)\s*$/i)) { print "E GITCVS emulation needs to be enabled on this repo\n"; print "E the repo config file needs a [gitcvs] section added, and the parameter 'enabled' set to 1\n"; print "E \n"; print "error 1 GITCVS emulation disabled\n"; return 0; } my $logfile = $cfg->{gitcvs}{$state->{method}}{logfile} || $cfg->{gitcvs}{logfile}; if ( $logfile ) { $log->setfile($logfile); } else { $log->nofile(); } return 1; } # Global_option option \n # Response expected: no. Transmit one of the global options `-q', `-Q', # `-l', `-t', `-r', or `-n'. option must be one of those strings, no # variations (such as combining of options) are allowed. 
For graceful # handling of valid-requests, it is probably better to make new global # options separate requests, rather than trying to add them to this # request. sub req_Globaloption { my ( $cmd, $data ) = @_; $log->debug("req_Globaloption : $data"); $state->{globaloptions}{$data} = 1; } # Valid-responses request-list \n # Response expected: no. Tell the server what responses the client will # accept. request-list is a space separated list of tokens. sub req_Validresponses { my ( $cmd, $data ) = @_; $log->debug("req_Validresponses : $data"); # TODO : re-enable this, currently it's not particularly useful #$state->{validresponses} = [ split /\s+/, $data ]; } # valid-requests \n # Response expected: yes. Ask the server to send back a Valid-requests # response. sub req_validrequests { my ( $cmd, $data ) = @_; $log->debug("req_validrequests"); $log->debug("SEND : Valid-requests " . join(" ",sort keys %$methods)); $log->debug("SEND : ok"); print "Valid-requests " . join(" ",sort keys %$methods) . "\n"; print "ok\n"; } # Directory local-directory \n # Additional data: repository \n. Response expected: no. Tell the server # what directory to use. The repository should be a directory name from a # previous server response. Note that this both gives a default for Entry # and Modified and also for ci and the other commands; normal usage is to # send Directory for each directory in which there will be an Entry or # Modified, and then a final Directory for the original directory, then the # command. The local-directory is relative to the top level at which the # command is occurring (i.e. the last Directory which is sent before the # command); to indicate that top level, `.' should be sent for # local-directory. 
sub req_Directory { my ( $cmd, $data ) = @_; my $repository = ; chomp $repository; $state->{localdir} = $data; $state->{repository} = $repository; $state->{path} = $repository; $state->{path} =~ s/^\Q$state->{CVSROOT}\E\///; $state->{module} = $1 if ($state->{path} =~ s/^(.*?)(\/|$)//); $state->{path} .= "/" if ( $state->{path} =~ /\S/ ); $state->{directory} = $state->{localdir}; $state->{directory} = "" if ( $state->{directory} eq "." ); $state->{directory} .= "/" if ( $state->{directory} =~ /\S/ ); if ( (not defined($state->{prependdir}) or $state->{prependdir} eq '') and $state->{localdir} eq "." and $state->{path} =~ /\S/ ) { $log->info("Setting prepend to '$state->{path}'"); $state->{prependdir} = $state->{path}; my %entries; foreach my $entry ( keys %{$state->{entries}} ) { $entries{$state->{prependdir} . $entry} = $state->{entries}{$entry}; } $state->{entries}=\%entries; my %dirMap; foreach my $dir ( keys %{$state->{dirMap}} ) { $dirMap{$state->{prependdir} . $dir} = $state->{dirMap}{$dir}; } $state->{dirMap}=\%dirMap; } if ( defined ( $state->{prependdir} ) ) { $log->debug("Prepending '$state->{prependdir}' to state|directory"); $state->{directory} = $state->{prependdir} . $state->{directory} } if ( ! defined($state->{dirMap}{$state->{directory}}) ) { $state->{dirMap}{$state->{directory}} = { 'names' => {} #'tagspec' => undef }; } $log->debug("req_Directory : localdir=$data repository=$repository path=$state->{path} directory=$state->{directory} module=$state->{module}"); } # Sticky tagspec \n # Response expected: no. Tell the server that the directory most # recently specified with Directory has a sticky tag or date # tagspec. The first character of tagspec is T for a tag, D for # a date, or some other character supplied by a Set-sticky # response from a previous request to the server. The remainder # of tagspec contains the actual tag or date, again as supplied # by Set-sticky. 
# The server should remember Static-directory and Sticky requests # for a particular directory; the client need not resend them each # time it sends a Directory request for a given directory. However, # the server is not obliged to remember them beyond the context # of a single command. sub req_Sticky { my ( $cmd, $tagspec ) = @_; my ( $stickyInfo ); if($tagspec eq "") { # nothing } elsif($tagspec=~/^T([^ ]+)\s*$/) { $stickyInfo = { 'tag' => $1 }; } elsif($tagspec=~/^D([0-9.]+)\s*$/) { $stickyInfo= { 'date' => $1 }; } else { die "Unknown tag_or_date format\n"; } $state->{dirMap}{$state->{directory}}{stickyInfo}=$stickyInfo; $log->debug("req_Sticky : tagspec=$tagspec repository=$state->{repository}" . " path=$state->{path} directory=$state->{directory}" . " module=$state->{module}"); } # Entry entry-line \n # Response expected: no. Tell the server what version of a file is on the # local machine. The name in entry-line is a name relative to the directory # most recently specified with Directory. If the user is operating on only # some files in a directory, Entry requests for only those files need be # included. If an Entry request is sent without Modified, Is-modified, or # Unchanged, it means the file is lost (does not exist in the working # directory). If both Entry and one of Modified, Is-modified, or Unchanged # are sent for the same file, Entry must be sent first. For a given file, # one can send Modified, Is-modified, or Unchanged, but not more than one # of these three. sub req_Entry { my ( $cmd, $data ) = @_; #$log->debug("req_Entry : $data"); my @data = split(/\//, $data, -1); $state->{entries}{$state->{directory}.$data[1]} = { revision => $data[2], conflict => $data[3], options => $data[4], tag_or_date => $data[5], }; $state->{dirMap}{$state->{directory}}{names}{$data[1]} = 'F'; $log->info("Received entry line '$data' => '" . $state->{directory} . $data[1] . "'"); } # Questionable filename \n # Response expected: no. Additional data: no. 
Tell the server to check # whether filename should be ignored, and if not, next time the server # sends responses, send (in a M response) `?' followed by the directory and # filename. filename must not contain `/'; it needs to be a file in the # directory named by the most recent Directory request. sub req_Questionable { my ( $cmd, $data ) = @_; $log->debug("req_Questionable : $data"); $state->{entries}{$state->{directory}.$data}{questionable} = 1; } # add \n # Response expected: yes. Add a file or directory. This uses any previous # Argument, Directory, Entry, or Modified requests, if they have been sent. # The last Directory sent specifies the working directory at the time of # the operation. To add a directory, send the directory to be added using # Directory and Argument requests. sub req_add { my ( $cmd, $data ) = @_; argsplit("add"); my $updater = GITCVS::updater->new($state->{CVSROOT}, $state->{module}, $log); $updater->update(); my $addcount = 0; foreach my $filename ( @{$state->{args}} ) { $filename = filecleanup($filename); # no -r, -A, or -D with add my $stickyInfo = resolveStickyInfo($filename); my $meta = $updater->getmeta($filename,$stickyInfo); my $wrev = revparse($filename); if ($wrev && $meta && ($wrev=~/^-/)) { # previously removed file, add back $log->info("added file $filename was previously removed, send $meta->{revision}"); print "MT +updated\n"; print "MT text U \n"; print "MT fname $filename\n"; print "MT newline\n"; print "MT -updated\n"; unless ( $state->{globaloptions}{-n} ) { my ( $filepart, $dirpart ) = filenamesplit($filename,1); print "Created $dirpart\n"; print $state->{CVSROOT} . 
"/$state->{module}/$filename\n"; # this is an "entries" line my $kopts = kopts_from_path($filename,"sha1",$meta->{filehash}); my $entryLine = "/$filepart/$meta->{revision}//$kopts/"; $entryLine .= getStickyTagOrDate($stickyInfo); $log->debug($entryLine); print "$entryLine\n"; # permissions $log->debug("SEND : u=$meta->{mode},g=$meta->{mode},o=$meta->{mode}"); print "u=$meta->{mode},g=$meta->{mode},o=$meta->{mode}\n"; # transmit file transmitfile($meta->{filehash}); } next; } unless ( defined ( $state->{entries}{$filename}{modified_filename} ) ) { print "E cvs add: nothing known about `$filename'\n"; next; } # TODO : check we're not squashing an already existing file if ( defined ( $state->{entries}{$filename}{revision} ) ) { print "E cvs add: `$filename' has already been entered\n"; next; } my ( $filepart, $dirpart ) = filenamesplit($filename, 1); print "E cvs add: scheduling file `$filename' for addition\n"; print "Checked-in $dirpart\n"; print "$filename\n"; my $kopts = kopts_from_path($filename,"file", $state->{entries}{$filename}{modified_filename}); print "/$filepart/0//$kopts/" . getStickyTagOrDate($stickyInfo) . "\n"; my $requestedKopts = $state->{opt}{k}; if(defined($requestedKopts)) { $requestedKopts = "-k$requestedKopts"; } else { $requestedKopts = ""; } if( $kopts ne $requestedKopts ) { $log->warn("Ignoring requested -k='$requestedKopts'" . " for '$filename'; detected -k='$kopts' instead"); #TODO: Also have option to send warning to user? } $addcount++; } if ( $addcount == 1 ) { print "E cvs add: use `cvs commit' to add this file permanently\n"; } elsif ( $addcount > 1 ) { print "E cvs add: use `cvs commit' to add these files permanently\n"; } print "ok\n"; } # remove \n # Response expected: yes. Remove a file. This uses any previous Argument, # Directory, Entry, or Modified requests, if they have been sent. The last # Directory sent specifies the working directory at the time of the # operation. 
Note that this request does not actually do anything to the # repository; the only effect of a successful remove request is to supply # the client with a new entries line containing `-' to indicate a removed # file. In fact, the client probably could perform this operation without # contacting the server, although using remove may cause the server to # perform a few more checks. The client sends a subsequent ci request to # actually record the removal in the repository. sub req_remove { my ( $cmd, $data ) = @_; argsplit("remove"); # Grab a handle to the SQLite db and do any necessary updates my $updater = GITCVS::updater->new($state->{CVSROOT}, $state->{module}, $log); $updater->update(); #$log->debug("add state : " . Dumper($state)); my $rmcount = 0; foreach my $filename ( @{$state->{args}} ) { $filename = filecleanup($filename); if ( defined ( $state->{entries}{$filename}{unchanged} ) or defined ( $state->{entries}{$filename}{modified_filename} ) ) { print "E cvs remove: file `$filename' still in working directory\n"; next; } # only from entries my $stickyInfo = resolveStickyInfo($filename); my $meta = $updater->getmeta($filename,$stickyInfo); my $wrev = revparse($filename); unless ( defined ( $wrev ) ) { print "E cvs remove: nothing known about `$filename'\n"; next; } if ( defined($wrev) and ($wrev=~/^-/) ) { print "E cvs remove: file `$filename' already scheduled for removal\n"; next; } unless ( $wrev eq $meta->{revision} ) { # TODO : not sure if the format of this message is quite correct. print "E cvs remove: Up to date check failed for `$filename'\n"; next; } my ( $filepart, $dirpart ) = filenamesplit($filename, 1); print "E cvs remove: scheduling `$filename' for removal\n"; print "Checked-in $dirpart\n"; print "$filename\n"; my $kopts = kopts_from_path($filename,"sha1",$meta->{filehash}); print "/$filepart/-$wrev//$kopts/" . getStickyTagOrDate($stickyInfo) . 
"\n"; $rmcount++; } if ( $rmcount == 1 ) { print "E cvs remove: use `cvs commit' to remove this file permanently\n"; } elsif ( $rmcount > 1 ) { print "E cvs remove: use `cvs commit' to remove these files permanently\n"; } print "ok\n"; } # Modified filename \n # Response expected: no. Additional data: mode, \n, file transmission. Send # the server a copy of one locally modified file. filename is a file within # the most recent directory sent with Directory; it must not contain `/'. # If the user is operating on only some files in a directory, only those # files need to be included. This can also be sent without Entry, if there # is no entry for the file. sub req_Modified { my ( $cmd, $data ) = @_; my $mode = ; defined $mode or (print "E end of file reading mode for $data\n"), return; chomp $mode; my $size = ; defined $size or (print "E end of file reading size of $data\n"), return; chomp $size; # Grab config information my $blocksize = 8192; my $bytesleft = $size; my $tmp; # Get a filehandle/name to write it to my ( $fh, $filename ) = tempfile( DIR => $TEMP_DIR ); # Loop over file data writing out to temporary file. while ( $bytesleft ) { $blocksize = $bytesleft if ( $bytesleft < $blocksize ); read STDIN, $tmp, $blocksize; print $fh $tmp; $bytesleft -= $blocksize; } close $fh or (print "E failed to write temporary, $filename: $!\n"), return; # Ensure we have something sensible for the file mode if ( $mode =~ /u=(\w+)/ ) { $mode = $1; } else { $mode = "rw"; } # Save the file data in $state $state->{entries}{$state->{directory}.$data}{modified_filename} = $filename; $state->{entries}{$state->{directory}.$data}{modified_mode} = $mode; $state->{entries}{$state->{directory}.$data}{modified_hash} = `git hash-object $filename`; $state->{entries}{$state->{directory}.$data}{modified_hash} =~ s/\s.*$//s; #$log->debug("req_Modified : file=$data mode=$mode size=$size"); } # Unchanged filename \n # Response expected: no. 
Tell the server that filename has not been # modified in the checked out directory. The filename is a file within the # most recent directory sent with Directory; it must not contain `/'. sub req_Unchanged { my ( $cmd, $data ) = @_; $state->{entries}{$state->{directory}.$data}{unchanged} = 1; #$log->debug("req_Unchanged : $data"); } # Argument text \n # Response expected: no. Save argument for use in a subsequent command. # Arguments accumulate until an argument-using command is given, at which # point they are forgotten. # Argumentx text \n # Response expected: no. Append \n followed by text to the current argument # being saved. sub req_Argument { my ( $cmd, $data ) = @_; # Argumentx means: append to last Argument (with a newline in front) $log->debug("$cmd : $data"); if ( $cmd eq 'Argumentx') { ${$state->{arguments}}[$#{$state->{arguments}}] .= "\n" . $data; } else { push @{$state->{arguments}}, $data; } } # expand-modules \n # Response expected: yes. Expand the modules which are specified in the # arguments. Returns the data in Module-expansion responses. Note that the # server can assume that this is checkout or export, not rtag or rdiff; the # latter do not access the working directory and thus have no need to # expand modules on the client side. Expand may not be the best word for # what this request does. It does not necessarily tell you all the files # contained in a module, for example. Basically it is a way of telling you # which working directories the server needs to know about in order to # handle a checkout of the specified modules. For example, suppose that the # server has a module defined by # aliasmodule -a 1dir # That is, one can check out aliasmodule and it will take 1dir in the # repository and check it out to 1dir in the working directory. Now suppose # the client already has this module checked out and is planning on using # the co request to update it. 
Without using expand-modules, the client # would have two bad choices: it could either send information about all # working directories under the current directory, which could be # unnecessarily slow, or it could be ignorant of the fact that aliasmodule # stands for 1dir, and neglect to send information for 1dir, which would # lead to incorrect operation. With expand-modules, the client would first # ask for the module to be expanded: sub req_expandmodules { my ( $cmd, $data ) = @_; argsplit(); $log->debug("req_expandmodules : " . ( defined($data) ? $data : "[NULL]" ) ); unless ( ref $state->{arguments} eq "ARRAY" ) { print "ok\n"; return; } foreach my $module ( @{$state->{arguments}} ) { $log->debug("SEND : Module-expansion $module"); print "Module-expansion $module\n"; } print "ok\n"; statecleanup(); } # co \n # Response expected: yes. Get files from the repository. This uses any # previous Argument, Directory, Entry, or Modified requests, if they have # been sent. Arguments to this command are module names; the client cannot # know what directories they correspond to except by (1) just sending the # co request, and then seeing what directory names the server sends back in # its responses, and (2) the expand-modules request. sub req_co { my ( $cmd, $data ) = @_; argsplit("co"); # Provide list of modules, if -c was used. if (exists $state->{opt}{c}) { my $showref = `git show-ref --heads`; for my $line (split '\n', $showref) { if ( $line =~ m% refs/heads/(.*)$% ) { print "M $1\t$1\n"; } } print "ok\n"; return 1; } my $stickyInfo = { 'tag' => $state->{opt}{r}, 'date' => $state->{opt}{D} }; my $module = $state->{args}[0]; $state->{module} = $module; my $checkout_path = $module; # use the user specified directory if we're given it $checkout_path = $state->{opt}{d} if ( exists ( $state->{opt}{d} ) ); $log->debug("req_co : " . ( defined($data) ? 
$data : "[NULL]" ) ); $log->info("Checking out module '$module' ($state->{CVSROOT}) to '$checkout_path'"); $ENV{GIT_DIR} = $state->{CVSROOT} . "/"; # Grab a handle to the SQLite db and do any necessary updates my $updater = GITCVS::updater->new($state->{CVSROOT}, $module, $log); $updater->update(); my $headHash; if( defined($stickyInfo) && defined($stickyInfo->{tag}) ) { $headHash = $updater->lookupCommitRef($stickyInfo->{tag}); if( !defined($headHash) ) { print "error 1 no such tag `$stickyInfo->{tag}'\n"; cleanupWorkTree(); exit; } } $checkout_path =~ s|/$||; # get rid of trailing slashes my %seendirs = (); my $lastdir =''; prepDirForOutput( ".", $state->{CVSROOT} . "/$module", $checkout_path, \%seendirs, 'checkout', $state->{dirArgs} ); foreach my $git ( @{$updater->getAnyHead($headHash)} ) { # Don't want to check out deleted files next if ( $git->{filehash} eq "deleted" ); my $fullName = $git->{name}; ( $git->{name}, $git->{dir} ) = filenamesplit($git->{name}); unless (exists($seendirs{$git->{dir}})) { prepDirForOutput($git->{dir}, $state->{CVSROOT} . "/$module/", $checkout_path, \%seendirs, 'checkout', $state->{dirArgs} ); $lastdir = $git->{dir}; $seendirs{$git->{dir}} = 1; } # modification time of this file print "Mod-time $git->{modified}\n"; # print some information to the client if ( defined ( $git->{dir} ) and $git->{dir} ne "./" ) { print "M U $checkout_path/$git->{dir}$git->{name}\n"; } else { print "M U $checkout_path/$git->{name}\n"; } # instruct client we're sending a file to put in this path print "Created $checkout_path/" . ( defined ( $git->{dir} ) and $git->{dir} ne "./" ? $git->{dir} . "/" : "" ) . "\n"; print $state->{CVSROOT} . "/$module/" . ( defined ( $git->{dir} ) and $git->{dir} ne "./" ? $git->{dir} . "/" : "" ) . "$git->{name}\n"; # this is an "entries" line my $kopts = kopts_from_path($fullName,"sha1",$git->{filehash}); print "/$git->{name}/$git->{revision}//$kopts/" . getStickyTagOrDate($stickyInfo) . 
"\n"; # permissions print "u=$git->{mode},g=$git->{mode},o=$git->{mode}\n"; # transmit file transmitfile($git->{filehash}); } print "ok\n"; statecleanup(); } # used by req_co and req_update to set up directories for files # recursively handles parents sub prepDirForOutput { my ($dir, $repodir, $remotedir, $seendirs, $request, $dirArgs) = @_; my $parent = dirname($dir); $dir =~ s|/+$||; $repodir =~ s|/+$||; $remotedir =~ s|/+$||; $parent =~ s|/+$||; if ($parent eq '.' || $parent eq './') { $parent = ''; } # recurse to announce unseen parents first if( length($parent) && !exists($seendirs->{$parent}) && ( $request eq "checkout" || exists($dirArgs->{$parent}) ) ) { prepDirForOutput($parent, $repodir, $remotedir, $seendirs, $request, $dirArgs); } # Announce that we are going to modify at the parent level if ($dir eq '.' || $dir eq './') { $dir = ''; } if(exists($seendirs->{$dir})) { return; } $log->debug("announcedir $dir, $repodir, $remotedir" ); my($thisRemoteDir,$thisRepoDir); if ($dir ne "") { $thisRepoDir="$repodir/$dir"; if($remotedir eq ".") { $thisRemoteDir=$dir; } else { $thisRemoteDir="$remotedir/$dir"; } } else { $thisRepoDir=$repodir; $thisRemoteDir=$remotedir; } unless ( $state->{globaloptions}{-Q} || $state->{globaloptions}{-q} ) { print "E cvs $request: Updating $thisRemoteDir\n"; } my ($opt_r)=$state->{opt}{r}; my $stickyInfo; if(exists($state->{opt}{A})) { # $stickyInfo=undef; } elsif( defined($opt_r) && $opt_r ne "" ) # || ( defined($state->{opt}{D}) && $state->{opt}{D} ne "" ) # TODO { $stickyInfo={ 'tag' => (defined($opt_r)?$opt_r:undef) }; # TODO: Convert -D value into the form 2011.04.10.04.46.57, # similar to an entry line's sticky date, without the D prefix. # It sometimes (always?) arrives as something more like # '10 Apr 2011 04:46:57 -0000'... # $stickyInfo={ 'date' => (defined($stickyDate)?$stickyDate:undef) }; } else { $stickyInfo=getDirStickyInfo($state->{prependdir} . 
$dir); } my $stickyResponse; if(defined($stickyInfo)) { $stickyResponse = "Set-sticky $thisRemoteDir/\n" . "$thisRepoDir/\n" . getStickyTagOrDate($stickyInfo) . "\n"; } else { $stickyResponse = "Clear-sticky $thisRemoteDir/\n" . "$thisRepoDir/\n"; } unless ( $state->{globaloptions}{-n} ) { print $stickyResponse; print "Clear-static-directory $thisRemoteDir/\n"; print "$thisRepoDir/\n"; print $stickyResponse; # yes, twice print "Template $thisRemoteDir/\n"; print "$thisRepoDir/\n"; print "0\n"; } $seendirs->{$dir} = 1; # FUTURE: This would more accurately emulate CVS by sending # another copy of sticky after processing the files in that # directory. Or intermediate: perhaps send all sticky's for # $seendirs after after processing all files. } # update \n # Response expected: yes. Actually do a cvs update command. This uses any # previous Argument, Directory, Entry, or Modified requests, if they have # been sent. The last Directory sent specifies the working directory at the # time of the operation. The -I option is not used--files which the client # can decide whether to ignore are not mentioned and the client sends the # Questionable request for others. sub req_update { my ( $cmd, $data ) = @_; $log->debug("req_update : " . ( defined($data) ? $data : "[NULL]" )); argsplit("update"); # # It may just be a client exploring the available heads/modules # in that case, list them as top level directories and leave it # at that. Eclipse uses this technique to offer you a list of # projects (heads in this case) to checkout. 
# if ($state->{module} eq '') { my $showref = `git show-ref --heads`; print "E cvs update: Updating .\n"; for my $line (split '\n', $showref) { if ( $line =~ m% refs/heads/(.*)$% ) { print "E cvs update: New directory `$1'\n"; } } print "ok\n"; return 1; } # Grab a handle to the SQLite db and do any necessary updates my $updater = GITCVS::updater->new($state->{CVSROOT}, $state->{module}, $log); $updater->update(); argsfromdir($updater); #$log->debug("update state : " . Dumper($state)); my($repoDir); $repoDir=$state->{CVSROOT} . "/$state->{module}/$state->{prependdir}"; my %seendirs = (); # foreach file specified on the command line ... foreach my $argsFilename ( @{$state->{args}} ) { my $filename; $filename = filecleanup($argsFilename); $log->debug("Processing file $filename"); # if we have a -C we should pretend we never saw modified stuff if ( exists ( $state->{opt}{C} ) ) { delete $state->{entries}{$filename}{modified_hash}; delete $state->{entries}{$filename}{modified_filename}; $state->{entries}{$filename}{unchanged} = 1; } my $stickyInfo = resolveStickyInfo($filename, $state->{opt}{r}, $state->{opt}{D}, exists($state->{opt}{A})); my $meta = $updater->getmeta($filename, $stickyInfo); # If -p was given, "print" the contents of the requested revision. if ( exists ( $state->{opt}{p} ) ) { if ( defined ( $meta->{revision} ) ) { $log->info("Printing '$filename' revision " . $meta->{revision}); transmitfile($meta->{filehash}, { print => 1 }); } next; } # Directories: prepDirForOutput( dirname($argsFilename), $repoDir, ".", \%seendirs, "update", $state->{dirArgs} ); my $wrev = revparse($filename); if ( ! defined $meta ) { $meta = { name => $filename, revision => '0', filehash => 'added' }; if($wrev ne "0") { $meta->{filehash}='deleted'; } } my $oldmeta = $meta; # If the working copy is an old revision, lets get that version too for comparison. 
my $oldWrev=$wrev; if(defined($oldWrev)) { $oldWrev=~s/^-//; if($oldWrev ne $meta->{revision}) { $oldmeta = $updater->getmeta($filename, $oldWrev); } } #$log->debug("Target revision is $meta->{revision}, current working revision is $wrev"); # Files are up to date if the working copy and repo copy have the same revision, # and the working copy is unmodified _and_ the user hasn't specified -C next if ( defined ( $wrev ) and defined($meta->{revision}) and $wrev eq $meta->{revision} and $state->{entries}{$filename}{unchanged} and not exists ( $state->{opt}{C} ) ); # If the working copy and repo copy have the same revision, # but the working copy is modified, tell the client it's modified if ( defined ( $wrev ) and defined($meta->{revision}) and $wrev eq $meta->{revision} and $wrev ne "0" and defined($state->{entries}{$filename}{modified_hash}) and not exists ( $state->{opt}{C} ) ) { $log->info("Tell the client the file is modified"); print "MT text M \n"; print "MT fname $filename\n"; print "MT newline\n"; next; } if ( $meta->{filehash} eq "deleted" && $wrev ne "0" ) { # TODO: If it has been modified in the sandbox, error out # with the appropriate message, rather than deleting a modified # file. 
my ( $filepart, $dirpart ) = filenamesplit($filename,1); $log->info("Removing '$filename' from working copy (no longer in the repo)"); print "E cvs update: `$filename' is no longer in the repository\n"; # Don't want to actually _DO_ the update if -n specified unless ( $state->{globaloptions}{-n} ) { print "Removed $dirpart\n"; print "$filepart\n"; } } elsif ( not defined ( $state->{entries}{$filename}{modified_hash} ) or $state->{entries}{$filename}{modified_hash} eq $oldmeta->{filehash} or $meta->{filehash} eq 'added' ) { # normal update, just send the new revision (either U=Update, # or A=Add, or R=Remove) if ( defined($wrev) && ($wrev=~/^-/) ) { $log->info("Tell the client the file is scheduled for removal"); print "MT text R \n"; print "MT fname $filename\n"; print "MT newline\n"; next; } elsif ( (!defined($wrev) || $wrev eq '0') && (!defined($meta->{revision}) || $meta->{revision} eq '0') ) { $log->info("Tell the client the file is scheduled for addition"); print "MT text A \n"; print "MT fname $filename\n"; print "MT newline\n"; next; } else { $log->info("UpdatingX3 '$filename' to ".$meta->{revision}); print "MT +updated\n"; print "MT text U \n"; print "MT fname $filename\n"; print "MT newline\n"; print "MT -updated\n"; } my ( $filepart, $dirpart ) = filenamesplit($filename,1); # Don't want to actually _DO_ the update if -n specified unless ( $state->{globaloptions}{-n} ) { if ( defined ( $wrev ) ) { # instruct client we're sending a file to put in this path as a replacement print "Update-existing $dirpart\n"; $log->debug("Updating existing file 'Update-existing $dirpart'"); } else { # instruct client we're sending a file to put in this path as a new file $log->debug("Creating new file 'Created $dirpart'"); print "Created $dirpart\n"; } print $state->{CVSROOT} . 
"/$state->{module}/$filename\n"; # this is an "entries" line my $kopts = kopts_from_path($filename,"sha1",$meta->{filehash}); my $entriesLine = "/$filepart/$meta->{revision}//$kopts/"; $entriesLine .= getStickyTagOrDate($stickyInfo); $log->debug($entriesLine); print "$entriesLine\n"; # permissions $log->debug("SEND : u=$meta->{mode},g=$meta->{mode},o=$meta->{mode}"); print "u=$meta->{mode},g=$meta->{mode},o=$meta->{mode}\n"; # transmit file transmitfile($meta->{filehash}); } } else { my ( $filepart, $dirpart ) = filenamesplit($meta->{name},1); my $mergeDir = setupTmpDir(); my $file_local = $filepart . ".mine"; my $mergedFile = "$mergeDir/$file_local"; system("ln","-s",$state->{entries}{$filename}{modified_filename}, $file_local); my $file_old = $filepart . "." . $oldmeta->{revision}; transmitfile($oldmeta->{filehash}, { targetfile => $file_old }); my $file_new = $filepart . "." . $meta->{revision}; transmitfile($meta->{filehash}, { targetfile => $file_new }); # we need to merge with the local changes ( M=successful merge, C=conflict merge ) $log->info("Merging $file_local, $file_old, $file_new"); print "M Merging differences between $oldmeta->{revision} and $meta->{revision} into $filename\n"; $log->debug("Temporary directory for merge is $mergeDir"); my $return = system("git", "merge-file", $file_local, $file_old, $file_new); $return >>= 8; cleanupTmpDir(); if ( $return == 0 ) { $log->info("Merged successfully"); print "M M $filename\n"; $log->debug("Merged $dirpart"); # Don't want to actually _DO_ the update if -n specified unless ( $state->{globaloptions}{-n} ) { print "Merged $dirpart\n"; $log->debug($state->{CVSROOT} . "/$state->{module}/$filename"); print $state->{CVSROOT} . 
"/$state->{module}/$filename\n"; my $kopts = kopts_from_path("$dirpart/$filepart", "file",$mergedFile); $log->debug("/$filepart/$meta->{revision}//$kopts/"); my $entriesLine="/$filepart/$meta->{revision}//$kopts/"; $entriesLine .= getStickyTagOrDate($stickyInfo); print "$entriesLine\n"; } } elsif ( $return == 1 ) { $log->info("Merged with conflicts"); print "E cvs update: conflicts found in $filename\n"; print "M C $filename\n"; # Don't want to actually _DO_ the update if -n specified unless ( $state->{globaloptions}{-n} ) { print "Merged $dirpart\n"; print $state->{CVSROOT} . "/$state->{module}/$filename\n"; my $kopts = kopts_from_path("$dirpart/$filepart", "file",$mergedFile); my $entriesLine = "/$filepart/$meta->{revision}/+/$kopts/"; $entriesLine .= getStickyTagOrDate($stickyInfo); print "$entriesLine\n"; } } else { $log->warn("Merge failed"); next; } # Don't want to actually _DO_ the update if -n specified unless ( $state->{globaloptions}{-n} ) { # permissions $log->debug("SEND : u=$meta->{mode},g=$meta->{mode},o=$meta->{mode}"); print "u=$meta->{mode},g=$meta->{mode},o=$meta->{mode}\n"; # transmit file, format is single integer on a line by itself (file # size) followed by the file contents # TODO : we should copy files in blocks my $data = `cat $mergedFile`; $log->debug("File size : " . length($data)); print length($data) . "\n"; print $data; } } } # prepDirForOutput() any other existing directories unless they already # have the right sticky tag: unless ( $state->{globaloptions}{n} ) { my $dir; foreach $dir (keys(%{$state->{dirMap}})) { if( ! $seendirs{$dir} && exists($state->{dirArgs}{$dir}) ) { my($oldTag); $oldTag=$state->{dirMap}{$dir}{tagspec}; unless( ( exists($state->{opt}{A}) && defined($oldTag) ) || ( defined($state->{opt}{r}) && ( !defined($oldTag) || $state->{opt}{r} ne $oldTag ) ) ) # TODO?: OR sticky dir is different... 
{ next; } prepDirForOutput( $dir, $repoDir, ".", \%seendirs, 'update', $state->{dirArgs} ); } # TODO?: Consider sending a final duplicate Sticky response # to more closely mimic real CVS. } } print "ok\n"; } sub req_ci { my ( $cmd, $data ) = @_; argsplit("ci"); #$log->debug("State : " . Dumper($state)); $log->info("req_ci : " . ( defined($data) ? $data : "[NULL]" )); if ( $state->{method} eq 'pserver' and $state->{user} eq 'anonymous' ) { print "error 1 anonymous user cannot commit via pserver\n"; cleanupWorkTree(); exit; } if ( -e $state->{CVSROOT} . "/index" ) { $log->warn("file 'index' already exists in the git repository"); print "error 1 Index already exists in git repo\n"; cleanupWorkTree(); exit; } # Grab a handle to the SQLite db and do any necessary updates my $updater = GITCVS::updater->new($state->{CVSROOT}, $state->{module}, $log); $updater->update(); my @committedfiles = (); my %oldmeta; my $stickyInfo; my $branchRef; my $parenthash; # foreach file specified on the command line ... 
foreach my $filename ( @{$state->{args}} ) { my $committedfile = $filename; $filename = filecleanup($filename); next unless ( exists $state->{entries}{$filename}{modified_filename} or not $state->{entries}{$filename}{unchanged} ); ##### # Figure out which branch and parenthash we are committing # to, and setup worktree: # should always come from entries: my $fileStickyInfo = resolveStickyInfo($filename); if( !defined($branchRef) ) { $stickyInfo = $fileStickyInfo; if( defined($stickyInfo) && ( defined($stickyInfo->{date}) || !defined($stickyInfo->{tag}) ) ) { print "error 1 cannot commit with sticky date for file `$filename'\n"; cleanupWorkTree(); exit; } $branchRef = "refs/heads/$state->{module}"; if ( defined($stickyInfo) && defined($stickyInfo->{tag}) ) { $branchRef = "refs/heads/$stickyInfo->{tag}"; } $parenthash = `git show-ref -s $branchRef`; chomp $parenthash; if ($parenthash !~ /^[0-9a-f]{40}$/) { if ( defined($stickyInfo) && defined($stickyInfo->{tag}) ) { print "error 1 sticky tag `$stickyInfo->{tag}' for file `$filename' is not a branch\n"; } else { print "error 1 pserver cannot find the current HEAD of module"; } cleanupWorkTree(); exit; } setupWorkTree($parenthash); $log->info("Lockless commit start, basing commit on '$work->{workDir}', index file is '$work->{index}'"); $log->info("Created index '$work->{index}' for head $state->{module} - exit status $?"); } elsif( !refHashEqual($stickyInfo,$fileStickyInfo) ) { #TODO: We could split the cvs commit into multiple # git commits by distinct stickyTag values, but that # is lowish priority. print "error 1 Committing different files to different" . 
" branches is not currently supported\n"; cleanupWorkTree(); exit; } ##### # Process this file: my $meta = $updater->getmeta($filename,$stickyInfo); $oldmeta{$filename} = $meta; my $wrev = revparse($filename); my ( $filepart, $dirpart ) = filenamesplit($filename); # do a checkout of the file if it is part of this tree if ($wrev) { system('git', 'checkout-index', '-f', '-u', $filename); unless ($? == 0) { die "Error running git-checkout-index -f -u $filename : $!"; } } my $addflag = 0; my $rmflag = 0; $rmflag = 1 if ( defined($wrev) and ($wrev=~/^-/) ); $addflag = 1 unless ( -e $filename ); # Do up to date checking unless ( $addflag or $wrev eq $meta->{revision} or ( $rmflag and $wrev eq "-$meta->{revision}" ) ) { # fail everything if an up to date check fails print "error 1 Up to date check failed for $filename\n"; cleanupWorkTree(); exit; } push @committedfiles, $committedfile; $log->info("Committing $filename"); system("mkdir","-p",$dirpart) unless ( -d $dirpart ); unless ( $rmflag ) { $log->debug("rename $state->{entries}{$filename}{modified_filename} $filename"); rename $state->{entries}{$filename}{modified_filename},$filename; # Calculate modes to remove my $invmode = ""; foreach ( qw (r w x) ) { $invmode .= $_ unless ( $state->{entries}{$filename}{modified_mode} =~ /$_/ ); } $log->debug("chmod u+" . $state->{entries}{$filename}{modified_mode} . "-" . $invmode . " $filename"); system("chmod","u+" . $state->{entries}{$filename}{modified_mode} . "-" . 
$invmode, $filename); } if ( $rmflag ) { $log->info("Removing file '$filename'"); unlink($filename); system("git", "update-index", "--remove", $filename); } elsif ( $addflag ) { $log->info("Adding file '$filename'"); system("git", "update-index", "--add", $filename); } else { $log->info("UpdatingX2 file '$filename'"); system("git", "update-index", $filename); } } unless ( scalar(@committedfiles) > 0 ) { print "E No files to commit\n"; print "ok\n"; cleanupWorkTree(); return; } my $treehash = `git write-tree`; chomp $treehash; $log->debug("Treehash : $treehash, Parenthash : $parenthash"); # write our commit message out if we have one ... my ( $msg_fh, $msg_filename ) = tempfile( DIR => $TEMP_DIR ); print $msg_fh $state->{opt}{m};# if ( exists ( $state->{opt}{m} ) ); if ( defined ( $cfg->{gitcvs}{commitmsgannotation} ) ) { if ($cfg->{gitcvs}{commitmsgannotation} !~ /^\s*$/ ) { print $msg_fh "\n\n".$cfg->{gitcvs}{commitmsgannotation}."\n" } } else { print $msg_fh "\n\nvia git-CVS emulator\n"; } close $msg_fh; my $commithash = `git commit-tree $treehash -p $parenthash < $msg_filename`; chomp($commithash); $log->info("Commit hash : $commithash"); unless ( $commithash =~ /[a-zA-Z0-9]{40}/ ) { $log->warn("Commit failed (Invalid commit hash)"); print "error 1 Commit failed (unknown reason)\n"; cleanupWorkTree(); exit; } ### Emulate git-receive-pack by running hooks/update my @hook = ( $ENV{GIT_DIR}.'hooks/update', $branchRef, $parenthash, $commithash ); if( -x $hook[0] ) { unless( system( @hook ) == 0 ) { $log->warn("Commit failed (update hook declined to update ref)"); print "error 1 Commit failed (update hook declined)\n"; cleanupWorkTree(); exit; } } ### Update the ref if (system(qw(git update-ref -m), "cvsserver ci", $branchRef, $commithash, $parenthash)) { $log->warn("update-ref for $state->{module} failed."); print "error 1 Cannot commit -- update first\n"; cleanupWorkTree(); exit; } ### Emulate git-receive-pack by running hooks/post-receive my $hook = 
$ENV{GIT_DIR}.'hooks/post-receive'; if( -x $hook ) { open(my $pipe, "| $hook") || die "can't fork $!"; local $SIG{PIPE} = sub { die 'pipe broke' }; print $pipe "$parenthash $commithash $branchRef\n"; close $pipe || die "bad pipe: $! $?"; } $updater->update(); ### Then hooks/post-update $hook = $ENV{GIT_DIR}.'hooks/post-update'; if (-x $hook) { system($hook, $branchRef); } # foreach file specified on the command line ... foreach my $filename ( @committedfiles ) { $filename = filecleanup($filename); my $meta = $updater->getmeta($filename,$stickyInfo); unless (defined $meta->{revision}) { $meta->{revision} = "1.1"; } my ( $filepart, $dirpart ) = filenamesplit($filename, 1); $log->debug("Checked-in $dirpart : $filename"); print "M $state->{CVSROOT}/$state->{module}/$filename,v <-- $dirpart$filepart\n"; if ( defined $meta->{filehash} && $meta->{filehash} eq "deleted" ) { print "M new revision: delete; previous revision: $oldmeta{$filename}{revision}\n"; print "Remove-entry $dirpart\n"; print "$filename\n"; } else { if ($meta->{revision} eq "1.1") { print "M initial revision: 1.1\n"; } else { print "M new revision: $meta->{revision}; previous revision: $oldmeta{$filename}{revision}\n"; } print "Checked-in $dirpart\n"; print "$filename\n"; my $kopts = kopts_from_path($filename,"sha1",$meta->{filehash}); print "/$filepart/$meta->{revision}//$kopts/" . getStickyTagOrDate($stickyInfo) . "\n"; } } cleanupWorkTree(); print "ok\n"; } sub req_status { my ( $cmd, $data ) = @_; argsplit("status"); $log->info("req_status : " . ( defined($data) ? $data : "[NULL]" )); #$log->debug("status state : " . Dumper($state)); # Grab a handle to the SQLite db and do any necessary updates my $updater; $updater = GITCVS::updater->new($state->{CVSROOT}, $state->{module}, $log); $updater->update(); # if no files were specified, we need to work out what files we should # be providing status on ... argsfromdir($updater); # foreach file specified on the command line ... 
foreach my $filename ( @{$state->{args}} ) { $filename = filecleanup($filename); if ( exists($state->{opt}{l}) && index($filename, '/', length($state->{prependdir})) >= 0 ) { next; } my $wrev = revparse($filename); my $stickyInfo = resolveStickyInfo($filename); my $meta = $updater->getmeta($filename,$stickyInfo); my $oldmeta = $meta; # If the working copy is an old revision, lets get that # version too for comparison. if ( defined($wrev) and $wrev ne $meta->{revision} ) { my($rmRev)=$wrev; $rmRev=~s/^-//; $oldmeta = $updater->getmeta($filename, $rmRev); } # TODO : All possible statuses aren't yet implemented my $status; # Files are up to date if the working copy and repo copy have # the same revision, and the working copy is unmodified if ( defined ( $wrev ) and defined($meta->{revision}) and $wrev eq $meta->{revision} and ( ( $state->{entries}{$filename}{unchanged} and ( not defined ( $state->{entries}{$filename}{conflict} ) or $state->{entries}{$filename}{conflict} !~ /^\+=/ ) ) or ( defined($state->{entries}{$filename}{modified_hash}) and $state->{entries}{$filename}{modified_hash} eq $meta->{filehash} ) ) ) { $status = "Up-to-date" } # Need checkout if the working copy has a different (usually # older) revision than the repo copy, and the working copy is # unmodified if ( defined ( $wrev ) and defined ( $meta->{revision} ) and $meta->{revision} ne $wrev and ( $state->{entries}{$filename}{unchanged} or ( defined($state->{entries}{$filename}{modified_hash}) and $state->{entries}{$filename}{modified_hash} eq $oldmeta->{filehash} ) ) ) { $status ||= "Needs Checkout"; } # Need checkout if it exists in the repo but doesn't have a working # copy if ( not defined ( $wrev ) and defined ( $meta->{revision} ) ) { $status ||= "Needs Checkout"; } # Locally modified if working copy and repo copy have the # same revision but there are local changes if ( defined ( $wrev ) and defined($meta->{revision}) and $wrev eq $meta->{revision} and $wrev ne "0" and 
$state->{entries}{$filename}{modified_filename} ) { $status ||= "Locally Modified"; } # Needs Merge if working copy revision is different # (usually older) than repo copy and there are local changes if ( defined ( $wrev ) and defined ( $meta->{revision} ) and $meta->{revision} ne $wrev and $state->{entries}{$filename}{modified_filename} ) { $status ||= "Needs Merge"; } if ( defined ( $state->{entries}{$filename}{revision} ) and ( !defined($meta->{revision}) || $meta->{revision} eq "0" ) ) { $status ||= "Locally Added"; } if ( defined ( $wrev ) and defined ( $meta->{revision} ) and $wrev eq "-$meta->{revision}" ) { $status ||= "Locally Removed"; } if ( defined ( $state->{entries}{$filename}{conflict} ) and $state->{entries}{$filename}{conflict} =~ /^\+=/ ) { $status ||= "Unresolved Conflict"; } if ( 0 ) { $status ||= "File had conflicts on merge"; } $status ||= "Unknown"; my ($filepart) = filenamesplit($filename); print "M =======" . ( "=" x 60 ) . "\n"; print "M File: $filepart\tStatus: $status\n"; if ( defined($state->{entries}{$filename}{revision}) ) { print "M Working revision:\t" . $state->{entries}{$filename}{revision} . "\n"; } else { print "M Working revision:\tNo entry for $filename\n"; } if ( defined($meta->{revision}) ) { print "M Repository revision:\t" . $meta->{revision} . "\t$state->{CVSROOT}/$state->{module}/$filename,v\n"; my($tagOrDate)=$state->{entries}{$filename}{tag_or_date}; my($tag)=($tagOrDate=~m/^T(.+)$/); if( !defined($tag) ) { $tag="(none)"; } print "M Sticky Tag:\t\t$tag\n"; my($date)=($tagOrDate=~m/^D(.+)$/); if( !defined($date) ) { $date="(none)"; } print "M Sticky Date:\t\t$date\n"; my($options)=$state->{entries}{$filename}{options}; if( $options eq "" ) { $options="(none)"; } print "M Sticky Options:\t\t$options\n"; } else { print "M Repository revision:\tNo revision control file\n"; } print "M\n"; } print "ok\n"; } sub req_diff { my ( $cmd, $data ) = @_; argsplit("diff"); $log->debug("req_diff : " . ( defined($data) ? 
$data : "[NULL]" )); #$log->debug("status state : " . Dumper($state)); my ($revision1, $revision2); if ( defined ( $state->{opt}{r} ) and ref $state->{opt}{r} eq "ARRAY" ) { $revision1 = $state->{opt}{r}[0]; $revision2 = $state->{opt}{r}[1]; } else { $revision1 = $state->{opt}{r}; } $log->debug("Diffing revisions " . ( defined($revision1) ? $revision1 : "[NULL]" ) . " and " . ( defined($revision2) ? $revision2 : "[NULL]" ) ); # Grab a handle to the SQLite db and do any necessary updates my $updater; $updater = GITCVS::updater->new($state->{CVSROOT}, $state->{module}, $log); $updater->update(); # if no files were specified, we need to work out what files we should # be providing status on ... argsfromdir($updater); my($foundDiff); # foreach file specified on the command line ... foreach my $argFilename ( @{$state->{args}} ) { my($filename) = filecleanup($argFilename); my ( $fh, $file1, $file2, $meta1, $meta2, $filediff ); my $wrev = revparse($filename); # Priority for revision1: # 1. First -r (missing file: check -N) # 2. wrev from client's Entry line # - missing line/file: check -N # - "0": added file not committed (empty contents for rev1) # - Prefixed with dash (to be removed): check -N if ( defined ( $revision1 ) ) { $meta1 = $updater->getmeta($filename, $revision1); } elsif( defined($wrev) && $wrev ne "0" ) { my($rmRev)=$wrev; $rmRev=~s/^-//; $meta1 = $updater->getmeta($filename, $rmRev); } if ( !defined($meta1) || $meta1->{filehash} eq "deleted" ) { if( !exists($state->{opt}{N}) ) { if(!defined($revision1)) { print "E File $filename at revision $revision1 doesn't exist\n"; } next; } elsif( !defined($meta1) ) { $meta1 = { name => $filename, revision => '0', filehash => 'deleted' }; } } # Priority for revision2: # 1. Second -r (missing file: check -N) # 2. Modified file contents from client # 3. 
wrev from client's Entry line # - missing line/file: check -N # - Prefixed with dash (to be removed): check -N # if we have a second -r switch, use it too if ( defined ( $revision2 ) ) { $meta2 = $updater->getmeta($filename, $revision2); } elsif(defined($state->{entries}{$filename}{modified_filename})) { $file2 = $state->{entries}{$filename}{modified_filename}; $meta2 = { name => $filename, revision => '0', filehash => 'modified' }; } elsif( defined($wrev) && ($wrev!~/^-/) ) { if(!defined($revision1)) # no revision and no modifications: { next; } $meta2 = $updater->getmeta($filename, $wrev); } if(!defined($file2)) { if ( !defined($meta2) || $meta2->{filehash} eq "deleted" ) { if( !exists($state->{opt}{N}) ) { if(!defined($revision2)) { print "E File $filename at revision $revision2 doesn't exist\n"; } next; } elsif( !defined($meta2) ) { $meta2 = { name => $filename, revision => '0', filehash => 'deleted' }; } } } if( $meta1->{filehash} eq $meta2->{filehash} ) { $log->info("unchanged $filename"); next; } # Retrieve revision contents: ( undef, $file1 ) = tempfile( DIR => $TEMP_DIR, OPEN => 0 ); transmitfile($meta1->{filehash}, { targetfile => $file1 }); if(!defined($file2)) { ( undef, $file2 ) = tempfile( DIR => $TEMP_DIR, OPEN => 0 ); transmitfile($meta2->{filehash}, { targetfile => $file2 }); } # Generate the actual diff: print "M Index: $argFilename\n"; print "M =======" . ( "=" x 60 ) . 
"\n"; print "M RCS file: $state->{CVSROOT}/$state->{module}/$filename,v\n"; if ( defined ( $meta1 ) && $meta1->{revision} ne "0" ) { print "M retrieving revision $meta1->{revision}\n" } if ( defined ( $meta2 ) && $meta2->{revision} ne "0" ) { print "M retrieving revision $meta2->{revision}\n" } print "M diff "; foreach my $opt ( sort keys %{$state->{opt}} ) { if ( ref $state->{opt}{$opt} eq "ARRAY" ) { foreach my $value ( @{$state->{opt}{$opt}} ) { print "-$opt $value "; } } else { print "-$opt "; if ( defined ( $state->{opt}{$opt} ) ) { print "$state->{opt}{$opt} " } } } print "$argFilename\n"; $log->info("Diffing $filename -r $meta1->{revision} -r " . ( $meta2->{revision} or "workingcopy" )); # TODO: Use --label instead of -L because -L is no longer # documented and may go away someday. Not sure if there there are # versions that only support -L, which would make this change risky? # http://osdir.com/ml/bug-gnu-utils-gnu/2010-12/msg00060.html # ("man diff" should actually document the best migration strategy, # [current behavior, future changes, old compatibility issues # or lack thereof, etc], not just stop mentioning the option...) # TODO: Real CVS seems to include a date in the label, before # the revision part, without the keyword "revision". The following # has minimal changes compared to original versions of # git-cvsserver.perl. (Mostly tab vs space after filename.) 
my (@diffCmd) = ( 'diff' ); if ( exists($state->{opt}{N}) ) { push @diffCmd,"-N"; } if ( exists $state->{opt}{u} ) { push @diffCmd,("-u","-L"); if( $meta1->{filehash} eq "deleted" ) { push @diffCmd,"/dev/null"; } else { push @diffCmd,("$argFilename\trevision $meta1->{revision}"); } if( defined($meta2->{filehash}) ) { if( $meta2->{filehash} eq "deleted" ) { push @diffCmd,("-L","/dev/null"); } else { push @diffCmd,("-L", "$argFilename\trevision $meta2->{revision}"); } } else { push @diffCmd,("-L","$argFilename\tworking copy"); } } push @diffCmd,($file1,$file2); if(!open(DIFF,"-|",@diffCmd)) { $log->warn("Unable to run diff: $!"); } my($diffLine); while(defined($diffLine=)) { print "M $diffLine"; $foundDiff=1; } close(DIFF); } if($foundDiff) { print "error \n"; } else { print "ok\n"; } } sub req_log { my ( $cmd, $data ) = @_; argsplit("log"); $log->debug("req_log : " . ( defined($data) ? $data : "[NULL]" )); #$log->debug("log state : " . Dumper($state)); my ( $revFilter ); if ( defined ( $state->{opt}{r} ) ) { $revFilter = $state->{opt}{r}; } # Grab a handle to the SQLite db and do any necessary updates my $updater; $updater = GITCVS::updater->new($state->{CVSROOT}, $state->{module}, $log); $updater->update(); # if no files were specified, we need to work out what files we # should be providing status on ... argsfromdir($updater); # foreach file specified on the command line ... 
foreach my $filename ( @{$state->{args}} ) { $filename = filecleanup($filename); my $headmeta = $updater->getmeta($filename); my ($revisions,$totalrevisions) = $updater->getlog($filename, $revFilter); next unless ( scalar(@$revisions) ); print "M \n"; print "M RCS file: $state->{CVSROOT}/$state->{module}/$filename,v\n"; print "M Working file: $filename\n"; print "M head: $headmeta->{revision}\n"; print "M branch:\n"; print "M locks: strict\n"; print "M access list:\n"; print "M symbolic names:\n"; print "M keyword substitution: kv\n"; print "M total revisions: $totalrevisions;\tselected revisions: " . scalar(@$revisions) . "\n"; print "M description:\n"; foreach my $revision ( @$revisions ) { print "M ----------------------------\n"; print "M revision $revision->{revision}\n"; # reformat the date for log output if ( $revision->{modified} =~ /(\d+)\s+(\w+)\s+(\d+)\s+(\S+)/ and defined($DATE_LIST->{$2}) ) { $revision->{modified} = sprintf('%04d/%02d/%02d %s', $3, $DATE_LIST->{$2}, $1, $4 ); } $revision->{author} = cvs_author($revision->{author}); print "M date: $revision->{modified};" . " author: $revision->{author}; state: " . ( $revision->{filehash} eq "deleted" ? "dead" : "Exp" ) . "; lines: +2 -3\n"; my $commitmessage; $commitmessage = $updater->commitmessage($revision->{commithash}); $commitmessage =~ s/^/M /mg; print $commitmessage . "\n"; } print "M =======" . ( "=" x 70 ) . "\n"; } print "ok\n"; } sub req_annotate { my ( $cmd, $data ) = @_; argsplit("annotate"); $log->info("req_annotate : " . ( defined($data) ? $data : "[NULL]" )); #$log->debug("status state : " . Dumper($state)); # Grab a handle to the SQLite db and do any necessary updates my $updater = GITCVS::updater->new($state->{CVSROOT}, $state->{module}, $log); $updater->update(); # if no files were specified, we need to work out what files we should be providing annotate on ... 
argsfromdir($updater); # we'll need a temporary checkout dir setupWorkTree(); $log->info("Temp checkoutdir creation successful, basing annotate session work on '$work->{workDir}', index file is '$ENV{GIT_INDEX_FILE}'"); # foreach file specified on the command line ... foreach my $filename ( @{$state->{args}} ) { $filename = filecleanup($filename); my $meta = $updater->getmeta($filename); next unless ( $meta->{revision} ); # get all the commits that this file was in # in dense format -- aka skip dead revisions my $revisions = $updater->gethistorydense($filename); my $lastseenin = $revisions->[0][2]; # populate the temporary index based on the latest commit were we saw # the file -- but do it cheaply without checking out any files # TODO: if we got a revision from the client, use that instead # to look up the commithash in sqlite (still good to default to # the current head as we do now) system("git", "read-tree", $lastseenin); unless ($? == 0) { print "E error running git-read-tree $lastseenin $ENV{GIT_INDEX_FILE} $!\n"; return; } $log->info("Created index '$ENV{GIT_INDEX_FILE}' with commit $lastseenin - exit status $?"); # do a checkout of the file system('git', 'checkout-index', '-f', '-u', $filename); unless ($? == 0) { print "E error running git-checkout-index -f -u $filename : $!\n"; return; } $log->info("Annotate $filename"); # Prepare a file with the commits from the linearized # history that annotate should know about. This prevents # git-jsannotate telling us about commits we are hiding # from the client. my $a_hints = "$work->{workDir}/.annotate_hints"; if (!open(ANNOTATEHINTS, '>', $a_hints)) { print "E failed to open '$a_hints' for writing: $!\n"; return; } for (my $i=0; $i < @$revisions; $i++) { print ANNOTATEHINTS $revisions->[$i][2]; if ($i+1 < @$revisions) { # have we got a parent? print ANNOTATEHINTS ' ' . 
$revisions->[$i+1][2]; } print ANNOTATEHINTS "\n"; } print ANNOTATEHINTS "\n"; close ANNOTATEHINTS or (print "E failed to write $a_hints: $!\n"), return; my @cmd = (qw(git annotate -l -S), $a_hints, $filename); if (!open(ANNOTATE, "-|", @cmd)) { print "E error invoking ". join(' ',@cmd) .": $!\n"; return; } my $metadata = {}; print "E Annotations for $filename\n"; print "E ***************\n"; while ( ) { if (m/^([a-zA-Z0-9]{40})\t\([^\)]*\)(.*)$/i) { my $commithash = $1; my $data = $2; unless ( defined ( $metadata->{$commithash} ) ) { $metadata->{$commithash} = $updater->getmeta($filename, $commithash); $metadata->{$commithash}{author} = cvs_author($metadata->{$commithash}{author}); $metadata->{$commithash}{modified} = sprintf("%02d-%s-%02d", $1, $2, $3) if ( $metadata->{$commithash}{modified} =~ /^(\d+)\s(\w+)\s\d\d(\d\d)/ ); } printf("M %-7s (%-8s %10s): %s\n", $metadata->{$commithash}{revision}, $metadata->{$commithash}{author}, $metadata->{$commithash}{modified}, $data ); } else { $log->warn("Error in annotate output! LINE: $_"); print "E Annotate error \n"; next; } } close ANNOTATE; } # done; get out of the tempdir cleanupWorkTree(); print "ok\n"; } # This method takes the state->{arguments} array and produces two new arrays. # The first is $state->{args} which is everything before the '--' argument, and # the second is $state->{files} which is everything after it. 
# Parse $state->{arguments} into $state->{args}, $state->{files} and
# $state->{opt}, according to the option table for the given CVS
# command $type.  Options whose table value is >0 consume an argument;
# a value >1 allows the switch to be given more than once (collected
# into an array ref).  With no $type, arguments are split at "--" into
# args (before) and files (after).
sub argsplit
{
    $state->{args} = [];
    $state->{files} = [];
    $state->{opt} = {};

    return unless( defined($state->{arguments}) and ref $state->{arguments} eq "ARRAY" );

    my $type = shift;

    if ( defined($type) )
    {
        my $opt = {};
        $opt = { A => 0, N => 0, P => 0, R => 0, c => 0, f => 0, l => 0, n => 0, p => 0, s => 0, r => 1, D => 1, d => 1, k => 1, j => 1, } if ( $type eq "co" );
        $opt = { v => 0, l => 0, R => 0 } if ( $type eq "status" );
        $opt = { A => 0, P => 0, C => 0, d => 0, f => 0, l => 0, R => 0, p => 0, k => 1, r => 1, D => 1, j => 1, I => 1, W => 1 } if ( $type eq "update" );
        # BUGFIX(cleanup): the original listed "D => 1" twice in this hash;
        # a duplicate hash key is dead code, so the second copy is removed.
        $opt = { l => 0, R => 0, k => 1, D => 1, r => 2, N => 0 } if ( $type eq "diff" );
        $opt = { c => 0, R => 0, l => 0, f => 0, F => 1, m => 1, r => 1 } if ( $type eq "ci" );
        $opt = { k => 1, m => 1 } if ( $type eq "add" );
        $opt = { f => 0, l => 0, R => 0 } if ( $type eq "remove" );
        $opt = { l => 0, b => 0, h => 0, R => 0, t => 0, N => 0, S => 0, r => 1, d => 1, s => 1, w => 1 } if ( $type eq "log" );

        while ( scalar ( @{$state->{arguments}} ) > 0 )
        {
            my $arg = shift @{$state->{arguments}};

            next if ( $arg eq "--" );
            next unless ( $arg =~ /\S/ );

            # if the argument looks like a switch
            if ( $arg =~ /^-(\w)(.*)/ )
            {
                # if it's a switch that takes an argument
                if ( $opt->{$1} )
                {
                    # If this switch has already been provided
                    if ( $opt->{$1} > 1 and exists ( $state->{opt}{$1} ) )
                    {
                        $state->{opt}{$1} = [ $state->{opt}{$1} ];
                        if ( length($2) > 0 )
                        {
                            push @{$state->{opt}{$1}},$2;
                        } else {
                            push @{$state->{opt}{$1}}, shift @{$state->{arguments}};
                        }
                    } else {
                        # if there's extra data in the arg, use that as the argument for the switch
                        if ( length($2) > 0 )
                        {
                            $state->{opt}{$1} = $2;
                        } else {
                            $state->{opt}{$1} = shift @{$state->{arguments}};
                        }
                    }
                } else {
                    $state->{opt}{$1} = undef;
                }
            }
            else
            {
                push @{$state->{args}}, $arg;
            }
        }
    }
    else
    {
        my $mode = 0;

        foreach my $value ( @{$state->{arguments}} )
        {
            if ( $value eq "--" )
            {
                $mode++;
                next;
            }
            push @{$state->{args}}, $value if ( $mode == 0 );
            push @{$state->{files}}, $value if ( $mode == 1 );
        }
    }
}

# Used by argsfromdir.  Classify one user-supplied path as a file or a
# directory (consulting the sandbox dirMap/entries and the repository's
# revision dir map for the effective sticky tag), then record it into
# $outNameMap (files) / $outDirMap (directories), recursing into
# directory contents via addDirMapFiles.
sub expandArg
{
    my ($updater,$outNameMap,$outDirMap,$path,$isDir) = @_;

    my $fullPath = filecleanup($path);

    # Is it a directory?
    if( defined($state->{dirMap}{$fullPath}) ||
        defined($state->{dirMap}{"$fullPath/"}) )
    {
        # It is a directory in the user's sandbox.
        $isDir=1;

        if(defined($state->{entries}{$fullPath}))
        {
            $log->fatal("Inconsistent file/dir type");
            die "Inconsistent file/dir type";
        }
    }
    elsif(defined($state->{entries}{$fullPath}))
    {
        # It is a file in the user's sandbox.
        $isDir=0;
    }
    my($revDirMap,$otherRevDirMap);
    if(!defined($isDir) || $isDir)
    {
        # Resolve version tree for sticky tag:
        # (for now we only want list of files for the version, not
        # particular versions of those files: assume it is a directory
        # for the moment; ignore Entry's stick tag)

        # Order of precedence of sticky tags:
        #    -A       [head]
        #    -r /tag/
        #    [file entry sticky tag, but that is only relevant to files]
        #    [the tag specified in dir req_Sticky]
        #    [the tag specified in a parent dir req_Sticky]
        #    [head]
        # Also, -r may appear twice (for diff).
        #
        # FUTURE: When/if -j (merges) are supported, we also
        #  need to add relevant files from one or two
        #  versions specified with -j.

        if(exists($state->{opt}{A}))
        {
            $revDirMap=$updater->getRevisionDirMap();
        }
        elsif( defined($state->{opt}{r}) and
               ref $state->{opt}{r} eq "ARRAY" )
        {
            # Two -r's (diff): need file lists for both versions.
            $revDirMap=$updater->getRevisionDirMap($state->{opt}{r}[0]);
            $otherRevDirMap=$updater->getRevisionDirMap($state->{opt}{r}[1]);
        }
        elsif(defined($state->{opt}{r}))
        {
            $revDirMap=$updater->getRevisionDirMap($state->{opt}{r});
        }
        else
        {
            my($sticky)=getDirStickyInfo($fullPath);
            $revDirMap=$updater->getRevisionDirMap($sticky->{tag});
        }

        # Is it a directory?
        if( defined($revDirMap->{$fullPath}) ||
            defined($otherRevDirMap->{$fullPath}) )
        {
            $isDir=1;
        }
    }

    # What to do with it?
    if(!$isDir)
    {
        $outNameMap->{$fullPath}=1;
    }
    else
    {
        $outDirMap->{$fullPath}=1;

        if(defined($revDirMap->{$fullPath}))
        {
            addDirMapFiles($updater,$outNameMap,$outDirMap,
                           $revDirMap->{$fullPath});
        }
        if( defined($otherRevDirMap) &&
            defined($otherRevDirMap->{$fullPath}) )
        {
            addDirMapFiles($updater,$outNameMap,$outDirMap,
                           $otherRevDirMap->{$fullPath});
        }
    }
}

# Used by argsfromdir
# Add entries from dirMap to outNameMap.  Also recurse into entries
# that are subdirectories (unless -l was given).
sub addDirMapFiles
{
    my($updater,$outNameMap,$outDirMap,$dirMap)=@_;

    my($fullName);
    foreach $fullName (keys(%$dirMap))
    {
        my $cleanName=$fullName;
        if(defined($state->{prependdir}))
        {
            if(!($cleanName=~s/^\Q$state->{prependdir}\E//))
            {
                $log->fatal("internal error stripping prependdir");
                die "internal error stripping prependdir";
            }
        }

        if($dirMap->{$fullName} eq "F")
        {
            $outNameMap->{$cleanName}=1;
        }
        elsif($dirMap->{$fullName} eq "D")
        {
            if(!$state->{opt}{l})
            {
                expandArg($updater,$outNameMap,$outDirMap,$cleanName,1);
            }
        }
        else
        {
            $log->fatal("internal error in addDirMapFiles");
            die "internal error in addDirMapFiles";
        }
    }
}

# This method replaces $state->{args} with a directory-expanded
# list of all relevant filenames (recursively unless -d), based
# on $state->{entries}, and the "current" list of files in
# each directory.  "Current" files as determined by
# either the requested (-r/-A) or "req_Sticky" version of
# that directory.
#
# Both the input args and the new output args are relative
# to the cvs-client's CWD, although some of the internal
# computations are relative to the top of the project.
sub argsfromdir
{
    my $updater = shift;

    # Notes about requirements for specific callers:
    #   update # "standard" case (entries; a single -r/-A/default; -l)
    #          # Special case: -d for create missing directories.
    #   diff # 0 or 1 -r's: "standard" case.
    #        # 2 -r's: We could ignore entries (just use the two -r's),
    #        # but it doesn't really matter.
    #   annotate # "standard" case
    #   log # Punting: log -r has a more complex non-"standard"
    #       # meaning, and we don't currently try to support log'ing
    #       # branches at all (need a lot of work to
    #       # support CVS-consistent branch relative version
    #       # numbering).
    #HERE: But we still want to expand directories.  Maybe we should
    #  essentially force "-A".
    #   status # "standard", except that -r/-A/default are not possible.
    #          # Mostly only used to expand entries only)
    #
    # Don't use argsfromdir at all:
    #   add # Explicit arguments required.  Directory args imply add
    #       # the directory itself, not the files in it.
    #   co  # Obtain list directly.
    #   remove # HERE: TEST: MAYBE client does the recursion for us,
    #          # since it only makes sense to remove stuff already in
    #          # the sandobx?
    #   ci # HERE: Similar to remove...
    #      # Don't try to implement the confusing/weird
    #      # ci -r bug er.."feature".

    if(scalar(@{$state->{args}})==0)
    {
        $state->{args} = [ "." ];
    }
    my %allArgs;
    my %allDirs;
    for my $file (@{$state->{args}})
    {
        expandArg($updater,\%allArgs,\%allDirs,$file);
    }

    # Include any entries from sandbox.  Generally client won't
    # send entries that shouldn't be used.
    foreach my $file (keys %{$state->{entries}})
    {
        $allArgs{remove_prependdir($file)} = 1;
    }

    $state->{dirArgs} = \%allDirs;
    $state->{args} = [
        sort {
            # Sort priority: by directory depth, then actual file name:
            my @piecesA=split('/',$a);
            my @piecesB=split('/',$b);

            my $count=scalar(@piecesA);
            my $tmp=scalar(@piecesB);
            return $count<=>$tmp if($count!=$tmp);

            for($tmp=0;$tmp<$count;$tmp++)
            {
                if($piecesA[$tmp] ne $piecesB[$tmp])
                {
                    return $piecesA[$tmp] cmp $piecesB[$tmp]
                }
            }
            return 0;
        } keys(%allArgs) ];
}

## look up directory sticky tag, of either fullPath or a parent:
sub getDirStickyInfo
{
    my($fullPath)=@_;

    $fullPath=~s%/+$%%;
    # Walk up the path until a directory with a dirMap entry is found.
    while($fullPath ne "" && !defined($state->{dirMap}{"$fullPath/"}))
    {
        $fullPath=~s%/?[^/]*$%%;
    }

    if( !defined($state->{dirMap}{"$fullPath/"}) &&
        ( $fullPath eq "" ||
          $fullPath eq "." ) )
    {
        return $state->{dirMap}{""}{stickyInfo};
    }
    else
    {
        return $state->{dirMap}{"$fullPath/"}{stickyInfo};
    }
}

# Resolve precedence of various ways of specifying which version of
# a file you want.  Returns undef (for default head), or a ref to a hash
# that contains "tag" and/or "date" keys.
sub resolveStickyInfo
{
    my($filename,$stickyTag,$stickyDate,$reset) = @_;

    # Order of precedence of sticky tags:
    #    -A       [head]
    #    -r /tag/
    #    [file entry sticky tag]
    #    [the tag specified in dir req_Sticky]
    #    [the tag specified in a parent dir req_Sticky]
    #    [head]

    my $result;
    if($reset)
    {
        # $result=undef;
    }
    elsif( defined($stickyTag) && $stickyTag ne "" )
           # || ( defined($stickyDate) && $stickyDate ne "" )   # TODO
    {
        $result={ 'tag' => (defined($stickyTag)?$stickyTag:undef) };

        # TODO: Convert -D value into the form 2011.04.10.04.46.57,
        #   similar to an entry line's sticky date, without the D prefix.
        #   It sometimes (always?) arrives as something more like
        #   '10 Apr 2011 04:46:57 -0000'...
        # $result={ 'date' => (defined($stickyDate)?$stickyDate:undef) };
    }
    elsif( defined($state->{entries}{$filename}) &&
           defined($state->{entries}{$filename}{tag_or_date}) &&
           $state->{entries}{$filename}{tag_or_date} ne "" )
    {
        my($tagOrDate)=$state->{entries}{$filename}{tag_or_date};
        if($tagOrDate=~/^T([^ ]+)\s*$/)
        {
            $result = { 'tag' => $1 };
        }
        elsif($tagOrDate=~/^D([0-9.]+)\s*$/)
        {
            $result= { 'date' => $1 };
        }
        else
        {
            die "Unknown tag_or_date format\n";
        }
    }
    else
    {
        $result=getDirStickyInfo($filename);
    }

    return $result;
}

# Convert a stickyInfo (ref to a hash) as returned by resolveStickyInfo into
# a form appropriate for the sticky tag field of an Entries
# line (field index 5, 0-based).
sub getStickyTagOrDate
{
    my($stickyInfo)=@_;

    my $result;
    if(defined($stickyInfo) && defined($stickyInfo->{tag}))
    {
        $result="T$stickyInfo->{tag}";
    }
    # TODO: When/if we actually pick versions by {date} properly,
    #   also handle it here:
    #     "D$stickyInfo->{date}" (example: "D2011.04.13.20.37.07").
    else
    {
        $result="";
    }

    return $result;
}

# This method cleans up the $state variable after a command that uses arguments has run
sub statecleanup
{
    $state->{files} = [];
    $state->{dirArgs} = {};
    $state->{args} = [];
    $state->{arguments} = [];
    $state->{entries} = {};
    $state->{dirMap} = {};
}

# Return working directory CVS revision "1.X" out
# of the working directory "entries" state, for the given filename.
# This is prefixed with a dash if the file is scheduled for removal
# when it is committed.
sub revparse
{
    my $filename = shift;

    return $state->{entries}{$filename}{revision};
}

# This method takes a file hash and does a CVS "file transfer".  Its
# exact behaviour depends on a second, optional hash table argument:
# - If $options->{targetfile}, dump the contents to that file;
# - If $options->{print}, use M/MT to transmit the contents one line
#   at a time;
# - Otherwise, transmit the size of the file, followed by the file
#   contents.
sub transmitfile
{
    my $filehash = shift;
    my $options = shift;

    if ( defined ( $filehash ) and $filehash eq "deleted" )
    {
        $log->warn("filehash is 'deleted'");
        return;
    }

    die "Need filehash" unless ( defined ( $filehash ) and $filehash =~ /^[a-zA-Z0-9]{40}$/ );

    my $type = `git cat-file -t $filehash`;
    chomp $type;

    die ( "Invalid type '$type' (expected 'blob')" ) unless ( defined ( $type ) and $type eq "blob" );

    my $size = `git cat-file -s $filehash`;
    chomp $size;

    $log->debug("transmitfile($filehash) size=$size, type=$type");

    if ( open my $fh, '-|', "git", "cat-file", "blob", $filehash )
    {
        if ( defined ( $options->{targetfile} ) )
        {
            my $targetfile = $options->{targetfile};
            open NEWFILE, ">", $targetfile or die("Couldn't open '$targetfile' for writing : $!");
            print NEWFILE $_ while ( <$fh> );
            close NEWFILE or die("Failed to write '$targetfile': $!");
        } elsif ( defined ( $options->{print} ) && $options->{print} ) {
            while ( <$fh> )
            {
                if( /\n\z/ )
                {
                    print 'M ', $_;
                }
                else
                {
                    # Last line lacks a newline: MT sends it verbatim.
                    print 'MT text ', $_, "\n";
                }
            }
        } else {
            print "$size\n";
            print while ( <$fh> );
        }
        close $fh or die ("Couldn't close filehandle for transmitfile(): $!");
    } else {
        die("Couldn't execute git-cat-file");
    }
}

# This method takes a file name, and returns ( $dirpart, $filepart ) which
# refers to the directory portion and the file portion of the filename
# respectively
sub filenamesplit
{
    my $filename = shift;
    my $fixforlocaldir = shift;

    my ( $filepart, $dirpart ) = ( $filename, "." );
    ( $filepart, $dirpart ) = ( $2, $1 ) if ( $filename =~ /(.*)\/(.*)/ );
    $dirpart .= "/";

    if ( $fixforlocaldir )
    {
        $dirpart =~ s/^$state->{prependdir}//;
    }

    return ( $filepart, $dirpart );
}

# Cleanup various junk in filename (try to canonicalize it), and
# add prependdir to accommodate running CVS client from a
# subdirectory (so the output is relative to top directory of the project).
sub filecleanup
{
    my $filename = shift;

    return undef unless(defined($filename));
    if ( $filename =~ /^\// )
    {
        print "E absolute filenames '$filename' not supported by server\n";
        return undef;
    }

    if($filename eq ".")
    {
        $filename="";
    }
    $filename =~ s/^\.\///g;
    $filename =~ s%/+%/%g;
    $filename = $state->{prependdir} . $filename;
    $filename =~ s%/$%%;
    return $filename;
}

# Remove prependdir from the path, so that it is relative to the directory
# the CVS client was started from, rather than the top of the project.
# Essentially the inverse of filecleanup().
sub remove_prependdir
{
    my($path) = @_;
    if(defined($state->{prependdir}) && $state->{prependdir} ne "")
    {
        my($pre)=$state->{prependdir};
        $pre=~s%/$%%;
        if(!($path=~s%^\Q$pre\E/?%%))
        {
            $log->fatal("internal error missing prependdir");
            die("internal error missing prependdir");
        }
    }
    return $path;
}

# Sanity-check that CVSROOT is set and that GIT_DIR points inside it;
# protocol-errors out (and cleans up any work tree) otherwise.
sub validateGitDir
{
    if( !defined($state->{CVSROOT}) )
    {
        print "error 1 CVSROOT not specified\n";
        cleanupWorkTree();
        exit;
    }
    if( $ENV{GIT_DIR} ne ($state->{CVSROOT} . '/') )
    {
        print "error 1 Internally inconsistent CVSROOT\n";
        cleanupWorkTree();
        exit;
    }
}

# Setup working directory in a work tree with the requested version
# loaded in the index.
# Setup working directory in a work tree with the requested version
# loaded in the index.  $ver may be false, in which case no tree is
# read (req_annotate reads a tree per file itself).
sub setupWorkTree
{
    my ($ver) = @_;

    validateGitDir();

    if( ( defined($work->{state}) && $work->{state} != 1 ) ||
        defined($work->{tmpDir}) )
    {
        $log->warn("Bad work tree state management");
        print "error 1 Internal setup multiple work trees without cleanup\n";
        cleanupWorkTree();
        exit;
    }

    $work->{workDir} = tempdir ( DIR => $TEMP_DIR );

    if( !defined($work->{index}) )
    {
        (undef, $work->{index}) = tempfile ( DIR => $TEMP_DIR, OPEN => 0 );
    }

    chdir $work->{workDir} or
        die "Unable to chdir to $work->{workDir}\n";

    $log->info("Setting up GIT_WORK_TREE as '.' in '$work->{workDir}', index file is '$work->{index}'");

    $ENV{GIT_WORK_TREE} = ".";
    $ENV{GIT_INDEX_FILE} = $work->{index};
    $work->{state} = 2;

    if($ver)
    {
        system("git","read-tree",$ver);
        unless ($? == 0)
        {
            $log->warn("Error running git-read-tree");
            die "Error running git-read-tree $ver in $work->{workDir} $!\n";
        }
    }
    # else, req_annotate reads tree for each file
}

# Ensure current directory is in some kind of working directory,
# with a recent version loaded in the index.
sub ensureWorkTree
{
    if( defined($work->{tmpDir}) )
    {
        $log->warn("Bad work tree state management [ensureWorkTree()]");
        print "error 1 Internal setup multiple dirs without cleanup\n";
        cleanupWorkTree();
        exit;
    }
    if( $work->{state} )
    {
        return;
    }

    validateGitDir();

    if( !defined($work->{emptyDir}) )
    {
        $work->{emptyDir} = tempdir ( DIR => $TEMP_DIR, OPEN => 0);
    }
    chdir $work->{emptyDir} or
        die "Unable to chdir to $work->{emptyDir}\n";

    my $ver = `git show-ref -s refs/heads/$state->{module}`;
    chomp $ver;
    if ($ver !~ /^[0-9a-f]{40}$/)
    {
        # BUGFIX: the log message previously said "refs/head..." (missing
        # "s/"), which did not match the command actually executed above.
        $log->warn("Error from git show-ref -s refs/heads/$state->{module}");
        print "error 1 cannot find the current HEAD of module";
        cleanupWorkTree();
        exit;
    }

    if( !defined($work->{index}) )
    {
        (undef, $work->{index}) = tempfile ( DIR => $TEMP_DIR, OPEN => 0 );
    }

    $ENV{GIT_WORK_TREE} = ".";
    $ENV{GIT_INDEX_FILE} = $work->{index};
    $work->{state} = 1;

    system("git","read-tree",$ver);
    unless ($? == 0)
    {
        die "Error running git-read-tree $ver $!\n";
    }
}

# Cleanup working directory that is not needed any longer.
sub cleanupWorkTree
{
    if( ! $work->{state} )
    {
        return;
    }

    chdir "/" or die "Unable to chdir '/'\n";

    if( defined($work->{workDir}) )
    {
        rmtree( $work->{workDir} );
        undef $work->{workDir};
    }
    undef $work->{state};
}

# Setup a temporary directory (not a working tree), typically for
# merging dirty state as in req_update.
sub setupTmpDir
{
    $work->{tmpDir} = tempdir ( DIR => $TEMP_DIR );
    chdir $work->{tmpDir} or die "Unable to chdir $work->{tmpDir}\n";

    return $work->{tmpDir};
}

# Clean up a previously setupTmpDir.  Restore previous work tree if
# appropriate.
sub cleanupTmpDir
{
    if ( !defined($work->{tmpDir}) )
    {
        $log->warn("cleanup tmpdir that has not been setup");
        die "Cleanup tmpDir that has not been setup\n";
    }
    if( defined($work->{state}) )
    {
        if( $work->{state} == 1 )
        {
            chdir $work->{emptyDir} or
                die "Unable to chdir to $work->{emptyDir}\n";
        }
        elsif( $work->{state} == 2 )
        {
            # BUGFIX: the die message previously named emptyDir although
            # this branch chdirs to workDir.
            chdir $work->{workDir} or
                die "Unable to chdir to $work->{workDir}\n";
        }
        else
        {
            $log->warn("Inconsistent work dir state");
            die "Inconsistent work dir state\n";
        }
    }
    else
    {
        chdir "/" or die "Unable to chdir '/'\n";
    }
}

# Given a path, this function returns a string containing the kopts
# that should go into that path's Entries line.  For example, a binary
# file should get -kb.
sub kopts_from_path
{
    my ($path, $srcType, $name) = @_;

    # First preference: gitcvs.usecrlfattr + per-path git attributes.
    # "unset" text/crlf attr => binary (-kb); an explicit eol, "set" or
    # "input" => text (no kopts); anything else falls through to the
    # allbinary handling below.
    if ( defined ( $cfg->{gitcvs}{usecrlfattr} ) and
         $cfg->{gitcvs}{usecrlfattr} =~ /\s*(1|true|yes)\s*$/i )
    {
        my ($val) = check_attr( "text", $path );
        if ( $val eq "unspecified" )
        {
            # Fall back to the older "crlf" attribute name.
            $val = check_attr( "crlf", $path );
        }
        if ( $val eq "unset" )
        {
            return "-kb"
        }
        elsif ( check_attr( "eol", $path ) ne "unspecified" ||
                $val eq "set" || $val eq "input" )
        {
            return "";
        }
        else
        {
            $log->info("Unrecognized check_attr crlf $path : $val");
        }
    }

    # Second preference: gitcvs.allbinary = 1/true/yes (everything -kb)
    # or "guess" (content sniff via is_binary).
    if ( defined ( $cfg->{gitcvs}{allbinary} ) )
    {
        if( ($cfg->{gitcvs}{allbinary} =~ /^\s*(1|true|yes)\s*$/i) )
        {
            return "-kb";
        }
        elsif( ($cfg->{gitcvs}{allbinary} =~ /^\s*guess\s*$/i) )
        {
            if( is_binary($srcType,$name) )
            {
                $log->debug("... as binary");
                return "-kb";
            }
            else
            {
                $log->debug("... as text");
            }
        }
    }
    # Return "" to give no special treatment to any path
    return "";
}

# Query a single git attribute ($attr) for $path via `git check-attr`,
# returning the attribute value string, or undef if the pipe could not
# be opened.  Requires a work tree (ensureWorkTree).
sub check_attr
{
    my ($attr,$path) = @_;
    ensureWorkTree();
    if ( open my $fh, '-|', "git", "check-attr", $attr, "--", $path )
    {
        my $val = <$fh>;
        close $fh;
        # Output format is "<path>: <attr>: <value>"; keep just the value.
        $val =~ s/.*: ([^:\r\n]*)\s*$/$1/;
        return $val;
    }
    else
    {
        return undef;
    }
}

# This should have the same heuristics as convert.c:is_binary() and related.
# Note that the bare CR test is done by callers in convert.c.
sub is_binary
{
    my ($srcType,$name) = @_;
    $log->debug("is_binary($srcType,$name)");

    # Minimize amount of interpreted code run in the inner per-character
    # loop for large files, by totalling each character value and
    # then analyzing the totals.
    my @counts;
    my $i;
    for($i=0;$i<256;$i++)
    {
        $counts[$i]=0;
    }

    my $fh = open_blob_or_die($srcType,$name);
    my $line;
    while( defined($line=<$fh>) )
    {
        # Any '\0' and bare CR are considered binary.
        if( $line =~ /\0|(\r[^\n])/ )
        {
            close($fh);
            return 1;
        }

        # Count up each character in the line:
        my $len=length($line);
        for($i=0;$i<$len;$i++)
        {
            $counts[ord(substr($line,$i,1))]++;
        }
    }
    close $fh;

    # Don't count CR and LF as either printable/nonprintable
    $counts[ord("\n")]=0;
    $counts[ord("\r")]=0;

    # Categorize individual character count into printable and nonprintable:
    my $printable=0;
    my $nonprintable=0;
    for($i=0;$i<256;$i++)
    {
        if( $i < 32 &&
            $i != ord("\b") &&
            $i != ord("\t") &&
            $i != 033 &&       # ESC
            $i != 014 )        # FF
        {
            $nonprintable+=$counts[$i];
        }
        elsif( $i==127 )  # DEL
        {
            $nonprintable+=$counts[$i];
        }
        else
        {
            $printable+=$counts[$i];
        }
    }

    # Binary if more than ~1/128 of the bytes are nonprintable
    # (same threshold as git's convert.c heuristic).
    return ($printable >> 7) < $nonprintable;
}

# Returns open file handle.  Possible invocations:
#   - open_blob_or_die("file",$filename);
#   - open_blob_or_die("sha1",$filehash);
sub open_blob_or_die
{
    my ($srcType,$name) = @_;
    my ($fh);
    if( $srcType eq "file" )
    {
        if( !open $fh,"<",$name )
        {
            $log->warn("Unable to open file $name: $!");
            die "Unable to open file $name: $!\n";
        }
    }
    elsif( $srcType eq "sha1" )
    {
        unless ( defined ( $name ) and $name =~ /^[a-zA-Z0-9]{40}$/ )
        {
            $log->warn("Need filehash");
            die "Need filehash\n";
        }

        my $type = `git cat-file -t $name`;
        chomp $type;

        unless ( defined ( $type ) and $type eq "blob" )
        {
            $log->warn("Invalid type '$type' for '$name'");
            die ( "Invalid type '$type' (expected 'blob')" )
        }

        my $size = `git cat-file -s $name`;
        chomp $size;

        $log->debug("open_blob_or_die($name) size=$size, type=$type");

        unless( open $fh, '-|', "git", "cat-file", "blob", $name )
        {
            $log->warn("Unable to open sha1 $name");
            die "Unable to open sha1 $name\n";
        }
    }
    else
    {
        $log->warn("Unknown type of blob source: $srcType");
        die "Unknown type of blob source: $srcType\n";
    }
    return $fh;
}

# Generate a CVS author name from Git author information, by taking the local
# part of the email address and replacing characters not in the Portable
# Filename Character Set (see IEEE Std 1003.1-2001, 3.276) by underscores. CVS
# Login names are Unix login names, which should be restricted to this
# character set.
sub cvs_author
{
    my $author_line = shift;
    # Local part of the email address (everything between '<' and '@'/'>').
    (my $author) = $author_line =~ /<([^@>]*)/;

    $author =~ s/[^-a-zA-Z0-9_.]/_/g;
    $author =~ s/^-/_/;

    $author;
}

# Decode a CVS pserver scrambled password (format 'A'): each byte of
# the payload is mapped through the fixed substitution table below.
sub descramble
{
    # This table is from src/scramble.c in the CVS source
    my @SHIFTS = (
          0,   1,   2,   3,   4,   5,   6,   7,   8,   9,  10,  11,  12,  13,  14,  15,
         16,  17,  18,  19,  20,  21,  22,  23,  24,  25,  26,  27,  28,  29,  30,  31,
        114, 120,  53,  79,  96, 109,  72, 108,  70,  64,  76,  67, 116,  74,  68,  87,
        111,  52,  75, 119,  49,  34,  82,  81,  95,  65, 112,  86, 118, 110, 122, 105,
         41,  57,  83,  43,  46, 102,  40,  89,  38, 103,  45,  50,  42, 123,  91,  35,
        125,  55,  54,  66, 124, 126,  59,  47,  92,  71, 115,  78,  88, 107, 106,  56,
         36, 121, 117, 104, 101, 100,  69,  73,  99,  63,  94,  93,  39,  37,  61,  48,
         58, 113,  32,  90,  44,  98,  60,  51,  33,  97,  62,  77,  84,  80,  85, 223,
        225, 216, 187, 166, 229, 189, 222, 188, 141, 249, 148, 200, 184, 136, 248, 190,
        199, 170, 181, 204, 138, 232, 218, 183, 255, 234, 220, 247, 213, 203, 226, 193,
        174, 172, 228, 252, 217, 201, 131, 230, 197, 211, 145, 238, 161, 179, 160, 212,
        207, 221, 254, 173, 202, 146, 224, 151, 140, 196, 205, 130, 135, 133, 143, 246,
        192, 159, 244, 239, 185, 168, 215, 144, 139, 165, 180, 157, 147, 186, 214, 176,
        227, 231, 219, 169, 175, 156, 206, 198, 129, 164, 150, 210, 154, 177, 134, 127,
        182, 128, 158, 208, 162, 132, 167, 209, 149, 241, 153, 251, 237, 236, 171, 195,
        243, 233, 253, 240, 194, 250, 191, 155, 142, 137, 245, 235, 163, 242, 178, 152
    );
    my ($str) = @_;

    # This should never happen, the same password format (A) has been
    # used by CVS since the beginning of time
    {
        my $fmt = substr($str, 0, 1);
        die "invalid password format `$fmt'" unless $fmt eq 'A';
    }

    my @str = unpack "C*", substr($str, 1);
    my $ret = join '', map { chr $SHIFTS[$_] } @str;
    return $ret;
}

# Test if the (deep) values of two references to a hash are the same.
# Returns 1 when both hash refs are undef, or when both are defined
# with identical key sets and (string-)equal values; undef otherwise.
sub refHashEqual
{
    my($v1,$v2) = @_;

    my $out;
    if(!defined($v1))
    {
        if(!defined($v2))
        {
            $out=1;
        }
    }
    elsif( !defined($v2) ||
           scalar(keys(%{$v1})) != scalar(keys(%{$v2})) )
    {
        # $out=undef;
    }
    else
    {
        $out=1;

        my $key;
        foreach $key (keys(%{$v1}))
        {
            # Differ if: key missing, definedness differs, or values
            # compare unequal as strings.
            if( !exists($v2->{$key}) ||
                defined($v1->{$key}) ne defined($v2->{$key}) ||
                ( defined($v1->{$key}) &&
                  $v1->{$key} ne $v2->{$key} ) )
            {
                $out=undef;
                last;
            }
        }
    }

    return $out;
}


package GITCVS::log;

####
#### Copyright The Open University UK - 2006.
####
#### Authors: Martyn Smith
####          Martin Langhoff
####
####

use strict;
use warnings;

=head1 NAME

GITCVS::log

=head1 DESCRIPTION

This module provides very crude logging with a similar interface to
Log::Log4perl

=head1 METHODS

=cut

=head2 new

Creates a new log object, optionally you can specify a filename here to
indicate the file to log to. If no log file is specified, you can specify one
later with method setfile, or indicate you no longer want logging with method
nofile.

Until one of these methods is called, all log calls will buffer messages ready
to write out.

=cut
sub new
{
    my $class = shift;
    my $filename = shift;

    my $self = {};

    bless $self, $class;

    if ( defined ( $filename ) )
    {
        open $self->{fh}, ">>", $filename or die("Couldn't open '$filename' for writing : $!");
    }

    return $self;
}

=head2 setfile

This methods takes a filename, and attempts to open that file as the log file.
If successful, all buffered data is written out to the file, and any further
logging is written directly to the file.

=cut
sub setfile
{
    my $self = shift;
    my $filename = shift;

    if ( defined ( $filename ) )
    {
        open $self->{fh}, ">>", $filename or die("Couldn't open '$filename' for writing : $!");
    }

    return unless ( defined ( $self->{buffer} ) and ref $self->{buffer} eq "ARRAY" );

    # Flush anything buffered before the file was available.
    while ( my $line = shift @{$self->{buffer}} )
    {
        print {$self->{fh}} $line;
    }
}

=head2 nofile

This method indicates no logging is going to be used. It flushes any entries in
the internal buffer, and sets a flag to ensure no further data is put there.

=cut
sub nofile
{
    my $self = shift;

    $self->{nolog} = 1;

    return unless ( defined ( $self->{buffer} ) and ref $self->{buffer} eq "ARRAY" );

    $self->{buffer} = [];
}

=head2 _logopen

Internal method. Returns true if the log file is open, false otherwise.

=cut
sub _logopen
{
    my $self = shift;

    return 1 if ( defined ( $self->{fh} ) and ref $self->{fh} eq "GLOB" );
    return 0;
}

=head2 debug info warn fatal

These four methods are wrappers to _log. They provide the actual interface for
logging data.

=cut
sub debug { my $self = shift; $self->_log("debug", @_); }
sub info  { my $self = shift; $self->_log("info" , @_); }
sub warn  { my $self = shift; $self->_log("warn" , @_); }
sub fatal { my $self = shift; $self->_log("fatal", @_); }

=head2 _log

This is an internal method called by the logging functions. It generates a
timestamp and pushes the logged line either to file, or internal buffer.

=cut
sub _log
{
    my $self = shift;
    my $level = shift;

    return if ( $self->{nolog} );

    my @time = localtime;
    my $timestring = sprintf("%4d-%02d-%02d %02d:%02d:%02d : %-5s",
        $time[5] + 1900,
        $time[4] + 1,
        $time[3],
        $time[2],
        $time[1],
        $time[0],
        uc $level,
    );

    if ( $self->_logopen )
    {
        print {$self->{fh}} $timestring . " - " . join(" ",@_) . "\n";
    } else {
        push @{$self->{buffer}}, $timestring . " - " . join(" ",@_) . "\n";
    }
}

=head2 DESTROY

This method simply closes the file handle if one is open

=cut
sub DESTROY
{
    my $self = shift;

    if ( $self->_logopen )
    {
        close $self->{fh};
    }
}

package GITCVS::updater;

####
#### Copyright The Open University UK - 2006.
####
#### Authors: Martyn Smith
####          Martin Langhoff
####
####

use strict;
use warnings;
use DBI;

=head1 METHODS

=cut

=head2 new

=cut
sub new
{
    my $class = shift;
    my $config = shift;
    my $module = shift;
    my $log = shift;

    die "Need to specify a git repository" unless ( defined($config) and -d $config );
    die "Need to specify a module" unless ( defined($module) );

    $class = ref($class) || $class;

    my $self = {};

    bless $self, $class;

    # Whitelist of table/index basenames; tablename() refuses anything else.
    $self->{valid_tables} = {'revision' => 1,
                             'revision_ix1' => 1,
                             'revision_ix2' => 1,
                             'head' => 1,
                             'head_ix1' => 1,
                             'properties' => 1,
                             'commitmsgs' => 1};

    $self->{module} = $module;
    $self->{git_path} = $config . "/";

    $self->{log} = $log;

    die "Git repo '$self->{git_path}' doesn't exist" unless ( -d $self->{git_path} );

    # Stores full sha1's for various branch/tag names, abbreviations, etc:
    $self->{commitRefCache} = {};

    # DB settings: per-access-method gitcvs config wins over the generic
    # gitcvs.* key, which wins over the built-in default.
    $self->{dbdriver} = $cfg->{gitcvs}{$state->{method}}{dbdriver} ||
        $cfg->{gitcvs}{dbdriver} || "SQLite";
    $self->{dbname} = $cfg->{gitcvs}{$state->{method}}{dbname} ||
        $cfg->{gitcvs}{dbname} || "%Ggitcvs.%m.sqlite";
    $self->{dbuser} = $cfg->{gitcvs}{$state->{method}}{dbuser} ||
        $cfg->{gitcvs}{dbuser} || "";
    $self->{dbpass} = $cfg->{gitcvs}{$state->{method}}{dbpass} ||
        $cfg->{gitcvs}{dbpass} || "";
    $self->{dbtablenameprefix} = $cfg->{gitcvs}{$state->{method}}{dbtablenameprefix} ||
        $cfg->{gitcvs}{dbtablenameprefix} || "";
    # %-escapes allowed in dbname/dbuser/dbtablenameprefix:
    #   %m module, %a access method, %u user, %G git dir, %g mangled git dir
    my %mapping = ( m => $module,
                    a => $state->{method},
                    u => getlogin || getpwuid($<) || $<,
                    G => $self->{git_path},
                    g => mangle_dirname($self->{git_path}),
                    );
    $self->{dbname} =~ s/%([mauGg])/$mapping{$1}/eg;
    $self->{dbuser} =~ s/%([mauGg])/$mapping{$1}/eg;
    $self->{dbtablenameprefix} =~ s/%([mauGg])/$mapping{$1}/eg;
    $self->{dbtablenameprefix} = mangle_tablename($self->{dbtablenameprefix});

    # Keep the DSN unambiguous: forbid DBI metacharacters in the pieces.
    die "Invalid char ':' in dbdriver" if $self->{dbdriver} =~ /:/;
    die "Invalid char ';' in dbname" if $self->{dbname} =~ /;/;
    $self->{dbh} = DBI->connect("dbi:$self->{dbdriver}:dbname=$self->{dbname}",
                                $self->{dbuser},
                                $self->{dbpass});
    die "Error connecting to database\n" unless defined $self->{dbh};

    $self->{tables} = {};
    foreach my $table ( keys %{$self->{dbh}->table_info(undef,undef,undef,'TABLE')->fetchall_hashref('TABLE_NAME')} )
    {
        $self->{tables}{$table} = 1;
    }

    # Construct the revision table if required
    # The revision table stores an entry for each file, each time that file
    # changes.
    #   numberOfRecords = O( numCommits * averageNumChangedFilesPerCommit )
    # This is not sufficient to support "-r {commithash}" for any
    # files except files that were modified by that commit (also,
    # some places in the code ignore/effectively strip out -r in
    # some cases, before it gets passed to getmeta()).
    # The "filehash" field typically has a git blob hash, but can also
    # be set to "dead" to indicate that the given version of the file
    # should not exist in the sandbox.
    unless ( $self->{tables}{$self->tablename("revision")} )
    {
        my $tablename = $self->tablename("revision");
        my $ix1name = $self->tablename("revision_ix1");
        my $ix2name = $self->tablename("revision_ix2");
        $self->{dbh}->do("
            CREATE TABLE $tablename (
                name       TEXT NOT NULL,
                revision   INTEGER NOT NULL,
                filehash   TEXT NOT NULL,
                commithash TEXT NOT NULL,
                author     TEXT NOT NULL,
                modified   TEXT NOT NULL,
                mode       TEXT NOT NULL
            )
        ");
        $self->{dbh}->do("
            CREATE INDEX $ix1name
            ON $tablename (name,revision)
        ");
        $self->{dbh}->do("
            CREATE INDEX $ix2name
            ON $tablename (name,commithash)
        ");
    }

    # Construct the head table if required
    # The head table (along with the "last_commit" entry in the property
    # table) is the persisted working state of the "sub update" subroutine.
    # All of it's data is read entirely first, and completely recreated
    # last, every time "sub update" runs.
    # This is also used by "sub getmeta" when it is asked for the latest
    # version of a file (as opposed to some specific version).
    # Another way of thinking about it is as a single slice out of
    # "revisions", giving just the most recent revision information for
    # each file.
    unless ( $self->{tables}{$self->tablename("head")} )
    {
        my $tablename = $self->tablename("head");
        my $ix1name = $self->tablename("head_ix1");
        $self->{dbh}->do("
            CREATE TABLE $tablename (
                name       TEXT NOT NULL,
                revision   INTEGER NOT NULL,
                filehash   TEXT NOT NULL,
                commithash TEXT NOT NULL,
                author     TEXT NOT NULL,
                modified   TEXT NOT NULL,
                mode       TEXT NOT NULL
            )
        ");
        $self->{dbh}->do("
            CREATE INDEX $ix1name
            ON $tablename (name)
        ");
    }

    # Construct the properties table if required
    #  - "last_commit" - Used by "sub update".
    unless ( $self->{tables}{$self->tablename("properties")} )
    {
        my $tablename = $self->tablename("properties");
        $self->{dbh}->do("
            CREATE TABLE $tablename (
                key        TEXT NOT NULL PRIMARY KEY,
                value      TEXT
            )
        ");
    }

    # Construct the commitmsgs table if required
    # The commitmsgs table is only used for merge commits, since
    # "sub update" will only keep one branch of parents.  Shortlogs
    # for ignored commits (i.e. not on the chosen branch) will be used
    # to construct a replacement "collapsed" merge commit message,
    # which will be stored in this table.  See also "sub commitmessage".
    unless ( $self->{tables}{$self->tablename("commitmsgs")} )
    {
        my $tablename = $self->tablename("commitmsgs");
        $self->{dbh}->do("
            CREATE TABLE $tablename (
                key        TEXT NOT NULL PRIMARY KEY,
                value      TEXT
            )
        ");
    }

    return $self;
}

=head2 tablename

=cut
sub tablename
{
    my $self = shift;
    my $name = shift;

    # Only whitelisted basenames get the configured prefix applied;
    # anything else yields undef (callers treat that as "no such table").
    if (exists $self->{valid_tables}{$name}) {
        return $self->{dbtablenameprefix} . $name;
    } else {
        return undef;
    }
}

=head2 update

Bring the database up to date with the latest changes from
the git repository.

Internal working state is read out of the "head" table and the
"last_commit" property, then it updates "revisions" based on that, and
finally it writes the new internal state back to the "head" table
so it can be used as a starting point the next time update is called.
=cut

# update() synchronizes the SQLite metadata tables with the git repository:
# it walks every commit since the last recorded run (property "last_commit"),
# assigns CVS revision numbers to changed files, and rebuilds the "head"
# table.  Runs inside one DB transaction.  Returns 1 early when up to date.
#
# NOTE(review): the archived copy of this file had the readline operators
# stripped (e.g. "while ()", "my $name = ;"); the <GITLOG>/<FILELIST> targets
# below were restored to match upstream git-cvsserver.perl — confirm.
sub update
{
    my $self = shift;

    # first lets get the commit list
    $ENV{GIT_DIR} = $self->{git_path};

    my $commitsha1 = `git rev-parse $self->{module}`;
    chomp $commitsha1;

    my $commitinfo = `git cat-file commit $self->{module} 2>&1`;
    unless ( $commitinfo =~ /tree\s+[a-zA-Z0-9]{40}/ )
    {
        die("Invalid module '$self->{module}'");
    }

    my $lastcommit = $self->_get_prop("last_commit");

    if (defined $lastcommit && $lastcommit eq $commitsha1) {
        # up-to-date
        # invalidate the gethead cache
        $self->clearCommitRefCaches();
        return 1;
    }

    # Start exclusive lock here...
    $self->{dbh}->begin_work() or die "Cannot lock database for BEGIN";

    # TODO: log processing is memory bound
    # if we can parse into a 2nd file that is in reverse order
    # we can probably do something really efficient
    my @git_log_params = ('--pretty', '--parents', '--topo-order');

    if (defined $lastcommit) {
        push @git_log_params, "$lastcommit..$self->{module}";
    } else {
        push @git_log_params, $self->{module};
    }
    # git-rev-list is the backend / plumbing version of git-log
    open(my $gitLogPipe, '-|', 'git', 'rev-list', @git_log_params)
                or die "Cannot call git-rev-list: $!";
    my @commits = readCommits($gitLogPipe);
    close $gitLogPipe;

    # Now all the commits are in the @commits bucket
    # ordered by time DESC. for each commit that needs processing,
    # determine whether it's following the last head we've seen or if
    # it's on its own branch, grab a file list, and add whatever's changed
    # NOTE: $lastcommit refers to the last commit from previous run
    #       $lastpicked is the last commit we picked in this run
    my $lastpicked;
    my $head = {};
    if (defined $lastcommit) {
        $lastpicked = $lastcommit;
    }

    my $committotal = scalar(@commits);
    my $commitcount = 0;

    # Load the head table into $head (for cached lookups during the update process)
    foreach my $file ( @{$self->gethead(1)} )
    {
        $head->{$file->{name}} = $file;
    }

    foreach my $commit ( @commits )
    {
        $self->{log}->debug("GITCVS::updater - Processing commit $commit->{hash} (" . (++$commitcount) . " of $committotal)");
        if (defined $lastpicked)
        {
            if (!in_array($lastpicked, @{$commit->{parents}}))
            {
                # skip, we'll see this delta
                # as part of a merge later
                # warn "skipping off-track $commit->{hash}\n";
                next;
            } elsif (@{$commit->{parents}} > 1) {
                # it is a merge commit, for each parent that is
                # not $lastpicked (not given a CVS revision number),
                # see if we can get a log
                # from the merge-base to that parent to put it
                # in the message as a merge summary.
                my @parents = @{$commit->{parents}};
                foreach my $parent (@parents) {
                    if ($parent eq $lastpicked) {
                        next;
                    }
                    # git-merge-base can potentially (but rarely) throw
                    # several candidate merge bases. let's assume
                    # that the first one is the best one.
                    my $base = eval {
                        safe_pipe_capture('git', 'merge-base',
                                          $lastpicked, $parent);
                    };
                    # The two branches may not be related at all,
                    # in which case merge base simply fails to find
                    # any, but that's Ok.
                    next if ($@);

                    chomp $base;
                    if ($base) {
                        my @merged;
                        # print "want to log between $base $parent \n";
                        open(GITLOG, '-|', 'git', 'log', '--pretty=medium', "$base..$parent")
                          or die "Cannot call git-log: $!";
                        my $mergedhash;
                        while (<GITLOG>) {
                            chomp;
                            if (!defined $mergedhash) {
                                if (m/^commit\s+(.+)$/) {
                                    $mergedhash = $1;
                                } else {
                                    next;
                                }
                            } else {
                                # grab the first line that looks non-rfc822
                                # aka has content after leading space
                                if (m/^\s+(\S.*)$/) {
                                    my $title = $1;
                                    $title = substr($title,0,100); # truncate
                                    unshift @merged, "$mergedhash $title";
                                    undef $mergedhash;
                                }
                            }
                        }
                        close GITLOG;
                        if (@merged) {
                            $commit->{mergemsg} = $commit->{message};
                            $commit->{mergemsg} .= "\nSummary of merged commits:\n\n";
                            foreach my $summary (@merged) {
                                $commit->{mergemsg} .= "\t$summary\n";
                            }
                            $commit->{mergemsg} .= "\n\n";
                            # print "Message for $commit->{hash} \n$commit->{mergemsg}";
                        }
                    }
                }
            }
        }

        # convert the date to CVS-happy format
        my $cvsDate = convertToCvsDate($commit->{date});

        if ( defined ( $lastpicked ) )
        {
            # Incremental case: only files touched between $lastpicked and
            # this commit.  -z: NUL-terminated records (name on its own
            # record after the status record).
            my $filepipe = open(FILELIST, '-|', 'git', 'diff-tree', '-z', '-r', $lastpicked, $commit->{hash}) or die("Cannot call git-diff-tree : $!");
            local ($/) = "\0";
            while ( <FILELIST> )
            {
                chomp;
                unless ( /^:\d{6}\s+([0-7]{6})\s+[a-f0-9]{40}\s+([a-f0-9]{40})\s+(\w)$/o )
                {
                    die("Couldn't process git-diff-tree line : $_");
                }
                my ($mode, $hash, $change) = ($1, $2, $3);
                my $name = <FILELIST>;
                chomp($name);

                # $log->debug("File mode=$mode, hash=$hash, change=$change, name=$name");

                my $dbMode = convertToDbMode($mode);

                if ( $change eq "D" )
                {
                    #$log->debug("DELETE   $name");
                    $head->{$name} = {
                        name       => $name,
                        revision   => $head->{$name}{revision} + 1,
                        filehash   => "deleted",
                        commithash => $commit->{hash},
                        modified   => $cvsDate,
                        author     => $commit->{author},
                        mode       => $dbMode,
                    };
                    $self->insert_rev($name, $head->{$name}{revision}, $hash, $commit->{hash}, $cvsDate, $commit->{author}, $dbMode);
                }
                elsif ( $change eq "M" || $change eq "T" )
                {
                    #$log->debug("MODIFIED $name");
                    $head->{$name} = {
                        name       => $name,
                        revision   => $head->{$name}{revision} + 1,
                        filehash   => $hash,
                        commithash => $commit->{hash},
                        modified   => $cvsDate,
                        author     => $commit->{author},
                        mode       => $dbMode,
                    };
                    $self->insert_rev($name, $head->{$name}{revision}, $hash, $commit->{hash}, $cvsDate, $commit->{author}, $dbMode);
                }
                elsif ( $change eq "A" )
                {
                    #$log->debug("ADDED    $name");
                    $head->{$name} = {
                        name       => $name,
                        revision   => $head->{$name}{revision} ? $head->{$name}{revision}+1 : 1,
                        filehash   => $hash,
                        commithash => $commit->{hash},
                        modified   => $cvsDate,
                        author     => $commit->{author},
                        mode       => $dbMode,
                    };
                    $self->insert_rev($name, $head->{$name}{revision}, $hash, $commit->{hash}, $cvsDate, $commit->{author}, $dbMode);
                }
                else
                {
                    $log->warn("UNKNOWN FILE CHANGE mode=$mode, hash=$hash, change=$change, name=$name");
                    die;
                }
            }
            close FILELIST;
        } else {
            # Full-scan case (first run): walk the whole tree.
            # this is used to detect files removed from the repo
            my $seen_files = {};

            my $filepipe = open(FILELIST, '-|', 'git', 'ls-tree', '-z', '-r', $commit->{hash}) or die("Cannot call git-ls-tree : $!");
            local $/ = "\0";
            while ( <FILELIST> )
            {
                chomp;
                unless ( /^(\d+)\s+(\w+)\s+([a-zA-Z0-9]+)\t(.*)$/o )
                {
                    die("Couldn't process git-ls-tree line : $_");
                }

                my ( $mode, $git_type, $git_hash, $git_filename ) = ( $1, $2, $3, $4 );

                $seen_files->{$git_filename} = 1;

                my ( $oldhash, $oldrevision, $oldmode ) = (
                    $head->{$git_filename}{filehash},
                    $head->{$git_filename}{revision},
                    $head->{$git_filename}{mode}
                );

                my $dbMode = convertToDbMode($mode);

                # unless the file exists with the same hash, we need to update it ...
                unless ( defined($oldhash) and $oldhash eq $git_hash and defined($oldmode) and $oldmode eq $dbMode )
                {
                    my $newrevision = ( $oldrevision or 0 ) + 1;

                    $head->{$git_filename} = {
                        name       => $git_filename,
                        revision   => $newrevision,
                        filehash   => $git_hash,
                        commithash => $commit->{hash},
                        modified   => $cvsDate,
                        author     => $commit->{author},
                        mode       => $dbMode,
                    };

                    $self->insert_rev($git_filename, $newrevision, $git_hash, $commit->{hash}, $cvsDate, $commit->{author}, $dbMode);
                }
            }
            close FILELIST;

            # Detect deleted files
            foreach my $file ( sort keys %$head )
            {
                unless ( exists $seen_files->{$file} or $head->{$file}{filehash} eq "deleted" )
                {
                    $head->{$file}{revision}++;
                    $head->{$file}{filehash} = "deleted";
                    $head->{$file}{commithash} = $commit->{hash};
                    $head->{$file}{modified} = $cvsDate;
                    $head->{$file}{author} = $commit->{author};

                    $self->insert_rev($file, $head->{$file}{revision}, $head->{$file}{filehash}, $commit->{hash}, $cvsDate, $commit->{author}, $head->{$file}{mode});
                }
            }
            # END : "Detect deleted files"
        }

        if (exists $commit->{mergemsg})
        {
            $self->insert_mergelog($commit->{hash}, $commit->{mergemsg});
        }

        $lastpicked = $commit->{hash};

        $self->_set_prop("last_commit", $commit->{hash});
    }

    $self->delete_head();
    foreach my $file ( sort keys %$head )
    {
        $self->insert_head(
            $file,
            $head->{$file}{revision},
            $head->{$file}{filehash},
            $head->{$file}{commithash},
            $head->{$file}{modified},
            $head->{$file}{author},
            $head->{$file}{mode},
        );
    }
    # invalidate the gethead cache
    $self->clearCommitRefCaches();

    # Ending exclusive lock here
    $self->{dbh}->commit() or die "Failed to commit changes to SQLite";
}

# readCommits($fh) — parse "git rev-list --pretty --parents" output from the
# given handle into a list of commit hashrefs {hash, parents, message, plus
# lowercased rfc822-style headers such as author/date}, oldest first.
sub readCommits
{
    my $pipeHandle = shift;
    my @commits;

    my %commit = ();

    while ( <$pipeHandle> )
    {
        chomp;
        if (m/^commit\s+(.*)$/) {
            # on ^commit lines put the just seen commit in the stack
            # and prime things for the next one
            if (keys %commit) {
                my %copy = %commit;
                unshift @commits, \%copy;
                %commit = ();
            }
            my @parents = split(m/\s+/, $1);
            $commit{hash} = shift @parents;
            $commit{parents} = \@parents;
        }
        elsif (m/^(\w+?):\s+(.*)$/ && !exists($commit{message})) {
            # on rfc822-like lines seen before we see any message,
            # lowercase the entry and put it in the hash as key-value
            $commit{lc($1)} = $2;
        } else {
            # message lines - skip initial empty line
            # and trim whitespace
            if (!exists($commit{message}) && m/^\s*$/) {
                # define it to mark the end of headers
                $commit{message} = '';
                next;
            }
            s/^\s+//; s/\s+$//; # trim ws
            $commit{message} .= $_ . "\n";
        }
    }

    # flush the final pending commit
    unshift @commits, \%commit if ( keys %commit );

    return @commits;
}

# convertToCvsDate($date) — reformat a "git rev-list --pretty" date string
# into the RFC822/RFC1123 form CVS clients expect; unrecognized input is
# returned unchanged.
sub convertToCvsDate
{
    my $date = shift;
    # Convert from: "git rev-list --pretty" formatted date
    # Convert to: "the format specified by RFC822 as modified by RFC1123."
    #
    # Example: 26 May 1997 13:01:40 -0400
    if( $date =~ /^\w+\s+(\w+)\s+(\d+)\s+(\d+:\d+:\d+)\s+(\d+)\s+([+-]\d+)$/ )
    {
        $date = "$2 $1 $4 $3 $5";
    }

    return $date;
}

# convertToDbMode($mode) — map a six-digit octal git file mode (e.g. 100644)
# to the user-permission string stored in the DB ("r"/"w"/"x" combination,
# defaulting to "rw" when no user bits were extracted).
sub convertToDbMode
{
    my $mode = shift;

    # NOTE: The CVS protocol uses a string similar "u=rw,g=rw,o=rw",
    #  but the database "mode" column historically (and currently)
    #  only stores the "rw" (for user) part of the string.
    #    FUTURE: It might make more sense to persist the raw
    #  octal mode (or perhaps the final full CVS form) instead of
    #  this half-converted form, but it isn't currently worth the
    #  backwards compatibility headaches.
    $mode=~/^\d{3}(\d)\d\d$/;
    my $userBits=$1;

    my $dbMode = "";
    $dbMode .= "r" if ( $userBits & 4 );
    $dbMode .= "w" if ( $userBits & 2 );
    $dbMode .= "x" if ( $userBits & 1 );
    $dbMode = "rw" if ( $dbMode eq "" );

    return $dbMode;
}

# insert_rev — append one per-file revision row to the "revision" table.
sub insert_rev
{
    my $self = shift;
    my $name = shift;
    my $revision = shift;
    my $filehash = shift;
    my $commithash = shift;
    my $modified = shift;
    my $author = shift;
    my $mode = shift;
    my $tablename = $self->tablename("revision");

    my $insert_rev = $self->{dbh}->prepare_cached("INSERT INTO $tablename (name, revision, filehash, commithash, modified, author, mode) VALUES (?,?,?,?,?,?,?)",{},1);
    $insert_rev->execute($name, $revision, $filehash, $commithash, $modified, $author, $mode);
}

# insert_mergelog — record the merge-summary message for a merge commit.
sub insert_mergelog
{
    my $self = shift;
    my $key = shift;
    my $value = shift;
    my $tablename = $self->tablename("commitmsgs");

    my $insert_mergelog = $self->{dbh}->prepare_cached("INSERT INTO $tablename (key, value) VALUES (?,?)",{},1);
    $insert_mergelog->execute($key, $value);
}

# delete_head — empty the "head" table prior to rebuilding it in update().
sub delete_head
{
    my $self = shift;
    my $tablename = $self->tablename("head");

    my $delete_head = $self->{dbh}->prepare_cached("DELETE FROM $tablename",{},1);
    $delete_head->execute();
}

# insert_head — add one current-state row to the "head" table.
sub insert_head
{
    my $self = shift;
    my $name = shift;
    my $revision = shift;
    my $filehash = shift;
    my $commithash = shift;
    my $modified = shift;
    my $author = shift;
    my $mode = shift;
    my $tablename = $self->tablename("head");

    my $insert_head = $self->{dbh}->prepare_cached("INSERT INTO $tablename (name, revision, filehash, commithash, modified, author, mode) VALUES (?,?,?,?,?,?,?)",{},1);
    $insert_head->execute($name, $revision, $filehash, $commithash, $modified, $author, $mode);
}

# _get_prop — fetch a scalar value from the "properties" key/value table
# (undef when the key is absent).
sub _get_prop
{
    my $self = shift;
    my $key = shift;
    my $tablename = $self->tablename("properties");

    my $db_query = $self->{dbh}->prepare_cached("SELECT value FROM $tablename WHERE key=?",{},1);
    $db_query->execute($key);
    my ( $value ) = $db_query->fetchrow_array;

    return $value;
}

# _set_prop — upsert a key/value pair into the "properties" table
# (UPDATE first, INSERT when no row was affected); returns the value.
sub _set_prop
{
    my $self = shift;
    my $key = shift;
    my $value = shift;
    my $tablename = $self->tablename("properties");

    my $db_query = $self->{dbh}->prepare_cached("UPDATE $tablename SET value=? WHERE key=?",{},1);
    $db_query->execute($value, $key);

    unless ( $db_query->rows )
    {
        $db_query = $self->{dbh}->prepare_cached("INSERT INTO $tablename (key, value) VALUES (?,?)",{},1);
        $db_query->execute($key, $value);
    }

    return $value;
}

=head2 gethead

=cut

# gethead($intRev) — return (and cache) an arrayref of head-table rows,
# ordered by name.  Unless $intRev is true, each row's revision is
# prefixed with "1." for CVS-client consumption.
sub gethead
{
    my $self = shift;
    my $intRev = shift;
    my $tablename = $self->tablename("head");

    return $self->{gethead_cache} if ( defined ( $self->{gethead_cache} ) );

    my $db_query = $self->{dbh}->prepare_cached("SELECT name, filehash, mode, revision, modified, commithash, author FROM $tablename ORDER BY name ASC",{},1);
    $db_query->execute();

    my $tree = [];
    while ( my $file = $db_query->fetchrow_hashref )
    {
        if(!$intRev)
        {
            $file->{revision} = "1.$file->{revision}"
        }
        push @$tree, $file;
    }

    $self->{gethead_cache} = $tree;

    return $tree;
}

=head2 getAnyHead

Returns a reference to an array of getmeta structures, one
per file in the specified tree hash.

=cut

sub getAnyHead
{
    my ($self,$hash) = @_;

    if(!defined($hash))
    {
        return $self->gethead();
    }

    my @files;
    {
        open(my $filePipe, '-|', 'git', 'ls-tree', '-z', '-r', $hash)
                or die("Cannot call git-ls-tree : $!");
        local $/ = "\0";
        @files=<$filePipe>;
        close $filePipe;
    }

    my $tree=[];
    my($line);
    foreach $line (@files)
    {
        $line=~s/\0$//;
        unless ( $line=~/^(\d+)\s+(\w+)\s+([a-zA-Z0-9]+)\t(.*)$/o )
        {
            die("Couldn't process git-ls-tree line : $_");
        }

        my($mode, $git_type, $git_hash, $git_filename) = ($1, $2, $3, $4);
        push @$tree, $self->getMetaFromCommithash($git_filename,$hash);
    }

    return $tree;
}

=head2 getRevisionDirMap

A "revision dir map" contains all the plain-file filenames associated
with a particular revision (tree-ish), organized by directory:

  $type = $out->{$dir}{$fullName}

The type of each is "F" (for ordinary file) or "D" (for directory,
for which the map $out->{$fullName} will also exist).
=cut

# getRevisionDirMap($ver) — build (and memoize per tree-ish) the
# directory-keyed map described in the POD above.  With no $ver, the DB
# head is used; otherwise the tree of the resolved commit is listed.
# Returns undef when $ver cannot be resolved.
sub getRevisionDirMap
{
    my ($self,$ver)=@_;

    $self->{revisionDirMapCache} = {}
        unless defined($self->{revisionDirMapCache});

    # Get file list (previously cached results are dependent on HEAD,
    # but are early in each case):
    my $cacheKey;
    my (@fileList);
    if( !defined($ver) || $ver eq "" )
    {
        # Head case: take live (non-deleted) names from the head table.
        $cacheKey="";
        return $self->{revisionDirMapCache}{$cacheKey}
            if defined($self->{revisionDirMapCache}{$cacheKey});

        foreach my $entry ( @{$self->gethead()} )
        {
            next if ( $entry->{filehash} eq "deleted" );
            push @fileList, $entry->{name};
        }
    }
    else
    {
        # Specific revision: resolve to a commit and list its tree.
        my ($hash)=$self->lookupCommitRef($ver);
        return undef if !defined($hash);

        $cacheKey=$hash;
        return $self->{revisionDirMapCache}{$cacheKey}
            if defined($self->{revisionDirMapCache}{$cacheKey});

        open(my $filePipe, '-|', 'git', 'ls-tree', '-z', '-r', $hash)
                or die("Cannot call git-ls-tree : $!");
        local $/ = "\0";
        while ( <$filePipe> )
        {
            chomp;
            unless ( /^(\d+)\s+(\w+)\s+([a-zA-Z0-9]+)\t(.*)$/o )
            {
                die("Couldn't process git-ls-tree line : $_");
            }
            my($mode, $git_type, $git_hash, $git_filename) = ($1, $2, $3, $4);
            push @fileList, $git_filename;
        }
        close $filePipe;
    }

    # Convert to normalized form:
    my %revMap;
    foreach my $path (@fileList)
    {
        my($dir) = ($path=~m%^(?:(.*)/)?([^/]*)$%);
        $dir='' if(!defined($dir));

        # parent directories:
        # ... create empty dir maps for parent dirs:
        my($td)=$dir;
        while(!defined($revMap{$td}))
        {
            $revMap{$td}={};

            my($tp)=($td=~m%^(?:(.*)/)?([^/]*)$%);
            $tp='' if(!defined($tp));
            $td=$tp;
        }

        # ... add children to parent maps (now that they exist):
        $td=$dir;
        while($td ne "")
        {
            my($tp)=($td=~m%^(?:(.*)/)?([^/]*)$%);
            $tp='' if(!defined($tp));

            if(defined($revMap{$tp}{$td}))
            {
                if($revMap{$tp}{$td} ne 'D')
                {
                    die "Weird file/directory inconsistency in $cacheKey";
                }
                last;   # loop exit
            }
            $revMap{$tp}{$td}='D';

            $td=$tp;
        }

        # file
        $revMap{$dir}{$path}='F';
    }

    # Save in cache:
    $self->{revisionDirMapCache}{$cacheKey}=\%revMap;
    return $self->{revisionDirMapCache}{$cacheKey};
}

=head2 getlog

See also gethistorydense().

=cut

# getlog($filename,$revFilter) — return ($tree,$totalRevs): all matching
# revision rows for $filename (newest first, "1."-prefixed) plus the
# unfiltered revision count.
sub getlog
{
    my $self = shift;
    my $filename = shift;
    my $revFilter = shift;

    my $tablename = $self->tablename("revision");

    # Filters:
    # TODO: date, state, or by specific logins filters?
    # TODO: Handle comma-separated list of revFilter items, each item
    #   can be a range [only case currently handled] or individual
    #   rev or branch or "branch.".
    # TODO: Adjust $db_query WHERE clause based on revFilter, instead of
    #   manually filtering the results of the query?
    #
    # NOTE(review): the range is parsed from the global $state->{opt}{r}
    # rather than the $revFilter parameter, and the regex's dots are
    # unescaped — preserved as-is; confirm against upstream before changing.
    my ( $minrev, $maxrev );
    if( defined($revFilter) and
        $state->{opt}{r} =~ /^(1.(\d+))?(::?)(1.(\d.+))?$/ )
    {
        my $control = $3;
        $minrev = $2;
        $maxrev = $5;
        # "a::b" excludes the lower endpoint
        $minrev++ if ( defined($minrev) and $control eq "::" );
    }

    my $db_query = $self->{dbh}->prepare_cached("SELECT name, filehash, author, mode, revision, modified, commithash FROM $tablename WHERE name=? ORDER BY revision DESC",{},1);
    $db_query->execute($filename);

    my $totalRevs=0;
    my $tree = [];
    while ( my $row = $db_query->fetchrow_hashref )
    {
        $totalRevs++;
        next if ( defined($minrev) and $row->{revision} < $minrev );
        next if ( defined($maxrev) and $row->{revision} > $maxrev );

        $row->{revision} = "1." . $row->{revision};
        push @$tree, $row;
    }

    return ($tree,$totalRevs);
}

=head2 getmeta

This function takes a filename (with path) argument and returns a hashref of
metadata for that file.

 There are several ways $revision can be specified:

   - A reference to hash that contains a "tag" that is the
        actual revision (one of the below).
TODO: Also allow it to specify a "date" in the hash. - undef, to refer to the latest version on the main branch. - Full CVS client revision number (mapped to integer in DB, without the "1." prefix), - Complex CVS-compatible "special" revision number for non-linear history (see comment below) - git commit sha1 hash - branch or tag name =cut sub getmeta { my $self = shift; my $filename = shift; my $revision = shift; my $tablename_rev = $self->tablename("revision"); my $tablename_head = $self->tablename("head"); if ( ref($revision) eq "HASH" ) { $revision = $revision->{tag}; } # Overview of CVS revision numbers: # # General CVS numbering scheme: # - Basic mainline branch numbers: "1.1", "1.2", "1.3", etc. # - Result of "cvs checkin -r" (possible, but not really # recommended): "2.1", "2.2", etc # - Branch tag: "1.2.0.n", where "1.2" is revision it was branched # from, "0" is a magic placeholder that identifies it as a # branch tag instead of a version tag, and n is 2 times the # branch number off of "1.2", starting with "2". # - Version on a branch: "1.2.n.x", where "1.2" is branch-from, "n" # is branch number off of "1.2" (like n above), and "x" is # the version number on the branch. # - Branches can branch off of branches: "1.3.2.7.4.1" (even number # of components). # - Odd "n"s are used by "vendor branches" that result # from "cvs import". Vendor branches have additional # strangeness in the sense that the main rcs "head" of the main # branch will (temporarily until first normal commit) point # to the version on the vendor branch, rather than the actual # main branch. (FUTURE: This may provide an opportunity # to use "strange" revision numbers for fast-forward-merged # branch tip when CVS client is asking for the main branch.) 
# # git-cvsserver CVS-compatible special numbering schemes: # - Currently git-cvsserver only tries to be identical to CVS for # simple "1.x" numbers on the "main" branch (as identified # by the module name that was originally cvs checkout'ed). # - The database only stores the "x" part, for historical reasons. # But most of the rest of the cvsserver preserves # and thinks using the full revision number. # - To handle non-linear history, it uses a version of the form # "2.1.1.2000.b.b.b."..., where the 2.1.1.2000 is to help uniquely # identify this as a special revision number, and there are # 20 b's that together encode the sha1 git commit from which # this version of this file originated. Each b is # the numerical value of the corresponding byte plus # 100. # - "plus 100" avoids "0"s, and also reduces the # likelihood of a collision in the case that someone someday # writes an import tool that tries to preserve original # CVS revision numbers, and the original CVS data had done # lots of branches off of branches and other strangeness to # end up with a real version number that just happens to look # like this special revision number form. Also, if needed # there are several ways to extend/identify alternative encodings # within the "2.1.1.2000" part if necessary. # - Unlike real CVS revisions, you can't really reconstruct what # relation a revision of this form has to other revisions. # - FUTURE: TODO: Rework database somehow to make up and remember # fully-CVS-compatible branches and branch version numbers. my $meta; if ( defined($revision) ) { if ( $revision =~ /^1\.(\d+)$/ ) { my ($intRev) = $1; my $db_query; $db_query = $self->{dbh}->prepare_cached( "SELECT * FROM $tablename_rev WHERE name=? 
AND revision=?", {},1); $db_query->execute($filename, $intRev); $meta = $db_query->fetchrow_hashref; } elsif ( $revision =~ /^2\.1\.1\.2000(\.[1-3][0-9][0-9]){20}$/ ) { my ($commitHash)=($revision=~/^2\.1\.1\.2000(.*)$/); $commitHash=~s/\.([0-9]+)/sprintf("%02x",$1-100)/eg; if($commitHash=~/^[0-9a-f]{40}$/) { return $self->getMetaFromCommithash($filename,$commitHash); } # error recovery: fall back on head version below print "E Failed to find $filename version=$revision or commit=$commitHash\n"; $log->warning("failed get $revision with commithash=$commitHash"); undef $revision; } elsif ( $revision =~ /^[0-9a-f]{40}$/ ) { # Try DB first. This is mostly only useful for req_annotate(), # which only calls this for stuff that should already be in # the DB. It is fairly likely to be a waste of time # in most other cases [unless the file happened to be # modified in $revision specifically], but # it is probably in the noise compared to how long # getMetaFromCommithash() will take. my $db_query; $db_query = $self->{dbh}->prepare_cached( "SELECT * FROM $tablename_rev WHERE name=? AND commithash=?", {},1); $db_query->execute($filename, $revision); $meta = $db_query->fetchrow_hashref; if(! 
$meta) { my($revCommit)=$self->lookupCommitRef($revision); if($revCommit=~/^[0-9a-f]{40}$/) { return $self->getMetaFromCommithash($filename,$revCommit); } # error recovery: nothing found: print "E Failed to find $filename version=$revision\n"; $log->warning("failed get $revision"); return $meta; } } else { my($revCommit)=$self->lookupCommitRef($revision); if($revCommit=~/^[0-9a-f]{40}$/) { return $self->getMetaFromCommithash($filename,$revCommit); } # error recovery: fall back on head version below print "E Failed to find $filename version=$revision\n"; $log->warning("failed get $revision"); undef $revision; # Allow fallback } } if(!defined($revision)) { my $db_query; $db_query = $self->{dbh}->prepare_cached( "SELECT * FROM $tablename_head WHERE name=?",{},1); $db_query->execute($filename); $meta = $db_query->fetchrow_hashref; } if($meta) { $meta->{revision} = "1.$meta->{revision}"; } return $meta; } sub getMetaFromCommithash { my $self = shift; my $filename = shift; my $revCommit = shift; # NOTE: This function doesn't scale well (lots of forks), especially # if you have many files that have not been modified for many commits # (each git-rev-parse redoes a lot of work for each file # that theoretically could be done in parallel by smarter # graph traversal). # # TODO: Possible optimization strategies: # - Solve the issue of assigning and remembering "real" CVS # revision numbers for branches, and ensure the # data structure can do this efficiently. Perhaps something # similar to "git notes", and carefully structured to take # advantage same-sha1-is-same-contents, to roll the same # unmodified subdirectory data onto multiple commits? # - Write and use a C tool that is like git-blame, but # operates on multiple files with file granularity, instead # of one file with line granularity. Cache # most-recently-modified in $self->{commitRefCache}{$revCommit}. 
# Try to be intelligent about how many files we do with # one fork (perhaps one directory at a time, without recursion, # and/or include directory as one line item, recurse from here # instead of in C tool?). # - Perhaps we could ask the DB for (filename,fileHash), # and just guess that it is correct (that the file hadn't # changed between $revCommit and the found commit, then # changed back, confusing anything trying to interpret # history). Probably need to add another index to revisions # DB table for this. # - NOTE: Trying to store all (commit,file) keys in DB [to # find "lastModfiedCommit] (instead of # just files that changed in each commit as we do now) is # probably not practical from a disk space perspective. # Does the file exist in $revCommit? # TODO: Include file hash in dirmap cache. my($dirMap)=$self->getRevisionDirMap($revCommit); my($dir,$file)=($filename=~m%^(?:(.*)/)?([^/]*$)%); if(!defined($dir)) { $dir=""; } if( !defined($dirMap->{$dir}) || !defined($dirMap->{$dir}{$filename}) ) { my($fileHash)="deleted"; my($retVal)={}; $retVal->{name}=$filename; $retVal->{filehash}=$fileHash; # not needed and difficult to compute: $retVal->{revision}="0"; # $revision; $retVal->{commithash}=$revCommit; #$retVal->{author}=$commit->{author}; #$retVal->{modified}=convertToCvsDate($commit->{date}); #$retVal->{mode}=convertToDbMode($mode); return $retVal; } my($fileHash)=safe_pipe_capture("git","rev-parse","$revCommit:$filename"); chomp $fileHash; if(!($fileHash=~/^[0-9a-f]{40}$/)) { die "Invalid fileHash '$fileHash' looking up" ." 
'$revCommit:$filename'\n"; } # information about most recent commit to modify $filename: open(my $gitLogPipe, '-|', 'git', 'rev-list', '--max-count=1', '--pretty', '--parents', $revCommit, '--', $filename) or die "Cannot call git-rev-list: $!"; my @commits=readCommits($gitLogPipe); close $gitLogPipe; if(scalar(@commits)!=1) { die "Can't find most recent commit changing $filename\n"; } my($commit)=$commits[0]; if( !defined($commit) || !defined($commit->{hash}) ) { return undef; } # does this (commit,file) have a real assigned CVS revision number? my $tablename_rev = $self->tablename("revision"); my $db_query; $db_query = $self->{dbh}->prepare_cached( "SELECT * FROM $tablename_rev WHERE name=? AND commithash=?", {},1); $db_query->execute($filename, $commit->{hash}); my($meta)=$db_query->fetchrow_hashref; if($meta) { $meta->{revision} = "1.$meta->{revision}"; return $meta; } # fall back on special revision number my($revision)=$commit->{hash}; $revision=~s/(..)/'.' . (hex($1)+100)/eg; $revision="2.1.1.2000$revision"; # meta data about $filename: open(my $filePipe, '-|', 'git', 'ls-tree', '-z', $commit->{hash}, '--', $filename) or die("Cannot call git-ls-tree : $!"); local $/ = "\0"; my $line; $line=<$filePipe>; if(defined(<$filePipe>)) { die "Expected only a single file for git-ls-tree $filename\n"; } close $filePipe; chomp $line; unless ( $line=~m/^(\d+)\s+(\w+)\s+([a-zA-Z0-9]+)\t(.*)$/o ) { die("Couldn't process git-ls-tree line : $line\n"); } my ( $mode, $git_type, $git_hash, $git_filename ) = ( $1, $2, $3, $4 ); # save result: my($retVal)={}; $retVal->{name}=$filename; $retVal->{revision}=$revision; $retVal->{filehash}=$fileHash; $retVal->{commithash}=$revCommit; $retVal->{author}=$commit->{author}; $retVal->{modified}=convertToCvsDate($commit->{date}); $retVal->{mode}=convertToDbMode($mode); return $retVal; } =head2 lookupCommitRef Convert tag/branch/abbreviation/etc into a commit sha1 hash. Caches the result so looking it up again is fast. 
=cut

# lookupCommitRef($ref) — resolve a (possibly escaped) ref name to a full
# commit sha1, verifying the object is actually a commit.  Returns undef
# on failure; successful lookups are cached in $self->{commitRefCache}.
sub lookupCommitRef
{
    my $self = shift;
    my $ref = shift;

    my $commitHash = $self->{commitRefCache}{$ref};
    if(defined($commitHash))
    {
        return $commitHash;
    }

    $commitHash=safe_pipe_capture("git","rev-parse","--verify","--quiet",
                                  $self->unescapeRefName($ref));
    $commitHash=~s/\s*$//;
    if(!($commitHash=~/^[0-9a-f]{40}$/))
    {
        $commitHash=undef;
    }

    # Only accept actual commits (not tags pointing at blobs/trees).
    if( defined($commitHash) )
    {
        my $type=safe_pipe_capture("git","cat-file","-t",$commitHash);
        if( ! ($type=~/^commit\s*$/ ) )
        {
            $commitHash=undef;
        }
    }
    if(defined($commitHash))
    {
        $self->{commitRefCache}{$ref}=$commitHash;
    }
    return $commitHash;
}

=head2 clearCommitRefCaches

Clears cached commit cache (sha1's for various tags/abbeviations/etc),
and related caches.

=cut

sub clearCommitRefCaches
{
    my $self = shift;
    $self->{commitRefCache} = {};
    $self->{revisionDirMapCache} = undef;
    $self->{gethead_cache} = undef;
}

=head2 commitmessage

this function takes a commithash and returns the commit message for that commit

=cut

sub commitmessage
{
    my $self = shift;
    my $commithash = shift;
    my $tablename = $self->tablename("commitmsgs");

    die("Need commithash") unless ( defined($commithash) and $commithash =~ /^[a-zA-Z0-9]{40}$/ );

    # Prefer the stored (possibly merge-annotated) message.
    my $db_query;
    $db_query = $self->{dbh}->prepare_cached("SELECT value FROM $tablename WHERE key=?",{},1);
    $db_query->execute($commithash);

    my ( $message ) = $db_query->fetchrow_array;

    if ( defined ( $message ) )
    {
        $message .= " " if ( $message =~ /\n$/ );
        return $message;
    }

    # Not in the DB: read the raw commit object and drop its headers.
    my @lines = safe_pipe_capture("git", "cat-file", "commit", $commithash);
    shift @lines while ( $lines[0] =~ /\S/ );
    $message = join("",@lines);
    $message .= " " if ( $message =~ /\n$/ );
    return $message;
}

=head2 gethistorydense

This function takes a filename (with path) argument and returns an
arrayofarrays containing revision,filehash,commithash ordered by
revision descending.

This version of gethistory skips deleted entries -- so it is useful
for annotate. The 'dense' part is a reference to a '--dense' option
available for git-rev-list and other git tools that depend on it.

See also getlog().

=cut

sub gethistorydense
{
    my $self = shift;
    my $filename = shift;
    my $tablename = $self->tablename("revision");

    my $db_query;
    $db_query = $self->{dbh}->prepare_cached("SELECT revision, filehash, commithash FROM $tablename WHERE name=? AND filehash!='deleted' ORDER BY revision DESC",{},1);
    $db_query->execute($filename);

    my $result = $db_query->fetchall_arrayref;

    # Prefix each integer DB revision with "1." for the CVS client.
    # (The loop header was garbled in the archived copy; restored to the
    # straightforward iteration over @$result.)
    my $i;
    for($i=0 ; $i<scalar(@$result) ; $i++)
    {
        $result->[$i][0]="1." . $result->[$i][0];
    }

    return $result;
}

=head2 escapeRefName

Apply an escape mechanism to compensate for characters that
git ref names can have that CVS tags can not.

=cut

sub escapeRefName
{
    my($self,$refName)=@_;

    # CVS officially only allows [-_A-Za-z0-9] in tag names (or in
    # many contexts it can also be a CVS revision number).
    #
    # Git tags commonly use '/' and '.' as well, but also handle
    # anything else just in case:
    #
    #   = "_-s-"  For '/'.
    #   = "_-p-"  For '.'.
    #   = "_-u-"  For underscore, in case someone wants a literal "_-" in
    #     a tag name.
    #   = "_-xx-" Where "xx" is the hexadecimal representation of the
    #     desired ASCII character byte. (for anything else)

    # NOTE(review): three suspect spots preserved as-is — confirm against
    # upstream git-cvsserver.perl before changing:
    #   1. unary "!" binds tighter than "=~", so this tests
    #      (!$refName) =~ /.../ rather than the presumably intended
    #      negated match on $refName;
    #   2. the final s/// interpolates $1 but its pattern has no capture
    #      group, so $1 is stale from an earlier match;
    #   3. the escaped name is never returned (unlike unescapeRefName(),
    #      which returns $refName).
    if(! $refName=~/^[1-9][0-9]*(\.[1-9][0-9]*)*$/)
    {
        $refName=~s/_-/_-u--/g;
        $refName=~s/\./_-p-/g;
        $refName=~s%/%_-s-%g;
        $refName=~s/[^-_a-zA-Z0-9]/sprintf("_-%02x-",$1)/eg;
    }
}

=head2 unescapeRefName

Undo an escape mechanism to compensate for characters that
git ref names can have that CVS tags can not.

=cut

sub unescapeRefName
{
    my($self,$refName)=@_;

    # see escapeRefName() for description of escape mechanism.

    $refName=~s/_-([spu]|[0-9a-f][0-9a-f])-/unescapeRefNameChar($1)/eg;

    # allowed tag names
    # TODO: Perhaps use git check-ref-format, with an in-process cache of
    #  validated names?
    if( !( $refName=~m%^[^-][-a-zA-Z0-9_/.]*$% ) ||
        ( $refName=~m%[/.]$% ) ||
        ( $refName=~/\.lock$/ ) ||
        ( $refName=~m%\.\.|/\.|[[\\:?*~]|\@\{% ) ) # matching }
    {
        # Error:
        $log->warn("illegal refName: $refName");
        $refName=undef;
    }
    return $refName;
}

# unescapeRefNameChar($char) — decode a single escape payload ("s", "p",
# "u", or a two-hex-digit byte); anything else is restored verbatim as
# "_-$char-" on the assumption it was never escaped in the first place.
sub unescapeRefNameChar
{
    my($char)=@_;

    if($char eq "s")
    {
        $char="/";
    }
    elsif($char eq "p")
    {
        $char=".";
    }
    elsif($char eq "u")
    {
        $char="_";
    }
    elsif($char=~/^[0-9a-f][0-9a-f]$/)
    {
        $char=chr(hex($char));
    }
    else
    {
        # Error case: Maybe it has come straight from user, and
        #   wasn't supposed to be escaped?  Restore it the way we got it:
        $char="_-$char-";
    }

    return $char;
}

=head2 in_array()

from Array::PAT - mimics the in_array() function
found in PHP. Yuck but works for small arrays.

=cut

sub in_array
{
    my ($check, @array) = @_;
    my $retval = 0;
    foreach my $test (@array){
        if($check eq $test){
            $retval = 1;
        }
    }
    return $retval;
}

=head2 safe_pipe_capture

an alternative to `command` that allows input to be passed as an array
to work around shell problems with weird characters in arguments

=cut

sub safe_pipe_capture {
    my @output;

    if (my $pid = open my $child, '-|') {
        @output = (<$child>);
        close $child or die join(' ',@_).": $! $?";
    } else {
        exec(@_) or die "$! $?"; # exec() can fail the executable can't be found
    }
    return wantarray ?
@output : join('',@output); } =head2 mangle_dirname create a string from a directory name that is suitable to use as part of a filename, mainly by converting all chars except \w.- to _ =cut sub mangle_dirname { my $dirname = shift; return unless defined $dirname; $dirname =~ s/[^\w.-]/_/g; return $dirname; } =head2 mangle_tablename create a string from a that is suitable to use as part of an SQL table name, mainly by converting all chars except \w to _ =cut sub mangle_tablename { my $tablename = shift; return unless defined $tablename; $tablename =~ s/[^\w_]/_/g; return $tablename; } 1; cgit-0.11.2/git/sh-i18n--envsubst.c0000644000175000017500000002446412476431550017054 0ustar formorerformorer/* * sh-i18n--envsubst.c - a stripped-down version of gettext's envsubst(1) * * Copyright (C) 2010 Ævar Arnfjörð Bjarmason * * This is a modified version of * 67d0871a8c:gettext-runtime/src/envsubst.c from the gettext.git * repository. It has been stripped down to only implement the * envsubst(1) features that we need in the git-sh-i18n fallbacks. * * The "Close standard error" part in main() is from * 8dac033df0:gnulib-local/lib/closeout.c. The copyright notices for * both files are reproduced immediately below. */ #include "git-compat-util.h" /* Substitution of environment variables in shell format strings. Copyright (C) 2003-2007 Free Software Foundation, Inc. Written by Bruno Haible , 2003. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* closeout.c - close standard output and standard error Copyright (C) 1998-2007 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include #include #include #include /* If true, substitution shall be performed on all variables. */ static unsigned short int all_variables; /* Forward declaration of local functions. */ static void print_variables (const char *string); static void note_variables (const char *string); static void subst_from_stdin (void); int main (int argc, char *argv[]) { /* Default values for command line options. 
*/ /* unsigned short int show_variables = 0; */ switch (argc) { case 1: error ("we won't substitute all variables on stdin for you"); break; /* all_variables = 1; subst_from_stdin (); */ case 2: /* echo '$foo and $bar' | git sh-i18n--envsubst --variables '$foo and $bar' */ all_variables = 0; note_variables (argv[1]); subst_from_stdin (); break; case 3: /* git sh-i18n--envsubst --variables '$foo and $bar' */ if (strcmp(argv[1], "--variables")) error ("first argument must be --variables when two are given"); /* show_variables = 1; */ print_variables (argv[2]); break; default: error ("too many arguments"); break; } /* Close standard error. This is simpler than fwriteerror_no_ebadf, because upon failure we don't need an errno - all we can do at this point is to set an exit status. */ errno = 0; if (ferror (stderr) || fflush (stderr)) { fclose (stderr); exit (EXIT_FAILURE); } if (fclose (stderr) && errno != EBADF) exit (EXIT_FAILURE); exit (EXIT_SUCCESS); } /* Parse the string and invoke the callback each time a $VARIABLE or ${VARIABLE} construct is seen, where VARIABLE is a nonempty sequence of ASCII alphanumeric/underscore characters, starting with an ASCII alphabetic/underscore character. We allow only ASCII characters, to avoid dependencies w.r.t. the current encoding: While "${\xe0}" looks like a variable access in ISO-8859-1 encoding, it doesn't look like one in the BIG5, BIG5-HKSCS, GBK, GB18030, SHIFT_JIS, JOHAB encodings, because \xe0\x7d is a single character in these encodings. 
*/ static void find_variables (const char *string, void (*callback) (const char *var_ptr, size_t var_len)) { for (; *string != '\0';) if (*string++ == '$') { const char *variable_start; const char *variable_end; unsigned short int valid; char c; if (*string == '{') string++; variable_start = string; c = *string; if ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || c == '_') { do c = *++string; while ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || c == '_'); variable_end = string; if (variable_start[-1] == '{') { if (*string == '}') { string++; valid = 1; } else valid = 0; } else valid = 1; if (valid) callback (variable_start, variable_end - variable_start); } } } /* Print a variable to stdout, followed by a newline. */ static void print_variable (const char *var_ptr, size_t var_len) { fwrite (var_ptr, var_len, 1, stdout); putchar ('\n'); } /* Print the variables contained in STRING to stdout, each one followed by a newline. */ static void print_variables (const char *string) { find_variables (string, &print_variable); } /* Type describing list of immutable strings, implemented using a dynamic array. */ typedef struct string_list_ty string_list_ty; struct string_list_ty { const char **item; size_t nitems; size_t nitems_max; }; /* Initialize an empty list of strings. */ static inline void string_list_init (string_list_ty *slp) { slp->item = NULL; slp->nitems = 0; slp->nitems_max = 0; } /* Append a single string to the end of a list of strings. */ static inline void string_list_append (string_list_ty *slp, const char *s) { /* Grow the list. */ if (slp->nitems >= slp->nitems_max) { slp->nitems_max = slp->nitems_max * 2 + 4; REALLOC_ARRAY(slp->item, slp->nitems_max); } /* Add the string to the end of the list. */ slp->item[slp->nitems++] = s; } /* Compare two strings given by reference. 
*/ static int cmp_string (const void *pstr1, const void *pstr2) { const char *str1 = *(const char **)pstr1; const char *str2 = *(const char **)pstr2; return strcmp (str1, str2); } /* Sort a list of strings. */ static inline void string_list_sort (string_list_ty *slp) { if (slp->nitems > 0) qsort (slp->item, slp->nitems, sizeof (slp->item[0]), cmp_string); } /* Test whether a sorted string list contains a given string. */ static int sorted_string_list_member (const string_list_ty *slp, const char *s) { size_t j1, j2; j1 = 0; j2 = slp->nitems; if (j2 > 0) { /* Binary search. */ while (j2 - j1 > 1) { /* Here we know that if s is in the list, it is at an index j with j1 <= j < j2. */ size_t j = (j1 + j2) >> 1; int result = strcmp (slp->item[j], s); if (result > 0) j2 = j; else if (result == 0) return 1; else j1 = j + 1; } if (j2 > j1) if (strcmp (slp->item[j1], s) == 0) return 1; } return 0; } /* Set of variables on which to perform substitution. Used only if !all_variables. */ static string_list_ty variables_set; /* Adds a variable to variables_set. */ static void note_variable (const char *var_ptr, size_t var_len) { char *string = xmemdupz (var_ptr, var_len); string_list_append (&variables_set, string); } /* Stores the variables occurring in the string in variables_set. */ static void note_variables (const char *string) { string_list_init (&variables_set); find_variables (string, ¬e_variable); string_list_sort (&variables_set); } static int do_getc (void) { int c = getc (stdin); if (c == EOF) { if (ferror (stdin)) error ("error while reading standard input"); } return c; } static inline void do_ungetc (int c) { if (c != EOF) ungetc (c, stdin); } /* Copies stdin to stdout, performing substitutions. */ static void subst_from_stdin (void) { static char *buffer; static size_t bufmax; static size_t buflen; int c; for (;;) { c = do_getc (); if (c == EOF) break; /* Look for $VARIABLE or ${VARIABLE}. 
*/ if (c == '$') { unsigned short int opening_brace = 0; unsigned short int closing_brace = 0; c = do_getc (); if (c == '{') { opening_brace = 1; c = do_getc (); } if ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || c == '_') { unsigned short int valid; /* Accumulate the VARIABLE in buffer. */ buflen = 0; do { if (buflen >= bufmax) { bufmax = 2 * bufmax + 10; buffer = xrealloc (buffer, bufmax); } buffer[buflen++] = c; c = do_getc (); } while ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || c == '_'); if (opening_brace) { if (c == '}') { closing_brace = 1; valid = 1; } else { valid = 0; do_ungetc (c); } } else { valid = 1; do_ungetc (c); } if (valid) { /* Terminate the variable in the buffer. */ if (buflen >= bufmax) { bufmax = 2 * bufmax + 10; buffer = xrealloc (buffer, bufmax); } buffer[buflen] = '\0'; /* Test whether the variable shall be substituted. */ if (!all_variables && !sorted_string_list_member (&variables_set, buffer)) valid = 0; } if (valid) { /* Substitute the variable's value from the environment. */ const char *env_value = getenv (buffer); if (env_value != NULL) fputs (env_value, stdout); } else { /* Perform no substitution at all. Since the buffered input contains no other '$' than at the start, we can just output all the buffered contents. 
*/ putchar ('$'); if (opening_brace) putchar ('{'); fwrite (buffer, buflen, 1, stdout); if (closing_brace) putchar ('}'); } } else { do_ungetc (c); putchar ('$'); if (opening_brace) putchar ('{'); } } else putchar (c); } } cgit-0.11.2/git/unix-socket.c0000644000175000017500000000462312476431550016205 0ustar formorerformorer#include "cache.h" #include "unix-socket.h" static int unix_stream_socket(void) { int fd = socket(AF_UNIX, SOCK_STREAM, 0); if (fd < 0) die_errno("unable to create socket"); return fd; } static int chdir_len(const char *orig, int len) { char *path = xmemdupz(orig, len); int r = chdir(path); free(path); return r; } struct unix_sockaddr_context { char *orig_dir; }; static void unix_sockaddr_cleanup(struct unix_sockaddr_context *ctx) { if (!ctx->orig_dir) return; /* * If we fail, we can't just return an error, since we have * moved the cwd of the whole process, which could confuse calling * code. We are better off to just die. */ if (chdir(ctx->orig_dir) < 0) die("unable to restore original working directory"); free(ctx->orig_dir); } static int unix_sockaddr_init(struct sockaddr_un *sa, const char *path, struct unix_sockaddr_context *ctx) { int size = strlen(path) + 1; ctx->orig_dir = NULL; if (size > sizeof(sa->sun_path)) { const char *slash = find_last_dir_sep(path); const char *dir; struct strbuf cwd = STRBUF_INIT; if (!slash) { errno = ENAMETOOLONG; return -1; } dir = path; path = slash + 1; size = strlen(path) + 1; if (size > sizeof(sa->sun_path)) { errno = ENAMETOOLONG; return -1; } if (strbuf_getcwd(&cwd)) return -1; ctx->orig_dir = strbuf_detach(&cwd, NULL); if (chdir_len(dir, slash - dir) < 0) return -1; } memset(sa, 0, sizeof(*sa)); sa->sun_family = AF_UNIX; memcpy(sa->sun_path, path, size); return 0; } int unix_stream_connect(const char *path) { int fd, saved_errno; struct sockaddr_un sa; struct unix_sockaddr_context ctx; if (unix_sockaddr_init(&sa, path, &ctx) < 0) return -1; fd = unix_stream_socket(); if (connect(fd, (struct sockaddr 
*)&sa, sizeof(sa)) < 0) goto fail; unix_sockaddr_cleanup(&ctx); return fd; fail: saved_errno = errno; unix_sockaddr_cleanup(&ctx); close(fd); errno = saved_errno; return -1; } int unix_stream_listen(const char *path) { int fd, saved_errno; struct sockaddr_un sa; struct unix_sockaddr_context ctx; unlink(path); if (unix_sockaddr_init(&sa, path, &ctx) < 0) return -1; fd = unix_stream_socket(); if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) goto fail; if (listen(fd, 5) < 0) goto fail; unix_sockaddr_cleanup(&ctx); return fd; fail: saved_errno = errno; unix_sockaddr_cleanup(&ctx); close(fd); errno = saved_errno; return -1; } cgit-0.11.2/git/pathspec.c0000644000175000017500000003413512476431550015544 0ustar formorerformorer#include "cache.h" #include "dir.h" #include "pathspec.h" /* * Finds which of the given pathspecs match items in the index. * * For each pathspec, sets the corresponding entry in the seen[] array * (which should be specs items long, i.e. the same size as pathspec) * to the nature of the "closest" (i.e. most specific) match found for * that pathspec in the index, if it was a closer type of match than * the existing entry. As an optimization, matching is skipped * altogether if seen[] already only contains non-zero entries. * * If seen[] has not already been written to, it may make sense * to use find_pathspecs_matching_against_index() instead. */ void add_pathspec_matches_against_index(const struct pathspec *pathspec, char *seen) { int num_unmatched = 0, i; /* * Since we are walking the index as if we were walking the directory, * we have to mark the matched pathspec as seen; otherwise we will * mistakenly think that the user gave a pathspec that did not match * anything. 
*/ for (i = 0; i < pathspec->nr; i++) if (!seen[i]) num_unmatched++; if (!num_unmatched) return; for (i = 0; i < active_nr; i++) { const struct cache_entry *ce = active_cache[i]; ce_path_match(ce, pathspec, seen); } } /* * Finds which of the given pathspecs match items in the index. * * This is a one-shot wrapper around add_pathspec_matches_against_index() * which allocates, populates, and returns a seen[] array indicating the * nature of the "closest" (i.e. most specific) matches which each of the * given pathspecs achieves against all items in the index. */ char *find_pathspecs_matching_against_index(const struct pathspec *pathspec) { char *seen = xcalloc(pathspec->nr, 1); add_pathspec_matches_against_index(pathspec, seen); return seen; } /* * Magic pathspec * * Possible future magic semantics include stuff like: * * { PATHSPEC_RECURSIVE, '*', "recursive" }, * { PATHSPEC_REGEXP, '\0', "regexp" }, * */ static struct pathspec_magic { unsigned bit; char mnemonic; /* this cannot be ':'! */ const char *name; } pathspec_magic[] = { { PATHSPEC_FROMTOP, '/', "top" }, { PATHSPEC_LITERAL, 0, "literal" }, { PATHSPEC_GLOB, '\0', "glob" }, { PATHSPEC_ICASE, '\0', "icase" }, { PATHSPEC_EXCLUDE, '!', "exclude" }, }; static void prefix_short_magic(struct strbuf *sb, int prefixlen, unsigned short_magic) { int i; strbuf_addstr(sb, ":("); for (i = 0; i < ARRAY_SIZE(pathspec_magic); i++) if (short_magic & pathspec_magic[i].bit) { if (sb->buf[sb->len - 1] != '(') strbuf_addch(sb, ','); strbuf_addstr(sb, pathspec_magic[i].name); } strbuf_addf(sb, ",prefix:%d)", prefixlen); } /* * Take an element of a pathspec and check for magic signatures. * Append the result to the prefix. Return the magic bitmap. * * For now, we only parse the syntax and throw out anything other than * "top" magic. * * NEEDSWORK: This needs to be rewritten when we start migrating * get_pathspec() users to use the "struct pathspec" interface. 
For * example, a pathspec element may be marked as case-insensitive, but * the prefix part must always match literally, and a single stupid * string cannot express such a case. */ static unsigned prefix_pathspec(struct pathspec_item *item, unsigned *p_short_magic, const char **raw, unsigned flags, const char *prefix, int prefixlen, const char *elt) { static int literal_global = -1; static int glob_global = -1; static int noglob_global = -1; static int icase_global = -1; unsigned magic = 0, short_magic = 0, global_magic = 0; const char *copyfrom = elt, *long_magic_end = NULL; char *match; int i, pathspec_prefix = -1; if (literal_global < 0) literal_global = git_env_bool(GIT_LITERAL_PATHSPECS_ENVIRONMENT, 0); if (literal_global) global_magic |= PATHSPEC_LITERAL; if (glob_global < 0) glob_global = git_env_bool(GIT_GLOB_PATHSPECS_ENVIRONMENT, 0); if (glob_global) global_magic |= PATHSPEC_GLOB; if (noglob_global < 0) noglob_global = git_env_bool(GIT_NOGLOB_PATHSPECS_ENVIRONMENT, 0); if (glob_global && noglob_global) die(_("global 'glob' and 'noglob' pathspec settings are incompatible")); if (icase_global < 0) icase_global = git_env_bool(GIT_ICASE_PATHSPECS_ENVIRONMENT, 0); if (icase_global) global_magic |= PATHSPEC_ICASE; if ((global_magic & PATHSPEC_LITERAL) && (global_magic & ~PATHSPEC_LITERAL)) die(_("global 'literal' pathspec setting is incompatible " "with all other global pathspec settings")); if (flags & PATHSPEC_LITERAL_PATH) global_magic = 0; if (elt[0] != ':' || literal_global || (flags & PATHSPEC_LITERAL_PATH)) { ; /* nothing to do */ } else if (elt[1] == '(') { /* longhand */ const char *nextat; for (copyfrom = elt + 2; *copyfrom && *copyfrom != ')'; copyfrom = nextat) { size_t len = strcspn(copyfrom, ",)"); if (copyfrom[len] == ',') nextat = copyfrom + len + 1; else /* handle ')' and '\0' */ nextat = copyfrom + len; if (!len) continue; for (i = 0; i < ARRAY_SIZE(pathspec_magic); i++) { if (strlen(pathspec_magic[i].name) == len && 
!strncmp(pathspec_magic[i].name, copyfrom, len)) { magic |= pathspec_magic[i].bit; break; } if (starts_with(copyfrom, "prefix:")) { char *endptr; pathspec_prefix = strtol(copyfrom + 7, &endptr, 10); if (endptr - copyfrom != len) die(_("invalid parameter for pathspec magic 'prefix'")); /* "i" would be wrong, but it does not matter */ break; } } if (ARRAY_SIZE(pathspec_magic) <= i) die(_("Invalid pathspec magic '%.*s' in '%s'"), (int) len, copyfrom, elt); } if (*copyfrom != ')') die(_("Missing ')' at the end of pathspec magic in '%s'"), elt); long_magic_end = copyfrom; copyfrom++; } else { /* shorthand */ for (copyfrom = elt + 1; *copyfrom && *copyfrom != ':'; copyfrom++) { char ch = *copyfrom; if (!is_pathspec_magic(ch)) break; for (i = 0; i < ARRAY_SIZE(pathspec_magic); i++) if (pathspec_magic[i].mnemonic == ch) { short_magic |= pathspec_magic[i].bit; break; } if (ARRAY_SIZE(pathspec_magic) <= i) die(_("Unimplemented pathspec magic '%c' in '%s'"), ch, elt); } if (*copyfrom == ':') copyfrom++; } magic |= short_magic; *p_short_magic = short_magic; /* --noglob-pathspec adds :(literal) _unless_ :(glob) is specified */ if (noglob_global && !(magic & PATHSPEC_GLOB)) global_magic |= PATHSPEC_LITERAL; /* --glob-pathspec is overridden by :(literal) */ if ((global_magic & PATHSPEC_GLOB) && (magic & PATHSPEC_LITERAL)) global_magic &= ~PATHSPEC_GLOB; magic |= global_magic; if (pathspec_prefix >= 0 && (prefixlen || (prefix && *prefix))) die("BUG: 'prefix' magic is supposed to be used at worktree's root"); if ((magic & PATHSPEC_LITERAL) && (magic & PATHSPEC_GLOB)) die(_("%s: 'literal' and 'glob' are incompatible"), elt); if (pathspec_prefix >= 0) { match = xstrdup(copyfrom); prefixlen = pathspec_prefix; } else if (magic & PATHSPEC_FROMTOP) { match = xstrdup(copyfrom); prefixlen = 0; } else { match = prefix_path_gently(prefix, prefixlen, &prefixlen, copyfrom); if (!match) die(_("%s: '%s' is outside repository"), elt, copyfrom); } *raw = item->match = match; /* * Prefix the 
pathspec (keep all magic) and assign to * original. Useful for passing to another command. */ if (flags & PATHSPEC_PREFIX_ORIGIN) { struct strbuf sb = STRBUF_INIT; if (prefixlen && !literal_global) { /* Preserve the actual prefix length of each pattern */ if (short_magic) prefix_short_magic(&sb, prefixlen, short_magic); else if (long_magic_end) { strbuf_add(&sb, elt, long_magic_end - elt); strbuf_addf(&sb, ",prefix:%d)", prefixlen); } else strbuf_addf(&sb, ":(prefix:%d)", prefixlen); } strbuf_addstr(&sb, match); item->original = strbuf_detach(&sb, NULL); } else item->original = elt; item->len = strlen(item->match); item->prefix = prefixlen; if ((flags & PATHSPEC_STRIP_SUBMODULE_SLASH_CHEAP) && (item->len >= 1 && item->match[item->len - 1] == '/') && (i = cache_name_pos(item->match, item->len - 1)) >= 0 && S_ISGITLINK(active_cache[i]->ce_mode)) { item->len--; match[item->len] = '\0'; } if (flags & PATHSPEC_STRIP_SUBMODULE_SLASH_EXPENSIVE) for (i = 0; i < active_nr; i++) { struct cache_entry *ce = active_cache[i]; int ce_len = ce_namelen(ce); if (!S_ISGITLINK(ce->ce_mode)) continue; if (item->len <= ce_len || match[ce_len] != '/' || memcmp(ce->name, match, ce_len)) continue; if (item->len == ce_len + 1) { /* strip trailing slash */ item->len--; match[item->len] = '\0'; } else die (_("Pathspec '%s' is in submodule '%.*s'"), elt, ce_len, ce->name); } if (magic & PATHSPEC_LITERAL) item->nowildcard_len = item->len; else { item->nowildcard_len = simple_length(item->match); if (item->nowildcard_len < prefixlen) item->nowildcard_len = prefixlen; } item->flags = 0; if (magic & PATHSPEC_GLOB) { /* * FIXME: should we enable ONESTAR in _GLOB for * pattern "* * / * . c"? 
*/ } else { if (item->nowildcard_len < item->len && item->match[item->nowildcard_len] == '*' && no_wildcard(item->match + item->nowildcard_len + 1)) item->flags |= PATHSPEC_ONESTAR; } /* sanity checks, pathspec matchers assume these are sane */ assert(item->nowildcard_len <= item->len && item->prefix <= item->len); return magic; } static int pathspec_item_cmp(const void *a_, const void *b_) { struct pathspec_item *a, *b; a = (struct pathspec_item *)a_; b = (struct pathspec_item *)b_; return strcmp(a->match, b->match); } static void NORETURN unsupported_magic(const char *pattern, unsigned magic, unsigned short_magic) { struct strbuf sb = STRBUF_INIT; int i, n; for (n = i = 0; i < ARRAY_SIZE(pathspec_magic); i++) { const struct pathspec_magic *m = pathspec_magic + i; if (!(magic & m->bit)) continue; if (sb.len) strbuf_addch(&sb, ' '); if (short_magic & m->bit) strbuf_addf(&sb, "'%c'", m->mnemonic); else strbuf_addf(&sb, "'%s'", m->name); n++; } /* * We may want to substitute "this command" with a command * name. E.g. when add--interactive dies when running * "checkout -p" */ die(_("%s: pathspec magic not supported by this command: %s"), pattern, sb.buf); } /* * Given command line arguments and a prefix, convert the input to * pathspec. die() if any magic in magic_mask is used. */ void parse_pathspec(struct pathspec *pathspec, unsigned magic_mask, unsigned flags, const char *prefix, const char **argv) { struct pathspec_item *item; const char *entry = argv ? 
*argv : NULL; int i, n, prefixlen, nr_exclude = 0; memset(pathspec, 0, sizeof(*pathspec)); if (flags & PATHSPEC_MAXDEPTH_VALID) pathspec->magic |= PATHSPEC_MAXDEPTH; /* No arguments, no prefix -> no pathspec */ if (!entry && !prefix) return; if ((flags & PATHSPEC_PREFER_CWD) && (flags & PATHSPEC_PREFER_FULL)) die("BUG: PATHSPEC_PREFER_CWD and PATHSPEC_PREFER_FULL are incompatible"); /* No arguments with prefix -> prefix pathspec */ if (!entry) { static const char *raw[2]; if (flags & PATHSPEC_PREFER_FULL) return; if (!(flags & PATHSPEC_PREFER_CWD)) die("BUG: PATHSPEC_PREFER_CWD requires arguments"); pathspec->items = item = xcalloc(1, sizeof(*item)); item->match = prefix; item->original = prefix; item->nowildcard_len = item->len = strlen(prefix); item->prefix = item->len; raw[0] = prefix; raw[1] = NULL; pathspec->nr = 1; pathspec->_raw = raw; return; } n = 0; while (argv[n]) n++; pathspec->nr = n; pathspec->items = item = xmalloc(sizeof(*item) * n); pathspec->_raw = argv; prefixlen = prefix ? strlen(prefix) : 0; for (i = 0; i < n; i++) { unsigned short_magic; entry = argv[i]; item[i].magic = prefix_pathspec(item + i, &short_magic, argv + i, flags, prefix, prefixlen, entry); if ((flags & PATHSPEC_LITERAL_PATH) && !(magic_mask & PATHSPEC_LITERAL)) item[i].magic |= PATHSPEC_LITERAL; if (item[i].magic & PATHSPEC_EXCLUDE) nr_exclude++; if (item[i].magic & magic_mask) unsupported_magic(entry, item[i].magic & magic_mask, short_magic); if ((flags & PATHSPEC_SYMLINK_LEADING_PATH) && has_symlink_leading_path(item[i].match, item[i].len)) { die(_("pathspec '%s' is beyond a symbolic link"), entry); } if (item[i].nowildcard_len < item[i].len) pathspec->has_wildcard = 1; pathspec->magic |= item[i].magic; } if (nr_exclude == n) die(_("There is nothing to exclude from by :(exclude) patterns.\n" "Perhaps you forgot to add either ':/' or '.' 
?")); if (pathspec->magic & PATHSPEC_MAXDEPTH) { if (flags & PATHSPEC_KEEP_ORDER) die("BUG: PATHSPEC_MAXDEPTH_VALID and PATHSPEC_KEEP_ORDER are incompatible"); qsort(pathspec->items, pathspec->nr, sizeof(struct pathspec_item), pathspec_item_cmp); } } /* * N.B. get_pathspec() is deprecated in favor of the "struct pathspec" * based interface - see pathspec.c:parse_pathspec(). * * Arguments: * - prefix - a path relative to the root of the working tree * - pathspec - a list of paths underneath the prefix path * * Iterates over pathspec, prepending each path with prefix, * and return the resulting list. * * If pathspec is empty, return a singleton list containing prefix. * * If pathspec and prefix are both empty, return an empty list. * * This is typically used by built-in commands such as add.c, in order * to normalize argv arguments provided to the built-in into a list of * paths to process, all relative to the root of the working tree. */ const char **get_pathspec(const char *prefix, const char **pathspec) { struct pathspec ps; parse_pathspec(&ps, PATHSPEC_ALL_MAGIC & ~(PATHSPEC_FROMTOP | PATHSPEC_LITERAL), PATHSPEC_PREFER_CWD, prefix, pathspec); return ps._raw; } void copy_pathspec(struct pathspec *dst, const struct pathspec *src) { *dst = *src; dst->items = xmalloc(sizeof(struct pathspec_item) * dst->nr); memcpy(dst->items, src->items, sizeof(struct pathspec_item) * dst->nr); } void free_pathspec(struct pathspec *pathspec) { free(pathspec->items); pathspec->items = NULL; } cgit-0.11.2/git/test-path-utils.c0000644000175000017500000000704412476431550017003 0ustar formorerformorer#include "cache.h" #include "string-list.h" /* * A "string_list_each_func_t" function that normalizes an entry from * GIT_CEILING_DIRECTORIES. If the path is unusable for some reason, * die with an explanation. 
*/ static int normalize_ceiling_entry(struct string_list_item *item, void *unused) { const char *ceil = item->string; int len = strlen(ceil); char buf[PATH_MAX+1]; if (len == 0) die("Empty path is not supported"); if (len > PATH_MAX) die("Path \"%s\" is too long", ceil); if (!is_absolute_path(ceil)) die("Path \"%s\" is not absolute", ceil); if (normalize_path_copy(buf, ceil) < 0) die("Path \"%s\" could not be normalized", ceil); len = strlen(buf); if (len > 1 && buf[len-1] == '/') die("Normalized path \"%s\" ended with slash", buf); free(item->string); item->string = xstrdup(buf); return 1; } static void normalize_argv_string(const char **var, const char *input) { if (!strcmp(input, "")) *var = NULL; else if (!strcmp(input, "")) *var = ""; else *var = input; if (*var && (**var == '<' || **var == '(')) die("Bad value: %s\n", input); } int main(int argc, char **argv) { if (argc == 3 && !strcmp(argv[1], "normalize_path_copy")) { char *buf = xmalloc(PATH_MAX + 1); int rv = normalize_path_copy(buf, argv[2]); if (rv) buf = "++failed++"; puts(buf); return 0; } if (argc >= 2 && !strcmp(argv[1], "real_path")) { while (argc > 2) { puts(real_path(argv[2])); argc--; argv++; } return 0; } if (argc >= 2 && !strcmp(argv[1], "absolute_path")) { while (argc > 2) { puts(absolute_path(argv[2])); argc--; argv++; } return 0; } if (argc == 4 && !strcmp(argv[1], "longest_ancestor_length")) { int len; struct string_list ceiling_dirs = STRING_LIST_INIT_DUP; char *path = xstrdup(argv[2]); /* * We have to normalize the arguments because under * Windows, bash mangles arguments that look like * absolute POSIX paths or colon-separate lists of * absolute POSIX paths into DOS paths (e.g., * "/foo:/foo/bar" might be converted to * "D:\Src\msysgit\foo;D:\Src\msysgit\foo\bar"), * whereas longest_ancestor_length() requires paths * that use forward slashes. 
*/ if (normalize_path_copy(path, path)) die("Path \"%s\" could not be normalized", argv[2]); string_list_split(&ceiling_dirs, argv[3], PATH_SEP, -1); filter_string_list(&ceiling_dirs, 0, normalize_ceiling_entry, NULL); len = longest_ancestor_length(path, &ceiling_dirs); string_list_clear(&ceiling_dirs, 0); free(path); printf("%d\n", len); return 0; } if (argc >= 4 && !strcmp(argv[1], "prefix_path")) { char *prefix = argv[2]; int prefix_len = strlen(prefix); int nongit_ok; setup_git_directory_gently(&nongit_ok); while (argc > 3) { puts(prefix_path(prefix, prefix_len, argv[3])); argc--; argv++; } return 0; } if (argc == 4 && !strcmp(argv[1], "strip_path_suffix")) { char *prefix = strip_path_suffix(argv[2], argv[3]); printf("%s\n", prefix ? prefix : "(null)"); return 0; } if (argc == 3 && !strcmp(argv[1], "print_path")) { puts(argv[2]); return 0; } if (argc == 4 && !strcmp(argv[1], "relative_path")) { struct strbuf sb = STRBUF_INIT; const char *in, *prefix, *rel; normalize_argv_string(&in, argv[2]); normalize_argv_string(&prefix, argv[3]); rel = relative_path(in, prefix, &sb); if (!rel) puts("(null)"); else puts(strlen(rel) > 0 ? rel : "(empty)"); strbuf_release(&sb); return 0; } fprintf(stderr, "%s: unknown function name: %s\n", argv[0], argv[1] ? 
argv[1] : "(there was none)"); return 1; } cgit-0.11.2/git/diffcore-order.c0000644000175000017500000000467412476431550016634 0ustar formorerformorer/* * Copyright (C) 2005 Junio C Hamano */ #include "cache.h" #include "diff.h" #include "diffcore.h" static char **order; static int order_cnt; static void prepare_order(const char *orderfile) { int cnt, pass; struct strbuf sb = STRBUF_INIT; void *map; char *cp, *endp; ssize_t sz; if (order) return; sz = strbuf_read_file(&sb, orderfile, 0); if (sz < 0) die_errno(_("failed to read orderfile '%s'"), orderfile); map = strbuf_detach(&sb, NULL); endp = (char *) map + sz; for (pass = 0; pass < 2; pass++) { cnt = 0; cp = map; while (cp < endp) { char *ep; for (ep = cp; ep < endp && *ep != '\n'; ep++) ; /* cp to ep has one line */ if (*cp == '\n' || *cp == '#') ; /* comment */ else if (pass == 0) cnt++; else { if (*ep == '\n') { *ep = 0; order[cnt] = cp; } else { order[cnt] = xmemdupz(cp, ep - cp); } cnt++; } if (ep < endp) ep++; cp = ep; } if (pass == 0) { order_cnt = cnt; order = xmalloc(sizeof(*order) * cnt); } } } static int match_order(const char *path) { int i; static struct strbuf p = STRBUF_INIT; for (i = 0; i < order_cnt; i++) { strbuf_reset(&p); strbuf_addstr(&p, path); while (p.buf[0]) { char *cp; if (!wildmatch(order[i], p.buf, 0, NULL)) return i; cp = strrchr(p.buf, '/'); if (!cp) break; *cp = 0; } } return order_cnt; } static int compare_objs_order(const void *a_, const void *b_) { struct obj_order const *a, *b; a = (struct obj_order const *)a_; b = (struct obj_order const *)b_; if (a->order != b->order) return a->order - b->order; return a->orig_order - b->orig_order; } void order_objects(const char *orderfile, obj_path_fn_t obj_path, struct obj_order *objs, int nr) { int i; if (!nr) return; prepare_order(orderfile); for (i = 0; i < nr; i++) { objs[i].orig_order = i; objs[i].order = match_order(obj_path(objs[i].obj)); } qsort(objs, nr, sizeof(*objs), compare_objs_order); } static const char *pair_pathtwo(void 
*obj) { struct diff_filepair *pair = (struct diff_filepair *)obj; return pair->two->path; } void diffcore_order(const char *orderfile) { struct diff_queue_struct *q = &diff_queued_diff; struct obj_order *o; int i; if (!q->nr) return; o = xmalloc(sizeof(*o) * q->nr); for (i = 0; i < q->nr; i++) o[i].obj = q->queue[i]; order_objects(orderfile, pair_pathtwo, o, q->nr); for (i = 0; i < q->nr; i++) q->queue[i] = o[i].obj; free(o); return; } cgit-0.11.2/git/archive.c0000644000175000017500000003515612476431550015362 0ustar formorerformorer#include "cache.h" #include "commit.h" #include "tree-walk.h" #include "attr.h" #include "archive.h" #include "parse-options.h" #include "unpack-trees.h" #include "dir.h" static char const * const archive_usage[] = { N_("git archive [options] [...]"), N_("git archive --list"), N_("git archive --remote [--exec ] [options] [...]"), N_("git archive --remote [--exec ] --list"), NULL }; static const struct archiver **archivers; static int nr_archivers; static int alloc_archivers; static int remote_allow_unreachable; void register_archiver(struct archiver *ar) { ALLOC_GROW(archivers, nr_archivers + 1, alloc_archivers); archivers[nr_archivers++] = ar; } static void format_subst(const struct commit *commit, const char *src, size_t len, struct strbuf *buf) { char *to_free = NULL; struct strbuf fmt = STRBUF_INIT; struct pretty_print_context ctx = {0}; ctx.date_mode = DATE_NORMAL; ctx.abbrev = DEFAULT_ABBREV; if (src == buf->buf) to_free = strbuf_detach(buf, NULL); for (;;) { const char *b, *c; b = memmem(src, len, "$Format:", 8); if (!b) break; c = memchr(b + 8, '$', (src + len) - b - 8); if (!c) break; strbuf_reset(&fmt); strbuf_add(&fmt, b + 8, c - b - 8); strbuf_add(buf, src, b - src); format_commit_message(commit, fmt.buf, buf, &ctx); len -= c + 1 - src; src = c + 1; } strbuf_add(buf, src, len); strbuf_release(&fmt); free(to_free); } void *sha1_file_to_archive(const struct archiver_args *args, const char *path, const unsigned char *sha1, 
unsigned int mode, enum object_type *type, unsigned long *sizep) { void *buffer; const struct commit *commit = args->convert ? args->commit : NULL; path += args->baselen; buffer = read_sha1_file(sha1, type, sizep); if (buffer && S_ISREG(mode)) { struct strbuf buf = STRBUF_INIT; size_t size = 0; strbuf_attach(&buf, buffer, *sizep, *sizep + 1); convert_to_working_tree(path, buf.buf, buf.len, &buf); if (commit) format_subst(commit, buf.buf, buf.len, &buf); buffer = strbuf_detach(&buf, &size); *sizep = size; } return buffer; } static void setup_archive_check(struct git_attr_check *check) { static struct git_attr *attr_export_ignore; static struct git_attr *attr_export_subst; if (!attr_export_ignore) { attr_export_ignore = git_attr("export-ignore"); attr_export_subst = git_attr("export-subst"); } check[0].attr = attr_export_ignore; check[1].attr = attr_export_subst; } struct directory { struct directory *up; unsigned char sha1[20]; int baselen, len; unsigned mode; int stage; char path[FLEX_ARRAY]; }; struct archiver_context { struct archiver_args *args; write_archive_entry_fn_t write_entry; struct directory *bottom; }; static int write_archive_entry(const unsigned char *sha1, const char *base, int baselen, const char *filename, unsigned mode, int stage, void *context) { static struct strbuf path = STRBUF_INIT; struct archiver_context *c = context; struct archiver_args *args = c->args; write_archive_entry_fn_t write_entry = c->write_entry; struct git_attr_check check[2]; const char *path_without_prefix; int err; args->convert = 0; strbuf_reset(&path); strbuf_grow(&path, PATH_MAX); strbuf_add(&path, args->base, args->baselen); strbuf_add(&path, base, baselen); strbuf_addstr(&path, filename); if (S_ISDIR(mode) || S_ISGITLINK(mode)) strbuf_addch(&path, '/'); path_without_prefix = path.buf + args->baselen; setup_archive_check(check); if (!git_check_attr(path_without_prefix, ARRAY_SIZE(check), check)) { if (ATTR_TRUE(check[0].value)) return 0; args->convert = 
ATTR_TRUE(check[1].value); } if (S_ISDIR(mode) || S_ISGITLINK(mode)) { if (args->verbose) fprintf(stderr, "%.*s\n", (int)path.len, path.buf); err = write_entry(args, sha1, path.buf, path.len, mode); if (err) return err; return (S_ISDIR(mode) ? READ_TREE_RECURSIVE : 0); } if (args->verbose) fprintf(stderr, "%.*s\n", (int)path.len, path.buf); return write_entry(args, sha1, path.buf, path.len, mode); } static int write_archive_entry_buf(const unsigned char *sha1, struct strbuf *base, const char *filename, unsigned mode, int stage, void *context) { return write_archive_entry(sha1, base->buf, base->len, filename, mode, stage, context); } static void queue_directory(const unsigned char *sha1, struct strbuf *base, const char *filename, unsigned mode, int stage, struct archiver_context *c) { struct directory *d; d = xmallocz(sizeof(*d) + base->len + 1 + strlen(filename)); d->up = c->bottom; d->baselen = base->len; d->mode = mode; d->stage = stage; c->bottom = d; d->len = sprintf(d->path, "%.*s%s/", (int)base->len, base->buf, filename); hashcpy(d->sha1, sha1); } static int write_directory(struct archiver_context *c) { struct directory *d = c->bottom; int ret; if (!d) return 0; c->bottom = d->up; d->path[d->len - 1] = '\0'; /* no trailing slash */ ret = write_directory(c) || write_archive_entry(d->sha1, d->path, d->baselen, d->path + d->baselen, d->mode, d->stage, c) != READ_TREE_RECURSIVE; free(d); return ret ? 
-1 : 0; } static int queue_or_write_archive_entry(const unsigned char *sha1, struct strbuf *base, const char *filename, unsigned mode, int stage, void *context) { struct archiver_context *c = context; while (c->bottom && !(base->len >= c->bottom->len && !strncmp(base->buf, c->bottom->path, c->bottom->len))) { struct directory *next = c->bottom->up; free(c->bottom); c->bottom = next; } if (S_ISDIR(mode)) { queue_directory(sha1, base, filename, mode, stage, c); return READ_TREE_RECURSIVE; } if (write_directory(c)) return -1; return write_archive_entry(sha1, base->buf, base->len, filename, mode, stage, context); } int write_archive_entries(struct archiver_args *args, write_archive_entry_fn_t write_entry) { struct archiver_context context; struct unpack_trees_options opts; struct tree_desc t; int err; if (args->baselen > 0 && args->base[args->baselen - 1] == '/') { size_t len = args->baselen; while (len > 1 && args->base[len - 2] == '/') len--; if (args->verbose) fprintf(stderr, "%.*s\n", (int)len, args->base); err = write_entry(args, args->tree->object.sha1, args->base, len, 040777); if (err) return err; } memset(&context, 0, sizeof(context)); context.args = args; context.write_entry = write_entry; /* * Setup index and instruct attr to read index only */ if (!args->worktree_attributes) { memset(&opts, 0, sizeof(opts)); opts.index_only = 1; opts.head_idx = -1; opts.src_index = &the_index; opts.dst_index = &the_index; opts.fn = oneway_merge; init_tree_desc(&t, args->tree->buffer, args->tree->size); if (unpack_trees(1, &t, &opts)) return -1; git_attr_set_direction(GIT_ATTR_INDEX, &the_index); } err = read_tree_recursive(args->tree, "", 0, 0, &args->pathspec, args->pathspec.has_wildcard ? 
queue_or_write_archive_entry : write_archive_entry_buf, &context); if (err == READ_TREE_RECURSIVE) err = 0; while (context.bottom) { struct directory *next = context.bottom->up; free(context.bottom); context.bottom = next; } return err; } static const struct archiver *lookup_archiver(const char *name) { int i; if (!name) return NULL; for (i = 0; i < nr_archivers; i++) { if (!strcmp(name, archivers[i]->name)) return archivers[i]; } return NULL; } static int reject_entry(const unsigned char *sha1, struct strbuf *base, const char *filename, unsigned mode, int stage, void *context) { int ret = -1; if (S_ISDIR(mode)) { struct strbuf sb = STRBUF_INIT; strbuf_addbuf(&sb, base); strbuf_addstr(&sb, filename); if (!match_pathspec(context, sb.buf, sb.len, 0, NULL, 1)) ret = READ_TREE_RECURSIVE; strbuf_release(&sb); } return ret; } static int path_exists(struct tree *tree, const char *path) { const char *paths[] = { path, NULL }; struct pathspec pathspec; int ret; parse_pathspec(&pathspec, 0, 0, "", paths); pathspec.recursive = 1; ret = read_tree_recursive(tree, "", 0, 0, &pathspec, reject_entry, &pathspec); free_pathspec(&pathspec); return ret != 0; } static void parse_pathspec_arg(const char **pathspec, struct archiver_args *ar_args) { /* * must be consistent with parse_pathspec in path_exists() * Also if pathspec patterns are dependent, we're in big * trouble as we test each one separately */ parse_pathspec(&ar_args->pathspec, 0, PATHSPEC_PREFER_FULL, "", pathspec); ar_args->pathspec.recursive = 1; if (pathspec) { while (*pathspec) { if (**pathspec && !path_exists(ar_args->tree, *pathspec)) die(_("pathspec '%s' did not match any files"), *pathspec); pathspec++; } } } static void parse_treeish_arg(const char **argv, struct archiver_args *ar_args, const char *prefix, int remote) { const char *name = argv[0]; const unsigned char *commit_sha1; time_t archive_time; struct tree *tree; const struct commit *commit; unsigned char sha1[20]; /* Remotes are only allowed to fetch actual 
refs */ if (remote && !remote_allow_unreachable) { char *ref = NULL; const char *colon = strchrnul(name, ':'); int refnamelen = colon - name; if (!dwim_ref(name, refnamelen, sha1, &ref)) die("no such ref: %.*s", refnamelen, name); free(ref); } if (get_sha1(name, sha1)) die("Not a valid object name"); commit = lookup_commit_reference_gently(sha1, 1); if (commit) { commit_sha1 = commit->object.sha1; archive_time = commit->date; } else { commit_sha1 = NULL; archive_time = time(NULL); } tree = parse_tree_indirect(sha1); if (tree == NULL) die("not a tree object"); if (prefix) { unsigned char tree_sha1[20]; unsigned int mode; int err; err = get_tree_entry(tree->object.sha1, prefix, tree_sha1, &mode); if (err || !S_ISDIR(mode)) die("current working directory is untracked"); tree = parse_tree_indirect(tree_sha1); } ar_args->tree = tree; ar_args->commit_sha1 = commit_sha1; ar_args->commit = commit; ar_args->time = archive_time; } #define OPT__COMPR(s, v, h, p) \ { OPTION_SET_INT, (s), NULL, (v), NULL, (h), \ PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, (p) } #define OPT__COMPR_HIDDEN(s, v, p) \ { OPTION_SET_INT, (s), NULL, (v), NULL, "", \ PARSE_OPT_NOARG | PARSE_OPT_NONEG | PARSE_OPT_HIDDEN, NULL, (p) } static int parse_archive_args(int argc, const char **argv, const struct archiver **ar, struct archiver_args *args, const char *name_hint, int is_remote) { const char *format = NULL; const char *base = NULL; const char *remote = NULL; const char *exec = NULL; const char *output = NULL; int compression_level = -1; int verbose = 0; int i; int list = 0; int worktree_attributes = 0; struct option opts[] = { OPT_GROUP(""), OPT_STRING(0, "format", &format, N_("fmt"), N_("archive format")), OPT_STRING(0, "prefix", &base, N_("prefix"), N_("prepend prefix to each pathname in the archive")), OPT_STRING('o', "output", &output, N_("file"), N_("write the archive to this file")), OPT_BOOL(0, "worktree-attributes", &worktree_attributes, N_("read .gitattributes in working directory")), 
OPT__VERBOSE(&verbose, N_("report archived files on stderr")), OPT__COMPR('0', &compression_level, N_("store only"), 0), OPT__COMPR('1', &compression_level, N_("compress faster"), 1), OPT__COMPR_HIDDEN('2', &compression_level, 2), OPT__COMPR_HIDDEN('3', &compression_level, 3), OPT__COMPR_HIDDEN('4', &compression_level, 4), OPT__COMPR_HIDDEN('5', &compression_level, 5), OPT__COMPR_HIDDEN('6', &compression_level, 6), OPT__COMPR_HIDDEN('7', &compression_level, 7), OPT__COMPR_HIDDEN('8', &compression_level, 8), OPT__COMPR('9', &compression_level, N_("compress better"), 9), OPT_GROUP(""), OPT_BOOL('l', "list", &list, N_("list supported archive formats")), OPT_GROUP(""), OPT_STRING(0, "remote", &remote, N_("repo"), N_("retrieve the archive from remote repository ")), OPT_STRING(0, "exec", &exec, N_("command"), N_("path to the remote git-upload-archive command")), OPT_END() }; argc = parse_options(argc, argv, NULL, opts, archive_usage, 0); if (remote) die("Unexpected option --remote"); if (exec) die("Option --exec can only be used together with --remote"); if (output) die("Unexpected option --output"); if (!base) base = ""; if (list) { for (i = 0; i < nr_archivers; i++) if (!is_remote || archivers[i]->flags & ARCHIVER_REMOTE) printf("%s\n", archivers[i]->name); exit(0); } if (!format && name_hint) format = archive_format_from_filename(name_hint); if (!format) format = "tar"; /* We need at least one parameter -- tree-ish */ if (argc < 1) usage_with_options(archive_usage, opts); *ar = lookup_archiver(format); if (!*ar || (is_remote && !((*ar)->flags & ARCHIVER_REMOTE))) die("Unknown archive format '%s'", format); args->compression_level = Z_DEFAULT_COMPRESSION; if (compression_level != -1) { if ((*ar)->flags & ARCHIVER_WANT_COMPRESSION_LEVELS) args->compression_level = compression_level; else { die("Argument not supported for format '%s': -%d", format, compression_level); } } args->verbose = verbose; args->base = base; args->baselen = strlen(base); args->worktree_attributes 
= worktree_attributes; return argc; } int write_archive(int argc, const char **argv, const char *prefix, int setup_prefix, const char *name_hint, int remote) { int nongit = 0; const struct archiver *ar = NULL; struct archiver_args args; if (setup_prefix && prefix == NULL) prefix = setup_git_directory_gently(&nongit); git_config_get_bool("uploadarchive.allowunreachable", &remote_allow_unreachable); git_config(git_default_config, NULL); init_tar_archiver(); init_zip_archiver(); argc = parse_archive_args(argc, argv, &ar, &args, name_hint, remote); if (nongit) { /* * We know this will die() with an error, so we could just * die ourselves; but its error message will be more specific * than what we could write here. */ setup_git_directory(); } parse_treeish_arg(argv, &args, prefix, remote); parse_pathspec_arg(argv + 1, &args); return ar->write_archive(ar, &args); } static int match_extension(const char *filename, const char *ext) { int prefixlen = strlen(filename) - strlen(ext); /* * We need 1 character for the '.', and 1 character to ensure that the * prefix is non-empty (k.e., we don't match .tar.gz with no actual * filename). 
*/ if (prefixlen < 2 || filename[prefixlen - 1] != '.') return 0; return !strcmp(filename + prefixlen, ext); } const char *archive_format_from_filename(const char *filename) { int i; for (i = 0; i < nr_archivers; i++) if (match_extension(filename, archivers[i]->name)) return archivers[i]->name; return NULL; } cgit-0.11.2/git/credential-store.c0000644000175000017500000000775212476431550017206 0ustar formorerformorer#include "cache.h" #include "lockfile.h" #include "credential.h" #include "string-list.h" #include "parse-options.h" static struct lock_file credential_lock; static void parse_credential_file(const char *fn, struct credential *c, void (*match_cb)(struct credential *), void (*other_cb)(struct strbuf *)) { FILE *fh; struct strbuf line = STRBUF_INIT; struct credential entry = CREDENTIAL_INIT; fh = fopen(fn, "r"); if (!fh) { if (errno != ENOENT) die_errno("unable to open %s", fn); return; } while (strbuf_getline(&line, fh, '\n') != EOF) { credential_from_url(&entry, line.buf); if (entry.username && entry.password && credential_match(c, &entry)) { if (match_cb) { match_cb(&entry); break; } } else if (other_cb) other_cb(&line); } credential_clear(&entry); strbuf_release(&line); fclose(fh); } static void print_entry(struct credential *c) { printf("username=%s\n", c->username); printf("password=%s\n", c->password); } static void print_line(struct strbuf *buf) { strbuf_addch(buf, '\n'); write_or_die(credential_lock.fd, buf->buf, buf->len); } static void rewrite_credential_file(const char *fn, struct credential *c, struct strbuf *extra) { if (hold_lock_file_for_update(&credential_lock, fn, 0) < 0) die_errno("unable to get credential storage lock"); if (extra) print_line(extra); parse_credential_file(fn, c, NULL, print_line); if (commit_lock_file(&credential_lock) < 0) die_errno("unable to commit credential store"); } static void store_credential(const char *fn, struct credential *c) { struct strbuf buf = STRBUF_INIT; /* * Sanity check that what we are storing is 
actually sensible. * In particular, we can't make a URL without a protocol field. * Without either a host or pathname (depending on the scheme), * we have no primary key. And without a username and password, * we are not actually storing a credential. */ if (!c->protocol || !(c->host || c->path) || !c->username || !c->password) return; strbuf_addf(&buf, "%s://", c->protocol); strbuf_addstr_urlencode(&buf, c->username, 1); strbuf_addch(&buf, ':'); strbuf_addstr_urlencode(&buf, c->password, 1); strbuf_addch(&buf, '@'); if (c->host) strbuf_addstr_urlencode(&buf, c->host, 1); if (c->path) { strbuf_addch(&buf, '/'); strbuf_addstr_urlencode(&buf, c->path, 0); } rewrite_credential_file(fn, c, &buf); strbuf_release(&buf); } static void remove_credential(const char *fn, struct credential *c) { /* * Sanity check that we actually have something to match * against. The input we get is a restrictive pattern, * so technically a blank credential means "erase everything". * But it is too easy to accidentally send this, since it is equivalent * to empty input. So explicitly disallow it, and require that the * pattern have some actual content to match. 
*/ if (c->protocol || c->host || c->path || c->username) rewrite_credential_file(fn, c, NULL); } static int lookup_credential(const char *fn, struct credential *c) { parse_credential_file(fn, c, print_entry, NULL); return c->username && c->password; } int main(int argc, char **argv) { const char * const usage[] = { "git credential-store [options] ", NULL }; const char *op; struct credential c = CREDENTIAL_INIT; char *file = NULL; struct option options[] = { OPT_STRING(0, "file", &file, "path", "fetch and store credentials in "), OPT_END() }; umask(077); argc = parse_options(argc, (const char **)argv, NULL, options, usage, 0); if (argc != 1) usage_with_options(usage, options); op = argv[0]; if (!file) file = expand_user_path("~/.git-credentials"); if (!file) die("unable to set up default path; use --file"); if (credential_read(&c, stdin) < 0) die("unable to read credential"); if (!strcmp(op, "get")) lookup_credential(file, &c); else if (!strcmp(op, "erase")) remove_credential(file, &c); else if (!strcmp(op, "store")) store_credential(file, &c); else ; /* Ignore unknown operation. 
*/ return 0; } cgit-0.11.2/git/sha1-array.h0000644000175000017500000000110712476431550015703 0ustar formorerformorer#ifndef SHA1_ARRAY_H #define SHA1_ARRAY_H struct sha1_array { unsigned char (*sha1)[20]; int nr; int alloc; int sorted; }; #define SHA1_ARRAY_INIT { NULL, 0, 0, 0 } void sha1_array_append(struct sha1_array *array, const unsigned char *sha1); int sha1_array_lookup(struct sha1_array *array, const unsigned char *sha1); void sha1_array_clear(struct sha1_array *array); typedef void (*for_each_sha1_fn)(const unsigned char sha1[20], void *data); void sha1_array_for_each_unique(struct sha1_array *array, for_each_sha1_fn fn, void *data); #endif /* SHA1_ARRAY_H */ cgit-0.11.2/git/Documentation/0000755000175000017500000000000012476431550016374 5ustar formorerformorercgit-0.11.2/git/Documentation/git-clone.txt0000644000175000017500000002314012476431550021016 0ustar formorerformorergit-clone(1) ============ NAME ---- git-clone - Clone a repository into a new directory SYNOPSIS -------- [verse] 'git clone' [--template=] [-l] [-s] [--no-hardlinks] [-q] [-n] [--bare] [--mirror] [-o ] [-b ] [-u ] [--reference ] [--dissociate] [--separate-git-dir ] [--depth ] [--[no-]single-branch] [--recursive | --recurse-submodules] [--] [] DESCRIPTION ----------- Clones a repository into a newly created directory, creates remote-tracking branches for each branch in the cloned repository (visible using `git branch -r`), and creates and checks out an initial branch that is forked from the cloned repository's currently active branch. After the clone, a plain `git fetch` without arguments will update all the remote-tracking branches, and a `git pull` without arguments will in addition merge the remote master branch into the current master branch, if any (this is untrue when "--single-branch" is given; see below). 
This default configuration is achieved by creating references to the remote branch heads under `refs/remotes/origin` and by initializing `remote.origin.url` and `remote.origin.fetch` configuration variables. OPTIONS ------- --local:: -l:: When the repository to clone from is on a local machine, this flag bypasses the normal "Git aware" transport mechanism and clones the repository by making a copy of HEAD and everything under objects and refs directories. The files under `.git/objects/` directory are hardlinked to save space when possible. + If the repository is specified as a local path (e.g., `/path/to/repo`), this is the default, and --local is essentially a no-op. If the repository is specified as a URL, then this flag is ignored (and we never use the local optimizations). Specifying `--no-local` will override the default when `/path/to/repo` is given, using the regular Git transport instead. --no-hardlinks:: Force the cloning process from a repository on a local filesystem to copy the files under the `.git/objects` directory instead of using hardlinks. This may be desirable if you are trying to make a back-up of your repository. --shared:: -s:: When the repository to clone is on the local machine, instead of using hard links, automatically setup `.git/objects/info/alternates` to share the objects with the source repository. The resulting repository starts out without any object of its own. + *NOTE*: this is a possibly dangerous operation; do *not* use it unless you understand what it does. If you clone your repository using this option and then delete branches (or use any other Git command that makes any existing commit unreferenced) in the source repository, some objects may become unreferenced (or dangling). These objects may be removed by normal Git operations (such as `git commit`) which automatically call `git gc --auto`. (See linkgit:git-gc[1].) 
If these objects are removed and were referenced by the cloned repository, then the cloned repository will become corrupt. + Note that running `git repack` without the `-l` option in a repository cloned with `-s` will copy objects from the source repository into a pack in the cloned repository, removing the disk space savings of `clone -s`. It is safe, however, to run `git gc`, which uses the `-l` option by default. + If you want to break the dependency of a repository cloned with `-s` on its source repository, you can simply run `git repack -a` to copy all objects from the source repository into a pack in the cloned repository. --reference :: If the reference repository is on the local machine, automatically setup `.git/objects/info/alternates` to obtain objects from the reference repository. Using an already existing repository as an alternate will require fewer objects to be copied from the repository being cloned, reducing network and local storage costs. + *NOTE*: see the NOTE for the `--shared` option, and also the `--dissociate` option. --dissociate:: Borrow the objects from reference repositories specified with the `--reference` options only to reduce network transfer and stop borrowing from them after a clone is made by making necessary local copies of borrowed objects. --quiet:: -q:: Operate quietly. Progress is not reported to the standard error stream. This flag is also passed to the `rsync' command when given. --verbose:: -v:: Run verbosely. Does not affect the reporting of progress status to the standard error stream. --progress:: Progress status is reported on the standard error stream by default when it is attached to a terminal, unless -q is specified. This flag forces progress status even if the standard error stream is not directed to a terminal. --no-checkout:: -n:: No checkout of HEAD is performed after the clone is complete. --bare:: Make a 'bare' Git repository. 
That is, instead of creating `` and placing the administrative files in `/.git`, make the `` itself the `$GIT_DIR`. This obviously implies the `-n` because there is nowhere to check out the working tree. Also the branch heads at the remote are copied directly to corresponding local branch heads, without mapping them to `refs/remotes/origin/`. When this option is used, neither remote-tracking branches nor the related configuration variables are created. --mirror:: Set up a mirror of the source repository. This implies `--bare`. Compared to `--bare`, `--mirror` not only maps local branches of the source to local branches of the target, it maps all refs (including remote-tracking branches, notes etc.) and sets up a refspec configuration such that all these refs are overwritten by a `git remote update` in the target repository. --origin :: -o :: Instead of using the remote name `origin` to keep track of the upstream repository, use ``. --branch :: -b :: Instead of pointing the newly created HEAD to the branch pointed to by the cloned repository's HEAD, point to `` branch instead. In a non-bare repository, this is the branch that will be checked out. `--branch` can also take tags and detaches the HEAD at that commit in the resulting repository. --upload-pack :: -u :: When given, and the repository to clone from is accessed via ssh, this specifies a non-default path for the command run on the other end. --template=:: Specify the directory from which templates will be used; (See the "TEMPLATE DIRECTORY" section of linkgit:git-init[1].) --config =:: -c =:: Set a configuration variable in the newly-created repository; this takes effect immediately after the repository is initialized, but before the remote history is fetched or any files checked out. The key is in the same format as expected by linkgit:git-config[1] (e.g., `core.eol=true`). If multiple values are given for the same key, each value will be written to the config file. 
This makes it safe, for example, to add additional fetch refspecs to the origin remote. --depth :: Create a 'shallow' clone with a history truncated to the specified number of revisions. --[no-]single-branch:: Clone only the history leading to the tip of a single branch, either specified by the `--branch` option or the primary branch remote's `HEAD` points at. When creating a shallow clone with the `--depth` option, this is the default, unless `--no-single-branch` is given to fetch the histories near the tips of all branches. Further fetches into the resulting repository will only update the remote-tracking branch for the branch this option was used for the initial cloning. If the HEAD at the remote did not point at any branch when `--single-branch` clone was made, no remote-tracking branch is created. --recursive:: --recurse-submodules:: After the clone is created, initialize all submodules within, using their default settings. This is equivalent to running `git submodule update --init --recursive` immediately after the clone is finished. This option is ignored if the cloned repository does not have a worktree/checkout (i.e. if any of `--no-checkout`/`-n`, `--bare`, or `--mirror` is given) --separate-git-dir=:: Instead of placing the cloned repository where it is supposed to be, place the cloned repository at the specified directory, then make a filesystem-agnostic Git symbolic link to there. The result is Git repository can be separated from working tree. :: The (possibly remote) repository to clone from. See the <> section below for more information on specifying repositories. :: The name of a new directory to clone into. The "humanish" part of the source repository is used if no directory is explicitly given (`repo` for `/path/to/repo.git` and `foo` for `host.xz:foo/.git`). Cloning into an existing directory is only allowed if the directory is empty. 
:git-clone: 1 include::urls.txt[] Examples -------- * Clone from upstream: + ------------ $ git clone git://git.kernel.org/pub/scm/.../linux.git my-linux $ cd my-linux $ make ------------ * Make a local clone that borrows from the current directory, without checking things out: + ------------ $ git clone -l -s -n . ../copy $ cd ../copy $ git show-branch ------------ * Clone from upstream while borrowing from an existing local directory: + ------------ $ git clone --reference /git/linux.git \ git://git.kernel.org/pub/scm/.../linux.git \ my-linux $ cd my-linux ------------ * Create a bare repository to publish your changes to the public: + ------------ $ git clone --bare -l /home/proj/.git /pub/scm/proj.git ------------ GIT --- Part of the linkgit:git[1] suite cgit-0.11.2/git/Documentation/git-diff-tree.txt0000644000175000017500000001240612476431550021566 0ustar formorerformorergit-diff-tree(1) ================ NAME ---- git-diff-tree - Compares the content and mode of blobs found via two tree objects SYNOPSIS -------- [verse] 'git diff-tree' [--stdin] [-m] [-s] [-v] [--no-commit-id] [--pretty] [-t] [-r] [-c | --cc] [--root] [] [] [...] DESCRIPTION ----------- Compares the content and mode of the blobs found via two tree objects. If there is only one given, the commit is compared with its parents (see --stdin below). Note that 'git diff-tree' can use the tree encapsulated in a commit object. OPTIONS ------- include::diff-options.txt[] :: The id of a tree object. ...:: If provided, the results are limited to a subset of files matching one of these prefix strings. i.e., file matches `/^||.../` Note that this parameter does not provide any wildcard or regexp features. -r:: recurse into sub-trees -t:: show tree entry itself as well as subtrees. Implies -r. --root:: When '--root' is specified the initial commit will be shown as a big creation event. This is equivalent to a diff against the NULL tree. 
--stdin:: When '--stdin' is specified, the command does not take arguments from the command line. Instead, it reads lines containing either two , one , or a list of from its standard input. (Use a single space as separator.) + When two trees are given, it compares the first tree with the second. When a single commit is given, it compares the commit with its parents. The remaining commits, when given, are used as if they are parents of the first commit. + When comparing two trees, the ID of both trees (separated by a space and terminated by a newline) is printed before the difference. When comparing commits, the ID of the first (or only) commit, followed by a newline, is printed. + The following flags further affect the behavior when comparing commits (but not trees). -m:: By default, 'git diff-tree --stdin' does not show differences for merge commits. With this flag, it shows differences to that commit from all of its parents. See also '-c'. -s:: By default, 'git diff-tree --stdin' shows differences, either in machine-readable form (without '-p') or in patch form (with '-p'). This output can be suppressed. It is only useful with '-v' flag. -v:: This flag causes 'git diff-tree --stdin' to also show the commit message before the differences. include::pretty-options.txt[] --no-commit-id:: 'git diff-tree' outputs a line with the commit ID when applicable. This flag suppressed the commit ID output. -c:: This flag changes the way a merge commit is displayed (which means it is useful only when the command is given one , or '--stdin'). It shows the differences from each of the parents to the merge result simultaneously instead of showing pairwise diff between a parent and the result one at a time (which is what the '-m' option does). Furthermore, it lists only files which were modified from all parents. --cc:: This flag changes the way a merge commit patch is displayed, in a similar way to the '-c' option. 
It implies the '-c' and '-p' options and further compresses the patch output by omitting uninteresting hunks whose the contents in the parents have only two variants and the merge result picks one of them without modification. When all hunks are uninteresting, the commit itself and the commit log message is not shown, just like in any other "empty diff" case. --always:: Show the commit itself and the commit log message even if the diff itself is empty. include::pretty-formats.txt[] Limiting Output --------------- If you're only interested in differences in a subset of files, for example some architecture-specific files, you might do: git diff-tree -r arch/ia64 include/asm-ia64 and it will only show you what changed in those two directories. Or if you are searching for what changed in just `kernel/sched.c`, just do git diff-tree -r kernel/sched.c and it will ignore all differences to other files. The pattern is always the prefix, and is matched exactly. There are no wildcards. Even stricter, it has to match a complete path component. I.e. "foo" does not pick up `foobar.h`. "foo" does match `foo/bar.h` so it can be used to name subdirectories. An example of normal usage is: torvalds@ppc970:~/git> git diff-tree --abbrev 5319e4 :100664 100664 ac348b... a01513... git-fsck-objects.c which tells you that the last commit changed just one file (it's from this one: ----------------------------------------------------------------------------- commit 3c6f7ca19ad4043e9e72fa94106f352897e651a8 tree 5319e4d609cdd282069cc4dce33c1db559539b03 parent b4e628ea30d5ab3606119d2ea5caeab141d38df7 author Linus Torvalds Sat Apr 9 12:02:30 2005 committer Linus Torvalds Sat Apr 9 12:02:30 2005 Make "git-fsck-objects" print out all the root commits it finds. Once I do the reference tracking, I'll also make it print out all the HEAD commits it finds, which is even more interesting. ----------------------------------------------------------------------------- in case you care). 
include::diff-format.txt[] GIT --- Part of the linkgit:git[1] suite cgit-0.11.2/git/Documentation/git-init-db.txt0000644000175000017500000000063112476431550021244 0ustar formorerformorergit-init-db(1) ============== NAME ---- git-init-db - Creates an empty Git repository SYNOPSIS -------- [verse] 'git init-db' [-q | --quiet] [--bare] [--template=] [--separate-git-dir ] [--shared[=]] DESCRIPTION ----------- This is a synonym for linkgit:git-init[1]. Please refer to the documentation of that command. GIT --- Part of the linkgit:git[1] suite cgit-0.11.2/git/Documentation/git-mv.txt0000644000175000017500000000405512476431550020344 0ustar formorerformorergit-mv(1) ========= NAME ---- git-mv - Move or rename a file, a directory, or a symlink SYNOPSIS -------- [verse] 'git mv' ... ... DESCRIPTION ----------- Move or rename a file, directory or symlink. git mv [-v] [-f] [-n] [-k] git mv [-v] [-f] [-n] [-k] ... In the first form, it renames , which must exist and be either a file, symlink or directory, to . In the second form, the last argument has to be an existing directory; the given sources will be moved into this directory. The index is updated after successful completion, but the change must still be committed. OPTIONS ------- -f:: --force:: Force renaming or moving of a file even if the target exists -k:: Skip move or rename actions which would lead to an error condition. An error happens when a source is neither existing nor controlled by Git, or when it would overwrite an existing file unless '-f' is given. -n:: --dry-run:: Do nothing; only show what would happen -v:: --verbose:: Report the names of files as they are moved. SUBMODULES ---------- Moving a submodule using a gitfile (which means they were cloned with a Git version 1.7.8 or newer) will update the gitfile and core.worktree setting to make the submodule work in the new location. 
It also will attempt to update the submodule..path setting in the linkgit:gitmodules[5] file and stage that file (unless -n is used). BUGS ---- Each time a superproject update moves a populated submodule (e.g. when switching between commits before and after the move) a stale submodule checkout will remain in the old location and an empty directory will appear in the new location. To populate the submodule again in the new location the user will have to run "git submodule update" afterwards. Removing the old directory is only safe when it uses a gitfile, as otherwise the history of the submodule will be deleted too. Both steps will be obsolete when recursive submodule update has been implemented. GIT --- Part of the linkgit:git[1] suite cgit-0.11.2/git/Documentation/git-update-ref.txt0000644000175000017500000001245212476431550021756 0ustar formorerformorergit-update-ref(1) ================= NAME ---- git-update-ref - Update the object name stored in a ref safely SYNOPSIS -------- [verse] 'git update-ref' [-m ] (-d [] | [--no-deref] [] | --stdin [-z]) DESCRIPTION ----------- Given two arguments, stores the in the , possibly dereferencing the symbolic refs. E.g. `git update-ref HEAD ` updates the current branch head to the new object. Given three arguments, stores the in the , possibly dereferencing the symbolic refs, after verifying that the current value of the matches . E.g. `git update-ref refs/heads/master ` updates the master branch head to only if its current value is . You can specify 40 "0" or an empty string as to make sure that the ref you are creating does not exist. It also allows a "ref" file to be a symbolic pointer to another ref file by starting with the four-byte header sequence of "ref:". More importantly, it allows the update of a ref file to follow these symbolic pointers, whether they are symlinks or these "regular file symbolic refs". 
It follows *real* symlinks only if they start with "refs/": otherwise it will just try to read them and update them as a regular file (i.e. it will allow the filesystem to follow them, but will overwrite such a symlink to somewhere else with a regular filename). If --no-deref is given, itself is overwritten, rather than the result of following the symbolic pointers. In general, using git update-ref HEAD "$head" should be a _lot_ safer than doing echo "$head" > "$GIT_DIR/HEAD" both from a symlink following standpoint *and* an error checking standpoint. The "refs/" rule for symlinks means that symlinks that point to "outside" the tree are safe: they'll be followed for reading but not for writing (so we'll never write through a ref symlink to some other tree, if you have copied a whole archive by creating a symlink tree). With `-d` flag, it deletes the named after verifying it still contains . With `--stdin`, update-ref reads instructions from standard input and performs all modifications together. Specify commands of the form: update SP SP [SP ] LF create SP SP LF delete SP [SP ] LF verify SP [SP ] LF option SP LF Quote fields containing whitespace as if they were strings in C source code; i.e., surrounded by double-quotes and with backslash escapes. Use 40 "0" characters or the empty string to specify a zero value. To specify a missing value, omit the value and its preceding SP entirely. Alternatively, use `-z` to specify in NUL-terminated format, without quoting: update SP NUL NUL [] NUL create SP NUL NUL delete SP NUL [] NUL verify SP NUL [] NUL option SP NUL In this format, use 40 "0" to specify a zero value, and use the empty string to specify a missing value. In either format, values can be specified in any form that Git recognizes as an object name. Commands in any other format or a repeated produce an error. Command meanings are: update:: Set to after verifying , if given. 
Specify a zero to ensure the ref does not exist after the update and/or a zero to make sure the ref does not exist before the update. create:: Create with after verifying it does not exist. The given may not be zero. delete:: Delete after verifying it exists with , if given. If given, may not be zero. verify:: Verify against but do not change it. If zero or missing, the ref must not exist. option:: Modify behavior of the next command naming a . The only valid option is `no-deref` to avoid dereferencing a symbolic ref. If all s can be locked with matching s simultaneously, all modifications are performed. Otherwise, no modifications are performed. Note that while each individual is updated or deleted atomically, a concurrent reader may still see a subset of the modifications. Logging Updates --------------- If config parameter "core.logAllRefUpdates" is true and the ref is one under "refs/heads/", "refs/remotes/", "refs/notes/", or the symbolic ref HEAD; or the file "$GIT_DIR/logs/" exists then `git update-ref` will append a line to the log file "$GIT_DIR/logs/" (dereferencing all symbolic refs before creating the log name) describing the change in ref value. Log lines are formatted as: . oldsha1 SP newsha1 SP committer LF + Where "oldsha1" is the 40 character hexadecimal value previously stored in , "newsha1" is the 40 character hexadecimal value of and "committer" is the committer's name, email address and date in the standard Git committer ident format. Optionally with -m: . oldsha1 SP newsha1 SP committer TAB message LF + Where all fields are as described above and "message" is the value supplied to the -m option. An update will fail (without changing ) if the current user is unable to create a new log file, append to the existing log file or does not have committer information available. 
GIT --- Part of the linkgit:git[1] suite cgit-0.11.2/git/Documentation/git-tag.txt0000644000175000017500000002456112476431550020501 0ustar formorerformorergit-tag(1) ========== NAME ---- git-tag - Create, list, delete or verify a tag object signed with GPG SYNOPSIS -------- [verse] 'git tag' [-a | -s | -u ] [-f] [-m | -F ] [ | ] 'git tag' -d ... 'git tag' [-n[]] -l [--contains ] [--points-at ] [--column[=] | --no-column] [...] [...] 'git tag' -v ... DESCRIPTION ----------- Add a tag reference in `refs/tags/`, unless `-d/-l/-v` is given to delete, list or verify tags. Unless `-f` is given, the named tag must not yet exist. If one of `-a`, `-s`, or `-u ` is passed, the command creates a 'tag' object, and requires a tag message. Unless `-m ` or `-F ` is given, an editor is started for the user to type in the tag message. If `-m ` or `-F ` is given and `-a`, `-s`, and `-u ` are absent, `-a` is implied. Otherwise just a tag reference for the SHA-1 object name of the commit object is created (i.e. a lightweight tag). A GnuPG signed tag object will be created when `-s` or `-u ` is used. When `-u ` is not used, the committer identity for the current user is used to find the GnuPG key for signing. The configuration variable `gpg.program` is used to specify custom GnuPG binary. Tag objects (created with `-a`, `-s`, or `-u`) are called "annotated" tags; they contain a creation date, the tagger name and e-mail, a tagging message, and an optional GnuPG signature. Whereas a "lightweight" tag is simply a name for an object (usually a commit object). Annotated tags are meant for release while lightweight tags are meant for private or temporary object labels. For this reason, some git commands for naming objects (like `git describe`) will ignore lightweight tags by default. OPTIONS ------- -a:: --annotate:: Make an unsigned, annotated tag object -s:: --sign:: Make a GPG-signed tag, using the default e-mail address's key. 
-u :: --local-user=:: Make a GPG-signed tag, using the given key. -f:: --force:: Replace an existing tag with the given name (instead of failing) -d:: --delete:: Delete existing tags with the given names. -v:: --verify:: Verify the gpg signature of the given tag names. -n:: specifies how many lines from the annotation, if any, are printed when using -l. The default is not to print any annotation lines. If no number is given to `-n`, only the first line is printed. If the tag is not annotated, the commit message is displayed instead. -l :: --list :: List tags with names that match the given pattern (or all if no pattern is given). Running "git tag" without arguments also lists all tags. The pattern is a shell wildcard (i.e., matched using fnmatch(3)). Multiple patterns may be given; if any of them matches, the tag is shown. --sort=:: Sort in a specific order. Supported type is "refname" (lexicographic order), "version:refname" or "v:refname" (tag names are treated as versions). Prepend "-" to reverse sort order. When this option is not given, the sort order defaults to the value configured for the 'tag.sort' variable if it exists, or lexicographic order otherwise. See linkgit:git-config[1]. --column[=]:: --no-column:: Display tag listing in columns. See configuration variable column.tag for option syntax.`--column` and `--no-column` without options are equivalent to 'always' and 'never' respectively. + This option is only applicable when listing tags without annotation lines. --contains []:: Only list tags which contain the specified commit (HEAD if not specified). --points-at :: Only list tags of the given object. -m :: --message=:: Use the given tag message (instead of prompting). If multiple `-m` options are given, their values are concatenated as separate paragraphs. Implies `-a` if none of `-a`, `-s`, or `-u ` is given. -F :: --file=:: Take the tag message from the given file. Use '-' to read the message from the standard input. 
Implies `-a` if none of `-a`, `-s`, or `-u ` is given. --cleanup=:: This option sets how the tag message is cleaned up. The '' can be one of 'verbatim', 'whitespace' and 'strip'. The 'strip' mode is default. The 'verbatim' mode does not change message at all, 'whitespace' removes just leading/trailing whitespace lines and 'strip' removes both whitespace and commentary. :: The name of the tag to create, delete, or describe. The new tag name must pass all checks defined by linkgit:git-check-ref-format[1]. Some of these checks may restrict the characters allowed in a tag name. :: :: The object that the new tag will refer to, usually a commit. Defaults to HEAD. CONFIGURATION ------------- By default, 'git tag' in sign-with-default mode (-s) will use your committer identity (of the form "Your Name <\your@email.address>") to find a key. If you want to use a different default key, you can specify it in the repository configuration as follows: ------------------------------------- [user] signingkey = ------------------------------------- DISCUSSION ---------- On Re-tagging ~~~~~~~~~~~~~ What should you do when you tag a wrong commit and you would want to re-tag? If you never pushed anything out, just re-tag it. Use "-f" to replace the old one. And you're done. But if you have pushed things out (or others could just read your repository directly), then others will have already seen the old tag. In that case you can do one of two things: . The sane thing. Just admit you screwed up, and use a different name. Others have already seen one tag-name, and if you keep the same name, you may be in the situation that two people both have "version X", but they actually have 'different' "X"'s. So just call it "X.1" and be done with it. . The insane thing. You really want to call the new version "X" too, 'even though' others have already seen the old one. So just use 'git tag -f' again, as if you hadn't already published the old one. 
However, Git does *not* (and it should not) change tags behind users back. So if somebody already got the old tag, doing a 'git pull' on your tree shouldn't just make them overwrite the old one. If somebody got a release tag from you, you cannot just change the tag for them by updating your own one. This is a big security issue, in that people MUST be able to trust their tag-names. If you really want to do the insane thing, you need to just fess up to it, and tell people that you messed up. You can do that by making a very public announcement saying: ------------ Ok, I messed up, and I pushed out an earlier version tagged as X. I then fixed something, and retagged the *fixed* tree as X again. If you got the wrong tag, and want the new one, please delete the old one and fetch the new one by doing: git tag -d X git fetch origin tag X to get my updated tag. You can test which tag you have by doing git rev-parse X which should return 0123456789abcdef.. if you have the new version. Sorry for the inconvenience. ------------ Does this seem a bit complicated? It *should* be. There is no way that it would be correct to just "fix" it automatically. People need to know that their tags might have been changed. On Automatic following ~~~~~~~~~~~~~~~~~~~~~~ If you are following somebody else's tree, you are most likely using remote-tracking branches (`refs/heads/origin` in traditional layout, or `refs/remotes/origin/master` in the separate-remote layout). You usually want the tags from the other end. On the other hand, if you are fetching because you would want a one-shot merge from somebody else, you typically do not want to get tags from there. This happens more often for people near the toplevel but not limited to them. Mere mortals when pulling from each other do not necessarily want to automatically get private anchor point tags from the other person. 
Often, "please pull" messages on the mailing list just provide two pieces of information: a repo URL and a branch name; this is designed to be easily cut&pasted at the end of a 'git fetch' command line: ------------ Linus, please pull from git://git..../proj.git master to get the following updates... ------------ becomes: ------------ $ git pull git://git..../proj.git master ------------ In such a case, you do not want to automatically follow the other person's tags. One important aspect of Git is its distributed nature, which largely means there is no inherent "upstream" or "downstream" in the system. On the face of it, the above example might seem to indicate that the tag namespace is owned by the upper echelon of people and that tags only flow downwards, but that is not the case. It only shows that the usage pattern determines who are interested in whose tags. A one-shot pull is a sign that a commit history is now crossing the boundary between one circle of people (e.g. "people who are primarily interested in the networking part of the kernel") who may have their own set of tags (e.g. "this is the third release candidate from the networking group to be proposed for general consumption with 2.6.21 release") to another circle of people (e.g. "people who integrate various subsystem improvements"). The latter are usually not interested in the detailed tags used internally in the former group (that is what "internal" means). That is why it is desirable not to follow tags automatically in this case. It may well be that among networking people, they may want to exchange the tags internal to their group, but in that workflow they are most likely tracking each other's progress by having remote-tracking branches. Again, the heuristic to automatically follow such tags is a good thing. 
On Backdating Tags ~~~~~~~~~~~~~~~~~~ If you have imported some changes from another VCS and would like to add tags for major releases of your work, it is useful to be able to specify the date to embed inside of the tag object; such data in the tag object affects, for example, the ordering of tags in the gitweb interface. To set the date used in future tag objects, set the environment variable GIT_COMMITTER_DATE (see the later discussion of possible values; the most common form is "YYYY-MM-DD HH:MM"). For example: ------------ $ GIT_COMMITTER_DATE="2006-10-02 10:31" git tag -s v1.0.1 ------------ include::date-formats.txt[] SEE ALSO -------- linkgit:git-check-ref-format[1]. linkgit:git-config[1]. GIT --- Part of the linkgit:git[1] suite cgit-0.11.2/git/Documentation/git-fmt-merge-msg.txt0000644000175000017500000000356612476431550022377 0ustar formorerformorergit-fmt-merge-msg(1) ==================== NAME ---- git-fmt-merge-msg - Produce a merge commit message SYNOPSIS -------- [verse] 'git fmt-merge-msg' [-m ] [--log[=] | --no-log] <$GIT_DIR/FETCH_HEAD 'git fmt-merge-msg' [-m ] [--log[=] | --no-log] -F DESCRIPTION ----------- Takes the list of merged objects on stdin and produces a suitable commit message to be used for the merge commit, usually to be passed as the '' argument of 'git merge'. This command is intended mostly for internal use by scripts automatically invoking 'git merge'. OPTIONS ------- --log[=]:: In addition to branch names, populate the log message with one-line descriptions from the actual commits that are being merged. At most commits from each merge parent will be used (20 if is omitted). This overrides the `merge.log` configuration variable. --no-log:: Do not list one-line descriptions from the actual commits being merged. --[no-]summary:: Synonyms to --log and --no-log; these are deprecated and will be removed in the future. -m :: --message :: Use instead of the branch names for the first line of the log message. For use with `--log`. 
-F :: --file :: Take the list of merged objects from instead of stdin. CONFIGURATION ------------- merge.branchdesc:: In addition to branch names, populate the log message with the branch description text associated with them. Defaults to false. merge.log:: In addition to branch names, populate the log message with at most the specified number of one-line descriptions from the actual commits that are being merged. Defaults to false, and true is a synonym for 20. merge.summary:: Synonym to `merge.log`; this is deprecated and will be removed in the future. SEE ALSO -------- linkgit:git-merge[1] GIT --- Part of the linkgit:git[1] suite cgit-0.11.2/git/Documentation/git-mergetool.txt0000644000175000017500000000655512476431550021726 0ustar formorerformorergit-mergetool(1) ================ NAME ---- git-mergetool - Run merge conflict resolution tools to resolve merge conflicts SYNOPSIS -------- [verse] 'git mergetool' [--tool=] [-y | --[no-]prompt] [...] DESCRIPTION ----------- Use `git mergetool` to run one of several merge utilities to resolve merge conflicts. It is typically run after 'git merge'. If one or more parameters are given, the merge tool program will be run to resolve differences on each file (skipping those without conflicts). Specifying a directory will include all unresolved files in that path. If no names are specified, 'git mergetool' will run the merge tool program on every file with merge conflicts. OPTIONS ------- -t :: --tool=:: Use the merge resolution program specified by . Valid values include emerge, gvimdiff, kdiff3, meld, vimdiff, and tortoisemerge. Run `git mergetool --tool-help` for the list of valid settings. + If a merge resolution program is not specified, 'git mergetool' will use the configuration variable `merge.tool`. If the configuration variable `merge.tool` is not set, 'git mergetool' will pick a suitable default. + You can explicitly provide a full path to the tool by setting the configuration variable `mergetool..path`. 
For example, you can configure the absolute path to kdiff3 by setting `mergetool.kdiff3.path`. Otherwise, 'git mergetool' assumes the tool is available in PATH. + Instead of running one of the known merge tool programs, 'git mergetool' can be customized to run an alternative program by specifying the command line to invoke in a configuration variable `mergetool..cmd`. + When 'git mergetool' is invoked with this tool (either through the `-t` or `--tool` option or the `merge.tool` configuration variable) the configured command line will be invoked with `$BASE` set to the name of a temporary file containing the common base for the merge, if available; `$LOCAL` set to the name of a temporary file containing the contents of the file on the current branch; `$REMOTE` set to the name of a temporary file containing the contents of the file to be merged, and `$MERGED` set to the name of the file to which the merge tool should write the result of the merge resolution. + If the custom merge tool correctly indicates the success of a merge resolution with its exit code, then the configuration variable `mergetool..trustExitCode` can be set to `true`. Otherwise, 'git mergetool' will prompt the user to indicate the success of the resolution after the custom tool has exited. --tool-help:: Print a list of merge tools that may be used with `--tool`. -y:: --no-prompt:: Don't prompt before each invocation of the merge resolution program. This is the default if the merge resolution program is explicitly specified with the `--tool` option or with the `merge.tool` configuration variable. --prompt:: Prompt before each invocation of the merge resolution program to give the user a chance to skip the path. TEMPORARY FILES --------------- `git mergetool` creates `*.orig` backup files while resolving merges. These are safe to remove once a file has been merged and its `git mergetool` session has completed. 
Setting the `mergetool.keepBackup` configuration variable to `false` causes `git mergetool` to automatically remove the backup as files are successfully merged. GIT --- Part of the linkgit:git[1] suite cgit-0.11.2/git/Documentation/git-stash.txt0000644000175000017500000002444512476431550021051 0ustar formorerformorergit-stash(1) ============ NAME ---- git-stash - Stash the changes in a dirty working directory away SYNOPSIS -------- [verse] 'git stash' list [] 'git stash' show [] 'git stash' drop [-q|--quiet] [] 'git stash' ( pop | apply ) [--index] [-q|--quiet] [] 'git stash' branch [] 'git stash' [save [-p|--patch] [-k|--[no-]keep-index] [-q|--quiet] [-u|--include-untracked] [-a|--all] []] 'git stash' clear 'git stash' create [] 'git stash' store [-m|--message ] [-q|--quiet] DESCRIPTION ----------- Use `git stash` when you want to record the current state of the working directory and the index, but want to go back to a clean working directory. The command saves your local modifications away and reverts the working directory to match the `HEAD` commit. The modifications stashed away by this command can be listed with `git stash list`, inspected with `git stash show`, and restored (potentially on top of a different commit) with `git stash apply`. Calling `git stash` without any arguments is equivalent to `git stash save`. A stash is by default listed as "WIP on 'branchname' ...", but you can give a more descriptive message on the command line when you create one. The latest stash you created is stored in `refs/stash`; older stashes are found in the reflog of this reference and can be named using the usual reflog syntax (e.g. `stash@{0}` is the most recently created stash, `stash@{1}` is the one before it, `stash@{2.hours.ago}` is also possible). OPTIONS ------- save [-p|--patch] [-k|--[no-]keep-index] [-u|--include-untracked] [-a|--all] [-q|--quiet] []:: Save your local modifications to a new 'stash', and run `git reset --hard` to revert them. 
The part is optional and gives the description along with the stashed state. For quickly making a snapshot, you can omit _both_ "save" and , but giving only does not trigger this action to prevent a misspelled subcommand from making an unwanted stash. + If the `--keep-index` option is used, all changes already added to the index are left intact. + If the `--include-untracked` option is used, all untracked files are also stashed and then cleaned up with `git clean`, leaving the working directory in a very clean state. If the `--all` option is used instead then the ignored files are stashed and cleaned in addition to the untracked files. + With `--patch`, you can interactively select hunks from the diff between HEAD and the working tree to be stashed. The stash entry is constructed such that its index state is the same as the index state of your repository, and its worktree contains only the changes you selected interactively. The selected changes are then rolled back from your worktree. See the ``Interactive Mode'' section of linkgit:git-add[1] to learn how to operate the `--patch` mode. + The `--patch` option implies `--keep-index`. You can use `--no-keep-index` to override this. list []:: List the stashes that you currently have. Each 'stash' is listed with its name (e.g. `stash@{0}` is the latest stash, `stash@{1}` is the one before, etc.), the name of the branch that was current when the stash was made, and a short description of the commit the stash was based on. + ---------------------------------------------------------------- stash@{0}: WIP on submit: 6ebd0e2... Update git-stash documentation stash@{1}: On master: 9cc0589... Add git-stash ---------------------------------------------------------------- + The command takes options applicable to the 'git log' command to control what is shown and how. See linkgit:git-log[1]. show []:: Show the changes recorded in the stash as a diff between the stashed state and its original parent. 
When no `` is given, shows the latest one. By default, the command shows the diffstat, but it will accept any format known to 'git diff' (e.g., `git stash show -p stash@{1}` to view the second most recent stash in patch form). pop [--index] [-q|--quiet] []:: Remove a single stashed state from the stash list and apply it on top of the current working tree state, i.e., do the inverse operation of `git stash save`. The working directory must match the index. + Applying the state can fail with conflicts; in this case, it is not removed from the stash list. You need to resolve the conflicts by hand and call `git stash drop` manually afterwards. + If the `--index` option is used, then tries to reinstate not only the working tree's changes, but also the index's ones. However, this can fail, when you have conflicts (which are stored in the index, where you therefore can no longer apply the changes as they were originally). + When no `` is given, `stash@{0}` is assumed, otherwise `` must be a reference of the form `stash@{}`. apply [--index] [-q|--quiet] []:: Like `pop`, but do not remove the state from the stash list. Unlike `pop`, `` may be any commit that looks like a commit created by `stash save` or `stash create`. branch []:: Creates and checks out a new branch named `` starting from the commit at which the `` was originally created, applies the changes recorded in `` to the new working tree and index. If that succeeds, and `` is a reference of the form `stash@{}`, it then drops the ``. When no `` is given, applies the latest one. + This is useful if the branch on which you ran `git stash save` has changed enough that `git stash apply` fails due to conflicts. Since the stash is applied on top of the commit that was HEAD at the time `git stash` was run, it restores the originally stashed state with no conflicts. clear:: Remove all the stashed states. 
Note that those states will then be subject to pruning, and may be impossible to recover (see 'Examples' below for a possible strategy). drop [-q|--quiet] []:: Remove a single stashed state from the stash list. When no `` is given, it removes the latest one. i.e. `stash@{0}`, otherwise `` must be a valid stash log reference of the form `stash@{}`. create:: Create a stash (which is a regular commit object) and return its object name, without storing it anywhere in the ref namespace. This is intended to be useful for scripts. It is probably not the command you want to use; see "save" above. store:: Store a given stash created via 'git stash create' (which is a dangling merge commit) in the stash ref, updating the stash reflog. This is intended to be useful for scripts. It is probably not the command you want to use; see "save" above. DISCUSSION ---------- A stash is represented as a commit whose tree records the state of the working directory, and its first parent is the commit at `HEAD` when the stash was created. The tree of the second parent records the state of the index when the stash is made, and it is made a child of the `HEAD` commit. The ancestry graph looks like this: .----W / / -----H----I where `H` is the `HEAD` commit, `I` is a commit that records the state of the index, and `W` is a commit that records the state of the working tree. EXAMPLES -------- Pulling into a dirty tree:: When you are in the middle of something, you learn that there are upstream changes that are possibly relevant to what you are doing. When your local changes do not conflict with the changes in the upstream, a simple `git pull` will let you move forward. + However, there are cases in which your local changes do conflict with the upstream changes, and `git pull` refuses to overwrite your changes. In such a case, you can stash your changes away, perform a pull, and then unstash, like this: + ---------------------------------------------------------------- $ git pull ... 
file foobar not up to date, cannot merge. $ git stash $ git pull $ git stash pop ---------------------------------------------------------------- Interrupted workflow:: When you are in the middle of something, your boss comes in and demands that you fix something immediately. Traditionally, you would make a commit to a temporary branch to store your changes away, and return to your original branch to make the emergency fix, like this: + ---------------------------------------------------------------- # ... hack hack hack ... $ git checkout -b my_wip $ git commit -a -m "WIP" $ git checkout master $ edit emergency fix $ git commit -a -m "Fix in a hurry" $ git checkout my_wip $ git reset --soft HEAD^ # ... continue hacking ... ---------------------------------------------------------------- + You can use 'git stash' to simplify the above, like this: + ---------------------------------------------------------------- # ... hack hack hack ... $ git stash $ edit emergency fix $ git commit -a -m "Fix in a hurry" $ git stash pop # ... continue hacking ... ---------------------------------------------------------------- Testing partial commits:: You can use `git stash save --keep-index` when you want to make two or more commits out of the changes in the work tree, and you want to test each change before committing: + ---------------------------------------------------------------- # ... hack hack hack ... $ git add --patch foo # add just first part to the index $ git stash save --keep-index # save all other changes to the stash $ edit/build/test first part $ git commit -m 'First part' # commit fully tested change $ git stash pop # prepare to work on all other changes # ... repeat above five steps until one commit remains ... 
$ edit/build/test remaining parts $ git commit foo -m 'Remaining parts' ---------------------------------------------------------------- Recovering stashes that were cleared/dropped erroneously:: If you mistakenly drop or clear stashes, they cannot be recovered through the normal safety mechanisms. However, you can try the following incantation to get a list of stashes that are still in your repository, but not reachable any more: + ---------------------------------------------------------------- git fsck --unreachable | grep commit | cut -d\ -f3 | xargs git log --merges --no-walk --grep=WIP ---------------------------------------------------------------- SEE ALSO -------- linkgit:git-checkout[1], linkgit:git-commit[1], linkgit:git-reflog[1], linkgit:git-reset[1] GIT --- Part of the linkgit:git[1] suite cgit-0.11.2/git/Documentation/git-difftool.txt0000644000175000017500000001103112476431550021520 0ustar formorerformorergit-difftool(1) =============== NAME ---- git-difftool - Show changes using common diff tools SYNOPSIS -------- [verse] 'git difftool' [] [ []] [--] [...] DESCRIPTION ----------- 'git difftool' is a Git command that allows you to compare and edit files between revisions using common diff tools. 'git difftool' is a frontend to 'git diff' and accepts the same options and arguments. See linkgit:git-diff[1]. OPTIONS ------- -d:: --dir-diff:: Copy the modified files to a temporary location and perform a directory diff on them. This mode never prompts before launching the diff tool. -y:: --no-prompt:: Do not prompt before launching a diff tool. --prompt:: Prompt before each invocation of the diff tool. This is the default behaviour; the option is provided to override any configuration settings. -t :: --tool=:: Use the diff tool specified by . Valid values include emerge, kompare, meld, and vimdiff. Run `git difftool --tool-help` for the list of valid settings. 
+ If a diff tool is not specified, 'git difftool' will use the configuration variable `diff.tool`. If the configuration variable `diff.tool` is not set, 'git difftool' will pick a suitable default. + You can explicitly provide a full path to the tool by setting the configuration variable `difftool..path`. For example, you can configure the absolute path to kdiff3 by setting `difftool.kdiff3.path`. Otherwise, 'git difftool' assumes the tool is available in PATH. + Instead of running one of the known diff tools, 'git difftool' can be customized to run an alternative program by specifying the command line to invoke in a configuration variable `difftool..cmd`. + When 'git difftool' is invoked with this tool (either through the `-t` or `--tool` option or the `diff.tool` configuration variable) the configured command line will be invoked with the following variables available: `$LOCAL` is set to the name of the temporary file containing the contents of the diff pre-image and `$REMOTE` is set to the name of the temporary file containing the contents of the diff post-image. `$MERGED` is the name of the file which is being compared. `$BASE` is provided for compatibility with custom merge tool commands and has the same value as `$MERGED`. --tool-help:: Print a list of diff tools that may be used with `--tool`. --[no-]symlinks:: 'git difftool''s default behavior is create symlinks to the working tree when run in `--dir-diff` mode and the right-hand side of the comparison yields the same content as the file in the working tree. + Specifying `--no-symlinks` instructs 'git difftool' to create copies instead. `--no-symlinks` is the default on Windows. -x :: --extcmd=:: Specify a custom command for viewing diffs. 'git-difftool' ignores the configured defaults and runs `$command $LOCAL $REMOTE` when this option is specified. Additionally, `$BASE` is set in the environment. 
-g:: --gui:: When 'git-difftool' is invoked with the `-g` or `--gui` option the default diff tool will be read from the configured `diff.guitool` variable instead of `diff.tool`. --[no-]trust-exit-code:: 'git-difftool' invokes a diff tool individually on each file. Errors reported by the diff tool are ignored by default. Use `--trust-exit-code` to make 'git-difftool' exit when an invoked diff tool returns a non-zero exit code. + 'git-difftool' will forward the exit code of the invoked tool when '--trust-exit-code' is used. See linkgit:git-diff[1] for the full list of supported options. CONFIG VARIABLES ---------------- 'git difftool' falls back to 'git mergetool' config variables when the difftool equivalents have not been defined. diff.tool:: The default diff tool to use. diff.guitool:: The default diff tool to use when `--gui` is specified. difftool..path:: Override the path for the given tool. This is useful in case your tool is not in the PATH. difftool..cmd:: Specify the command to invoke the specified diff tool. + See the `--tool=` option above for more details. difftool.prompt:: Prompt before each invocation of the diff tool. difftool.trustExitCode:: Exit difftool if the invoked diff tool returns a non-zero exit status. + See the `--trust-exit-code` option above for more details. SEE ALSO -------- linkgit:git-diff[1]:: Show changes between commits, commit and working tree, etc linkgit:git-mergetool[1]:: Run merge conflict resolution tools to resolve merge conflicts linkgit:git-config[1]:: Get and set repository or global options GIT --- Part of the linkgit:git[1] suite cgit-0.11.2/git/Documentation/git-clean.txt0000644000175000017500000001005412476431550021000 0ustar formorerformorergit-clean(1) ============ NAME ---- git-clean - Remove untracked files from the working tree SYNOPSIS -------- [verse] 'git clean' [-d] [-f] [-i] [-n] [-q] [-e ] [-x | -X] [--] ... 
DESCRIPTION ----------- Cleans the working tree by recursively removing files that are not under version control, starting from the current directory. Normally, only files unknown to Git are removed, but if the '-x' option is specified, ignored files are also removed. This can, for example, be useful to remove all build products. If any optional `...` arguments are given, only those paths are affected. OPTIONS ------- -d:: Remove untracked directories in addition to untracked files. If an untracked directory is managed by a different Git repository, it is not removed by default. Use -f option twice if you really want to remove such a directory. -f:: --force:: If the Git configuration variable clean.requireForce is not set to false, 'git clean' will refuse to run unless given -f, -n or -i. -i:: --interactive:: Show what would be done and clean files interactively. See ``Interactive mode'' for details. -n:: --dry-run:: Don't actually remove anything, just show what would be done. -q:: --quiet:: Be quiet, only report errors, but not the files that are successfully removed. -e :: --exclude=:: In addition to those found in .gitignore (per directory) and $GIT_DIR/info/exclude, also consider these patterns to be in the set of the ignore rules in effect. -x:: Don't use the standard ignore rules read from .gitignore (per directory) and $GIT_DIR/info/exclude, but do still use the ignore rules given with `-e` options. This allows removing all untracked files, including build products. This can be used (possibly in conjunction with 'git reset') to create a pristine working directory to test a clean build. -X:: Remove only files ignored by Git. This may be useful to rebuild everything from scratch, but keep manually created files. Interactive mode ---------------- When the command enters the interactive mode, it shows the files and directories to be cleaned, and goes into its interactive command loop. 
The command loop shows the list of subcommands available, and gives a prompt "What now> ". In general, when the prompt ends with a single '>', you can pick only one of the choices given and type return, like this: ------------ *** Commands *** 1: clean 2: filter by pattern 3: select by numbers 4: ask each 5: quit 6: help What now> 1 ------------ You also could say `c` or `clean` above as long as the choice is unique. The main command loop has 6 subcommands. clean:: Start cleaning files and directories, and then quit. filter by pattern:: This shows the files and directories to be deleted and issues an "Input ignore patterns>>" prompt. You can input space-separated patterns to exclude files and directories from deletion. E.g. "*.c *.h" will excludes files end with ".c" and ".h" from deletion. When you are satisfied with the filtered result, press ENTER (empty) back to the main menu. select by numbers:: This shows the files and directories to be deleted and issues an "Select items to delete>>" prompt. When the prompt ends with double '>>' like this, you can make more than one selection, concatenated with whitespace or comma. Also you can say ranges. E.g. "2-5 7,9" to choose 2,3,4,5,7,9 from the list. If the second number in a range is omitted, all remaining items are selected. E.g. "7-" to choose 7,8,9 from the list. You can say '*' to choose everything. Also when you are satisfied with the filtered result, press ENTER (empty) back to the main menu. ask each:: This will start to clean, and you must confirm one by one in order to delete items. Please note that this action is not as efficient as the above two actions. quit:: This lets you quit without do cleaning. help:: Show brief usage of interactive git-clean. 
SEE ALSO -------- linkgit:gitignore[5] GIT --- Part of the linkgit:git[1] suite cgit-0.11.2/git/Documentation/git-ls-files.txt0000644000175000017500000001414312476431550021437 0ustar formorerformorergit-ls-files(1) =============== NAME ---- git-ls-files - Show information about files in the index and the working tree SYNOPSIS -------- [verse] 'git ls-files' [-z] [-t] [-v] (--[cached|deleted|others|ignored|stage|unmerged|killed|modified])* (-[c|d|o|i|s|u|k|m])* [-x |--exclude=] [-X |--exclude-from=] [--exclude-per-directory=] [--exclude-standard] [--error-unmatch] [--with-tree=] [--full-name] [--abbrev] [--] [...] DESCRIPTION ----------- This merges the file listing in the directory cache index with the actual working directory list, and shows different combinations of the two. One or more of the options below may be used to determine the files shown: OPTIONS ------- -c:: --cached:: Show cached files in the output (default) -d:: --deleted:: Show deleted files in the output -m:: --modified:: Show modified files in the output -o:: --others:: Show other (i.e. untracked) files in the output -i:: --ignored:: Show only ignored files in the output. When showing files in the index, print only those matched by an exclude pattern. When showing "other" files, show only those matched by an exclude pattern. -s:: --stage:: Show staged contents' object name, mode bits and stage number in the output. --directory:: If a whole directory is classified as "other", show just its name (with a trailing slash) and not its whole contents. --no-empty-directory:: Do not list empty directories. Has no effect without --directory. -u:: --unmerged:: Show unmerged files in the output (forces --stage) -k:: --killed:: Show files on the filesystem that need to be removed due to file/directory conflicts for checkout-index to succeed. -z:: \0 line termination on output. -x :: --exclude=:: Skip untracked files matching pattern. Note that pattern is a shell wildcard pattern. 
See EXCLUDE PATTERNS below for more information. -X :: --exclude-from=:: Read exclude patterns from ; 1 per line. --exclude-per-directory=:: Read additional exclude patterns that apply only to the directory and its subdirectories in . --exclude-standard:: Add the standard Git exclusions: .git/info/exclude, .gitignore in each directory, and the user's global exclusion file. --error-unmatch:: If any does not appear in the index, treat this as an error (return 1). --with-tree=:: When using --error-unmatch to expand the user supplied (i.e. path pattern) arguments to paths, pretend that paths which were removed in the index since the named are still present. Using this option with `-s` or `-u` options does not make any sense. -t:: This feature is semi-deprecated. For scripting purpose, linkgit:git-status[1] `--porcelain` and linkgit:git-diff-files[1] `--name-status` are almost always superior alternatives, and users should look at linkgit:git-status[1] `--short` or linkgit:git-diff[1] `--name-status` for more user-friendly alternatives. + This option identifies the file status with the following tags (followed by a space) at the start of each line: H:: cached S:: skip-worktree M:: unmerged R:: removed/deleted C:: modified/changed K:: to be killed ?:: other -v:: Similar to `-t`, but use lowercase letters for files that are marked as 'assume unchanged' (see linkgit:git-update-index[1]). --full-name:: When run from a subdirectory, the command usually outputs paths relative to the current directory. This option forces paths to be output relative to the project top directory. --abbrev[=]:: Instead of showing the full 40-byte hexadecimal object lines, show only a partial prefix. Non default number of digits can be specified with --abbrev=. --debug:: After each line that describes a file, add more data about its cache entry. This is intended to show as much information as possible for manual inspection; the exact format may change at any time. 
\--:: Do not interpret any more arguments as options. :: Files to show. If no files are given all files which match the other specified criteria are shown. Output ------ 'git ls-files' just outputs the filenames unless '--stage' is specified in which case it outputs: [ ] 'git ls-files --unmerged' and 'git ls-files --stage' can be used to examine detailed information on unmerged paths. For an unmerged path, instead of recording a single mode/SHA-1 pair, the index records up to three such pairs; one from tree O in stage 1, A in stage 2, and B in stage 3. This information can be used by the user (or the porcelain) to see what should eventually be recorded at the path. (see linkgit:git-read-tree[1] for more information on state) When `-z` option is not used, TAB, LF, and backslash characters in pathnames are represented as `\t`, `\n`, and `\\`, respectively. Exclude Patterns ---------------- 'git ls-files' can use a list of "exclude patterns" when traversing the directory tree and finding files to show when the flags --others or --ignored are specified. linkgit:gitignore[5] specifies the format of exclude patterns. These exclude patterns come from these places, in order: 1. The command-line flag --exclude= specifies a single pattern. Patterns are ordered in the same order they appear in the command line. 2. The command-line flag --exclude-from= specifies a file containing a list of patterns. Patterns are ordered in the same order they appear in the file. 3. The command-line flag --exclude-per-directory= specifies a name of the file in each directory 'git ls-files' examines, normally `.gitignore`. Files in deeper directories take precedence. Patterns are ordered in the same order they appear in the files. A pattern specified on the command line with --exclude or read from the file specified with --exclude-from is relative to the top of the directory tree. 
A pattern read from a file specified by --exclude-per-directory is relative to the directory that the pattern file appears in. SEE ALSO -------- linkgit:git-read-tree[1], linkgit:gitignore[5] GIT --- Part of the linkgit:git[1] suite cgit-0.11.2/git/Documentation/git-cvsimport.txt0000644000175000017500000001710012476431550021743 0ustar formorerformorergit-cvsimport(1) ================ NAME ---- git-cvsimport - Salvage your data out of another SCM people love to hate SYNOPSIS -------- [verse] 'git cvsimport' [-o ] [-h] [-v] [-d ] [-A ] [-p ] [-P ] [-C ] [-z ] [-i] [-k] [-u] [-s ] [-a] [-m] [-M ] [-S ] [-L ] [-r ] [-R] [] DESCRIPTION ----------- *WARNING:* `git cvsimport` uses cvsps version 2, which is considered deprecated; it does not work with cvsps version 3 and later. If you are performing a one-shot import of a CVS repository consider using http://cvs2svn.tigris.org/cvs2git.html[cvs2git] or https://github.com/BartMassey/parsecvs[parsecvs]. Imports a CVS repository into Git. It will either create a new repository, or incrementally import into an existing one. Splitting the CVS log into patch sets is done by 'cvsps'. At least version 2.1 is required. *WARNING:* for certain situations the import leads to incorrect results. Please see the section <> for further reference. You should *never* do any work of your own on the branches that are created by 'git cvsimport'. By default initial import will create and populate a "master" branch from the CVS repository's main branch which you're free to work with; after that, you need to 'git merge' incremental imports, or any CVS branches, yourself. It is advisable to specify a named remote via -r to separate and protect the incoming branches. If you intend to set up a shared public repository that all developers can read/write, or if you want to use linkgit:git-cvsserver[1], then you probably want to make a bare clone of the imported repository, and use the clone as the shared repository. See linkgit:gitcvs-migration[7]. 
OPTIONS ------- -v:: Verbosity: let 'cvsimport' report what it is doing. -d :: The root of the CVS archive. May be local (a simple path) or remote; currently, only the :local:, :ext: and :pserver: access methods are supported. If not given, 'git cvsimport' will try to read it from `CVS/Root`. If no such file exists, it checks for the `CVSROOT` environment variable. :: The CVS module you want to import. Relative to . If not given, 'git cvsimport' tries to read it from `CVS/Repository`. -C :: The Git repository to import to. If the directory doesn't exist, it will be created. Default is the current directory. -r :: The Git remote to import this CVS repository into. Moves all CVS branches into remotes// akin to the way 'git clone' uses 'origin' by default. -o :: When no remote is specified (via -r) the 'HEAD' branch from CVS is imported to the 'origin' branch within the Git repository, as 'HEAD' already has a special meaning for Git. When a remote is specified the 'HEAD' branch is named remotes//master mirroring 'git clone' behaviour. Use this option if you want to import into a different branch. + Use '-o master' for continuing an import that was initially done by the old cvs2git tool. -i:: Import-only: don't perform a checkout after importing. This option ensures the working directory and index remain untouched and will not create them if they do not exist. -k:: Kill keywords: will extract files with '-kk' from the CVS archive to avoid noisy changesets. Highly recommended, but off by default to preserve compatibility with early imported trees. -u:: Convert underscores in tag and branch names to dots. -s :: Substitute the character "/" in branch names with -p :: Additional options for cvsps. The options '-u' and '-A' are implicit and should not be used here. + If you need to pass multiple options, separate them with a comma. -z :: Pass the timestamp fuzz factor to cvsps, in seconds. If unset, cvsps defaults to 300s. 
-P :: Instead of calling cvsps, read the provided cvsps output file. Useful for debugging or when cvsps is being handled outside cvsimport. -m:: Attempt to detect merges based on the commit message. This option will enable default regexes that try to capture the source branch name from the commit message. -M :: Attempt to detect merges based on the commit message with a custom regex. It can be used with '-m' to enable the default regexes as well. You must escape forward slashes. + The regex must capture the source branch name in $1. + This option can be used several times to provide several detection regexes. -S :: Skip paths matching the regex. -a:: Import all commits, including recent ones. cvsimport by default skips commits that have a timestamp less than 10 minutes ago. -L :: Limit the number of commits imported. Workaround for cases where cvsimport leaks memory. -A :: CVS by default uses the Unix username when writing its commit logs. Using this option and an author-conv-file maps the name recorded in CVS to author name, e-mail and optional time zone: + --------- exon=Andreas Ericsson spawn=Simon Pawn America/Chicago --------- + 'git cvsimport' will make it appear as those authors had their GIT_AUTHOR_NAME and GIT_AUTHOR_EMAIL set properly all along. If a time zone is specified, GIT_AUTHOR_DATE will have the corresponding offset applied. + For convenience, this data is saved to `$GIT_DIR/cvs-authors` each time the '-A' option is provided and read from that same file each time 'git cvsimport' is run. + It is not recommended to use this feature if you intend to export changes back to CVS again later with 'git cvsexportcommit'. -R:: Generate a `$GIT_DIR/cvs-revisions` file containing a mapping from CVS revision numbers to newly-created Git commit IDs. 
The generated file will contain one line for each (filename, revision) pair imported; each line will look like + --------- src/widget.c 1.1 1d862f173cdc7325b6fa6d2ae1cfd61fd1b512b7 --------- + The revision data is appended to the file if it already exists, for use when doing incremental imports. + This option may be useful if you have CVS revision numbers stored in commit messages, bug-tracking systems, email archives, and the like. -h:: Print a short usage message and exit. OUTPUT ------ If '-v' is specified, the script reports what it is doing. Otherwise, success is indicated the Unix way, i.e. by simply exiting with a zero exit status. [[issues]] ISSUES ------ Problems related to timestamps: * If timestamps of commits in the CVS repository are not stable enough to be used for ordering commits changes may show up in the wrong order. * If any files were ever "cvs import"ed more than once (e.g., import of more than one vendor release) the HEAD contains the wrong content. * If the timestamp order of different files cross the revision order within the commit matching time window the order of commits may be wrong. Problems related to branches: * Branches on which no commits have been made are not imported. * All files from the branching point are added to a branch even if never added in CVS. * This applies to files added to the source branch *after* a daughter branch was created: if previously no commit was made on the daughter branch they will erroneously be added to the daughter branch in git. Problems related to tags: * Multiple tags on the same revision are not imported. 
If you suspect that any of these issues may apply to the repository you want to import, consider using cvs2git: * cvs2git (part of cvs2svn), `http://subversion.apache.org/` GIT --- Part of the linkgit:git[1] suite cgit-0.11.2/git/Documentation/cmd-list.perl0000755000175000017500000000267212476431550021006 0ustar formorerformorer#!/usr/bin/perl -w use File::Compare qw(compare); sub format_one { my ($out, $nameattr) = @_; my ($name, $attr) = @$nameattr; my ($state, $description); $state = 0; open I, '<', "$name.txt" or die "No such file $name.txt"; while () { if (/^NAME$/) { $state = 1; next; } if ($state == 1 && /^----$/) { $state = 2; next; } next if ($state != 2); chomp; $description = $_; last; } close I; if (!defined $description) { die "No description found in $name.txt"; } if (my ($verify_name, $text) = ($description =~ /^($name) - (.*)/)) { print $out "linkgit:$name\[1\]::\n\t"; if ($attr =~ / deprecated /) { print $out "(deprecated) "; } print $out "$text.\n\n"; } else { die "Description does not match $name: $description"; } } my %cmds = (); for (sort <>) { next if /^#/; chomp; my ($name, $cat, $attr) = /^(\S+)\s+(.*?)(?:\s+(.*))?$/; $attr = '' unless defined $attr; push @{$cmds{$cat}}, [$name, " $attr "]; } for my $cat (qw(ancillaryinterrogators ancillarymanipulators mainporcelain plumbinginterrogators plumbingmanipulators synchingrepositories foreignscminterface purehelpers synchelpers)) { my $out = "cmds-$cat.txt"; open O, '>', "$out+" or die "Cannot open output file $out+"; for (@{$cmds{$cat}}) { format_one(\*O, $_); } close O; if (-f "$out" && compare("$out", "$out+") == 0) { unlink "$out+"; } else { print STDERR "$out\n"; rename "$out+", "$out"; } } cgit-0.11.2/git/Documentation/git-merge-index.txt0000644000175000017500000000461512476431550022130 0ustar formorerformorergit-merge-index(1) ================== NAME ---- git-merge-index - Run a merge for files needing merging SYNOPSIS -------- [verse] 'git merge-index' [-o] [-q] (-a | [--] *) DESCRIPTION 
----------- This looks up the (s) in the index and, if there are any merge entries, passes the SHA-1 hash for those files as arguments 1, 2, 3 (empty argument if no file), and as argument 4. File modes for the three files are passed as arguments 5, 6 and 7. OPTIONS ------- \--:: Do not interpret any more arguments as options. -a:: Run merge against all files in the index that need merging. -o:: Instead of stopping at the first failed merge, do all of them in one shot - continue with merging even when previous merges returned errors, and only return the error code after all the merges. -q:: Do not complain about a failed merge program (a merge program failure usually indicates conflicts during the merge). This is for porcelains which might want to emit custom messages. If 'git merge-index' is called with multiple s (or -a) then it processes them in turn only stopping if merge returns a non-zero exit code. Typically this is run with a script calling Git's imitation of the 'merge' command from the RCS package. A sample script called 'git merge-one-file' is included in the distribution. ALERT ALERT ALERT! The Git "merge object order" is different from the RCS 'merge' program merge object order. In the above ordering, the original is first. But the argument order to the 3-way merge program 'merge' is to have the original in the middle. Don't ask me why. Examples: torvalds@ppc970:~/merge-test> git merge-index cat MM This is MM from the original tree. # original This is modified MM in the branch A. # merge1 This is modified MM in the branch B. # merge2 This is modified MM in the branch B. # current contents or torvalds@ppc970:~/merge-test> git merge-index cat AA MM cat: : No such file or directory This is added AA in the branch A. This is added AA in the branch B. This is added AA in the branch B. 
fatal: merge program failed where the latter example shows how 'git merge-index' will stop trying to merge once anything has returned an error (i.e., `cat` returned an error for the AA file, because it didn't exist in the original, and thus 'git merge-index' didn't even try to merge the MM thing). GIT --- Part of the linkgit:git[1] suite cgit-0.11.2/git/Documentation/gitcvs-migration.txt0000644000175000017500000001657112476431550022435 0ustar formorerformorergitcvs-migration(7) =================== NAME ---- gitcvs-migration - Git for CVS users SYNOPSIS -------- [verse] 'git cvsimport' * DESCRIPTION ----------- Git differs from CVS in that every working tree contains a repository with a full copy of the project history, and no repository is inherently more important than any other. However, you can emulate the CVS model by designating a single shared repository which people can synchronize with; this document explains how to do that. Some basic familiarity with Git is required. Having gone through linkgit:gittutorial[7] and linkgit:gitglossary[7] should be sufficient. Developing against a shared repository -------------------------------------- Suppose a shared repository is set up in /pub/repo.git on the host foo.com. Then as an individual committer you can clone the shared repository over ssh with: ------------------------------------------------ $ git clone foo.com:/pub/repo.git/ my-project $ cd my-project ------------------------------------------------ and hack away. The equivalent of 'cvs update' is ------------------------------------------------ $ git pull origin ------------------------------------------------ which merges in any work that others might have done since the clone operation. If there are uncommitted changes in your working tree, commit them first before running git pull. 
[NOTE] ================================ The 'pull' command knows where to get updates from because of certain configuration variables that were set by the first 'git clone' command; see `git config -l` and the linkgit:git-config[1] man page for details. ================================ You can update the shared repository with your changes by first committing your changes, and then using the 'git push' command: ------------------------------------------------ $ git push origin master ------------------------------------------------ to "push" those commits to the shared repository. If someone else has updated the repository more recently, 'git push', like 'cvs commit', will complain, in which case you must pull any changes before attempting the push again. In the 'git push' command above we specify the name of the remote branch to update (`master`). If we leave that out, 'git push' tries to update any branches in the remote repository that have the same name as a branch in the local repository. So the last 'push' can be done with either of: ------------ $ git push origin $ git push foo.com:/pub/project.git/ ------------ as long as the shared repository does not have any branches other than `master`. Setting Up a Shared Repository ------------------------------ We assume you have already created a Git repository for your project, possibly created from scratch or from a tarball (see linkgit:gittutorial[7]), or imported from an already existing CVS repository (see the next section). Assume your existing repo is at /home/alice/myproject. Create a new "bare" repository (a repository without a working tree) and fetch your project into it: ------------------------------------------------ $ mkdir /pub/my-repo.git $ cd /pub/my-repo.git $ git --bare init --shared $ git --bare fetch /home/alice/myproject master:master ------------------------------------------------ Next, give every team member read/write access to this repository. 
One easy way to do this is to give all the team members ssh access to the machine where the repository is hosted. If you don't want to give them a full shell on the machine, there is a restricted shell which only allows users to do Git pushes and pulls; see linkgit:git-shell[1]. Put all the committers in the same group, and make the repository writable by that group: ------------------------------------------------ $ chgrp -R $group /pub/my-repo.git ------------------------------------------------ Make sure committers have a umask of at most 027, so that the directories they create are writable and searchable by other group members. Importing a CVS archive ----------------------- First, install version 2.1 or higher of cvsps from http://www.cobite.com/cvsps/[http://www.cobite.com/cvsps/] and make sure it is in your path. Then cd to a checked out CVS working directory of the project you are interested in and run linkgit:git-cvsimport[1]: ------------------------------------------- $ git cvsimport -C ------------------------------------------- This puts a Git archive of the named CVS module in the directory , which will be created if necessary. The import checks out from CVS every revision of every file. Reportedly cvsimport can average some twenty revisions per second, so for a medium-sized project this should not take more than a couple of minutes. Larger projects or remote repositories may take longer. The main trunk is stored in the Git branch named `origin`, and additional CVS branches are stored in Git branches with the same names. The most recent version of the main trunk is also left checked out on the `master` branch, so you can start adding your own changes right away. The import is incremental, so if you call it again next month it will fetch any CVS updates that have been made in the meantime. For this to work, you must not modify the imported branches; instead, create new branches for your own changes, and merge in the imported branches as necessary. 
If you want a shared repository, you will need to make a bare clone of the imported directory, as described above. Then treat the imported directory as another development clone for purposes of merging incremental imports. Advanced Shared Repository Management ------------------------------------- Git allows you to specify scripts called "hooks" to be run at certain points. You can use these, for example, to send all commits to the shared repository to a mailing list. See linkgit:githooks[5]. You can enforce finer grained permissions using update hooks. See link:howto/update-hook-example.html[Controlling access to branches using update hooks]. Providing CVS Access to a Git Repository ---------------------------------------- It is also possible to provide true CVS access to a Git repository, so that developers can still use CVS; see linkgit:git-cvsserver[1] for details. Alternative Development Models ------------------------------ CVS users are accustomed to giving a group of developers commit access to a common repository. As we've seen, this is also possible with Git. However, the distributed nature of Git allows other development models, and you may want to first consider whether one of them might be a better fit for your project. For example, you can choose a single person to maintain the project's primary public repository. Other developers then clone this repository and each work in their own clone. When they have a series of changes that they're happy with, they ask the maintainer to pull from the branch containing the changes. The maintainer reviews their changes and pulls them into the primary repository, which other developers pull from as necessary to stay coordinated. The Linux kernel and other projects use variants of this model. With a small group, developers may just pull changes from each other's repositories without the need for a central maintainer. 
SEE ALSO -------- linkgit:gittutorial[7], linkgit:gittutorial-2[7], linkgit:gitcore-tutorial[7], linkgit:gitglossary[7], linkgit:giteveryday[7], link:user-manual.html[The Git User's Manual] GIT --- Part of the linkgit:git[1] suite. cgit-0.11.2/git/Documentation/blame-options.txt0000644000175000017500000000743612476431550021720 0ustar formorerformorer-b:: Show blank SHA-1 for boundary commits. This can also be controlled via the `blame.blankboundary` config option. --root:: Do not treat root commits as boundaries. This can also be controlled via the `blame.showroot` config option. --show-stats:: Include additional statistics at the end of blame output. -L ,:: -L ::: Annotate only the given line range. May be specified multiple times. Overlapping ranges are allowed. + and are optional. ``-L '' or ``-L ,'' spans from to end of file. ``-L ,'' spans from start of file to . + include::line-range-format.txt[] -l:: Show long rev (Default: off). -t:: Show raw timestamp (Default: off). -S :: Use revisions from revs-file instead of calling linkgit:git-rev-list[1]. --reverse:: Walk history forward instead of backward. Instead of showing the revision in which a line appeared, this shows the last revision in which a line has existed. This requires a range of revision like START..END where the path to blame exists in START. -p:: --porcelain:: Show in a format designed for machine consumption. --line-porcelain:: Show the porcelain format, but output commit information for each line, not just the first time a commit is referenced. Implies --porcelain. --incremental:: Show the result incrementally in a format designed for machine consumption. --encoding=:: Specifies the encoding used to output author names and commit summaries. Setting it to `none` makes blame output unconverted data. For more information see the discussion about encoding in the linkgit:git-log[1] manual page. 
--contents <file>:: When <rev> is not specified, the command annotates the changes starting backwards from the working tree copy. This flag makes the command pretend as if the working tree copy has the contents of the named file (specify `-` to make the command read from the standard input). --date <format>:: The value is one of the following alternatives: {relative,local,default,iso,rfc,short}. If --date is not provided, the value of the blame.date config variable is used. If the blame.date config variable is also not set, the iso format is used. For more information, see the discussion of the --date option at linkgit:git-log[1]. -M|<num>|:: Detect moved or copied lines within a file. When a commit moves or copies a block of lines (e.g. the original file has A and then B, and the commit changes it to B and then A), the traditional 'blame' algorithm notices only half of the movement and typically blames the lines that were moved up (i.e. B) to the parent and assigns blame to the lines that were moved down (i.e. A) to the child commit. With this option, both groups of lines are blamed on the parent by running extra passes of inspection. + <num> is optional but it is the lower bound on the number of alphanumeric characters that Git must detect as moving/copying within a file for it to associate those lines with the parent commit. The default value is 20. -C|<num>|:: In addition to `-M`, detect lines moved or copied from other files that were modified in the same commit. This is useful when you reorganize your program and move code around across files. When this option is given twice, the command additionally looks for copies from other files in the commit that creates the file. When this option is given three times, the command additionally looks for copies from other files in any commit. + <num> is optional but it is the lower bound on the number of alphanumeric characters that Git must detect as moving/copying between files for it to associate those lines with the parent commit. 
And the default value is 40. If there are more than one `-C` options given, the argument of the last `-C` will take effect. -h:: Show help message. cgit-0.11.2/git/Documentation/git-replace.txt0000644000175000017500000001147512476431550021341 0ustar formorerformorergit-replace(1) ============== NAME ---- git-replace - Create, list, delete refs to replace objects SYNOPSIS -------- [verse] 'git replace' [-f] 'git replace' [-f] --edit 'git replace' [-f] --graft [...] 'git replace' -d ... 'git replace' [--format=] [-l []] DESCRIPTION ----------- Adds a 'replace' reference in `refs/replace/` namespace. The name of the 'replace' reference is the SHA-1 of the object that is replaced. The content of the 'replace' reference is the SHA-1 of the replacement object. The replaced object and the replacement object must be of the same type. This restriction can be bypassed using `-f`. Unless `-f` is given, the 'replace' reference must not yet exist. There is no other restriction on the replaced and replacement objects. Merge commits can be replaced by non-merge commits and vice versa. Replacement references will be used by default by all Git commands except those doing reachability traversal (prune, pack transfer and fsck). It is possible to disable use of replacement references for any command using the `--no-replace-objects` option just after 'git'. For example if commit 'foo' has been replaced by commit 'bar': ------------------------------------------------ $ git --no-replace-objects cat-file commit foo ------------------------------------------------ shows information about commit 'foo', while: ------------------------------------------------ $ git cat-file commit foo ------------------------------------------------ shows information about commit 'bar'. The 'GIT_NO_REPLACE_OBJECTS' environment variable can be set to achieve the same effect as the `--no-replace-objects` option. 
OPTIONS ------- -f:: --force:: If an existing replace ref for the same object exists, it will be overwritten (instead of failing). -d:: --delete:: Delete existing replace refs for the given objects. --edit <object>:: Edit an object's content interactively. The existing content for <object> is pretty-printed into a temporary file, an editor is launched on the file, and the result is parsed to create a new object of the same type as <object>. A replacement ref is then created to replace <object> with the newly created object. See linkgit:git-var[1] for details about how the editor will be chosen. --raw:: When editing, provide the raw object contents rather than pretty-printed ones. Currently this only affects trees, which will be shown in their binary form. This is harder to work with, but can help when repairing a tree that is so corrupted it cannot be pretty-printed. Note that you may need to configure your editor to cleanly read and write binary data. --graft <commit> [<parent>...]:: Create a graft commit. A new commit is created with the same content as <commit> except that its parents will be [<parent>...] instead of <commit>'s parents. A replacement ref is then created to replace <commit> with the newly created commit. See contrib/convert-grafts-to-replace-refs.sh for an example script based on this option that can convert grafts to replace refs. -l <pattern>:: --list <pattern>:: List replace refs for objects that match the given pattern (or all if no pattern is given). Typing "git replace" without arguments, also lists all replace refs. --format=<format>:: When listing, use the specified <format>, which can be one of 'short', 'medium' and 'long'. When omitted, the format defaults to 'short'. FORMATS ------- The following formats are available: * 'short': <replaced sha1> * 'medium': <replaced sha1> -> <replacement sha1> * 'long': <replaced sha1> (<replaced type>) -> <replacement sha1> (<replacement type>) CREATING REPLACEMENT OBJECTS ---------------------------- linkgit:git-filter-branch[1], linkgit:git-hash-object[1] and linkgit:git-rebase[1], among other git commands, can be used to create replacement objects from existing objects. 
The `--edit` option can also be used with 'git replace' to create a replacement object by editing an existing object. If you want to replace many blobs, trees or commits that are part of a string of commits, you may just want to create a replacement string of commits and then only replace the commit at the tip of the target string of commits with the commit at the tip of the replacement string of commits. BUGS ---- Comparing blobs or trees that have been replaced with those that replace them will not work properly. And using `git reset --hard` to go back to a replaced commit will move the branch to the replacement commit instead of the replaced commit. There may be other problems when using 'git rev-list' related to pending objects. SEE ALSO -------- linkgit:git-hash-object[1] linkgit:git-filter-branch[1] linkgit:git-rebase[1] linkgit:git-tag[1] linkgit:git-branch[1] linkgit:git-commit[1] linkgit:git-var[1] linkgit:git[1] GIT --- Part of the linkgit:git[1] suite cgit-0.11.2/git/Documentation/git-cherry-pick.txt0000644000175000017500000001746712476431550022155 0ustar formorerformorergit-cherry-pick(1) ================== NAME ---- git-cherry-pick - Apply the changes introduced by some existing commits SYNOPSIS -------- [verse] 'git cherry-pick' [--edit] [-n] [-m parent-number] [-s] [-x] [--ff] [-S[]] ... 'git cherry-pick' --continue 'git cherry-pick' --quit 'git cherry-pick' --abort DESCRIPTION ----------- Given one or more existing commits, apply the change each one introduces, recording a new commit for each. This requires your working tree to be clean (no modifications from the HEAD commit). When it is not obvious how to apply a change, the following happens: 1. The current branch and `HEAD` pointer stay at the last commit successfully made. 2. The `CHERRY_PICK_HEAD` ref is set to point at the commit that introduced the change that is difficult to apply. 3. Paths in which the change applied cleanly are updated both in the index file and in your working tree. 4. 
For conflicting paths, the index file records up to three versions, as described in the "TRUE MERGE" section of linkgit:git-merge[1]. The working tree files will include a description of the conflict bracketed by the usual conflict markers `<<<<<<<` and `>>>>>>>`. 5. No other modifications are made. See linkgit:git-merge[1] for some hints on resolving such conflicts. OPTIONS ------- <commit>...:: Commits to cherry-pick. For a more complete list of ways to spell commits, see linkgit:gitrevisions[7]. Sets of commits can be passed but no traversal is done by default, as if the '--no-walk' option was specified, see linkgit:git-rev-list[1]. Note that specifying a range will feed all <commit>... arguments to a single revision walk (see a later example that uses 'maint master..next'). -e:: --edit:: With this option, 'git cherry-pick' will let you edit the commit message prior to committing. -x:: When recording the commit, append a line that says "(cherry picked from commit ...)" to the original commit message in order to indicate which commit this change was cherry-picked from. This is done only for cherry picks without conflicts. Do not use this option if you are cherry-picking from your private branch because the information is useless to the recipient. If on the other hand you are cherry-picking between two publicly visible branches (e.g. backporting a fix to a maintenance branch for an older release from a development branch), adding this information can be useful. -r:: It used to be that the command defaulted to do `-x` described above, and `-r` was to disable it. Now the default is not to do `-x` so this option is a no-op. -m parent-number:: --mainline parent-number:: Usually you cannot cherry-pick a merge because you do not know which side of the merge should be considered the mainline. This option specifies the parent number (starting from 1) of the mainline and allows cherry-pick to replay the change relative to the specified parent. 
-n:: --no-commit:: Usually the command automatically creates a sequence of commits. This flag applies the changes necessary to cherry-pick each named commit to your working tree and the index, without making any commit. In addition, when this option is used, your index does not have to match the HEAD commit. The cherry-pick is done against the beginning state of your index. + This is useful when cherry-picking more than one commits' effect to your index in a row. -s:: --signoff:: Add Signed-off-by line at the end of the commit message. -S[<keyid>]:: --gpg-sign[=<keyid>]:: GPG-sign commits. --ff:: If the current HEAD is the same as the parent of the cherry-pick'ed commit, then a fast forward to this commit will be performed. --allow-empty:: By default, cherry-picking an empty commit will fail, indicating that an explicit invocation of `git commit --allow-empty` is required. This option overrides that behavior, allowing empty commits to be preserved automatically in a cherry-pick. Note that when "--ff" is in effect, empty commits that meet the "fast-forward" requirement will be kept even without this option. Note also, that use of this option only keeps commits that were initially empty (i.e. the commit recorded the same tree as its parent). Commits which are made empty due to a previous commit are dropped. To force the inclusion of those commits use `--keep-redundant-commits`. --allow-empty-message:: By default, cherry-picking a commit with an empty message will fail. This option overrides that behaviour, allowing commits with empty messages to be cherry picked. --keep-redundant-commits:: If a commit being cherry picked duplicates a commit already in the current history, it will become empty. By default these redundant commits are ignored. This option overrides that behavior and creates an empty commit object. Implies `--allow-empty`. --strategy=<strategy>:: Use the given merge strategy. Should only be used once. See the MERGE STRATEGIES section in linkgit:git-merge[1] for details. -X