docker-0.9.1/0000755000175000017500000000000012317406115011100 5ustar tagtagdocker-0.9.1/tags.go0000644000175000017500000001240312314376205012370 0ustar tagtagpackage docker import ( "encoding/json" "fmt" "github.com/dotcloud/docker/utils" "io/ioutil" "os" "path/filepath" "sort" "strings" ) const DEFAULTTAG = "latest" type TagStore struct { path string graph *Graph Repositories map[string]Repository } type Repository map[string]string func NewTagStore(path string, graph *Graph) (*TagStore, error) { abspath, err := filepath.Abs(path) if err != nil { return nil, err } store := &TagStore{ path: abspath, graph: graph, Repositories: make(map[string]Repository), } // Load the json file if it exists, otherwise create it. if err := store.Reload(); os.IsNotExist(err) { if err := store.Save(); err != nil { return nil, err } } else if err != nil { return nil, err } return store, nil } func (store *TagStore) Save() error { // Store the json ball jsonData, err := json.Marshal(store) if err != nil { return err } if err := ioutil.WriteFile(store.path, jsonData, 0600); err != nil { return err } return nil } func (store *TagStore) Reload() error { jsonData, err := ioutil.ReadFile(store.path) if err != nil { return err } if err := json.Unmarshal(jsonData, store); err != nil { return err } return nil } func (store *TagStore) LookupImage(name string) (*Image, error) { // FIXME: standardize on returning nil when the image doesn't exist, and err for everything else // (so we can pass all errors here) repos, tag := utils.ParseRepositoryTag(name) if tag == "" { tag = DEFAULTTAG } img, err := store.GetImage(repos, tag) if err != nil { return nil, err } else if img == nil { if img, err = store.graph.Get(name); err != nil { return nil, err } } return img, nil } // Return a reverse-lookup table of all the names which refer to each image // Eg. 
{"43b5f19b10584": {"base:latest", "base:v1"}} func (store *TagStore) ByID() map[string][]string { byID := make(map[string][]string) for repoName, repository := range store.Repositories { for tag, id := range repository { name := repoName + ":" + tag if _, exists := byID[id]; !exists { byID[id] = []string{name} } else { byID[id] = append(byID[id], name) sort.Strings(byID[id]) } } } return byID } func (store *TagStore) ImageName(id string) string { if names, exists := store.ByID()[id]; exists && len(names) > 0 { return names[0] } return utils.TruncateID(id) } func (store *TagStore) DeleteAll(id string) error { names, exists := store.ByID()[id] if !exists || len(names) == 0 { return nil } for _, name := range names { if strings.Contains(name, ":") { nameParts := strings.Split(name, ":") if _, err := store.Delete(nameParts[0], nameParts[1]); err != nil { return err } } else { if _, err := store.Delete(name, ""); err != nil { return err } } } return nil } func (store *TagStore) Delete(repoName, tag string) (bool, error) { deleted := false if err := store.Reload(); err != nil { return false, err } if r, exists := store.Repositories[repoName]; exists { if tag != "" { if _, exists2 := r[tag]; exists2 { delete(r, tag) if len(r) == 0 { delete(store.Repositories, repoName) } deleted = true } else { return false, fmt.Errorf("No such tag: %s:%s", repoName, tag) } } else { delete(store.Repositories, repoName) deleted = true } } else { fmt.Errorf("No such repository: %s", repoName) } return deleted, store.Save() } func (store *TagStore) Set(repoName, tag, imageName string, force bool) error { img, err := store.LookupImage(imageName) if err != nil { return err } if tag == "" { tag = DEFAULTTAG } if err := validateRepoName(repoName); err != nil { return err } if err := validateTagName(tag); err != nil { return err } if err := store.Reload(); err != nil { return err } var repo Repository if r, exists := store.Repositories[repoName]; exists { repo = r } else { repo = make(map[string]string) if old, exists := store.Repositories[repoName]; exists && !force { return fmt.Errorf("Conflict: Tag %s:%s is already set to %s", repoName, tag, old) } store.Repositories[repoName] = repo } repo[tag] = img.ID return store.Save() } func (store *TagStore) Get(repoName string) (Repository, error) { if err := store.Reload(); err != nil { return nil, err } if r, exists := store.Repositories[repoName]; exists { return r, nil } return nil, nil } func (store *TagStore) GetImage(repoName, tagOrID string) (*Image, error) { repo, err := store.Get(repoName) if err != nil { return nil, err } else if repo == nil { return nil, nil } if revision, exists := repo[tagOrID]; exists { return store.graph.Get(revision) } // If no matching tag is found, search through images for a matching image id for _, revision := range repo { if strings.HasPrefix(revision, tagOrID) { return store.graph.Get(revision) } } return nil, nil } // Validate the name of a repository func validateRepoName(name string) error { if name == "" { return fmt.Errorf("Repository name can't be empty") } return nil } // Validate the name of a tag func validateTagName(name string) error { if name == "" { return fmt.Errorf("Tag name can't be empty") } if strings.Contains(name, "/") || strings.Contains(name, ":") { return fmt.Errorf("Illegal tag name: %s", name) } return nil } docker-0.9.1/dockerversion/0000755000175000017500000000000012314376205013760 5ustar tagtagdocker-0.9.1/dockerversion/dockerversion.go0000644000175000017500000000117412314376205017167 0ustar tagtagpackage 
dockerversion // FIXME: this should be embedded in the docker/docker.go, // but we can't because distro policy requires us to // package a separate dockerinit binary, and that binary needs // to know its version too. var ( GITCOMMIT string VERSION string IAMSTATIC bool // whether or not Docker itself was compiled statically via ./hack/make.sh binary INITSHA1 string // sha1sum of separate static dockerinit, if Docker itself was compiled dynamically via ./hack/make.sh dynbinary INITPATH string // custom location to search for a valid dockerinit binary (available for packagers as a last resort escape hatch) ) docker-0.9.1/engine/0000755000175000017500000000000012314376205012350 5ustar tagtagdocker-0.9.1/engine/engine.go0000644000175000017500000001234412314376205014150 0ustar tagtagpackage engine import ( "bufio" "fmt" "github.com/dotcloud/docker/utils" "io" "log" "os" "runtime" "sort" "strings" ) type Handler func(*Job) Status var globalHandlers map[string]Handler func init() { globalHandlers = make(map[string]Handler) } func Register(name string, handler Handler) error { _, exists := globalHandlers[name] if exists { return fmt.Errorf("Can't overwrite global handler for command %s", name) } globalHandlers[name] = handler return nil } func unregister(name string) { delete(globalHandlers, name) } // The Engine is the core of Docker. // It acts as a store for *containers*, and allows manipulation of these // containers by executing *jobs*. type Engine struct { root string handlers map[string]Handler hack Hack // data for temporary hackery (see hack.go) id string Stdout io.Writer Stderr io.Writer Stdin io.Reader } func (eng *Engine) Root() string { return eng.root } func (eng *Engine) Register(name string, handler Handler) error { _, exists := eng.handlers[name] if exists { return fmt.Errorf("Can't overwrite handler for command %s", name) } eng.handlers[name] = handler return nil } // New initializes a new engine managing the directory specified at `root`. // `root` is used to store containers and any other state private to the engine. // Changing the contents of the root without executing a job will cause unspecified // behavior. func New(root string) (*Engine, error) { // Check for unsupported architectures if runtime.GOARCH != "amd64" { return nil, fmt.Errorf("The docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH) } // Check for unsupported kernel versions // FIXME: it would be cleaner to not test for specific versions, but rather // test for specific functionalities. // Unfortunately we can't test for the feature "does not cause a kernel panic" // without actually causing a kernel panic, so we need this workaround until // the circumstances of pre-3.8 crashes are clearer. // For details see http://github.com/dotcloud/docker/issues/407 if k, err := utils.GetKernelVersion(); err != nil { log.Printf("WARNING: %s\n", err) } else { if utils.CompareKernelVersion(k, &utils.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 { if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. 
Please upgrade your kernel to 3.8.0.", k.String()) } } } if err := os.MkdirAll(root, 0700); err != nil && !os.IsExist(err) { return nil, err } eng := &Engine{ root: root, handlers: make(map[string]Handler), id: utils.RandomString(), Stdout: os.Stdout, Stderr: os.Stderr, Stdin: os.Stdin, } eng.Register("commands", func(job *Job) Status { for _, name := range eng.commands() { job.Printf("%s\n", name) } return StatusOK }) // Copy existing global handlers for k, v := range globalHandlers { eng.handlers[k] = v } return eng, nil } func (eng *Engine) String() string { return fmt.Sprintf("%s|%s", eng.Root(), eng.id[:8]) } // Commands returns a list of all currently registered commands, // sorted alphabetically. func (eng *Engine) commands() []string { names := make([]string, 0, len(eng.handlers)) for name := range eng.handlers { names = append(names, name) } sort.Strings(names) return names } // Job creates a new job which can later be executed. // This function mimics `Command` from the standard os/exec package. func (eng *Engine) Job(name string, args ...string) *Job { job := &Job{ Eng: eng, Name: name, Args: args, Stdin: NewInput(), Stdout: NewOutput(), Stderr: NewOutput(), env: &Env{}, } job.Stderr.Add(utils.NopWriteCloser(eng.Stderr)) handler, exists := eng.handlers[name] if exists { job.handler = handler } return job } // ParseJob creates a new job from a text description using a shell-like syntax. // // The following syntax is used to parse `input`: // // * Words are separated using standard whitespaces as separators. // * Quotes and backslashes are not interpreted. // * Words of the form 'KEY=[VALUE]' are added to the job environment. // * All other words are added to the job arguments. // // For example: // // job, _ := eng.ParseJob("VERBOSE=1 echo hello TEST=true world") // // The resulting job will have: // job.Args={"echo", "hello", "world"} // job.Env={"VERBOSE":"1", "TEST":"true"} // func (eng *Engine) ParseJob(input string) (*Job, error) { // FIXME: use a full-featured command parser scanner := bufio.NewScanner(strings.NewReader(input)) scanner.Split(bufio.ScanWords) var ( cmd []string env Env ) for scanner.Scan() { word := scanner.Text() kv := strings.SplitN(word, "=", 2) if len(kv) == 2 { env.Set(kv[0], kv[1]) } else { cmd = append(cmd, word) } } if len(cmd) == 0 { return nil, fmt.Errorf("empty command: '%s'", input) } job := eng.Job(cmd[0], cmd[1:]...) job.Env().Init(&env) return job, nil } func (eng *Engine) Logf(format string, args ...interface{}) (n int, err error) { if os.Getenv("TEST") == "" { prefixedFormat := fmt.Sprintf("[%s] %s\n", eng, strings.TrimRight(format, "\n")) return fmt.Fprintf(eng.Stderr, prefixedFormat, args...) } return 0, nil } docker-0.9.1/engine/helpers_test.go0000644000175000017500000000061212314376205015377 0ustar tagtagpackage engine import ( "github.com/dotcloud/docker/utils" "testing" ) var globalTestID string func newTestEngine(t *testing.T) *Engine { tmp, err := utils.TestDirectory("") if err != nil { t.Fatal(err) } eng, err := New(tmp) if err != nil { t.Fatal(err) } return eng } func mkJob(t *testing.T, name string, args ...string) *Job { return newTestEngine(t).Job(name, args...) } docker-0.9.1/engine/http.go0000644000175000017500000000271212314376205013660 0ustar tagtagpackage engine import ( "net/http" "path" ) // ServeHTTP executes a job as specified by the http request `r`, and sends the // result as an http response. // This method allows an Engine instance to be passed as a standard http.Handler interface. 
// // Note that the protocol used in this methid is a convenience wrapper and is not the canonical // implementation of remote job execution. This is because HTTP/1 does not handle stream multiplexing, // and so cannot differentiate stdout from stderr. Additionally, headers cannot be added to a response // once data has been written to the body, which makes it inconvenient to return metadata such // as the exit status. // func (eng *Engine) ServeHTTP(w http.ResponseWriter, r *http.Request) { var ( jobName = path.Base(r.URL.Path) jobArgs, exists = r.URL.Query()["a"] ) if !exists { jobArgs = []string{} } w.Header().Set("Job-Name", jobName) for _, arg := range jobArgs { w.Header().Add("Job-Args", arg) } job := eng.Job(jobName, jobArgs...) job.Stdout.Add(w) job.Stderr.Add(w) // FIXME: distinguish job status from engine error in Run() // The former should be passed as a special header, the former // should cause a 500 status w.WriteHeader(http.StatusOK) // The exit status cannot be sent reliably with HTTP1, because headers // can only be sent before the body. // (we could possibly use http footers via chunked encoding, but I couldn't find // how to use them in net/http) job.Run() } docker-0.9.1/engine/streams.go0000644000175000017500000001322612314376205014361 0ustar tagtagpackage engine import ( "bufio" "container/ring" "fmt" "io" "io/ioutil" "sync" ) type Output struct { sync.Mutex dests []io.Writer tasks sync.WaitGroup used bool } // NewOutput returns a new Output object with no destinations attached. // Writing to an empty Output will cause the written data to be discarded. func NewOutput() *Output { return &Output{} } // Return true if something was written on this output func (o *Output) Used() bool { o.Lock() defer o.Unlock() return o.used } // Add attaches a new destination to the Output. Any data subsequently written // to the output will be written to the new destination in addition to all the others. // This method is thread-safe. func (o *Output) Add(dst io.Writer) { o.Lock() defer o.Unlock() o.dests = append(o.dests, dst) } // Set closes and remove existing destination and then attaches a new destination to // the Output. Any data subsequently written to the output will be written to the new // destination in addition to all the others. This method is thread-safe. func (o *Output) Set(dst io.Writer) { o.Close() o.Lock() defer o.Unlock() o.dests = []io.Writer{dst} } // AddPipe creates an in-memory pipe with io.Pipe(), adds its writing end as a destination, // and returns its reading end for consumption by the caller. // This is a rough equivalent similar to Cmd.StdoutPipe() in the standard os/exec package. // This method is thread-safe. func (o *Output) AddPipe() (io.Reader, error) { r, w := io.Pipe() o.Add(w) return r, nil } // AddTail starts a new goroutine which will read all subsequent data written to the output, // line by line, and append the last `n` lines to `dst`. func (o *Output) AddTail(dst *[]string, n int) error { src, err := o.AddPipe() if err != nil { return err } o.tasks.Add(1) go func() { defer o.tasks.Done() Tail(src, n, dst) }() return nil } // AddString starts a new goroutine which will read all subsequent data written to the output, // line by line, and store the last line into `dst`. 
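//
// A minimal usage sketch, added for illustration (variable names are arbitrary):
//
//	o := NewOutput()
//	var last string
//	o.AddString(&last)
//	o.Write([]byte("one\ntwo\n"))
//	o.Close() // after Close returns, last == "two"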
func (o *Output) AddString(dst *string) error { src, err := o.AddPipe() if err != nil { return err } o.tasks.Add(1) go func() { defer o.tasks.Done() lines := make([]string, 0, 1) Tail(src, 1, &lines) if len(lines) == 0 { *dst = "" } else { *dst = lines[0] } }() return nil } // Write writes the same data to all registered destinations. // This method is thread-safe. func (o *Output) Write(p []byte) (n int, err error) { o.Lock() defer o.Unlock() o.used = true var firstErr error for _, dst := range o.dests { _, err := dst.Write(p) if err != nil && firstErr == nil { firstErr = err } } return len(p), firstErr } // Close unregisters all destinations and waits for all background // AddTail and AddString tasks to complete. // The Close method of each destination is called if it exists. func (o *Output) Close() error { o.Lock() defer o.Unlock() var firstErr error for _, dst := range o.dests { if closer, ok := dst.(io.WriteCloser); ok { err := closer.Close() if err != nil && firstErr == nil { firstErr = err } } } o.tasks.Wait() return firstErr } type Input struct { src io.Reader sync.Mutex } // NewInput returns a new Input object with no source attached. // Reading to an empty Input will return io.EOF. func NewInput() *Input { return &Input{} } // Read reads from the input in a thread-safe way. func (i *Input) Read(p []byte) (n int, err error) { i.Mutex.Lock() defer i.Mutex.Unlock() if i.src == nil { return 0, io.EOF } return i.src.Read(p) } // Closes the src // Not thread safe on purpose func (i *Input) Close() error { if i.src != nil { if closer, ok := i.src.(io.WriteCloser); ok { return closer.Close() } } return nil } // Add attaches a new source to the input. // Add can only be called once per input. Subsequent calls will // return an error. func (i *Input) Add(src io.Reader) error { i.Mutex.Lock() defer i.Mutex.Unlock() if i.src != nil { return fmt.Errorf("Maximum number of sources reached: 1") } i.src = src return nil } // Tail reads from `src` line per line, and returns the last `n` lines as an array. // A ring buffer is used to only store `n` lines at any time. func Tail(src io.Reader, n int, dst *[]string) { scanner := bufio.NewScanner(src) r := ring.New(n) for scanner.Scan() { if n == 0 { continue } r.Value = scanner.Text() r = r.Next() } r.Do(func(v interface{}) { if v == nil { return } *dst = append(*dst, v.(string)) }) } // AddEnv starts a new goroutine which will decode all subsequent data // as a stream of json-encoded objects, and point `dst` to the last // decoded object. // The result `env` can be queried using the type-neutral Env interface. // It is not safe to query `env` until the Output is closed. 
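//
// Rough usage (an illustrative sketch; the JSON keys are arbitrary):
//
//	o := NewOutput()
//	env, _ := o.AddEnv()
//	o.Write([]byte(`{"foo": "bar", "answer": 42}`))
//	o.Close()
//	// at this point env.Get("foo") == "bar" and env.GetInt("answer") == 42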
func (o *Output) AddEnv() (dst *Env, err error) { src, err := o.AddPipe() if err != nil { return nil, err } dst = &Env{} o.tasks.Add(1) go func() { defer o.tasks.Done() decoder := NewDecoder(src) for { env, err := decoder.Decode() if err != nil { return } *dst = *env } }() return dst, nil } func (o *Output) AddListTable() (dst *Table, err error) { src, err := o.AddPipe() if err != nil { return nil, err } dst = NewTable("", 0) o.tasks.Add(1) go func() { defer o.tasks.Done() content, err := ioutil.ReadAll(src) if err != nil { return } if _, err := dst.ReadListFrom(content); err != nil { return } }() return dst, nil } func (o *Output) AddTable() (dst *Table, err error) { src, err := o.AddPipe() if err != nil { return nil, err } dst = NewTable("", 0) o.tasks.Add(1) go func() { defer o.tasks.Done() if _, err := dst.ReadFrom(src); err != nil { return } }() return dst, nil } docker-0.9.1/engine/env.go0000644000175000017500000001657112314376205013501 0ustar tagtagpackage engine import ( "bytes" "encoding/json" "fmt" "io" "sort" "strconv" "strings" ) type Env []string func (env *Env) Get(key string) (value string) { // FIXME: use Map() for _, kv := range *env { if strings.Index(kv, "=") == -1 { continue } parts := strings.SplitN(kv, "=", 2) if parts[0] != key { continue } if len(parts) < 2 { value = "" } else { value = parts[1] } } return } func (env *Env) Exists(key string) bool { _, exists := env.Map()[key] return exists } func (env *Env) Init(src *Env) { (*env) = make([]string, 0, len(*src)) for _, val := range *src { (*env) = append((*env), val) } } func (env *Env) GetBool(key string) (value bool) { s := strings.ToLower(strings.Trim(env.Get(key), " \t")) if s == "" || s == "0" || s == "no" || s == "false" || s == "none" { return false } return true } func (env *Env) SetBool(key string, value bool) { if value { env.Set(key, "1") } else { env.Set(key, "0") } } func (env *Env) GetInt(key string) int { return int(env.GetInt64(key)) } func (env *Env) GetInt64(key string) int64 { s := strings.Trim(env.Get(key), " \t") val, err := strconv.ParseInt(s, 10, 64) if err != nil { return 0 } return val } func (env *Env) SetInt(key string, value int) { env.Set(key, fmt.Sprintf("%d", value)) } func (env *Env) SetInt64(key string, value int64) { env.Set(key, fmt.Sprintf("%d", value)) } // Returns nil if key not found func (env *Env) GetList(key string) []string { sval := env.Get(key) if sval == "" { return nil } l := make([]string, 0, 1) if err := json.Unmarshal([]byte(sval), &l); err != nil { l = append(l, sval) } return l } func (env *Env) GetSubEnv(key string) *Env { sval := env.Get(key) if sval == "" { return nil } buf := bytes.NewBufferString(sval) var sub Env if err := sub.Decode(buf); err != nil { return nil } return &sub } func (env *Env) SetSubEnv(key string, sub *Env) error { var buf bytes.Buffer if err := sub.Encode(&buf); err != nil { return err } env.Set(key, string(buf.Bytes())) return nil } func (env *Env) GetJson(key string, iface interface{}) error { sval := env.Get(key) if sval == "" { return nil } return json.Unmarshal([]byte(sval), iface) } func (env *Env) SetJson(key string, value interface{}) error { sval, err := json.Marshal(value) if err != nil { return err } env.Set(key, string(sval)) return nil } func (env *Env) SetList(key string, value []string) error { return env.SetJson(key, value) } func (env *Env) Set(key, value string) { *env = append(*env, key+"="+value) } func NewDecoder(src io.Reader) *Decoder { return &Decoder{ json.NewDecoder(src), } } type Decoder struct { *json.Decoder } 
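// envEncodeSketch is not part of the original package; it is a small, hedged
// example added here to show the typical Set/Encode round trip offered by Env.
// The key names are arbitrary.
func envEncodeSketch() (string, error) {
	env := &Env{}
	env.Set("name", "docker") // stored verbatim as "name=docker"
	env.SetInt("count", 3)    // stored as "count=3"
	var buf bytes.Buffer
	// Encode emits the whole environment as a single JSON object,
	// e.g. {"count":3,"name":"docker"}
	if err := env.Encode(&buf); err != nil {
		return "", err
	}
	return buf.String(), nil
}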
func (decoder *Decoder) Decode() (*Env, error) { m := make(map[string]interface{}) if err := decoder.Decoder.Decode(&m); err != nil { return nil, err } env := &Env{} for key, value := range m { env.SetAuto(key, value) } return env, nil } // DecodeEnv decodes `src` as a json dictionary, and adds // each decoded key-value pair to the environment. // // If `src` cannot be decoded as a json dictionary, an error // is returned. func (env *Env) Decode(src io.Reader) error { m := make(map[string]interface{}) if err := json.NewDecoder(src).Decode(&m); err != nil { return err } for k, v := range m { env.SetAuto(k, v) } return nil } func (env *Env) SetAuto(k string, v interface{}) { // FIXME: we fix-convert float values to int, because // encoding/json decodes integers to float64, but cannot encode them back. // (See http://golang.org/src/pkg/encoding/json/decode.go#L46) if fval, ok := v.(float64); ok { env.SetInt64(k, int64(fval)) } else if sval, ok := v.(string); ok { env.Set(k, sval) } else if val, err := json.Marshal(v); err == nil { env.Set(k, string(val)) } else { env.Set(k, fmt.Sprintf("%v", v)) } } func (env *Env) Encode(dst io.Writer) error { m := make(map[string]interface{}) for k, v := range env.Map() { var val interface{} if err := json.Unmarshal([]byte(v), &val); err == nil { // FIXME: we fix-convert float values to int, because // encoding/json decodes integers to float64, but cannot encode them back. // (See http://golang.org/src/pkg/encoding/json/decode.go#L46) if fval, isFloat := val.(float64); isFloat { val = int(fval) } m[k] = val } else { m[k] = v } } if err := json.NewEncoder(dst).Encode(&m); err != nil { return err } return nil } func (env *Env) WriteTo(dst io.Writer) (n int64, err error) { // FIXME: return the number of bytes written to respect io.WriterTo return 0, env.Encode(dst) } func (env *Env) Import(src interface{}) (err error) { defer func() { if err != nil { err = fmt.Errorf("ImportEnv: %s", err) } }() var buf bytes.Buffer if err := json.NewEncoder(&buf).Encode(src); err != nil { return err } if err := env.Decode(&buf); err != nil { return err } return nil } func (env *Env) Map() map[string]string { m := make(map[string]string) for _, kv := range *env { parts := strings.SplitN(kv, "=", 2) m[parts[0]] = parts[1] } return m } type Table struct { Data []*Env sortKey string Chan chan *Env } func NewTable(sortKey string, sizeHint int) *Table { return &Table{ make([]*Env, 0, sizeHint), sortKey, make(chan *Env), } } func (t *Table) SetKey(sortKey string) { t.sortKey = sortKey } func (t *Table) Add(env *Env) { t.Data = append(t.Data, env) } func (t *Table) Len() int { return len(t.Data) } func (t *Table) Less(a, b int) bool { return t.lessBy(a, b, t.sortKey) } func (t *Table) lessBy(a, b int, by string) bool { keyA := t.Data[a].Get(by) keyB := t.Data[b].Get(by) intA, errA := strconv.ParseInt(keyA, 10, 64) intB, errB := strconv.ParseInt(keyB, 10, 64) if errA == nil && errB == nil { return intA < intB } return keyA < keyB } func (t *Table) Swap(a, b int) { tmp := t.Data[a] t.Data[a] = t.Data[b] t.Data[b] = tmp } func (t *Table) Sort() { sort.Sort(t) } func (t *Table) ReverseSort() { sort.Sort(sort.Reverse(t)) } func (t *Table) WriteListTo(dst io.Writer) (n int64, err error) { if _, err := dst.Write([]byte{'['}); err != nil { return -1, err } n = 1 for i, env := range t.Data { bytes, err := env.WriteTo(dst) if err != nil { return -1, err } n += bytes if i != len(t.Data)-1 { if _, err := dst.Write([]byte{','}); err != nil { return -1, err } n += 1 } } if _, err := 
dst.Write([]byte{']'}); err != nil { return -1, err } return n + 1, nil } func (t *Table) ToListString() (string, error) { buffer := bytes.NewBuffer(nil) if _, err := t.WriteListTo(buffer); err != nil { return "", err } return buffer.String(), nil } func (t *Table) WriteTo(dst io.Writer) (n int64, err error) { for _, env := range t.Data { bytes, err := env.WriteTo(dst) if err != nil { return -1, err } n += bytes } return n, nil } func (t *Table) ReadListFrom(src []byte) (n int64, err error) { var array []interface{} if err := json.Unmarshal(src, &array); err != nil { return -1, err } for _, item := range array { if m, ok := item.(map[string]interface{}); ok { env := &Env{} for key, value := range m { env.SetAuto(key, value) } t.Add(env) } } return int64(len(src)), nil } func (t *Table) ReadFrom(src io.Reader) (n int64, err error) { decoder := NewDecoder(src) for { env, err := decoder.Decode() if err == io.EOF { return 0, nil } else if err != nil { return -1, err } t.Add(env) } return 0, nil } docker-0.9.1/engine/job_test.go0000644000175000017500000000432112314376205014510 0ustar tagtagpackage engine import ( "os" "testing" ) func TestJobStatusOK(t *testing.T) { eng := newTestEngine(t) defer os.RemoveAll(eng.Root()) eng.Register("return_ok", func(job *Job) Status { return StatusOK }) err := eng.Job("return_ok").Run() if err != nil { t.Fatalf("Expected: err=%v\nReceived: err=%v", nil, err) } } func TestJobStatusErr(t *testing.T) { eng := newTestEngine(t) defer os.RemoveAll(eng.Root()) eng.Register("return_err", func(job *Job) Status { return StatusErr }) err := eng.Job("return_err").Run() if err == nil { t.Fatalf("When a job returns StatusErr, Run() should return an error") } } func TestJobStatusNotFound(t *testing.T) { eng := newTestEngine(t) defer os.RemoveAll(eng.Root()) eng.Register("return_not_found", func(job *Job) Status { return StatusNotFound }) err := eng.Job("return_not_found").Run() if err == nil { t.Fatalf("When a job returns StatusNotFound, Run() should return an error") } } func TestJobStdoutString(t *testing.T) { eng := newTestEngine(t) defer os.RemoveAll(eng.Root()) // FIXME: test multiple combinations of output and status eng.Register("say_something_in_stdout", func(job *Job) Status { job.Printf("Hello world\n") return StatusOK }) job := eng.Job("say_something_in_stdout") var output string if err := job.Stdout.AddString(&output); err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } if expectedOutput := "Hello world"; output != expectedOutput { t.Fatalf("Stdout last line:\nExpected: %v\nReceived: %v", expectedOutput, output) } } func TestJobStderrString(t *testing.T) { eng := newTestEngine(t) defer os.RemoveAll(eng.Root()) // FIXME: test multiple combinations of output and status eng.Register("say_something_in_stderr", func(job *Job) Status { job.Errorf("Warning, something might happen\nHere it comes!\nOh no...\nSomething happened\n") return StatusOK }) job := eng.Job("say_something_in_stderr") var output string if err := job.Stderr.AddString(&output); err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } if expectedOutput := "Something happened"; output != expectedOutput { t.Fatalf("Stderr last line:\nExpected: %v\nReceived: %v", expectedOutput, output) } } docker-0.9.1/engine/hack.go0000644000175000017500000000055612314376205013613 0ustar tagtagpackage engine type Hack map[string]interface{} func (eng *Engine) Hack_GetGlobalVar(key string) interface{} { if eng.hack == nil { return nil } val, exists := eng.hack[key] if !exists 
{ return nil } return val } func (eng *Engine) Hack_SetGlobalVar(key string, val interface{}) { if eng.hack == nil { eng.hack = make(Hack) } eng.hack[key] = val } docker-0.9.1/engine/MAINTAINERS0000644000175000017500000000007612314376205014050 0ustar tagtag#Solomon Hykes Temporarily unavailable docker-0.9.1/engine/streams_test.go0000644000175000017500000001501112314376205015412 0ustar tagtagpackage engine import ( "bufio" "bytes" "fmt" "io" "io/ioutil" "strings" "testing" ) func TestOutputAddString(t *testing.T) { var testInputs = [][2]string{ { "hello, world!", "hello, world!", }, { "One\nTwo\nThree", "Three", }, { "", "", }, { "A line\nThen another nl-terminated line\n", "Then another nl-terminated line", }, { "A line followed by an empty line\n\n", "", }, } for _, testData := range testInputs { input := testData[0] expectedOutput := testData[1] o := NewOutput() var output string if err := o.AddString(&output); err != nil { t.Error(err) } if n, err := o.Write([]byte(input)); err != nil { t.Error(err) } else if n != len(input) { t.Errorf("Expected %d, got %d", len(input), n) } o.Close() if output != expectedOutput { t.Errorf("Last line is not stored as return string.\nInput: '%s'\nExpected: '%s'\nGot: '%s'", input, expectedOutput, output) } } } type sentinelWriteCloser struct { calledWrite bool calledClose bool } func (w *sentinelWriteCloser) Write(p []byte) (int, error) { w.calledWrite = true return len(p), nil } func (w *sentinelWriteCloser) Close() error { w.calledClose = true return nil } func TestOutputAddEnv(t *testing.T) { input := "{\"foo\": \"bar\", \"answer_to_life_the_universe_and_everything\": 42}" o := NewOutput() result, err := o.AddEnv() if err != nil { t.Fatal(err) } o.Write([]byte(input)) o.Close() if v := result.Get("foo"); v != "bar" { t.Errorf("Expected %v, got %v", "bar", v) } if v := result.GetInt("answer_to_life_the_universe_and_everything"); v != 42 { t.Errorf("Expected %v, got %v", 42, v) } if v := result.Get("this-value-doesnt-exist"); v != "" { t.Errorf("Expected %v, got %v", "", v) } } func TestOutputAddClose(t *testing.T) { o := NewOutput() var s sentinelWriteCloser o.Add(&s) if err := o.Close(); err != nil { t.Fatal(err) } // Write data after the output is closed. // Write should succeed, but no destination should receive it. 
if _, err := o.Write([]byte("foo bar")); err != nil { t.Fatal(err) } if !s.calledClose { t.Fatal("Output.Close() didn't close the destination") } } func TestOutputAddPipe(t *testing.T) { var testInputs = []string{ "hello, world!", "One\nTwo\nThree", "", "A line\nThen another nl-terminated line\n", "A line followed by an empty line\n\n", } for _, input := range testInputs { expectedOutput := input o := NewOutput() r, err := o.AddPipe() if err != nil { t.Fatal(err) } go func(o *Output) { if n, err := o.Write([]byte(input)); err != nil { t.Error(err) } else if n != len(input) { t.Errorf("Expected %d, got %d", len(input), n) } if err := o.Close(); err != nil { t.Error(err) } }(o) output, err := ioutil.ReadAll(r) if err != nil { t.Fatal(err) } if string(output) != expectedOutput { t.Errorf("Last line is not stored as return string.\nExpected: '%s'\nGot: '%s'", expectedOutput, output) } } } func TestTail(t *testing.T) { var tests = make(map[string][][]string) tests["hello, world!"] = [][]string{ {}, {"hello, world!"}, {"hello, world!"}, {"hello, world!"}, } tests["One\nTwo\nThree"] = [][]string{ {}, {"Three"}, {"Two", "Three"}, {"One", "Two", "Three"}, } for input, outputs := range tests { for n, expectedOutput := range outputs { var output []string Tail(strings.NewReader(input), n, &output) if fmt.Sprintf("%v", output) != fmt.Sprintf("%v", expectedOutput) { t.Errorf("Tail n=%d returned wrong result.\nExpected: '%s'\nGot : '%s'", expectedOutput, output) } } } } func TestOutputAddTail(t *testing.T) { var tests = make(map[string][][]string) tests["hello, world!"] = [][]string{ {}, {"hello, world!"}, {"hello, world!"}, {"hello, world!"}, } tests["One\nTwo\nThree"] = [][]string{ {}, {"Three"}, {"Two", "Three"}, {"One", "Two", "Three"}, } for input, outputs := range tests { for n, expectedOutput := range outputs { o := NewOutput() var output []string if err := o.AddTail(&output, n); err != nil { t.Error(err) } if n, err := o.Write([]byte(input)); err != nil { t.Error(err) } else if n != len(input) { t.Errorf("Expected %d, got %d", len(input), n) } o.Close() if fmt.Sprintf("%v", output) != fmt.Sprintf("%v", expectedOutput) { t.Errorf("Tail(%d) returned wrong result.\nExpected: %v\nGot: %v", n, expectedOutput, output) } } } } func lastLine(txt string) string { scanner := bufio.NewScanner(strings.NewReader(txt)) var lastLine string for scanner.Scan() { lastLine = scanner.Text() } return lastLine } func TestOutputAdd(t *testing.T) { o := NewOutput() b := &bytes.Buffer{} o.Add(b) input := "hello, world!" 
if n, err := o.Write([]byte(input)); err != nil { t.Fatal(err) } else if n != len(input) { t.Fatalf("Expected %d, got %d", len(input), n) } if output := b.String(); output != input { t.Fatal("Received wrong data from Add.\nExpected: '%s'\nGot: '%s'", input, output) } } func TestOutputWriteError(t *testing.T) { o := NewOutput() buf := &bytes.Buffer{} o.Add(buf) r, w := io.Pipe() input := "Hello there" expectedErr := fmt.Errorf("This is an error") r.CloseWithError(expectedErr) o.Add(w) n, err := o.Write([]byte(input)) if err != expectedErr { t.Fatalf("Output.Write() should return the first error encountered, if any") } if buf.String() != input { t.Fatalf("Output.Write() should attempt write on all destinations, even after encountering an error") } if n != len(input) { t.Fatalf("Output.Write() should return the size of the input if it successfully writes to at least one destination") } } func TestInputAddEmpty(t *testing.T) { i := NewInput() var b bytes.Buffer if err := i.Add(&b); err != nil { t.Fatal(err) } data, err := ioutil.ReadAll(i) if err != nil { t.Fatal(err) } if len(data) > 0 { t.Fatalf("Read from empty input shoul yield no data") } } func TestInputAddTwo(t *testing.T) { i := NewInput() var b1 bytes.Buffer // First add should succeed if err := i.Add(&b1); err != nil { t.Fatal(err) } var b2 bytes.Buffer // Second add should fail if err := i.Add(&b2); err == nil { t.Fatalf("Adding a second source should return an error") } } func TestInputAddNotEmpty(t *testing.T) { i := NewInput() b := bytes.NewBufferString("hello world\nabc") expectedResult := b.String() i.Add(b) result, err := ioutil.ReadAll(i) if err != nil { t.Fatal(err) } if string(result) != expectedResult { t.Fatalf("Expected: %v\nReceived: %v", expectedResult, result) } } docker-0.9.1/engine/env_test.go0000644000175000017500000000455412314376205014536 0ustar tagtagpackage engine import ( "testing" ) func TestNewJob(t *testing.T) { job := mkJob(t, "dummy", "--level=awesome") if job.Name != "dummy" { t.Fatalf("Wrong job name: %s", job.Name) } if len(job.Args) != 1 { t.Fatalf("Wrong number of job arguments: %d", len(job.Args)) } if job.Args[0] != "--level=awesome" { t.Fatalf("Wrong job arguments: %s", job.Args[0]) } } func TestSetenv(t *testing.T) { job := mkJob(t, "dummy") job.Setenv("foo", "bar") if val := job.Getenv("foo"); val != "bar" { t.Fatalf("Getenv returns incorrect value: %s", val) } job.Setenv("bar", "") if val := job.Getenv("bar"); val != "" { t.Fatalf("Getenv returns incorrect value: %s", val) } if val := job.Getenv("nonexistent"); val != "" { t.Fatalf("Getenv returns incorrect value: %s", val) } } func TestSetenvBool(t *testing.T) { job := mkJob(t, "dummy") job.SetenvBool("foo", true) if val := job.GetenvBool("foo"); !val { t.Fatalf("GetenvBool returns incorrect value: %t", val) } job.SetenvBool("bar", false) if val := job.GetenvBool("bar"); val { t.Fatalf("GetenvBool returns incorrect value: %t", val) } if val := job.GetenvBool("nonexistent"); val { t.Fatalf("GetenvBool returns incorrect value: %t", val) } } func TestSetenvInt(t *testing.T) { job := mkJob(t, "dummy") job.SetenvInt("foo", -42) if val := job.GetenvInt("foo"); val != -42 { t.Fatalf("GetenvInt returns incorrect value: %d", val) } job.SetenvInt("bar", 42) if val := job.GetenvInt("bar"); val != 42 { t.Fatalf("GetenvInt returns incorrect value: %d", val) } if val := job.GetenvInt("nonexistent"); val != 0 { t.Fatalf("GetenvInt returns incorrect value: %d", val) } } func TestSetenvList(t *testing.T) { job := mkJob(t, "dummy") job.SetenvList("foo", 
[]string{"bar"}) if val := job.GetenvList("foo"); len(val) != 1 || val[0] != "bar" { t.Fatalf("GetenvList returns incorrect value: %v", val) } job.SetenvList("bar", nil) if val := job.GetenvList("bar"); val != nil { t.Fatalf("GetenvList returns incorrect value: %v", val) } if val := job.GetenvList("nonexistent"); val != nil { t.Fatalf("GetenvList returns incorrect value: %v", val) } } func TestEnviron(t *testing.T) { job := mkJob(t, "dummy") job.Setenv("foo", "bar") val, exists := job.Environ()["foo"] if !exists { t.Fatalf("foo not found in the environ") } if val != "bar" { t.Fatalf("bar not found in the environ") } } docker-0.9.1/engine/engine_test.go0000644000175000017500000001014612314376205015205 0ustar tagtagpackage engine import ( "bytes" "io/ioutil" "os" "path" "path/filepath" "strings" "testing" ) func TestRegister(t *testing.T) { if err := Register("dummy1", nil); err != nil { t.Fatal(err) } if err := Register("dummy1", nil); err == nil { t.Fatalf("Expecting error, got none") } // Register is global so let's cleanup to avoid conflicts defer unregister("dummy1") eng := newTestEngine(t) //Should fail because global handlers are copied //at the engine creation if err := eng.Register("dummy1", nil); err == nil { t.Fatalf("Expecting error, got none") } if err := eng.Register("dummy2", nil); err != nil { t.Fatal(err) } if err := eng.Register("dummy2", nil); err == nil { t.Fatalf("Expecting error, got none") } defer unregister("dummy2") } func TestJob(t *testing.T) { eng := newTestEngine(t) job1 := eng.Job("dummy1", "--level=awesome") if job1.handler != nil { t.Fatalf("job1.handler should be empty") } h := func(j *Job) Status { j.Printf("%s\n", j.Name) return 42 } eng.Register("dummy2", h) defer unregister("dummy2") job2 := eng.Job("dummy2", "--level=awesome") if job2.handler == nil { t.Fatalf("job2.handler shouldn't be nil") } if job2.handler(job2) != 42 { t.Fatalf("handler dummy2 was not found in job2") } } func TestEngineCommands(t *testing.T) { eng := newTestEngine(t) defer os.RemoveAll(eng.Root()) handler := func(job *Job) Status { return StatusOK } eng.Register("foo", handler) eng.Register("bar", handler) eng.Register("echo", handler) eng.Register("die", handler) var output bytes.Buffer commands := eng.Job("commands") commands.Stdout.Add(&output) commands.Run() expected := "bar\ncommands\ndie\necho\nfoo\n" if result := output.String(); result != expected { t.Fatalf("Unexpected output:\nExpected = %v\nResult = %v\n", expected, result) } } func TestEngineRoot(t *testing.T) { tmp, err := ioutil.TempDir("", "docker-test-TestEngineCreateDir") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) // We expect Root to resolve to an absolute path. // FIXME: this should not be necessary. // Until the above FIXME is implemented, let's check for the // current behavior. 
tmp, err = filepath.EvalSymlinks(tmp) if err != nil { t.Fatal(err) } tmp, err = filepath.Abs(tmp) if err != nil { t.Fatal(err) } dir := path.Join(tmp, "dir") eng, err := New(dir) if err != nil { t.Fatal(err) } if st, err := os.Stat(dir); err != nil { t.Fatal(err) } else if !st.IsDir() { t.Fatalf("engine.New() created something other than a directory at %s", dir) } if r := eng.Root(); r != dir { t.Fatalf("Expected: %v\nReceived: %v", dir, r) } } func TestEngineString(t *testing.T) { eng1 := newTestEngine(t) defer os.RemoveAll(eng1.Root()) eng2 := newTestEngine(t) defer os.RemoveAll(eng2.Root()) s1 := eng1.String() s2 := eng2.String() if eng1 == eng2 { t.Fatalf("Different engines should have different names (%v == %v)", s1, s2) } } func TestEngineLogf(t *testing.T) { eng := newTestEngine(t) defer os.RemoveAll(eng.Root()) input := "Test log line" if n, err := eng.Logf("%s\n", input); err != nil { t.Fatal(err) } else if n < len(input) { t.Fatalf("Test: Logf() should print at least as much as the input\ninput=%d\nprinted=%d", len(input), n) } } func TestParseJob(t *testing.T) { eng := newTestEngine(t) defer os.RemoveAll(eng.Root()) // Verify that the resulting job calls to the right place var called bool eng.Register("echo", func(job *Job) Status { called = true return StatusOK }) input := "echo DEBUG=1 hello world VERBOSITY=42" job, err := eng.ParseJob(input) if err != nil { t.Fatal(err) } if job.Name != "echo" { t.Fatalf("Invalid job name: %v", job.Name) } if strings.Join(job.Args, ":::") != "hello:::world" { t.Fatalf("Invalid job args: %v", job.Args) } if job.Env().Get("DEBUG") != "1" { t.Fatalf("Invalid job env: %v", job.Env) } if job.Env().Get("VERBOSITY") != "42" { t.Fatalf("Invalid job env: %v", job.Env) } if len(job.Env().Map()) != 2 { t.Fatalf("Invalid job env: %v", job.Env) } if err := job.Run(); err != nil { t.Fatal(err) } if !called { t.Fatalf("Job was not called") } } docker-0.9.1/engine/job.go0000644000175000017500000001213012314376205013446 0ustar tagtagpackage engine import ( "fmt" "io" "os" "strings" "time" ) // A job is the fundamental unit of work in the docker engine. // Everything docker can do should eventually be exposed as a job. // For example: execute a process in a container, create a new container, // download an archive from the internet, serve the http api, etc. // // The job API is designed after unix processes: a job has a name, arguments, // environment variables, standard streams for input, output and error, and // an exit status which can indicate success (0) or error (anything else). // // One slight variation is that jobs report their status as a string. The // string "0" indicates success, and any other strings indicates an error. // This allows for richer error reporting. // type Job struct { Eng *Engine Name string Args []string env *Env Stdout *Output Stderr *Output Stdin *Input handler Handler status Status end time.Time onExit []func() } type Status int const ( StatusOK Status = 0 StatusErr Status = 1 StatusNotFound Status = 127 ) // Run executes the job and blocks until the job completes. // If the job returns a failure status, an error is returned // which includes the status. 
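//
// A typical call site looks roughly like this (the handler name and the
// environment key are hypothetical, used only for illustration):
//
//	job := eng.Job("create", "mycontainer")
//	job.Setenv("Image", "busybox")
//	if err := job.Run(); err != nil {
//		// err carries the last line the handler wrote to the job's stderr
//	}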
func (job *Job) Run() error { // FIXME: make this thread-safe // FIXME: implement wait if !job.end.IsZero() { return fmt.Errorf("%s: job has already completed", job.Name) } // Log beginning and end of the job job.Eng.Logf("+job %s", job.CallString()) defer func() { job.Eng.Logf("-job %s%s", job.CallString(), job.StatusString()) }() var errorMessage string job.Stderr.AddString(&errorMessage) if job.handler == nil { job.Errorf("%s: command not found", job.Name) job.status = 127 } else { job.status = job.handler(job) job.end = time.Now() } // Wait for all background tasks to complete if err := job.Stdout.Close(); err != nil { return err } if err := job.Stderr.Close(); err != nil { return err } if job.status != 0 { return fmt.Errorf("%s", errorMessage) } return nil } func (job *Job) CallString() string { return fmt.Sprintf("%s(%s)", job.Name, strings.Join(job.Args, ", ")) } func (job *Job) StatusString() string { // If the job hasn't completed, status string is empty if job.end.IsZero() { return "" } var okerr string if job.status == StatusOK { okerr = "OK" } else { okerr = "ERR" } return fmt.Sprintf(" = %s (%d)", okerr, job.status) } // String returns a human-readable description of `job` func (job *Job) String() string { return fmt.Sprintf("%s.%s%s", job.Eng, job.CallString(), job.StatusString()) } func (job *Job) Env() *Env { return job.env } func (job *Job) EnvExists(key string) (value bool) { return job.env.Exists(key) } func (job *Job) Getenv(key string) (value string) { return job.env.Get(key) } func (job *Job) GetenvBool(key string) (value bool) { return job.env.GetBool(key) } func (job *Job) SetenvBool(key string, value bool) { job.env.SetBool(key, value) } func (job *Job) GetenvSubEnv(key string) *Env { return job.env.GetSubEnv(key) } func (job *Job) SetenvSubEnv(key string, value *Env) error { return job.env.SetSubEnv(key, value) } func (job *Job) GetenvInt64(key string) int64 { return job.env.GetInt64(key) } func (job *Job) GetenvInt(key string) int { return job.env.GetInt(key) } func (job *Job) SetenvInt64(key string, value int64) { job.env.SetInt64(key, value) } func (job *Job) SetenvInt(key string, value int) { job.env.SetInt(key, value) } // Returns nil if key not found func (job *Job) GetenvList(key string) []string { return job.env.GetList(key) } func (job *Job) GetenvJson(key string, iface interface{}) error { return job.env.GetJson(key, iface) } func (job *Job) SetenvJson(key string, value interface{}) error { return job.env.SetJson(key, value) } func (job *Job) SetenvList(key string, value []string) error { return job.env.SetJson(key, value) } func (job *Job) Setenv(key, value string) { job.env.Set(key, value) } // DecodeEnv decodes `src` as a json dictionary, and adds // each decoded key-value pair to the environment. // // If `src` cannot be decoded as a json dictionary, an error // is returned. func (job *Job) DecodeEnv(src io.Reader) error { return job.env.Decode(src) } func (job *Job) EncodeEnv(dst io.Writer) error { return job.env.Encode(dst) } func (job *Job) ImportEnv(src interface{}) (err error) { return job.env.Import(src) } func (job *Job) Environ() map[string]string { return job.env.Map() } func (job *Job) Logf(format string, args ...interface{}) (n int, err error) { if os.Getenv("TEST") == "" { prefixedFormat := fmt.Sprintf("[%s] %s\n", job, strings.TrimRight(format, "\n")) return fmt.Fprintf(job.Stderr, prefixedFormat, args...) 
} return 0, nil } func (job *Job) Printf(format string, args ...interface{}) (n int, err error) { return fmt.Fprintf(job.Stdout, format, args...) } func (job *Job) Errorf(format string, args ...interface{}) Status { if format[len(format)-1] != '\n' { format = format + "\n" } fmt.Fprintf(job.Stderr, format, args...) return StatusErr } func (job *Job) Error(err error) Status { fmt.Fprintf(job.Stderr, "%s\n", err) return StatusErr } docker-0.9.1/engine/table_test.go0000644000175000017500000000105512314376205015026 0ustar tagtagpackage engine import ( "bytes" "encoding/json" "testing" ) func TestTableWriteTo(t *testing.T) { table := NewTable("", 0) e := &Env{} e.Set("foo", "bar") table.Add(e) var buf bytes.Buffer if _, err := table.WriteTo(&buf); err != nil { t.Fatal(err) } output := make(map[string]string) if err := json.Unmarshal(buf.Bytes(), &output); err != nil { t.Fatal(err) } if len(output) != 1 { t.Fatalf("Incorrect output: %v", output) } if val, exists := output["foo"]; !exists || val != "bar" { t.Fatalf("Inccorect output: %v", output) } } docker-0.9.1/Dockerfile0000644000175000017500000000660012314376205013077 0ustar tagtag# This file describes the standard way to build Docker, using docker # # Usage: # # # Assemble the full dev environment. This is slow the first time. # docker build -t docker . # # # Mount your source in an interactive container for quick testing: # docker run -v `pwd`:/go/src/github.com/dotcloud/docker -privileged -i -t docker bash # # # Run the test suite: # docker run -privileged docker hack/make.sh test # # # Publish a release: # docker run -privileged \ # -e AWS_S3_BUCKET=baz \ # -e AWS_ACCESS_KEY=foo \ # -e AWS_SECRET_KEY=bar \ # -e GPG_PASSPHRASE=gloubiboulga \ # docker hack/release.sh # # Note: Apparmor used to mess with privileged mode, but this is no longer # the case. Therefore, you don't have to disable it anymore. 
# docker-version 0.6.1 FROM ubuntu:13.10 MAINTAINER Tianon Gravi (@tianon) # Packaged dependencies RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ apt-utils \ aufs-tools \ automake \ btrfs-tools \ build-essential \ curl \ dpkg-sig \ git \ iptables \ libapparmor-dev \ libcap-dev \ libsqlite3-dev \ mercurial \ reprepro \ ruby1.9.1 \ ruby1.9.1-dev \ s3cmd=1.1.0* \ --no-install-recommends # Get and compile LXC 0.8 (since it is the most stable) RUN git clone --no-checkout https://github.com/lxc/lxc.git /usr/local/lxc && cd /usr/local/lxc && git checkout -q lxc-0.8.0 RUN cd /usr/local/lxc && ./autogen.sh && ./configure --disable-docs && make && make install # Get lvm2 source for compiling statically RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 # see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags # note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly # Compile and install lvm2 RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper # see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL # Install Go RUN curl -s https://go.googlecode.com/files/go1.2.1.src.tar.gz | tar -v -C /usr/local -xz ENV PATH /usr/local/go/bin:$PATH ENV GOPATH /go:/go/src/github.com/dotcloud/docker/vendor RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 # Compile Go for cross compilation ENV DOCKER_CROSSPLATFORMS linux/386 linux/arm darwin/amd64 darwin/386 # (set an explicit GOARM of 5 for maximum compatibility) ENV GOARM 5 RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' # Grab Go's cover tool for dead-simple code coverage testing RUN go get code.google.com/p/go.tools/cmd/cover # TODO replace FPM with some very minimal debhelper stuff RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 # Setup s3cmd config RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg # Set user.email so crosbymichael's in-container merge commits go smoothly RUN git config --global user.email 'docker-dummy@example.com' VOLUME /var/lib/docker WORKDIR /go/src/github.com/dotcloud/docker ENV DOCKER_BUILDTAGS apparmor # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] # Upload docker source ADD . 
/go/src/github.com/dotcloud/docker docker-0.9.1/Makefile0000644000175000017500000000204312314376205012542 0ustar tagtag.PHONY: all binary build cross default docs docs-build docs-shell shell test test-integration GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD) DOCKER_IMAGE := docker:$(GIT_BRANCH) DOCKER_DOCS_IMAGE := docker-docs:$(GIT_BRANCH) DOCKER_RUN_DOCKER := docker run --rm -i -t --privileged -e TESTFLAGS -v "$(CURDIR)/bundles:/go/src/github.com/dotcloud/docker/bundles" "$(DOCKER_IMAGE)" default: binary all: build $(DOCKER_RUN_DOCKER) hack/make.sh binary: build $(DOCKER_RUN_DOCKER) hack/make.sh binary cross: build $(DOCKER_RUN_DOCKER) hack/make.sh binary cross docs: docs-build docker run --rm -i -t -p 8000:8000 "$(DOCKER_DOCS_IMAGE)" docs-shell: docs-build docker run --rm -i -t -p 8000:8000 "$(DOCKER_DOCS_IMAGE)" bash test: build $(DOCKER_RUN_DOCKER) hack/make.sh test test-integration test-integration: build $(DOCKER_RUN_DOCKER) hack/make.sh test-integration shell: build $(DOCKER_RUN_DOCKER) bash build: bundles docker build -t "$(DOCKER_IMAGE)" . docs-build: docker build -t "$(DOCKER_DOCS_IMAGE)" docs bundles: mkdir bundles docker-0.9.1/utils/0000755000175000017500000000000012314376205012243 5ustar tagtagdocker-0.9.1/utils/progressreader.go0000644000175000017500000000303112314376205015616 0ustar tagtagpackage utils import ( "io" "time" ) // Reader with progress bar type progressReader struct { reader io.ReadCloser // Stream to read from output io.Writer // Where to send progress bar to progress JSONProgress lastUpdate int // How many bytes read at least update ID string action string sf *StreamFormatter newLine bool } func (r *progressReader) Read(p []byte) (n int, err error) { read, err := r.reader.Read(p) r.progress.Current += read updateEvery := 1024 * 512 //512kB if r.progress.Total > 0 { // Update progress for every 1% read if 1% < 512kB if increment := int(0.01 * float64(r.progress.Total)); increment < updateEvery { updateEvery = increment } } if r.progress.Current-r.lastUpdate > updateEvery || err != nil { r.output.Write(r.sf.FormatProgress(r.ID, r.action, &r.progress)) r.lastUpdate = r.progress.Current } // Send newline when complete if r.newLine && err != nil { r.output.Write(r.sf.FormatStatus("", "")) } return read, err } func (r *progressReader) Close() error { r.progress.Current = r.progress.Total r.output.Write(r.sf.FormatProgress(r.ID, r.action, &r.progress)) return r.reader.Close() } func ProgressReader(r io.ReadCloser, size int, output io.Writer, sf *StreamFormatter, newline bool, ID, action string) *progressReader { return &progressReader{ reader: r, output: NewWriteFlusher(output), ID: ID, action: action, progress: JSONProgress{Total: size, Start: time.Now().UTC().Unix()}, sf: sf, newLine: newline, } } docker-0.9.1/utils/http.go0000644000175000017500000001010612314376205013547 0ustar tagtagpackage utils import ( "bytes" "io" "net/http" "strings" ) // VersionInfo is used to model entities which has a version. // It is basically a tupple with name and version. type VersionInfo interface { Name() string Version() string } func validVersion(version VersionInfo) bool { stopChars := " \t\r\n/" if strings.ContainsAny(version.Name(), stopChars) { return false } if strings.ContainsAny(version.Version(), stopChars) { return false } return true } // Convert versions to a string and append the string to the string base. 
// // Each VersionInfo will be converted to a string in the format of // "product/version", where the "product" is get from the Name() method, while // version is get from the Version() method. Several pieces of verson information // will be concatinated and separated by space. func appendVersions(base string, versions ...VersionInfo) string { if len(versions) == 0 { return base } var buf bytes.Buffer if len(base) > 0 { buf.Write([]byte(base)) } for _, v := range versions { name := []byte(v.Name()) version := []byte(v.Version()) if len(name) == 0 || len(version) == 0 { continue } if !validVersion(v) { continue } buf.Write([]byte(v.Name())) buf.Write([]byte("/")) buf.Write([]byte(v.Version())) buf.Write([]byte(" ")) } return buf.String() } // HTTPRequestDecorator is used to change an instance of // http.Request. It could be used to add more header fields, // change body, etc. type HTTPRequestDecorator interface { // ChangeRequest() changes the request accordingly. // The changed request will be returned or err will be non-nil // if an error occur. ChangeRequest(req *http.Request) (newReq *http.Request, err error) } // HTTPUserAgentDecorator appends the product/version to the user agent field // of a request. type HTTPUserAgentDecorator struct { versions []VersionInfo } func NewHTTPUserAgentDecorator(versions ...VersionInfo) HTTPRequestDecorator { return &HTTPUserAgentDecorator{ versions: versions, } } func (h *HTTPUserAgentDecorator) ChangeRequest(req *http.Request) (newReq *http.Request, err error) { if req == nil { return req, nil } userAgent := appendVersions(req.UserAgent(), h.versions...) if len(userAgent) > 0 { req.Header.Set("User-Agent", userAgent) } return req, nil } type HTTPMetaHeadersDecorator struct { Headers map[string][]string } func (h *HTTPMetaHeadersDecorator) ChangeRequest(req *http.Request) (newReq *http.Request, err error) { if h.Headers == nil { return req, nil } for k, v := range h.Headers { req.Header[k] = v } return req, nil } type HTTPAuthDecorator struct { login string password string } func NewHTTPAuthDecorator(login, password string) HTTPRequestDecorator { return &HTTPAuthDecorator{ login: login, password: password, } } func (self *HTTPAuthDecorator) ChangeRequest(req *http.Request) (*http.Request, error) { req.SetBasicAuth(self.login, self.password) return req, nil } // HTTPRequestFactory creates an HTTP request // and applies a list of decorators on the request. type HTTPRequestFactory struct { decorators []HTTPRequestDecorator } func NewHTTPRequestFactory(d ...HTTPRequestDecorator) *HTTPRequestFactory { return &HTTPRequestFactory{ decorators: d, } } func (self *HTTPRequestFactory) AddDecorator(d ...HTTPRequestDecorator) { self.decorators = append(self.decorators, d...) } // NewRequest() creates a new *http.Request, // applies all decorators in the HTTPRequestFactory on the request, // then applies decorators provided by d on the request. func (h *HTTPRequestFactory) NewRequest(method, urlStr string, body io.Reader, d ...HTTPRequestDecorator) (*http.Request, error) { req, err := http.NewRequest(method, urlStr, body) if err != nil { return nil, err } // By default, a nil factory should work. 
if h == nil { return req, nil } for _, dec := range h.decorators { req, err = dec.ChangeRequest(req) if err != nil { return nil, err } } for _, dec := range d { req, err = dec.ChangeRequest(req) if err != nil { return nil, err } } Debugf("%v -- HEADERS: %v", req.URL, req.Header) return req, err } docker-0.9.1/utils/jsonmessage_test.go0000644000175000017500000000133312314376205016147 0ustar tagtagpackage utils import ( "testing" ) func TestError(t *testing.T) { je := JSONError{404, "Not found"} if je.Error() != "Not found" { t.Fatalf("Expected 'Not found' got '%s'", je.Error()) } } func TestProgress(t *testing.T) { jp := JSONProgress{} if jp.String() != "" { t.Fatalf("Expected empty string, got '%s'", jp.String()) } jp2 := JSONProgress{Current: 1} if jp2.String() != " 1 B" { t.Fatalf("Expected ' 1 B', got '%s'", jp2.String()) } jp3 := JSONProgress{Current: 50, Total: 100} if jp3.String() != "[=========================> ] 50 B/100 B" { t.Fatalf("Expected '[=========================> ] 50 B/100 B', got '%s'", jp3.String()) } } docker-0.9.1/utils/daemon.go0000644000175000017500000000127512314376205014042 0ustar tagtagpackage utils import ( "fmt" "io/ioutil" "log" "os" "strconv" ) func CreatePidFile(pidfile string) error { if pidString, err := ioutil.ReadFile(pidfile); err == nil { pid, err := strconv.Atoi(string(pidString)) if err == nil { if _, err := os.Stat(fmt.Sprintf("/proc/%d/", pid)); err == nil { return fmt.Errorf("pid file found, ensure docker is not running or delete %s", pidfile) } } } file, err := os.Create(pidfile) if err != nil { return err } defer file.Close() _, err = fmt.Fprintf(file, "%d", os.Getpid()) return err } func RemovePidFile(pidfile string) { if err := os.Remove(pidfile); err != nil { log.Printf("Error removing %s: %s", pidfile, err) } } docker-0.9.1/utils/tarsum.go0000644000175000017500000000754312314376205014116 0ustar tagtagpackage utils import ( "bytes" "compress/gzip" "crypto/sha256" "encoding/hex" "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "hash" "io" "sort" "strconv" "strings" ) type TarSum struct { io.Reader tarR *tar.Reader tarW *tar.Writer gz writeCloseFlusher bufTar *bytes.Buffer bufGz *bytes.Buffer h hash.Hash sums map[string]string currentFile string finished bool first bool DisableCompression bool } type writeCloseFlusher interface { io.WriteCloser Flush() error } type nopCloseFlusher struct { io.Writer } func (n *nopCloseFlusher) Close() error { return nil } func (n *nopCloseFlusher) Flush() error { return nil } func (ts *TarSum) encodeHeader(h *tar.Header) error { for _, elem := range [][2]string{ {"name", h.Name}, {"mode", strconv.Itoa(int(h.Mode))}, {"uid", strconv.Itoa(h.Uid)}, {"gid", strconv.Itoa(h.Gid)}, {"size", strconv.Itoa(int(h.Size))}, {"mtime", strconv.Itoa(int(h.ModTime.UTC().Unix()))}, {"typeflag", string([]byte{h.Typeflag})}, {"linkname", h.Linkname}, {"uname", h.Uname}, {"gname", h.Gname}, {"devmajor", strconv.Itoa(int(h.Devmajor))}, {"devminor", strconv.Itoa(int(h.Devminor))}, // {"atime", strconv.Itoa(int(h.AccessTime.UTC().Unix()))}, // {"ctime", strconv.Itoa(int(h.ChangeTime.UTC().Unix()))}, } { if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { return err } } return nil } func (ts *TarSum) Read(buf []byte) (int, error) { if ts.gz == nil { ts.bufTar = bytes.NewBuffer([]byte{}) ts.bufGz = bytes.NewBuffer([]byte{}) ts.tarR = tar.NewReader(ts.Reader) ts.tarW = tar.NewWriter(ts.bufTar) if !ts.DisableCompression { ts.gz = gzip.NewWriter(ts.bufGz) } else { ts.gz = 
&nopCloseFlusher{Writer: ts.bufGz} } ts.h = sha256.New() ts.h.Reset() ts.first = true ts.sums = make(map[string]string) } if ts.finished { return ts.bufGz.Read(buf) } buf2 := make([]byte, len(buf), cap(buf)) n, err := ts.tarR.Read(buf2) if err != nil { if err == io.EOF { if _, err := ts.h.Write(buf2[:n]); err != nil { return 0, err } if !ts.first { ts.sums[ts.currentFile] = hex.EncodeToString(ts.h.Sum(nil)) ts.h.Reset() } else { ts.first = false } currentHeader, err := ts.tarR.Next() if err != nil { if err == io.EOF { if err := ts.gz.Close(); err != nil { return 0, err } ts.finished = true return n, nil } return n, err } ts.currentFile = strings.TrimSuffix(strings.TrimPrefix(currentHeader.Name, "./"), "/") if err := ts.encodeHeader(currentHeader); err != nil { return 0, err } if err := ts.tarW.WriteHeader(currentHeader); err != nil { return 0, err } if _, err := ts.tarW.Write(buf2[:n]); err != nil { return 0, err } ts.tarW.Flush() if _, err := io.Copy(ts.gz, ts.bufTar); err != nil { return 0, err } ts.gz.Flush() return ts.bufGz.Read(buf) } return n, err } // Filling the hash buffer if _, err = ts.h.Write(buf2[:n]); err != nil { return 0, err } // Filling the tar writter if _, err = ts.tarW.Write(buf2[:n]); err != nil { return 0, err } ts.tarW.Flush() // Filling the gz writter if _, err = io.Copy(ts.gz, ts.bufTar); err != nil { return 0, err } ts.gz.Flush() return ts.bufGz.Read(buf) } func (ts *TarSum) Sum(extra []byte) string { var sums []string for _, sum := range ts.sums { sums = append(sums, sum) } sort.Strings(sums) h := sha256.New() if extra != nil { h.Write(extra) } for _, sum := range sums { Debugf("-->%s<--", sum) h.Write([]byte(sum)) } checksum := "tarsum+sha256:" + hex.EncodeToString(h.Sum(nil)) Debugf("checksum processed: %s", checksum) return checksum } func (ts *TarSum) GetSums() map[string]string { return ts.sums } docker-0.9.1/utils/utils.go0000644000175000017500000006005312314376205013736 0ustar tagtagpackage utils import ( "bytes" "crypto/sha1" "crypto/sha256" "encoding/hex" "encoding/json" "errors" "fmt" "github.com/dotcloud/docker/dockerversion" "index/suffixarray" "io" "io/ioutil" "net/http" "os" "os/exec" "path/filepath" "regexp" "runtime" "strconv" "strings" "sync" "time" ) // A common interface to access the Fatal method of // both testing.B and testing.T. type Fataler interface { Fatal(args ...interface{}) } // Go is a basic promise implementation: it wraps calls a function in a goroutine, // and returns a channel which will later return the function's return value. func Go(f func() error) chan error { ch := make(chan error, 1) go func() { ch <- f() }() return ch } // Request a given URL and return an io.Reader func Download(url string) (resp *http.Response, err error) { if resp, err = http.Get(url); err != nil { return nil, err } if resp.StatusCode >= 400 { return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status) } return resp, nil } func logf(level string, format string, a ...interface{}) { // Retrieve the stack infos _, file, line, ok := runtime.Caller(2) if !ok { file = "" line = -1 } else { file = file[strings.LastIndex(file, "/")+1:] } fmt.Fprintf(os.Stderr, fmt.Sprintf("[%s] %s:%d %s\n", level, file, line, format), a...) } // Debug function, if the debug flag is set, then display. Do nothing otherwise // If Docker is in damon mode, also send the debug info on the socket func Debugf(format string, a ...interface{}) { if os.Getenv("DEBUG") != "" { logf("debug", format, a...) 
} } func Errorf(format string, a ...interface{}) { logf("error", format, a...) } // HumanDuration returns a human-readable approximation of a duration // (eg. "About a minute", "4 hours ago", etc.) func HumanDuration(d time.Duration) string { if seconds := int(d.Seconds()); seconds < 1 { return "Less than a second" } else if seconds < 60 { return fmt.Sprintf("%d seconds", seconds) } else if minutes := int(d.Minutes()); minutes == 1 { return "About a minute" } else if minutes < 60 { return fmt.Sprintf("%d minutes", minutes) } else if hours := int(d.Hours()); hours == 1 { return "About an hour" } else if hours < 48 { return fmt.Sprintf("%d hours", hours) } else if hours < 24*7*2 { return fmt.Sprintf("%d days", hours/24) } else if hours < 24*30*3 { return fmt.Sprintf("%d weeks", hours/24/7) } else if hours < 24*365*2 { return fmt.Sprintf("%d months", hours/24/30) } return fmt.Sprintf("%f years", d.Hours()/24/365) } // HumanSize returns a human-readable approximation of a size // using SI standard (eg. "44kB", "17MB") func HumanSize(size int64) string { i := 0 var sizef float64 sizef = float64(size) units := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} for sizef >= 1000.0 { sizef = sizef / 1000.0 i++ } return fmt.Sprintf("%.4g %s", sizef, units[i]) } // Parses a human-readable string representing an amount of RAM // in bytes, kibibytes, mebibytes or gibibytes, and returns the // number of bytes, or -1 if the string is unparseable. // Units are case-insensitive, and the 'b' suffix is optional. func RAMInBytes(size string) (bytes int64, err error) { re, error := regexp.Compile("^(\\d+)([kKmMgG])?[bB]?$") if error != nil { return -1, error } matches := re.FindStringSubmatch(size) if len(matches) != 3 { return -1, fmt.Errorf("Invalid size: '%s'", size) } memLimit, error := strconv.ParseInt(matches[1], 10, 0) if error != nil { return -1, error } unit := strings.ToLower(matches[2]) if unit == "k" { memLimit *= 1024 } else if unit == "m" { memLimit *= 1024 * 1024 } else if unit == "g" { memLimit *= 1024 * 1024 * 1024 } return memLimit, nil } func Trunc(s string, maxlen int) string { if len(s) <= maxlen { return s } return s[:maxlen] } // Figure out the absolute path of our own binary (if it's still around). 
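// Returns an empty string if the binary has since been removed, and panics on any other lookup error.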
func SelfPath() string { path, err := exec.LookPath(os.Args[0]) if err != nil { if os.IsNotExist(err) { return "" } if execErr, ok := err.(*exec.Error); ok && os.IsNotExist(execErr.Err) { return "" } panic(err) } path, err = filepath.Abs(path) if err != nil { if os.IsNotExist(err) { return "" } panic(err) } return path } func dockerInitSha1(target string) string { f, err := os.Open(target) if err != nil { return "" } defer f.Close() h := sha1.New() _, err = io.Copy(h, f) if err != nil { return "" } return hex.EncodeToString(h.Sum(nil)) } func isValidDockerInitPath(target string, selfPath string) bool { // target and selfPath should be absolute (InitPath and SelfPath already do this) if target == "" { return false } if dockerversion.IAMSTATIC { if selfPath == "" { return false } if target == selfPath { return true } targetFileInfo, err := os.Lstat(target) if err != nil { return false } selfPathFileInfo, err := os.Lstat(selfPath) if err != nil { return false } return os.SameFile(targetFileInfo, selfPathFileInfo) } return dockerversion.INITSHA1 != "" && dockerInitSha1(target) == dockerversion.INITSHA1 } // Figure out the path of our dockerinit (which may be SelfPath()) func DockerInitPath(localCopy string) string { selfPath := SelfPath() if isValidDockerInitPath(selfPath, selfPath) { // if we're valid, don't bother checking anything else return selfPath } var possibleInits = []string{ localCopy, dockerversion.INITPATH, filepath.Join(filepath.Dir(selfPath), "dockerinit"), // FHS 3.0 Draft: "/usr/libexec includes internal binaries that are not intended to be executed directly by users or shell scripts. Applications may use a single subdirectory under /usr/libexec." // http://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec "/usr/libexec/docker/dockerinit", "/usr/local/libexec/docker/dockerinit", // FHS 2.3: "/usr/lib includes object files, libraries, and internal binaries that are not intended to be executed directly by users or shell scripts." // http://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA "/usr/lib/docker/dockerinit", "/usr/local/lib/docker/dockerinit", } for _, dockerInit := range possibleInits { if dockerInit == "" { continue } path, err := exec.LookPath(dockerInit) if err == nil { path, err = filepath.Abs(path) if err != nil { // LookPath already validated that this file exists and is executable (following symlinks), so how could Abs fail? 
panic(err) } if isValidDockerInitPath(path, selfPath) { return path } } } return "" } type NopWriter struct{} func (*NopWriter) Write(buf []byte) (int, error) { return len(buf), nil } type nopWriteCloser struct { io.Writer } func (w *nopWriteCloser) Close() error { return nil } func NopWriteCloser(w io.Writer) io.WriteCloser { return &nopWriteCloser{w} } type bufReader struct { sync.Mutex buf *bytes.Buffer reader io.Reader err error wait sync.Cond } func NewBufReader(r io.Reader) *bufReader { reader := &bufReader{ buf: &bytes.Buffer{}, reader: r, } reader.wait.L = &reader.Mutex go reader.drain() return reader } func (r *bufReader) drain() { buf := make([]byte, 1024) for { n, err := r.reader.Read(buf) r.Lock() if err != nil { r.err = err } else { r.buf.Write(buf[0:n]) } r.wait.Signal() r.Unlock() if err != nil { break } } } func (r *bufReader) Read(p []byte) (n int, err error) { r.Lock() defer r.Unlock() for { n, err = r.buf.Read(p) if n > 0 { return n, err } if r.err != nil { return 0, r.err } r.wait.Wait() } } func (r *bufReader) Close() error { closer, ok := r.reader.(io.ReadCloser) if !ok { return nil } return closer.Close() } type WriteBroadcaster struct { sync.Mutex buf *bytes.Buffer writers map[StreamWriter]bool } type StreamWriter struct { wc io.WriteCloser stream string } func (w *WriteBroadcaster) AddWriter(writer io.WriteCloser, stream string) { w.Lock() sw := StreamWriter{wc: writer, stream: stream} w.writers[sw] = true w.Unlock() } type JSONLog struct { Log string `json:"log,omitempty"` Stream string `json:"stream,omitempty"` Created time.Time `json:"time"` } func (w *WriteBroadcaster) Write(p []byte) (n int, err error) { w.Lock() defer w.Unlock() w.buf.Write(p) for sw := range w.writers { lp := p if sw.stream != "" { lp = nil for { line, err := w.buf.ReadString('\n') if err != nil { w.buf.Write([]byte(line)) break } b, err := json.Marshal(&JSONLog{Log: line, Stream: sw.stream, Created: time.Now().UTC()}) if err != nil { // On error, evict the writer delete(w.writers, sw) continue } lp = append(lp, b...) lp = append(lp, '\n') } } if n, err := sw.wc.Write(lp); err != nil || n != len(lp) { // On error, evict the writer delete(w.writers, sw) } } return len(p), nil } func (w *WriteBroadcaster) CloseWriters() error { w.Lock() defer w.Unlock() for sw := range w.writers { sw.wc.Close() } w.writers = make(map[StreamWriter]bool) return nil } func NewWriteBroadcaster() *WriteBroadcaster { return &WriteBroadcaster{writers: make(map[StreamWriter]bool), buf: bytes.NewBuffer(nil)} } func GetTotalUsedFds() int { if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) } else { return len(fds) } return -1 } // TruncIndex allows the retrieval of string identifiers by any of their unique prefixes. // This is used to retrieve image and container IDs by more convenient shorthand prefixes. type TruncIndex struct { sync.RWMutex index *suffixarray.Index ids map[string]bool bytes []byte } func NewTruncIndex() *TruncIndex { return &TruncIndex{ index: suffixarray.New([]byte{' '}), ids: make(map[string]bool), bytes: []byte{' '}, } } func (idx *TruncIndex) Add(id string) error { idx.Lock() defer idx.Unlock() if strings.Contains(id, " ") { return fmt.Errorf("Illegal character: ' '") } if _, exists := idx.ids[id]; exists { return fmt.Errorf("Id already exists: %s", id) } idx.ids[id] = true idx.bytes = append(idx.bytes, []byte(id+" ")...) 
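// Rebuild the suffix array so the newly added id is immediately searchable by prefix.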
idx.index = suffixarray.New(idx.bytes) return nil } func (idx *TruncIndex) Delete(id string) error { idx.Lock() defer idx.Unlock() if _, exists := idx.ids[id]; !exists { return fmt.Errorf("No such id: %s", id) } before, after, err := idx.lookup(id) if err != nil { return err } delete(idx.ids, id) idx.bytes = append(idx.bytes[:before], idx.bytes[after:]...) idx.index = suffixarray.New(idx.bytes) return nil } func (idx *TruncIndex) lookup(s string) (int, int, error) { offsets := idx.index.Lookup([]byte(" "+s), -1) //log.Printf("lookup(%s): %v (index bytes: '%s')\n", s, offsets, idx.index.Bytes()) if offsets == nil || len(offsets) == 0 || len(offsets) > 1 { return -1, -1, fmt.Errorf("No such id: %s", s) } offsetBefore := offsets[0] + 1 offsetAfter := offsetBefore + strings.Index(string(idx.bytes[offsetBefore:]), " ") return offsetBefore, offsetAfter, nil } func (idx *TruncIndex) Get(s string) (string, error) { idx.RLock() defer idx.RUnlock() before, after, err := idx.lookup(s) //log.Printf("Get(%s) bytes=|%s| before=|%d| after=|%d|\n", s, idx.bytes, before, after) if err != nil { return "", err } return string(idx.bytes[before:after]), err } // TruncateID returns a shorthand version of a string identifier for convenience. // A collision with other shorthands is very unlikely, but possible. // In case of a collision a lookup with TruncIndex.Get() will fail, and the caller // will need to use a langer prefix, or the full-length Id. func TruncateID(id string) string { shortLen := 12 if len(id) < shortLen { shortLen = len(id) } return id[:shortLen] } // Code c/c from io.Copy() modified to handle escape sequence func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) { buf := make([]byte, 32*1024) for { nr, er := src.Read(buf) if nr > 0 { // ---- Docker addition // char 16 is C-p if nr == 1 && buf[0] == 16 { nr, er = src.Read(buf) // char 17 is C-q if nr == 1 && buf[0] == 17 { if err := src.Close(); err != nil { return 0, err } return 0, nil } } // ---- End of docker nw, ew := dst.Write(buf[0:nr]) if nw > 0 { written += int64(nw) } if ew != nil { err = ew break } if nr != nw { err = io.ErrShortWrite break } } if er == io.EOF { break } if er != nil { err = er break } } return written, err } func HashData(src io.Reader) (string, error) { h := sha256.New() if _, err := io.Copy(h, src); err != nil { return "", err } return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil } type KernelVersionInfo struct { Kernel int Major int Minor int Flavor string } func (k *KernelVersionInfo) String() string { return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) } // Compare two KernelVersionInfo struct. 
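// Only the numeric Kernel, Major and Minor fields are compared; the Flavor string is ignored.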
// Returns -1 if a < b, 0 if a == b, 1 it a > b func CompareKernelVersion(a, b *KernelVersionInfo) int { if a.Kernel < b.Kernel { return -1 } else if a.Kernel > b.Kernel { return 1 } if a.Major < b.Major { return -1 } else if a.Major > b.Major { return 1 } if a.Minor < b.Minor { return -1 } else if a.Minor > b.Minor { return 1 } return 0 } func GetKernelVersion() (*KernelVersionInfo, error) { var ( err error ) uts, err := uname() if err != nil { return nil, err } release := make([]byte, len(uts.Release)) i := 0 for _, c := range uts.Release { release[i] = byte(c) i++ } // Remove the \x00 from the release for Atoi to parse correctly release = release[:bytes.IndexByte(release, 0)] return ParseRelease(string(release)) } func ParseRelease(release string) (*KernelVersionInfo, error) { var ( kernel, major, minor, parsed int flavor, partial string ) // Ignore error from Sscanf to allow an empty flavor. Instead, just // make sure we got all the version numbers. parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) if parsed < 2 { return nil, errors.New("Can't parse kernel version " + release) } // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) if parsed < 1 { flavor = partial } return &KernelVersionInfo{ Kernel: kernel, Major: major, Minor: minor, Flavor: flavor, }, nil } // FIXME: this is deprecated by CopyWithTar in archive.go func CopyDirectory(source, dest string) error { if output, err := exec.Command("cp", "-ra", source, dest).CombinedOutput(); err != nil { return fmt.Errorf("Error copy: %s (%s)", err, output) } return nil } type NopFlusher struct{} func (f *NopFlusher) Flush() {} type WriteFlusher struct { sync.Mutex w io.Writer flusher http.Flusher } func (wf *WriteFlusher) Write(b []byte) (n int, err error) { wf.Lock() defer wf.Unlock() n, err = wf.w.Write(b) wf.flusher.Flush() return n, err } // Flush the stream immediately. func (wf *WriteFlusher) Flush() { wf.Lock() defer wf.Unlock() wf.flusher.Flush() } func NewWriteFlusher(w io.Writer) *WriteFlusher { var flusher http.Flusher if f, ok := w.(http.Flusher); ok { flusher = f } else { flusher = &NopFlusher{} } return &WriteFlusher{w: w, flusher: flusher} } func NewHTTPRequestError(msg string, res *http.Response) error { return &JSONError{ Message: msg, Code: res.StatusCode, } } func IsURL(str string) bool { return strings.HasPrefix(str, "http://") || strings.HasPrefix(str, "https://") } func IsGIT(str string) bool { return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "github.com/") } // GetResolvConf opens and read the content of /etc/resolv.conf. // It returns it as byte slice. func GetResolvConf() ([]byte, error) { resolv, err := ioutil.ReadFile("/etc/resolv.conf") if err != nil { Errorf("Error openning resolv.conf: %s", err) return nil, err } return resolv, nil } // CheckLocalDns looks into the /etc/resolv.conf, // it returns true if there is a local nameserver or if there is no nameserver. func CheckLocalDns(resolvConf []byte) bool { var parsedResolvConf = StripComments(resolvConf, []byte("#")) if !bytes.Contains(parsedResolvConf, []byte("nameserver")) { return true } for _, ip := range [][]byte{ []byte("127.0.0.1"), []byte("127.0.1.1"), } { if bytes.Contains(parsedResolvConf, ip) { return true } } return false } // StripComments parses input into lines and strips away comments. 
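// (Illustrative: StripComments([]byte("nameserver 1.2.3.4 # local"), []byte("#")) returns "nameserver 1.2.3.4 \n".)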
func StripComments(input []byte, commentMarker []byte) []byte { lines := bytes.Split(input, []byte("\n")) var output []byte for _, currentLine := range lines { var commentIndex = bytes.Index(currentLine, commentMarker) if commentIndex == -1 { output = append(output, currentLine...) } else { output = append(output, currentLine[:commentIndex]...) } output = append(output, []byte("\n")...) } return output } // GetNameserversAsCIDR returns nameservers (if any) listed in // /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32") // This function's output is intended for net.ParseCIDR func GetNameserversAsCIDR(resolvConf []byte) []string { var parsedResolvConf = StripComments(resolvConf, []byte("#")) nameservers := []string{} re := regexp.MustCompile(`^\s*nameserver\s*(([0-9]+\.){3}([0-9]+))\s*$`) for _, line := range bytes.Split(parsedResolvConf, []byte("\n")) { var ns = re.FindSubmatch(line) if len(ns) > 0 { nameservers = append(nameservers, string(ns[1])+"/32") } } return nameservers } // FIXME: Change this not to receive default value as parameter func ParseHost(defaultHost string, defaultUnix, addr string) (string, error) { var ( proto string host string port int ) addr = strings.TrimSpace(addr) switch { case addr == "tcp://": return "", fmt.Errorf("Invalid bind address format: %s", addr) case strings.HasPrefix(addr, "unix://"): proto = "unix" addr = strings.TrimPrefix(addr, "unix://") if addr == "" { addr = defaultUnix } case strings.HasPrefix(addr, "tcp://"): proto = "tcp" addr = strings.TrimPrefix(addr, "tcp://") case strings.HasPrefix(addr, "fd://"): return addr, nil case addr == "": proto = "unix" addr = defaultUnix default: if strings.Contains(addr, "://") { return "", fmt.Errorf("Invalid bind address protocol: %s", addr) } proto = "tcp" } if proto != "unix" && strings.Contains(addr, ":") { hostParts := strings.Split(addr, ":") if len(hostParts) != 2 { return "", fmt.Errorf("Invalid bind address format: %s", addr) } if hostParts[0] != "" { host = hostParts[0] } else { host = defaultHost } if p, err := strconv.Atoi(hostParts[1]); err == nil && p != 0 { port = p } else { return "", fmt.Errorf("Invalid bind address format: %s", addr) } } else if proto == "tcp" && !strings.Contains(addr, ":") { return "", fmt.Errorf("Invalid bind address format: %s", addr) } else { host = addr } if proto == "unix" { return fmt.Sprintf("%s://%s", proto, host), nil } return fmt.Sprintf("%s://%s:%d", proto, host, port), nil } func GetReleaseVersion() string { resp, err := http.Get("https://get.docker.io/latest") if err != nil { return "" } defer resp.Body.Close() if resp.ContentLength > 24 || resp.StatusCode != 200 { return "" } body, err := ioutil.ReadAll(resp.Body) if err != nil { return "" } return strings.TrimSpace(string(body)) } // Get a repos name and returns the right reposName + tag // The tag can be confusing because of a port in a repository name. // Ex: localhost.localdomain:5000/samalba/hipache:latest func ParseRepositoryTag(repos string) (string, string) { n := strings.LastIndex(repos, ":") if n < 0 { return repos, "" } if tag := repos[n+1:]; !strings.Contains(tag, "/") { return repos[:n], tag } return repos, "" } // An StatusError reports an unsuccessful exit by a command. type StatusError struct { Status string StatusCode int } func (e *StatusError) Error() string { return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) } func quote(word string, buf *bytes.Buffer) { // Bail out early for "simple" strings if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! 
\t\n") { buf.WriteString(word) return } buf.WriteString("'") for i := 0; i < len(word); i++ { b := word[i] if b == '\'' { // Replace literal ' with a close ', a \', and a open ' buf.WriteString("'\\''") } else { buf.WriteByte(b) } } buf.WriteString("'") } // Take a list of strings and escape them so they will be handled right // when passed as arguments to an program via a shell func ShellQuoteArguments(args []string) string { var buf bytes.Buffer for i, arg := range args { if i != 0 { buf.WriteByte(' ') } quote(arg, &buf) } return buf.String() } func PartParser(template, data string) (map[string]string, error) { // ip:public:private var ( templateParts = strings.Split(template, ":") parts = strings.Split(data, ":") out = make(map[string]string, len(templateParts)) ) if len(parts) != len(templateParts) { return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template) } for i, t := range templateParts { value := "" if len(parts) > i { value = parts[i] } out[t] = value } return out, nil } var globalTestID string // TestDirectory creates a new temporary directory and returns its path. // The contents of directory at path `templateDir` is copied into the // new directory. func TestDirectory(templateDir string) (dir string, err error) { if globalTestID == "" { globalTestID = RandomString()[:4] } prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2)) if prefix == "" { prefix = "docker-test-" } dir, err = ioutil.TempDir("", prefix) if err = os.Remove(dir); err != nil { return } if templateDir != "" { if err = CopyDirectory(templateDir, dir); err != nil { return } } return } // GetCallerName introspects the call stack and returns the name of the // function `depth` levels down in the stack. func GetCallerName(depth int) string { // Use the caller function name as a prefix. // This helps trace temp directories back to their test. pc, _, _, _ := runtime.Caller(depth + 1) callerLongName := runtime.FuncForPC(pc).Name() parts := strings.Split(callerLongName, ".") callerShortName := parts[len(parts)-1] return callerShortName } func CopyFile(src, dst string) (int64, error) { if src == dst { return 0, nil } sf, err := os.Open(src) if err != nil { return 0, err } defer sf.Close() if err := os.Remove(dst); err != nil && !os.IsNotExist(err) { return 0, err } df, err := os.Create(dst) if err != nil { return 0, err } defer df.Close() return io.Copy(df, sf) } type readCloserWrapper struct { io.Reader closer func() error } func (r *readCloserWrapper) Close() error { return r.closer() } func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { return &readCloserWrapper{ Reader: r, closer: closer, } } // ReplaceOrAppendValues returns the defaults with the overrides either // replaced by env key or appended to the list func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { cache := make(map[string]int, len(defaults)) for i, e := range defaults { parts := strings.SplitN(e, "=", 2) cache[parts[0]] = i } for _, value := range overrides { parts := strings.SplitN(value, "=", 2) if i, exists := cache[parts[0]]; exists { defaults[i] = value } else { defaults = append(defaults, value) } } return defaults } // ReadSymlinkedDirectory returns the target directory of a symlink. // The target of the symbolic link may not be a file. 
func ReadSymlinkedDirectory(path string) (string, error) { var realPath string var err error if realPath, err = filepath.Abs(path); err != nil { return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) } if realPath, err = filepath.EvalSymlinks(realPath); err != nil { return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) } realPathInfo, err := os.Stat(realPath) if err != nil { return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) } if !realPathInfo.Mode().IsDir() { return "", fmt.Errorf("canonical path points to a file '%s'", realPath) } return realPath, nil } docker-0.9.1/utils/uname_unsupported.go0000644000175000017500000000033312314376205016346 0ustar tagtag// +build !linux !amd64 package utils import ( "errors" ) type Utsname struct { Release [65]byte } func uname() (*Utsname, error) { return nil, errors.New("Kernel version detection is available only on linux") } docker-0.9.1/utils/utils_test.go0000644000175000017500000004230712314376205014777 0ustar tagtagpackage utils import ( "bytes" "errors" "io" "io/ioutil" "os" "strings" "testing" ) func TestBufReader(t *testing.T) { reader, writer := io.Pipe() bufreader := NewBufReader(reader) // Write everything down to a Pipe // Usually, a pipe should block but because of the buffered reader, // the writes will go through done := make(chan bool) go func() { writer.Write([]byte("hello world")) writer.Close() done <- true }() // Drain the reader *after* everything has been written, just to verify // it is indeed buffering <-done output, err := ioutil.ReadAll(bufreader) if err != nil { t.Fatal(err) } if !bytes.Equal(output, []byte("hello world")) { t.Error(string(output)) } } type dummyWriter struct { buffer bytes.Buffer failOnWrite bool } func (dw *dummyWriter) Write(p []byte) (n int, err error) { if dw.failOnWrite { return 0, errors.New("Fake fail") } return dw.buffer.Write(p) } func (dw *dummyWriter) String() string { return dw.buffer.String() } func (dw *dummyWriter) Close() error { return nil } func TestWriteBroadcaster(t *testing.T) { writer := NewWriteBroadcaster() // Test 1: Both bufferA and bufferB should contain "foo" bufferA := &dummyWriter{} writer.AddWriter(bufferA, "") bufferB := &dummyWriter{} writer.AddWriter(bufferB, "") writer.Write([]byte("foo")) if bufferA.String() != "foo" { t.Errorf("Buffer contains %v", bufferA.String()) } if bufferB.String() != "foo" { t.Errorf("Buffer contains %v", bufferB.String()) } // Test2: bufferA and bufferB should contain "foobar", // while bufferC should only contain "bar" bufferC := &dummyWriter{} writer.AddWriter(bufferC, "") writer.Write([]byte("bar")) if bufferA.String() != "foobar" { t.Errorf("Buffer contains %v", bufferA.String()) } if bufferB.String() != "foobar" { t.Errorf("Buffer contains %v", bufferB.String()) } if bufferC.String() != "bar" { t.Errorf("Buffer contains %v", bufferC.String()) } // Test3: Test eviction on failure bufferA.failOnWrite = true writer.Write([]byte("fail")) if bufferA.String() != "foobar" { t.Errorf("Buffer contains %v", bufferA.String()) } if bufferC.String() != "barfail" { t.Errorf("Buffer contains %v", bufferC.String()) } // Even though we reset the flag, no more writes should go in there bufferA.failOnWrite = false writer.Write([]byte("test")) if bufferA.String() != "foobar" { t.Errorf("Buffer contains %v", bufferA.String()) } if bufferC.String() != "barfailtest" { t.Errorf("Buffer contains %v", bufferC.String()) } writer.CloseWriters() } type devNullCloser int func (d devNullCloser) 
Close() error { return nil } func (d devNullCloser) Write(buf []byte) (int, error) { return len(buf), nil } // This test checks for races. It is only useful when run with the race detector. func TestRaceWriteBroadcaster(t *testing.T) { writer := NewWriteBroadcaster() c := make(chan bool) go func() { writer.AddWriter(devNullCloser(0), "") c <- true }() writer.Write([]byte("hello")) <-c } // Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix. func TestTruncIndex(t *testing.T) { index := NewTruncIndex() // Get on an empty index if _, err := index.Get("foobar"); err == nil { t.Fatal("Get on an empty index should return an error") } // Spaces should be illegal in an id if err := index.Add("I have a space"); err == nil { t.Fatalf("Adding an id with ' ' should return an error") } id := "99b36c2c326ccc11e726eee6ee78a0baf166ef96" // Add an id if err := index.Add(id); err != nil { t.Fatal(err) } // Get a non-existing id assertIndexGet(t, index, "abracadabra", "", true) // Get the exact id assertIndexGet(t, index, id, id, false) // The first letter should match assertIndexGet(t, index, id[:1], id, false) // The first half should match assertIndexGet(t, index, id[:len(id)/2], id, false) // The second half should NOT match assertIndexGet(t, index, id[len(id)/2:], "", true) id2 := id[:6] + "blabla" // Add an id if err := index.Add(id2); err != nil { t.Fatal(err) } // Both exact IDs should work assertIndexGet(t, index, id, id, false) assertIndexGet(t, index, id2, id2, false) // 6 characters or less should conflict assertIndexGet(t, index, id[:6], "", true) assertIndexGet(t, index, id[:4], "", true) assertIndexGet(t, index, id[:1], "", true) // 7 characters should NOT conflict assertIndexGet(t, index, id[:7], id, false) assertIndexGet(t, index, id2[:7], id2, false) // Deleting a non-existing id should return an error if err := index.Delete("non-existing"); err == nil { t.Fatalf("Deleting a non-existing id should return an error") } // Deleting id2 should remove conflicts if err := index.Delete(id2); err != nil { t.Fatal(err) } // id2 should no longer work assertIndexGet(t, index, id2, "", true) assertIndexGet(t, index, id2[:7], "", true) assertIndexGet(t, index, id2[:11], "", true) // conflicts between id and id2 should be gone assertIndexGet(t, index, id[:6], id, false) assertIndexGet(t, index, id[:4], id, false) assertIndexGet(t, index, id[:1], id, false) // non-conflicting substrings should still not conflict assertIndexGet(t, index, id[:7], id, false) assertIndexGet(t, index, id[:15], id, false) assertIndexGet(t, index, id, id, false) } func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult string, expectError bool) { if result, err := index.Get(input); err != nil && !expectError { t.Fatalf("Unexpected error getting '%s': %s", input, err) } else if err == nil && expectError { t.Fatalf("Getting '%s' should return an error", input) } else if result != expectedResult { t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult) } } func assertKernelVersion(t *testing.T, a, b *KernelVersionInfo, result int) { if r := CompareKernelVersion(a, b); r != result { t.Fatalf("Unexpected kernel version comparison result. 
Found %d, expected %d", r, result) } } func TestCompareKernelVersion(t *testing.T) { assertKernelVersion(t, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) assertKernelVersion(t, &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0}, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, -1) assertKernelVersion(t, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0}, 1) assertKernelVersion(t, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) assertKernelVersion(t, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 5}, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 1) assertKernelVersion(t, &KernelVersionInfo{Kernel: 3, Major: 0, Minor: 20}, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, -1) } func TestHumanSize(t *testing.T) { size := strings.Trim(HumanSize(1000), " \t") expect := "1 kB" if size != expect { t.Errorf("1000 -> expected '%s', got '%s'", expect, size) } size = strings.Trim(HumanSize(1024), " \t") expect = "1.024 kB" if size != expect { t.Errorf("1024 -> expected '%s', got '%s'", expect, size) } } func TestRAMInBytes(t *testing.T) { assertRAMInBytes(t, "32", false, 32) assertRAMInBytes(t, "32b", false, 32) assertRAMInBytes(t, "32B", false, 32) assertRAMInBytes(t, "32k", false, 32*1024) assertRAMInBytes(t, "32K", false, 32*1024) assertRAMInBytes(t, "32kb", false, 32*1024) assertRAMInBytes(t, "32Kb", false, 32*1024) assertRAMInBytes(t, "32Mb", false, 32*1024*1024) assertRAMInBytes(t, "32Gb", false, 32*1024*1024*1024) assertRAMInBytes(t, "", true, -1) assertRAMInBytes(t, "hello", true, -1) assertRAMInBytes(t, "-32", true, -1) assertRAMInBytes(t, " 32 ", true, -1) assertRAMInBytes(t, "32 mb", true, -1) assertRAMInBytes(t, "32m b", true, -1) assertRAMInBytes(t, "32bm", true, -1) } func assertRAMInBytes(t *testing.T, size string, expectError bool, expectedBytes int64) { actualBytes, err := RAMInBytes(size) if (err != nil) && !expectError { t.Errorf("Unexpected error parsing '%s': %s", size, err) } if (err == nil) && expectError { t.Errorf("Expected to get an error parsing '%s', but got none (bytes=%d)", size, actualBytes) } if actualBytes != expectedBytes { t.Errorf("Expected '%s' to parse as %d bytes, got %d", size, expectedBytes, actualBytes) } } func TestParseHost(t *testing.T) { var ( defaultHttpHost = "127.0.0.1" defaultUnix = "/var/run/docker.sock" ) if addr, err := ParseHost(defaultHttpHost, defaultUnix, "0.0.0.0"); err == nil { t.Errorf("tcp 0.0.0.0 address expected error return, but err == nil, got %s", addr) } if addr, err := ParseHost(defaultHttpHost, defaultUnix, "tcp://"); err == nil { t.Errorf("default tcp:// address expected error return, but err == nil, got %s", addr) } if addr, err := ParseHost(defaultHttpHost, defaultUnix, "0.0.0.1:5555"); err != nil || addr != "tcp://0.0.0.1:5555" { t.Errorf("0.0.0.1:5555 -> expected tcp://0.0.0.1:5555, got %s", addr) } if addr, err := ParseHost(defaultHttpHost, defaultUnix, ":6666"); err != nil || addr != "tcp://127.0.0.1:6666" { t.Errorf(":6666 -> expected tcp://127.0.0.1:6666, got %s", addr) } if addr, err := ParseHost(defaultHttpHost, defaultUnix, "tcp://:7777"); err != nil || addr != "tcp://127.0.0.1:7777" { t.Errorf("tcp://:7777 -> expected tcp://127.0.0.1:7777, got %s", addr) } if addr, err := ParseHost(defaultHttpHost, defaultUnix, ""); err != nil || addr != "unix:///var/run/docker.sock" { t.Errorf("empty argument -> expected unix:///var/run/docker.sock, got %s", addr) } 
if addr, err := ParseHost(defaultHttpHost, defaultUnix, "unix:///var/run/docker.sock"); err != nil || addr != "unix:///var/run/docker.sock" { t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr) } if addr, err := ParseHost(defaultHttpHost, defaultUnix, "unix://"); err != nil || addr != "unix:///var/run/docker.sock" { t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr) } if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1"); err == nil { t.Errorf("udp protocol address expected error return, but err == nil. Got %s", addr) } if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1:4243"); err == nil { t.Errorf("udp protocol address expected error return, but err == nil. Got %s", addr) } } func TestParseRepositoryTag(t *testing.T) { if repo, tag := ParseRepositoryTag("root"); repo != "root" || tag != "" { t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "", repo, tag) } if repo, tag := ParseRepositoryTag("root:tag"); repo != "root" || tag != "tag" { t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "tag", repo, tag) } if repo, tag := ParseRepositoryTag("user/repo"); repo != "user/repo" || tag != "" { t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "", repo, tag) } if repo, tag := ParseRepositoryTag("user/repo:tag"); repo != "user/repo" || tag != "tag" { t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "tag", repo, tag) } if repo, tag := ParseRepositoryTag("url:5000/repo"); repo != "url:5000/repo" || tag != "" { t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "", repo, tag) } if repo, tag := ParseRepositoryTag("url:5000/repo:tag"); repo != "url:5000/repo" || tag != "tag" { t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "tag", repo, tag) } } func TestGetResolvConf(t *testing.T) { resolvConfUtils, err := GetResolvConf() if err != nil { t.Fatal(err) } resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf") if err != nil { t.Fatal(err) } if string(resolvConfUtils) != string(resolvConfSystem) { t.Fatalf("/etc/resolv.conf and GetResolvConf have different content.") } } func TestCheckLocalDns(t *testing.T) { for resolv, result := range map[string]bool{`# Dynamic nameserver 10.0.2.3 search dotcloud.net`: false, `# Dynamic #nameserver 127.0.0.1 nameserver 10.0.2.3 search dotcloud.net`: false, `# Dynamic nameserver 10.0.2.3 #not used 127.0.1.1 search dotcloud.net`: false, `# Dynamic #nameserver 10.0.2.3 #search dotcloud.net`: true, `# Dynamic nameserver 127.0.0.1 search dotcloud.net`: true, `# Dynamic nameserver 127.0.1.1 search dotcloud.net`: true, `# Dynamic `: true, ``: true, } { if CheckLocalDns([]byte(resolv)) != result { t.Fatalf("Wrong local dns detection: {%s} should be %v", resolv, result) } } } func assertParseRelease(t *testing.T, release string, b *KernelVersionInfo, result int) { var ( a *KernelVersionInfo ) a, _ = ParseRelease(release) if r := CompareKernelVersion(a, b); r != result { t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result) } if a.Flavor != b.Flavor { t.Fatalf("Unexpected parsed kernel flavor. 
Found %s, expected %s", a.Flavor, b.Flavor) } } func TestParseRelease(t *testing.T) { assertParseRelease(t, "3.8.0", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) assertParseRelease(t, "3.8.0-19-generic", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0) assertParseRelease(t, "3.12.8tag", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0) assertParseRelease(t, "3.12-1-amd64", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0) } func TestParsePortMapping(t *testing.T) { data, err := PartParser("ip:public:private", "192.168.1.1:80:8080") if err != nil { t.Fatal(err) } if len(data) != 3 { t.FailNow() } if data["ip"] != "192.168.1.1" { t.Fail() } if data["public"] != "80" { t.Fail() } if data["private"] != "8080" { t.Fail() } } func TestGetNameserversAsCIDR(t *testing.T) { for resolv, result := range map[string][]string{` nameserver 1.2.3.4 nameserver 40.3.200.10 search example.com`: {"1.2.3.4/32", "40.3.200.10/32"}, `search example.com`: {}, `nameserver 1.2.3.4 search example.com nameserver 4.30.20.100`: {"1.2.3.4/32", "4.30.20.100/32"}, ``: {}, ` nameserver 1.2.3.4 `: {"1.2.3.4/32"}, `search example.com nameserver 1.2.3.4 #nameserver 4.3.2.1`: {"1.2.3.4/32"}, `search example.com nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4/32"}, } { test := GetNameserversAsCIDR([]byte(resolv)) if !StrSlicesEqual(test, result) { t.Fatalf("Wrong nameserver string {%s} should be %v. Input: %s", test, result, resolv) } } } func StrSlicesEqual(a, b []string) bool { if len(a) != len(b) { return false } for i, v := range a { if v != b[i] { return false } } return true } func TestReplaceAndAppendEnvVars(t *testing.T) { var ( d = []string{"HOME=/"} o = []string{"HOME=/root", "TERM=xterm"} ) env := ReplaceOrAppendEnvValues(d, o) if len(env) != 2 { t.Fatalf("expected len of 2 got %d", len(env)) } if env[0] != "HOME=/root" { t.Fatalf("expected HOME=/root got '%s'", env[0]) } if env[1] != "TERM=xterm" { t.Fatalf("expected TERM=xterm got '%s'", env[1]) } } // Reading a symlink to a directory must return the directory func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { var err error if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { t.Errorf("failed to create directory: %s", err) } if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { t.Errorf("failed to create symlink: %s", err) } var path string if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { t.Fatalf("failed to read symlink to directory: %s", err) } if path != "/tmp/testReadSymlinkToExistingDirectory" { t.Fatalf("symlink returned unexpected directory: %s", path) } if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { t.Errorf("failed to remove temporary directory: %s", err) } if err = os.Remove("/tmp/dirLinkTest"); err != nil { t.Errorf("failed to remove symlink: %s", err) } } // Reading a non-existing symlink must fail func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { var path string var err error if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { t.Fatalf("error expected for non-existing symlink") } if path != "" { t.Fatalf("expected empty path, but '%s' was returned", path) 
} } // Reading a symlink to a file must fail func TestReadSymlinkedDirectoryToFile(t *testing.T) { var err error var file *os.File if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { t.Fatalf("failed to create file: %s", err) } file.Close() if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { t.Errorf("failed to create symlink: %s", err) } var path string if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") } if path != "" { t.Fatalf("path should've been empty: %s", path) } if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil { t.Errorf("failed to remove file: %s", err) } if err = os.Remove("/tmp/fileLinkTest"); err != nil { t.Errorf("failed to remove symlink: %s", err) } } docker-0.9.1/utils/random.go0000644000175000017500000000037012314376205014052 0ustar tagtagpackage utils import ( "crypto/rand" "encoding/hex" "io" ) func RandomString() string { id := make([]byte, 32) _, err := io.ReadFull(rand.Reader, id) if err != nil { panic(err) // This shouldn't happen } return hex.EncodeToString(id) } docker-0.9.1/utils/jsonmessage.go0000644000175000017500000000762212314376205015117 0ustar tagtagpackage utils import ( "encoding/json" "fmt" "github.com/dotcloud/docker/pkg/term" "io" "strings" "time" ) type JSONError struct { Code int `json:"code,omitempty"` Message string `json:"message,omitempty"` } func (e *JSONError) Error() string { return e.Message } type JSONProgress struct { terminalFd uintptr Current int `json:"current,omitempty"` Total int `json:"total,omitempty"` Start int64 `json:"start,omitempty"` } func (p *JSONProgress) String() string { var ( width = 200 pbBox string numbersBox string timeLeftBox string ) ws, err := term.GetWinsize(p.terminalFd) if err == nil { width = int(ws.Width) } if p.Current <= 0 && p.Total <= 0 { return "" } current := HumanSize(int64(p.Current)) if p.Total <= 0 { return fmt.Sprintf("%8v", current) } total := HumanSize(int64(p.Total)) percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 if width > 110 { pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", 50-percentage)) } numbersBox = fmt.Sprintf("%8v/%v", current, total) if p.Current > 0 && p.Start > 0 && percentage < 50 { fromStart := time.Now().UTC().Sub(time.Unix(int64(p.Start), 0)) perEntry := fromStart / time.Duration(p.Current) left := time.Duration(p.Total-p.Current) * perEntry left = (left / time.Second) * time.Second if width > 50 { timeLeftBox = " " + left.String() } } return pbBox + numbersBox + timeLeftBox } type JSONMessage struct { Stream string `json:"stream,omitempty"` Status string `json:"status,omitempty"` Progress *JSONProgress `json:"progressDetail,omitempty"` ProgressMessage string `json:"progress,omitempty"` //deprecated ID string `json:"id,omitempty"` From string `json:"from,omitempty"` Time int64 `json:"time,omitempty"` Error *JSONError `json:"errorDetail,omitempty"` ErrorMessage string `json:"error,omitempty"` //deprecated } func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { if jm.Error != nil { if jm.Error.Code == 401 { return fmt.Errorf("Authentication is required.") } return jm.Error } var endl string if isTerminal && jm.Stream == "" { // [2K = erase entire current line fmt.Fprintf(out, "%c[2K\r", 27) endl = "\r" } else if jm.Progress != nil { //disable progressbar in non-terminal return nil } if jm.Time != 0 { fmt.Fprintf(out, "[%s] ", time.Unix(jm.Time, 0)) } if jm.ID 
!= "" { fmt.Fprintf(out, "%s: ", jm.ID) } if jm.From != "" { fmt.Fprintf(out, "(from %s) ", jm.From) } if jm.Progress != nil { fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) } else if jm.ProgressMessage != "" { //deprecated fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) } else if jm.Stream != "" { fmt.Fprintf(out, "%s%s", jm.Stream, endl) } else { fmt.Fprintf(out, "%s%s\n", jm.Status, endl) } return nil } func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool) error { var ( dec = json.NewDecoder(in) ids = make(map[string]int) diff = 0 ) for { var jm JSONMessage if err := dec.Decode(&jm); err != nil { if err == io.EOF { break } return err } if jm.Progress != nil { jm.Progress.terminalFd = terminalFd } if jm.Progress != nil || jm.ProgressMessage != "" { line, ok := ids[jm.ID] if !ok { line = len(ids) ids[jm.ID] = line fmt.Fprintf(out, "\n") diff = 0 } else { diff = len(ids) - line } if isTerminal { // [{diff}A = move cursor up diff rows fmt.Fprintf(out, "%c[%dA", 27, diff) } } err := jm.Display(out, isTerminal) if jm.ID != "" { if isTerminal { // [{diff}B = move cursor down diff rows fmt.Fprintf(out, "%c[%dB", 27, diff) } } if err != nil { return err } } return nil } docker-0.9.1/utils/fs.go0000644000175000017500000000365512314376205013213 0ustar tagtagpackage utils import ( "fmt" "os" "path/filepath" "strings" "syscall" ) // TreeSize walks a directory tree and returns its total size in bytes. func TreeSize(dir string) (size int64, err error) { data := make(map[uint64]bool) err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, e error) error { // Ignore directory sizes if fileInfo == nil { return nil } s := fileInfo.Size() if fileInfo.IsDir() || s == 0 { return nil } // Check inode to handle hard links correctly inode := fileInfo.Sys().(*syscall.Stat_t).Ino // inode is not a uint64 on all platforms. Cast it to avoid issues. if _, exists := data[uint64(inode)]; exists { return nil } // inode is not a uint64 on all platforms. Cast it to avoid issues. data[uint64(inode)] = false size += s return nil }) return } // FollowSymlink will follow an existing link and scope it to the root // path provided. 
func FollowSymlinkInScope(link, root string) (string, error) { prev := "/" root, err := filepath.Abs(root) if err != nil { return "", err } link, err = filepath.Abs(link) if err != nil { return "", err } if !strings.HasPrefix(filepath.Dir(link), root) { return "", fmt.Errorf("%s is not within %s", link, root) } for _, p := range strings.Split(link, "/") { prev = filepath.Join(prev, p) prev = filepath.Clean(prev) for { stat, err := os.Lstat(prev) if err != nil { if os.IsNotExist(err) { break } return "", err } if stat.Mode()&os.ModeSymlink == os.ModeSymlink { dest, err := os.Readlink(prev) if err != nil { return "", err } switch dest[0] { case '/': prev = filepath.Join(root, dest) case '.': prev, _ = filepath.Abs(prev) if prev = filepath.Clean(filepath.Join(filepath.Dir(prev), dest)); len(prev) < len(root) { prev = filepath.Join(root, filepath.Base(dest)) } } } else { break } } } return prev, nil } docker-0.9.1/utils/testdata/0000755000175000017500000000000012314376205014054 5ustar tagtagdocker-0.9.1/utils/testdata/fs/0000755000175000017500000000000012314376205014464 5ustar tagtagdocker-0.9.1/utils/testdata/fs/g0000777000175000017500000000000012314376205020744 2../../../../../../../../../../../../rootustar tagtagdocker-0.9.1/utils/testdata/fs/a/0000755000175000017500000000000012314376205014704 5ustar tagtagdocker-0.9.1/utils/testdata/fs/a/f0000777000175000017500000000000012314376205017027 2../../../../testustar tagtagdocker-0.9.1/utils/testdata/fs/a/d0000777000175000017500000000000012314376205015272 2/bustar tagtagdocker-0.9.1/utils/testdata/fs/a/e0000777000175000017500000000000012314376205015427 2../bustar tagtagdocker-0.9.1/utils/testdata/fs/b/0000755000175000017500000000000012314376205014705 5ustar tagtagdocker-0.9.1/utils/testdata/fs/b/h0000777000175000017500000000000012314376205015440 2../gustar tagtagdocker-0.9.1/utils/uname_linux.go0000644000175000017500000000035612314376205015122 0ustar tagtag// +build amd64 package utils import ( "syscall" ) type Utsname syscall.Utsname func uname() (*syscall.Utsname, error) { uts := &syscall.Utsname{} if err := syscall.Uname(uts); err != nil { return nil, err } return uts, nil } docker-0.9.1/utils/streamformatter.go0000644000175000017500000000370712314376205016020 0ustar tagtagpackage utils import ( "encoding/json" "fmt" ) type StreamFormatter struct { json bool used bool } func NewStreamFormatter(json bool) *StreamFormatter { return &StreamFormatter{json, false} } const streamNewline = "\r\n" var streamNewlineBytes = []byte(streamNewline) func (sf *StreamFormatter) FormatStream(str string) []byte { sf.used = true if sf.json { b, err := json.Marshal(&JSONMessage{Stream: str}) if err != nil { return sf.FormatError(err) } return append(b, streamNewlineBytes...) } return []byte(str + "\r") } func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []byte { sf.used = true str := fmt.Sprintf(format, a...) if sf.json { b, err := json.Marshal(&JSONMessage{ID: id, Status: str}) if err != nil { return sf.FormatError(err) } return append(b, streamNewlineBytes...) } return []byte(str + streamNewline) } func (sf *StreamFormatter) FormatError(err error) []byte { sf.used = true if sf.json { jsonError, ok := err.(*JSONError) if !ok { jsonError = &JSONError{Message: err.Error()} } if b, err := json.Marshal(&JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil { return append(b, streamNewlineBytes...) 
} return []byte("{\"error\":\"format error\"}" + streamNewline) } return []byte("Error: " + err.Error() + streamNewline) } func (sf *StreamFormatter) FormatProgress(id, action string, progress *JSONProgress) []byte { if progress == nil { progress = &JSONProgress{} } sf.used = true if sf.json { b, err := json.Marshal(&JSONMessage{ Status: action, ProgressMessage: progress.String(), Progress: progress, ID: id, }) if err != nil { return nil } return b } endl := "\r" if progress.String() == "" { endl += "\n" } return []byte(action + " " + progress.String() + endl) } func (sf *StreamFormatter) Used() bool { return sf.used } func (sf *StreamFormatter) Json() bool { return sf.json } docker-0.9.1/utils/stdcopy.go0000644000175000017500000000747412314376205014273 0ustar tagtagpackage utils import ( "encoding/binary" "errors" "io" ) const ( StdWriterPrefixLen = 8 StdWriterFdIndex = 0 StdWriterSizeIndex = 4 ) type StdType [StdWriterPrefixLen]byte var ( Stdin StdType = StdType{0: 0} Stdout StdType = StdType{0: 1} Stderr StdType = StdType{0: 2} ) type StdWriter struct { io.Writer prefix StdType sizeBuf []byte } func (w *StdWriter) Write(buf []byte) (n int, err error) { if w == nil || w.Writer == nil { return 0, errors.New("Writer not instanciated") } binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf))) buf = append(w.prefix[:], buf...) n, err = w.Writer.Write(buf) return n - StdWriterPrefixLen, err } // NewStdWriter instanciates a new Writer. // Everything written to it will be encapsulated using a custom format, // and written to the underlying `w` stream. // This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. // `t` indicates the id of the stream to encapsulate. // It can be utils.Stdin, utils.Stdout, utils.Stderr. func NewStdWriter(w io.Writer, t StdType) *StdWriter { if len(t) != StdWriterPrefixLen { return nil } return &StdWriter{ Writer: w, prefix: t, sizeBuf: make([]byte, 4), } } var ErrInvalidStdHeader = errors.New("Unrecognized input header") // StdCopy is a modified version of io.Copy. // // StdCopy will demultiplex `src`, assuming that it contains two streams, // previously multiplexed together using a StdWriter instance. // As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. // // StdCopy will read until it hits EOF on `src`. It will then return a nil error. // In other words: if `err` is non nil, it indicates a real underlying error. // // `written` will hold the total number of bytes written to `dstout` and `dsterr`. func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { var ( buf = make([]byte, 32*1024+StdWriterPrefixLen+1) bufLen = len(buf) nr, nw int er, ew error out io.Writer frameSize int ) for { // Make sure we have at least a full header for nr < StdWriterPrefixLen { var nr2 int nr2, er = src.Read(buf[nr:]) if er == io.EOF { return written, nil } if er != nil { return 0, er } nr += nr2 } // Check the first byte to know where to write switch buf[StdWriterFdIndex] { case 0: fallthrough case 1: // Write on stdout out = dstout case 2: // Write on stderr out = dsterr default: Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex]) return 0, ErrInvalidStdHeader } // Retrieve the size of the frame frameSize = int(binary.BigEndian.Uint32(buf[StdWriterSizeIndex : StdWriterSizeIndex+4])) // Check if the buffer is big enough to read the frame. // Extend it if necessary. 
if frameSize+StdWriterPrefixLen > bufLen { Debugf("Extending buffer cap.") buf = append(buf, make([]byte, frameSize-len(buf)+1)...) bufLen = len(buf) } // While the amount of bytes read is less than the size of the frame + header, we keep reading for nr < frameSize+StdWriterPrefixLen { var nr2 int nr2, er = src.Read(buf[nr:]) if er == io.EOF { return written, nil } if er != nil { Debugf("Error reading frame: %s", er) return 0, er } nr += nr2 } // Write the retrieved frame (without header) nw, ew = out.Write(buf[StdWriterPrefixLen : frameSize+StdWriterPrefixLen]) if nw > 0 { written += int64(nw) } if ew != nil { Debugf("Error writing frame: %s", ew) return 0, ew } // If the frame has not been fully written: error if nw != frameSize { Debugf("Error Short Write: (%d on %d)", nw, frameSize) return 0, io.ErrShortWrite } // Move the rest of the buffer to the beginning copy(buf, buf[frameSize+StdWriterPrefixLen:]) // Move the index nr -= frameSize + StdWriterPrefixLen } } docker-0.9.1/utils/checksum.go0000644000175000017500000000052212314376205014373 0ustar tagtagpackage utils import ( "encoding/hex" "hash" "io" ) type CheckSum struct { io.Reader Hash hash.Hash } func (cs *CheckSum) Read(buf []byte) (int, error) { n, err := cs.Reader.Read(buf) if err == nil { cs.Hash.Write(buf[:n]) } return n, err } func (cs *CheckSum) Sum() string { return hex.EncodeToString(cs.Hash.Sum(nil)) } docker-0.9.1/utils/fs_test.go0000644000175000017500000000335112314376205014243 0ustar tagtagpackage utils import ( "path/filepath" "testing" ) func abs(t *testing.T, p string) string { o, err := filepath.Abs(p) if err != nil { t.Fatal(err) } return o } func TestFollowSymLinkNormal(t *testing.T) { link := "testdata/fs/a/d/c/data" rewrite, err := FollowSymlinkInScope(link, "testdata") if err != nil { t.Fatal(err) } if expected := abs(t, "testdata/b/c/data"); expected != rewrite { t.Fatalf("Expected %s got %s", expected, rewrite) } } func TestFollowSymLinkRandomString(t *testing.T) { if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil { t.Fatal("Random string should fail but didn't") } } func TestFollowSymLinkLastLink(t *testing.T) { link := "testdata/fs/a/d" rewrite, err := FollowSymlinkInScope(link, "testdata") if err != nil { t.Fatal(err) } if expected := abs(t, "testdata/b"); expected != rewrite { t.Fatalf("Expected %s got %s", expected, rewrite) } } func TestFollowSymLinkRelativeLink(t *testing.T) { link := "testdata/fs/a/e/c/data" rewrite, err := FollowSymlinkInScope(link, "testdata") if err != nil { t.Fatal(err) } if expected := abs(t, "testdata/fs/b/c/data"); expected != rewrite { t.Fatalf("Expected %s got %s", expected, rewrite) } } func TestFollowSymLinkRelativeLinkScope(t *testing.T) { link := "testdata/fs/a/f" rewrite, err := FollowSymlinkInScope(link, "testdata") if err != nil { t.Fatal(err) } if expected := abs(t, "testdata/test"); expected != rewrite { t.Fatalf("Expected %s got %s", expected, rewrite) } link = "testdata/fs/b/h" rewrite, err = FollowSymlinkInScope(link, "testdata") if err != nil { t.Fatal(err) } if expected := abs(t, "testdata/root"); expected != rewrite { t.Fatalf("Expected %s got %s", expected, rewrite) } } docker-0.9.1/dockerinit/0000755000175000017500000000000012314376205013236 5ustar tagtagdocker-0.9.1/dockerinit/dockerinit.go0000644000175000017500000000020412314376205015714 0ustar tagtagpackage main import ( "github.com/dotcloud/docker/sysinit" ) func main() { // Running in init mode sysinit.SysInit() return } 
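// Usage sketch (an illustration, not part of the original tree): StdWriter
// multiplexes stdout and stderr onto a single stream and StdCopy splits
// them apart again. Assuming the utils package above is importable as
// github.com/dotcloud/docker/utils, a minimal round trip looks like:
//
//	var muxed bytes.Buffer
//
//	// Wrap one shared stream in two prefixed writers.
//	outW := utils.NewStdWriter(&muxed, utils.Stdout)
//	errW := utils.NewStdWriter(&muxed, utils.Stderr)
//	fmt.Fprintln(outW, "this line belongs to stdout")
//	fmt.Fprintln(errW, "this line belongs to stderr")
//
//	// Demultiplex the combined stream back onto the real stdout/stderr.
//	if _, err := utils.StdCopy(os.Stdout, os.Stderr, &muxed); err != nil {
//		fmt.Fprintln(os.Stderr, "stdcopy:", err)
//	}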
docker-0.9.1/networkdriver/0000755000175000017500000000000012314376205014010 5ustar tagtagdocker-0.9.1/networkdriver/portallocator/0000755000175000017500000000000012314376205016675 5ustar tagtagdocker-0.9.1/networkdriver/portallocator/portallocator.go0000644000175000017500000000712512314376205022116 0ustar tagtagpackage portallocator import ( "errors" "github.com/dotcloud/docker/pkg/collections" "net" "sync" ) const ( BeginPortRange = 49153 EndPortRange = 65535 ) type ( portMappings map[string]*collections.OrderedIntSet ipMapping map[string]portMappings ) var ( ErrPortAlreadyAllocated = errors.New("port has already been allocated") ErrPortExceedsRange = errors.New("port exceeds upper range") ErrUnknownProtocol = errors.New("unknown protocol") ) var ( currentDynamicPort = map[string]int{ "tcp": BeginPortRange - 1, "udp": BeginPortRange - 1, } defaultIP = net.ParseIP("0.0.0.0") defaultAllocatedPorts = portMappings{} otherAllocatedPorts = ipMapping{} lock = sync.Mutex{} ) func init() { defaultAllocatedPorts["tcp"] = collections.NewOrderedIntSet() defaultAllocatedPorts["udp"] = collections.NewOrderedIntSet() } // RequestPort returns an available port if the port is 0 // If the provided port is not 0 then it will be checked if // it is available for allocation func RequestPort(ip net.IP, proto string, port int) (int, error) { lock.Lock() defer lock.Unlock() if err := validateProtocol(proto); err != nil { return 0, err } // If the user requested a specific port to be allocated if port > 0 { if err := registerSetPort(ip, proto, port); err != nil { return 0, err } return port, nil } return registerDynamicPort(ip, proto) } // ReleasePort will return the provided port back into the // pool for reuse func ReleasePort(ip net.IP, proto string, port int) error { lock.Lock() defer lock.Unlock() if err := validateProtocol(proto); err != nil { return err } allocated := defaultAllocatedPorts[proto] allocated.Remove(port) if !equalsDefault(ip) { registerIP(ip) // Remove the port for the specific ip address allocated = otherAllocatedPorts[ip.String()][proto] allocated.Remove(port) } return nil } func ReleaseAll() error { lock.Lock() defer lock.Unlock() currentDynamicPort["tcp"] = BeginPortRange - 1 currentDynamicPort["udp"] = BeginPortRange - 1 defaultAllocatedPorts = portMappings{} defaultAllocatedPorts["tcp"] = collections.NewOrderedIntSet() defaultAllocatedPorts["udp"] = collections.NewOrderedIntSet() otherAllocatedPorts = ipMapping{} return nil } func registerDynamicPort(ip net.IP, proto string) (int, error) { allocated := defaultAllocatedPorts[proto] port := nextPort(proto) if port > EndPortRange { return 0, ErrPortExceedsRange } if !equalsDefault(ip) { registerIP(ip) ipAllocated := otherAllocatedPorts[ip.String()][proto] ipAllocated.Push(port) } else { allocated.Push(port) } return port, nil } func registerSetPort(ip net.IP, proto string, port int) error { allocated := defaultAllocatedPorts[proto] if allocated.Exists(port) { return ErrPortAlreadyAllocated } if !equalsDefault(ip) { registerIP(ip) ipAllocated := otherAllocatedPorts[ip.String()][proto] if ipAllocated.Exists(port) { return ErrPortAlreadyAllocated } ipAllocated.Push(port) } else { allocated.Push(port) } return nil } func equalsDefault(ip net.IP) bool { return ip == nil || ip.Equal(defaultIP) } func nextPort(proto string) int { c := currentDynamicPort[proto] + 1 currentDynamicPort[proto] = c return c } func registerIP(ip net.IP) { if _, exists := otherAllocatedPorts[ip.String()]; !exists { otherAllocatedPorts[ip.String()] = portMappings{ 
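// Each non-default IP gets its own pair of ordered sets, one per
// protocol, mirroring the layout of defaultAllocatedPorts.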
"tcp": collections.NewOrderedIntSet(), "udp": collections.NewOrderedIntSet(), } } } func validateProtocol(proto string) error { if _, exists := defaultAllocatedPorts[proto]; !exists { return ErrUnknownProtocol } return nil } docker-0.9.1/networkdriver/portallocator/portallocator_test.go0000644000175000017500000000763612314376205023164 0ustar tagtagpackage portallocator import ( "net" "testing" ) func reset() { ReleaseAll() } func TestRequestNewPort(t *testing.T) { defer reset() port, err := RequestPort(defaultIP, "tcp", 0) if err != nil { t.Fatal(err) } if expected := BeginPortRange; port != expected { t.Fatalf("Expected port %d got %d", expected, port) } } func TestRequestSpecificPort(t *testing.T) { defer reset() port, err := RequestPort(defaultIP, "tcp", 5000) if err != nil { t.Fatal(err) } if port != 5000 { t.Fatalf("Expected port 5000 got %d", port) } } func TestReleasePort(t *testing.T) { defer reset() port, err := RequestPort(defaultIP, "tcp", 5000) if err != nil { t.Fatal(err) } if port != 5000 { t.Fatalf("Expected port 5000 got %d", port) } if err := ReleasePort(defaultIP, "tcp", 5000); err != nil { t.Fatal(err) } } func TestReuseReleasedPort(t *testing.T) { defer reset() port, err := RequestPort(defaultIP, "tcp", 5000) if err != nil { t.Fatal(err) } if port != 5000 { t.Fatalf("Expected port 5000 got %d", port) } if err := ReleasePort(defaultIP, "tcp", 5000); err != nil { t.Fatal(err) } port, err = RequestPort(defaultIP, "tcp", 5000) if err != nil { t.Fatal(err) } } func TestReleaseUnreadledPort(t *testing.T) { defer reset() port, err := RequestPort(defaultIP, "tcp", 5000) if err != nil { t.Fatal(err) } if port != 5000 { t.Fatalf("Expected port 5000 got %d", port) } port, err = RequestPort(defaultIP, "tcp", 5000) if err != ErrPortAlreadyAllocated { t.Fatalf("Expected error %s got %s", ErrPortAlreadyAllocated, err) } } func TestUnknowProtocol(t *testing.T) { defer reset() if _, err := RequestPort(defaultIP, "tcpp", 0); err != ErrUnknownProtocol { t.Fatalf("Expected error %s got %s", ErrUnknownProtocol, err) } } func TestAllocateAllPorts(t *testing.T) { defer reset() for i := 0; i <= EndPortRange-BeginPortRange; i++ { port, err := RequestPort(defaultIP, "tcp", 0) if err != nil { t.Fatal(err) } if expected := BeginPortRange + i; port != expected { t.Fatalf("Expected port %d got %d", expected, port) } } if _, err := RequestPort(defaultIP, "tcp", 0); err != ErrPortExceedsRange { t.Fatalf("Expected error %s got %s", ErrPortExceedsRange, err) } _, err := RequestPort(defaultIP, "udp", 0) if err != nil { t.Fatal(err) } } func BenchmarkAllocatePorts(b *testing.B) { defer reset() for i := 0; i < b.N; i++ { for i := 0; i <= EndPortRange-BeginPortRange; i++ { port, err := RequestPort(defaultIP, "tcp", 0) if err != nil { b.Fatal(err) } if expected := BeginPortRange + i; port != expected { b.Fatalf("Expected port %d got %d", expected, port) } } reset() } } func TestPortAllocation(t *testing.T) { defer reset() ip := net.ParseIP("192.168.0.1") ip2 := net.ParseIP("192.168.0.2") if port, err := RequestPort(ip, "tcp", 80); err != nil { t.Fatal(err) } else if port != 80 { t.Fatalf("Acquire(80) should return 80, not %d", port) } port, err := RequestPort(ip, "tcp", 0) if err != nil { t.Fatal(err) } if port <= 0 { t.Fatalf("Acquire(0) should return a non-zero port") } if _, err := RequestPort(ip, "tcp", port); err == nil { t.Fatalf("Acquiring a port already in use should return an error") } if newPort, err := RequestPort(ip, "tcp", 0); err != nil { t.Fatal(err) } else if newPort == port { 
t.Fatalf("Acquire(0) allocated the same port twice: %d", port) } if _, err := RequestPort(ip, "tcp", 80); err == nil { t.Fatalf("Acquiring a port already in use should return an error") } if _, err := RequestPort(ip2, "tcp", 80); err != nil { t.Fatalf("It should be possible to allocate the same port on a different interface") } if _, err := RequestPort(ip2, "tcp", 80); err == nil { t.Fatalf("Acquiring a port already in use should return an error") } if err := ReleasePort(ip, "tcp", 80); err != nil { t.Fatal(err) } if _, err := RequestPort(ip, "tcp", 80); err != nil { t.Fatal(err) } } docker-0.9.1/networkdriver/utils.go0000644000175000017500000000517512314376205015507 0ustar tagtagpackage networkdriver import ( "encoding/binary" "errors" "fmt" "net" "github.com/dotcloud/docker/pkg/netlink" ) var ( networkGetRoutesFct = netlink.NetworkGetRoutes ErrNoDefaultRoute = errors.New("no default route") ) func CheckNameserverOverlaps(nameservers []string, toCheck *net.IPNet) error { if len(nameservers) > 0 { for _, ns := range nameservers { _, nsNetwork, err := net.ParseCIDR(ns) if err != nil { return err } if NetworkOverlaps(toCheck, nsNetwork) { return ErrNetworkOverlapsWithNameservers } } } return nil } func CheckRouteOverlaps(toCheck *net.IPNet) error { networks, err := networkGetRoutesFct() if err != nil { return err } for _, network := range networks { if network.IPNet != nil && NetworkOverlaps(toCheck, network.IPNet) { return ErrNetworkOverlaps } } return nil } // Detects overlap between one IPNet and another func NetworkOverlaps(netX *net.IPNet, netY *net.IPNet) bool { if firstIP, _ := NetworkRange(netX); netY.Contains(firstIP) { return true } if firstIP, _ := NetworkRange(netY); netX.Contains(firstIP) { return true } return false } // Calculates the first and last IP addresses in an IPNet func NetworkRange(network *net.IPNet) (net.IP, net.IP) { var ( netIP = network.IP.To4() firstIP = netIP.Mask(network.Mask) lastIP = net.IPv4(0, 0, 0, 0).To4() ) for i := 0; i < len(lastIP); i++ { lastIP[i] = netIP[i] | ^network.Mask[i] } return firstIP, lastIP } // Given a netmask, calculates the number of available hosts func NetworkSize(mask net.IPMask) int32 { m := net.IPv4Mask(0, 0, 0, 0) for i := 0; i < net.IPv4len; i++ { m[i] = ^mask[i] } return int32(binary.BigEndian.Uint32(m)) + 1 } // Return the IPv4 address of a network interface func GetIfaceAddr(name string) (net.Addr, error) { iface, err := net.InterfaceByName(name) if err != nil { return nil, err } addrs, err := iface.Addrs() if err != nil { return nil, err } var addrs4 []net.Addr for _, addr := range addrs { ip := (addr.(*net.IPNet)).IP if ip4 := ip.To4(); len(ip4) == net.IPv4len { addrs4 = append(addrs4, addr) } } switch { case len(addrs4) == 0: return nil, fmt.Errorf("Interface %v has no IP addresses", name) case len(addrs4) > 1: fmt.Printf("Interface %v has more than 1 IPv4 address. 
Defaulting to using %v\n", name, (addrs4[0].(*net.IPNet)).IP) } return addrs4[0], nil } func GetDefaultRouteIface() (*net.Interface, error) { rs, err := networkGetRoutesFct() if err != nil { return nil, fmt.Errorf("unable to get routes: %v", err) } for _, r := range rs { if r.Default { return r.Iface, nil } } return nil, ErrNoDefaultRoute } docker-0.9.1/networkdriver/network_test.go0000644000175000017500000001203612314376205017071 0ustar tagtagpackage networkdriver import ( "github.com/dotcloud/docker/pkg/netlink" "net" "testing" ) func TestNonOverlapingNameservers(t *testing.T) { network := &net.IPNet{ IP: []byte{192, 168, 0, 1}, Mask: []byte{255, 255, 255, 0}, } nameservers := []string{ "127.0.0.1/32", } if err := CheckNameserverOverlaps(nameservers, network); err != nil { t.Fatal(err) } } func TestOverlapingNameservers(t *testing.T) { network := &net.IPNet{ IP: []byte{192, 168, 0, 1}, Mask: []byte{255, 255, 255, 0}, } nameservers := []string{ "192.168.0.1/32", } if err := CheckNameserverOverlaps(nameservers, network); err == nil { t.Fatalf("Expected error %s got %s", ErrNetworkOverlapsWithNameservers, err) } } func TestCheckRouteOverlaps(t *testing.T) { orig := networkGetRoutesFct defer func() { networkGetRoutesFct = orig }() networkGetRoutesFct = func() ([]netlink.Route, error) { routesData := []string{"10.0.2.0/32", "10.0.3.0/24", "10.0.42.0/24", "172.16.42.0/24", "192.168.142.0/24"} routes := []netlink.Route{} for _, addr := range routesData { _, netX, _ := net.ParseCIDR(addr) routes = append(routes, netlink.Route{IPNet: netX}) } return routes, nil } _, netX, _ := net.ParseCIDR("172.16.0.1/24") if err := CheckRouteOverlaps(netX); err != nil { t.Fatal(err) } _, netX, _ = net.ParseCIDR("10.0.2.0/24") if err := CheckRouteOverlaps(netX); err == nil { t.Fatalf("10.0.2.0/24 and 10.0.2.0 should overlap but it doesn't") } } func TestCheckNameserverOverlaps(t *testing.T) { nameservers := []string{"10.0.2.3/32", "192.168.102.1/32"} _, netX, _ := net.ParseCIDR("10.0.2.3/32") if err := CheckNameserverOverlaps(nameservers, netX); err == nil { t.Fatalf("%s should overlap 10.0.2.3/32 but doesn't", netX) } _, netX, _ = net.ParseCIDR("192.168.102.2/32") if err := CheckNameserverOverlaps(nameservers, netX); err != nil { t.Fatalf("%s should not overlap %v but it does", netX, nameservers) } } func AssertOverlap(CIDRx string, CIDRy string, t *testing.T) { _, netX, _ := net.ParseCIDR(CIDRx) _, netY, _ := net.ParseCIDR(CIDRy) if !NetworkOverlaps(netX, netY) { t.Errorf("%v and %v should overlap", netX, netY) } } func AssertNoOverlap(CIDRx string, CIDRy string, t *testing.T) { _, netX, _ := net.ParseCIDR(CIDRx) _, netY, _ := net.ParseCIDR(CIDRy) if NetworkOverlaps(netX, netY) { t.Errorf("%v and %v should not overlap", netX, netY) } } func TestNetworkOverlaps(t *testing.T) { //netY starts at same IP and ends within netX AssertOverlap("172.16.0.1/24", "172.16.0.1/25", t) //netY starts within netX and ends at same IP AssertOverlap("172.16.0.1/24", "172.16.0.128/25", t) //netY starts and ends within netX AssertOverlap("172.16.0.1/24", "172.16.0.64/25", t) //netY starts at same IP and ends outside of netX AssertOverlap("172.16.0.1/24", "172.16.0.1/23", t) //netY starts before and ends at same IP of netX AssertOverlap("172.16.1.1/24", "172.16.0.1/23", t) //netY starts before and ends outside of netX AssertOverlap("172.16.1.1/24", "172.16.0.1/22", t) //netY starts and ends before netX AssertNoOverlap("172.16.1.1/25", "172.16.0.1/24", t) //netX starts and ends before netY AssertNoOverlap("172.16.1.1/25", 
"172.16.2.1/24", t) } func TestNetworkRange(t *testing.T) { // Simple class C test _, network, _ := net.ParseCIDR("192.168.0.1/24") first, last := NetworkRange(network) if !first.Equal(net.ParseIP("192.168.0.0")) { t.Error(first.String()) } if !last.Equal(net.ParseIP("192.168.0.255")) { t.Error(last.String()) } if size := NetworkSize(network.Mask); size != 256 { t.Error(size) } // Class A test _, network, _ = net.ParseCIDR("10.0.0.1/8") first, last = NetworkRange(network) if !first.Equal(net.ParseIP("10.0.0.0")) { t.Error(first.String()) } if !last.Equal(net.ParseIP("10.255.255.255")) { t.Error(last.String()) } if size := NetworkSize(network.Mask); size != 16777216 { t.Error(size) } // Class A, random IP address _, network, _ = net.ParseCIDR("10.1.2.3/8") first, last = NetworkRange(network) if !first.Equal(net.ParseIP("10.0.0.0")) { t.Error(first.String()) } if !last.Equal(net.ParseIP("10.255.255.255")) { t.Error(last.String()) } // 32bit mask _, network, _ = net.ParseCIDR("10.1.2.3/32") first, last = NetworkRange(network) if !first.Equal(net.ParseIP("10.1.2.3")) { t.Error(first.String()) } if !last.Equal(net.ParseIP("10.1.2.3")) { t.Error(last.String()) } if size := NetworkSize(network.Mask); size != 1 { t.Error(size) } // 31bit mask _, network, _ = net.ParseCIDR("10.1.2.3/31") first, last = NetworkRange(network) if !first.Equal(net.ParseIP("10.1.2.2")) { t.Error(first.String()) } if !last.Equal(net.ParseIP("10.1.2.3")) { t.Error(last.String()) } if size := NetworkSize(network.Mask); size != 2 { t.Error(size) } // 26bit mask _, network, _ = net.ParseCIDR("10.1.2.3/26") first, last = NetworkRange(network) if !first.Equal(net.ParseIP("10.1.2.0")) { t.Error(first.String()) } if !last.Equal(net.ParseIP("10.1.2.63")) { t.Error(last.String()) } if size := NetworkSize(network.Mask); size != 64 { t.Error(size) } } docker-0.9.1/networkdriver/network.go0000644000175000017500000000036712314376205016036 0ustar tagtagpackage networkdriver import ( "errors" ) var ( ErrNetworkOverlapsWithNameservers = errors.New("requested network overlaps with nameserver") ErrNetworkOverlaps = errors.New("requested network overlaps with existing network") ) docker-0.9.1/networkdriver/lxc/0000755000175000017500000000000012314376205014576 5ustar tagtagdocker-0.9.1/networkdriver/lxc/driver.go0000644000175000017500000003267212314376205016432 0ustar tagtagpackage lxc import ( "fmt" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/networkdriver" "github.com/dotcloud/docker/networkdriver/ipallocator" "github.com/dotcloud/docker/networkdriver/portallocator" "github.com/dotcloud/docker/networkdriver/portmapper" "github.com/dotcloud/docker/pkg/iptables" "github.com/dotcloud/docker/pkg/netlink" "github.com/dotcloud/docker/utils" "io/ioutil" "log" "net" "strings" "syscall" "unsafe" ) const ( DefaultNetworkBridge = "docker0" siocBRADDBR = 0x89a0 ) // Network interface represents the networking stack of a container type networkInterface struct { IP net.IP PortMappings []net.Addr // there are mappings to the host interfaces } var ( addrs = []string{ // Here we don't follow the convention of using the 1st IP of the range for the gateway. // This is to use the same gateway IPs as the /24 ranges, which predate the /16 ranges. // In theory this shouldn't matter - in practice there's bound to be a few scripts relying // on the internal addressing or other stupid things like that. // The shouldn't, but hey, let's not break them unless we really have to. 
"172.17.42.1/16", // Don't use 172.16.0.0/16, it conflicts with EC2 DNS 172.16.0.23 "10.0.42.1/16", // Don't even try using the entire /8, that's too intrusive "10.1.42.1/16", "10.42.42.1/16", "172.16.42.1/24", "172.16.43.1/24", "172.16.44.1/24", "10.0.42.1/24", "10.0.43.1/24", "192.168.42.1/24", "192.168.43.1/24", "192.168.44.1/24", } bridgeIface string bridgeNetwork *net.IPNet defaultBindingIP = net.ParseIP("0.0.0.0") currentInterfaces = make(map[string]*networkInterface) ) func InitDriver(job *engine.Job) engine.Status { var ( network *net.IPNet enableIPTables = job.GetenvBool("EnableIptables") icc = job.GetenvBool("InterContainerCommunication") ipForward = job.GetenvBool("EnableIpForward") bridgeIP = job.Getenv("BridgeIP") ) if defaultIP := job.Getenv("DefaultBindingIP"); defaultIP != "" { defaultBindingIP = net.ParseIP(defaultIP) } bridgeIface = job.Getenv("BridgeIface") if bridgeIface == "" { bridgeIface = DefaultNetworkBridge } addr, err := networkdriver.GetIfaceAddr(bridgeIface) if err != nil { // If the iface is not found, try to create it job.Logf("creating new bridge for %s", bridgeIface) if err := createBridge(bridgeIP); err != nil { job.Error(err) return engine.StatusErr } job.Logf("getting iface addr") addr, err = networkdriver.GetIfaceAddr(bridgeIface) if err != nil { job.Error(err) return engine.StatusErr } network = addr.(*net.IPNet) } else { network = addr.(*net.IPNet) } // Configure iptables for link support if enableIPTables { if err := setupIPTables(addr, icc); err != nil { job.Error(err) return engine.StatusErr } } if ipForward { // Enable IPv4 forwarding if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte{'1', '\n'}, 0644); err != nil { job.Logf("WARNING: unable to enable IPv4 forwarding: %s\n", err) } } // We can always try removing the iptables if err := iptables.RemoveExistingChain("DOCKER"); err != nil { job.Error(err) return engine.StatusErr } if enableIPTables { chain, err := iptables.NewChain("DOCKER", bridgeIface) if err != nil { job.Error(err) return engine.StatusErr } portmapper.SetIptablesChain(chain) } bridgeNetwork = network // https://github.com/dotcloud/docker/issues/2768 job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeNetwork.IP) for name, f := range map[string]engine.Handler{ "allocate_interface": Allocate, "release_interface": Release, "allocate_port": AllocatePort, "link": LinkContainers, } { if err := job.Eng.Register(name, f); err != nil { job.Error(err) return engine.StatusErr } } return engine.StatusOK } func setupIPTables(addr net.Addr, icc bool) error { // Enable NAT natArgs := []string{"POSTROUTING", "-t", "nat", "-s", addr.String(), "!", "-d", addr.String(), "-j", "MASQUERADE"} if !iptables.Exists(natArgs...) { if output, err := iptables.Raw(append([]string{"-I"}, natArgs...)...); err != nil { return fmt.Errorf("Unable to enable network bridge NAT: %s", err) } else if len(output) != 0 { return fmt.Errorf("Error iptables postrouting: %s", output) } } var ( args = []string{"FORWARD", "-i", bridgeIface, "-o", bridgeIface, "-j"} acceptArgs = append(args, "ACCEPT") dropArgs = append(args, "DROP") ) if !icc { iptables.Raw(append([]string{"-D"}, acceptArgs...)...) if !iptables.Exists(dropArgs...) 
{ utils.Debugf("Disable inter-container communication") if output, err := iptables.Raw(append([]string{"-I"}, dropArgs...)...); err != nil { return fmt.Errorf("Unable to prevent intercontainer communication: %s", err) } else if len(output) != 0 { return fmt.Errorf("Error disabling intercontainer communication: %s", output) } } } else { iptables.Raw(append([]string{"-D"}, dropArgs...)...) if !iptables.Exists(acceptArgs...) { utils.Debugf("Enable inter-container communication") if output, err := iptables.Raw(append([]string{"-I"}, acceptArgs...)...); err != nil { return fmt.Errorf("Unable to allow intercontainer communication: %s", err) } else if len(output) != 0 { return fmt.Errorf("Error enabling intercontainer communication: %s", output) } } } // Accept all non-intercontainer outgoing packets outgoingArgs := []string{"FORWARD", "-i", bridgeIface, "!", "-o", bridgeIface, "-j", "ACCEPT"} if !iptables.Exists(outgoingArgs...) { if output, err := iptables.Raw(append([]string{"-I"}, outgoingArgs...)...); err != nil { return fmt.Errorf("Unable to allow outgoing packets: %s", err) } else if len(output) != 0 { return fmt.Errorf("Error iptables allow outgoing: %s", output) } } // Accept incoming packets for existing connections existingArgs := []string{"FORWARD", "-o", bridgeIface, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"} if !iptables.Exists(existingArgs...) { if output, err := iptables.Raw(append([]string{"-I"}, existingArgs...)...); err != nil { return fmt.Errorf("Unable to allow incoming packets: %s", err) } else if len(output) != 0 { return fmt.Errorf("Error iptables allow incoming: %s", output) } } return nil } // CreateBridgeIface creates a network bridge interface on the host system with the name `ifaceName`, // and attempts to configure it with an address which doesn't conflict with any other interface on the host. // If it can't find an address which doesn't conflict, it will return an error. func createBridge(bridgeIP string) error { nameservers := []string{} resolvConf, _ := utils.GetResolvConf() // we don't check for an error here, because we don't really care // if we can't read /etc/resolv.conf. So instead we skip the append // if resolvConf is nil. It either doesn't exist, or we can't read it // for some reason. if resolvConf != nil { nameservers = append(nameservers, utils.GetNameserversAsCIDR(resolvConf)...) } var ifaceAddr string if len(bridgeIP) != 0 { _, _, err := net.ParseCIDR(bridgeIP) if err != nil { return err } ifaceAddr = bridgeIP } else { for _, addr := range addrs { _, dockerNetwork, err := net.ParseCIDR(addr) if err != nil { return err } if err := networkdriver.CheckNameserverOverlaps(nameservers, dockerNetwork); err == nil { if err := networkdriver.CheckRouteOverlaps(dockerNetwork); err == nil { ifaceAddr = addr break } else { utils.Debugf("%s %s", addr, err) } } } } if ifaceAddr == "" { return fmt.Errorf("Could not find a free IP address range for interface '%s'. 
Please configure its address manually and run 'docker -b %s'", bridgeIface, bridgeIface) } utils.Debugf("Creating bridge %s with network %s", bridgeIface, ifaceAddr) if err := createBridgeIface(bridgeIface); err != nil { return err } iface, err := net.InterfaceByName(bridgeIface) if err != nil { return err } ipAddr, ipNet, err := net.ParseCIDR(ifaceAddr) if err != nil { return err } if netlink.NetworkLinkAddIp(iface, ipAddr, ipNet); err != nil { return fmt.Errorf("Unable to add private network: %s", err) } if err := netlink.NetworkLinkUp(iface); err != nil { return fmt.Errorf("Unable to start network bridge: %s", err) } return nil } // Create the actual bridge device. This is more backward-compatible than // netlink.NetworkLinkAdd and works on RHEL 6. func createBridgeIface(name string) error { s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_IP) if err != nil { utils.Debugf("Bridge socket creation failed IPv6 probably not enabled: %v", err) s, err = syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_IP) if err != nil { return fmt.Errorf("Error creating bridge creation socket: %s", err) } } defer syscall.Close(s) nameBytePtr, err := syscall.BytePtrFromString(name) if err != nil { return fmt.Errorf("Error converting bridge name %s to byte array: %s", name, err) } if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), siocBRADDBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 { return fmt.Errorf("Error creating bridge: %s", err) } return nil } // Allocate a network interface func Allocate(job *engine.Job) engine.Status { var ( ip *net.IP err error id = job.Args[0] requestedIP = net.ParseIP(job.Getenv("RequestedIP")) ) if requestedIP != nil { ip, err = ipallocator.RequestIP(bridgeNetwork, &requestedIP) } else { ip, err = ipallocator.RequestIP(bridgeNetwork, nil) } if err != nil { job.Error(err) return engine.StatusErr } out := engine.Env{} out.Set("IP", ip.String()) out.Set("Mask", bridgeNetwork.Mask.String()) out.Set("Gateway", bridgeNetwork.IP.String()) out.Set("Bridge", bridgeIface) size, _ := bridgeNetwork.Mask.Size() out.SetInt("IPPrefixLen", size) currentInterfaces[id] = &networkInterface{ IP: *ip, } out.WriteTo(job.Stdout) return engine.StatusOK } // release an interface for a select ip func Release(job *engine.Job) engine.Status { var ( id = job.Args[0] containerInterface = currentInterfaces[id] ip net.IP port int proto string ) if containerInterface == nil { return job.Errorf("No network information to release for %s", id) } for _, nat := range containerInterface.PortMappings { if err := portmapper.Unmap(nat); err != nil { log.Printf("Unable to unmap port %s: %s", nat, err) } // this is host mappings switch a := nat.(type) { case *net.TCPAddr: proto = "tcp" ip = a.IP port = a.Port case *net.UDPAddr: proto = "udp" ip = a.IP port = a.Port } if err := portallocator.ReleasePort(ip, proto, port); err != nil { log.Printf("Unable to release port %s", nat) } } if err := ipallocator.ReleaseIP(bridgeNetwork, &containerInterface.IP); err != nil { log.Printf("Unable to release ip %s\n", err) } return engine.StatusOK } // Allocate an external port and map it to the interface func AllocatePort(job *engine.Job) engine.Status { var ( err error ip = defaultBindingIP id = job.Args[0] hostIP = job.Getenv("HostIP") hostPort = job.GetenvInt("HostPort") containerPort = job.GetenvInt("ContainerPort") proto = job.Getenv("Proto") network = currentInterfaces[id] ) if hostIP != "" { ip = net.ParseIP(hostIP) } // host ip, proto, and host port hostPort, 
err = portallocator.RequestPort(ip, proto, hostPort) if err != nil { job.Error(err) return engine.StatusErr } var ( container net.Addr host net.Addr ) if proto == "tcp" { host = &net.TCPAddr{IP: ip, Port: hostPort} container = &net.TCPAddr{IP: network.IP, Port: containerPort} } else { host = &net.UDPAddr{IP: ip, Port: hostPort} container = &net.UDPAddr{IP: network.IP, Port: containerPort} } if err := portmapper.Map(container, ip, hostPort); err != nil { portallocator.ReleasePort(ip, proto, hostPort) job.Error(err) return engine.StatusErr } network.PortMappings = append(network.PortMappings, host) out := engine.Env{} out.Set("HostIP", ip.String()) out.SetInt("HostPort", hostPort) if _, err := out.WriteTo(job.Stdout); err != nil { job.Error(err) return engine.StatusErr } return engine.StatusOK } func LinkContainers(job *engine.Job) engine.Status { var ( action = job.Args[0] childIP = job.Getenv("ChildIP") parentIP = job.Getenv("ParentIP") ignoreErrors = job.GetenvBool("IgnoreErrors") ports = job.GetenvList("Ports") ) split := func(p string) (string, string) { parts := strings.Split(p, "/") return parts[0], parts[1] } for _, p := range ports { port, proto := split(p) if output, err := iptables.Raw(action, "FORWARD", "-i", bridgeIface, "-o", bridgeIface, "-p", proto, "-s", parentIP, "--dport", port, "-d", childIP, "-j", "ACCEPT"); !ignoreErrors && err != nil { job.Error(err) return engine.StatusErr } else if len(output) != 0 { job.Errorf("Error toggle iptables forward: %s", output) return engine.StatusErr } if output, err := iptables.Raw(action, "FORWARD", "-i", bridgeIface, "-o", bridgeIface, "-p", proto, "-s", childIP, "--sport", port, "-d", parentIP, "-j", "ACCEPT"); !ignoreErrors && err != nil { job.Error(err) return engine.StatusErr } else if len(output) != 0 { job.Errorf("Error toggle iptables forward: %s", output) return engine.StatusErr } } return engine.StatusOK } docker-0.9.1/networkdriver/ipallocator/0000755000175000017500000000000012314376205016321 5ustar tagtagdocker-0.9.1/networkdriver/ipallocator/allocator_test.go0000644000175000017500000001167712314376205021703 0ustar tagtagpackage ipallocator import ( "fmt" "net" "testing" ) func reset() { allocatedIPs = networkSet{} availableIPS = networkSet{} } func TestRequestNewIps(t *testing.T) { defer reset() network := &net.IPNet{ IP: []byte{192, 168, 0, 1}, Mask: []byte{255, 255, 255, 0}, } for i := 2; i < 10; i++ { ip, err := RequestIP(network, nil) if err != nil { t.Fatal(err) } if expected := fmt.Sprintf("192.168.0.%d", i); ip.String() != expected { t.Fatalf("Expected ip %s got %s", expected, ip.String()) } } } func TestReleaseIp(t *testing.T) { defer reset() network := &net.IPNet{ IP: []byte{192, 168, 0, 1}, Mask: []byte{255, 255, 255, 0}, } ip, err := RequestIP(network, nil) if err != nil { t.Fatal(err) } if err := ReleaseIP(network, ip); err != nil { t.Fatal(err) } } func TestGetReleasedIp(t *testing.T) { defer reset() network := &net.IPNet{ IP: []byte{192, 168, 0, 1}, Mask: []byte{255, 255, 255, 0}, } ip, err := RequestIP(network, nil) if err != nil { t.Fatal(err) } value := ip.String() if err := ReleaseIP(network, ip); err != nil { t.Fatal(err) } ip, err = RequestIP(network, nil) if err != nil { t.Fatal(err) } if ip.String() != value { t.Fatalf("Expected to receive same ip %s got %s", value, ip.String()) } } func TestRequesetSpecificIp(t *testing.T) { defer reset() network := &net.IPNet{ IP: []byte{192, 168, 0, 1}, Mask: []byte{255, 255, 255, 0}, } ip := net.ParseIP("192.168.1.5") if _, err := RequestIP(network, &ip); err != 
nil { t.Fatal(err) } } func TestConversion(t *testing.T) { ip := net.ParseIP("127.0.0.1") i := ipToInt(&ip) if i == 0 { t.Fatal("converted to zero") } conv := intToIP(i) if !ip.Equal(*conv) { t.Error(conv.String()) } } func TestIPAllocator(t *testing.T) { expectedIPs := []net.IP{ 0: net.IPv4(127, 0, 0, 2), 1: net.IPv4(127, 0, 0, 3), 2: net.IPv4(127, 0, 0, 4), 3: net.IPv4(127, 0, 0, 5), 4: net.IPv4(127, 0, 0, 6), } gwIP, n, _ := net.ParseCIDR("127.0.0.1/29") network := &net.IPNet{IP: gwIP, Mask: n.Mask} // Pool after initialisation (f = free, u = used) // 2(f) - 3(f) - 4(f) - 5(f) - 6(f) // ↑ // Check that we get 5 IPs, from 127.0.0.2–127.0.0.6, in that // order. for i := 0; i < 5; i++ { ip, err := RequestIP(network, nil) if err != nil { t.Fatal(err) } assertIPEquals(t, &expectedIPs[i], ip) } // Before loop begin // 2(f) - 3(f) - 4(f) - 5(f) - 6(f) // ↑ // After i = 0 // 2(u) - 3(f) - 4(f) - 5(f) - 6(f) // ↑ // After i = 1 // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) // ↑ // After i = 2 // 2(u) - 3(u) - 4(u) - 5(f) - 6(f) // ↑ // After i = 3 // 2(u) - 3(u) - 4(u) - 5(u) - 6(f) // ↑ // After i = 4 // 2(u) - 3(u) - 4(u) - 5(u) - 6(u) // ↑ // Check that there are no more IPs ip, err := RequestIP(network, nil) if err == nil { t.Fatalf("There shouldn't be any IP addresses at this point, got %s\n", ip) } // Release some IPs in non-sequential order if err := ReleaseIP(network, &expectedIPs[3]); err != nil { t.Fatal(err) } // 2(u) - 3(u) - 4(u) - 5(f) - 6(u) // ↑ if err := ReleaseIP(network, &expectedIPs[2]); err != nil { t.Fatal(err) } // 2(u) - 3(u) - 4(f) - 5(f) - 6(u) // ↑ if err := ReleaseIP(network, &expectedIPs[4]); err != nil { t.Fatal(err) } // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) // ↑ // Make sure that IPs are reused in sequential order, starting // with the first released IP newIPs := make([]*net.IP, 3) for i := 0; i < 3; i++ { ip, err := RequestIP(network, nil) if err != nil { t.Fatal(err) } newIPs[i] = ip } // Before loop begin // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) // ↑ // After i = 0 // 2(u) - 3(u) - 4(f) - 5(u) - 6(f) // ↑ // After i = 1 // 2(u) - 3(u) - 4(f) - 5(u) - 6(u) // ↑ // After i = 2 // 2(u) - 3(u) - 4(u) - 5(u) - 6(u) // ↑ // Reordered these because the new set will always return the // lowest ips first and not in the order that they were released assertIPEquals(t, &expectedIPs[2], newIPs[0]) assertIPEquals(t, &expectedIPs[3], newIPs[1]) assertIPEquals(t, &expectedIPs[4], newIPs[2]) _, err = RequestIP(network, nil) if err == nil { t.Fatal("There shouldn't be any IP addresses at this point") } } func TestAllocateFirstIP(t *testing.T) { defer reset() network := &net.IPNet{ IP: []byte{192, 168, 0, 0}, Mask: []byte{255, 255, 255, 0}, } firstIP := network.IP.To4().Mask(network.Mask) first := ipToInt(&firstIP) + 1 ip, err := RequestIP(network, nil) if err != nil { t.Fatal(err) } allocated := ipToInt(ip) if allocated == first { t.Fatalf("allocated ip should not equal first ip: %d == %d", first, allocated) } } func assertIPEquals(t *testing.T, ip1, ip2 *net.IP) { if !ip1.Equal(*ip2) { t.Fatalf("Expected IP %s, got %s", ip1, ip2) } } docker-0.9.1/networkdriver/ipallocator/allocator.go0000644000175000017500000000721212314376205020632 0ustar tagtagpackage ipallocator import ( "encoding/binary" "errors" "github.com/dotcloud/docker/networkdriver" "github.com/dotcloud/docker/pkg/collections" "net" "sync" ) type networkSet map[string]*collections.OrderedIntSet var ( ErrNoAvailableIPs = errors.New("no available ip addresses on network") ErrIPAlreadyAllocated = errors.New("ip already allocated") ) var ( 
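// Package-level allocation state. Both sets are keyed by the network's
// string form and store positions within that network rather than raw
// addresses; lock guards every access. A typical call pattern (sketch,
// mirroring allocator_test.go above):
//
//	ip, err := RequestIP(network, nil) // nil means "next free address"
//	...
//	err = ReleaseIP(network, ip)       // hand the address back to the pool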
lock = sync.Mutex{} allocatedIPs = networkSet{} availableIPS = networkSet{} ) // RequestIP requests an available ip from the given network. It // will return the next available ip if the ip provided is nil. If the // ip provided is not nil it will validate that the provided ip is available // for use or return an error func RequestIP(address *net.IPNet, ip *net.IP) (*net.IP, error) { lock.Lock() defer lock.Unlock() checkAddress(address) if ip == nil { next, err := getNextIp(address) if err != nil { return nil, err } return next, nil } if err := registerIP(address, ip); err != nil { return nil, err } return ip, nil } // ReleaseIP adds the provided ip back into the pool of // available ips to be returned for use. func ReleaseIP(address *net.IPNet, ip *net.IP) error { lock.Lock() defer lock.Unlock() checkAddress(address) var ( existing = allocatedIPs[address.String()] available = availableIPS[address.String()] pos = getPosition(address, ip) ) existing.Remove(int(pos)) available.Push(int(pos)) return nil } // convert the ip into the position in the subnet. Only // position are saved in the set func getPosition(address *net.IPNet, ip *net.IP) int32 { var ( first, _ = networkdriver.NetworkRange(address) base = ipToInt(&first) i = ipToInt(ip) ) return i - base } // return an available ip if one is currently available. If not, // return the next available ip for the nextwork func getNextIp(address *net.IPNet) (*net.IP, error) { var ( ownIP = ipToInt(&address.IP) available = availableIPS[address.String()] allocated = allocatedIPs[address.String()] first, _ = networkdriver.NetworkRange(address) base = ipToInt(&first) size = int(networkdriver.NetworkSize(address.Mask)) max = int32(size - 2) // size -1 for the broadcast address, -1 for the gateway address pos = int32(available.Pop()) ) // We pop and push the position not the ip if pos != 0 { ip := intToIP(int32(base + pos)) allocated.Push(int(pos)) return ip, nil } var ( firstNetIP = address.IP.To4().Mask(address.Mask) firstAsInt = ipToInt(&firstNetIP) + 1 ) pos = int32(allocated.PullBack()) for i := int32(0); i < max; i++ { pos = pos%max + 1 next := int32(base + pos) if next == ownIP || next == firstAsInt { continue } if !allocated.Exists(int(pos)) { ip := intToIP(next) allocated.Push(int(pos)) return ip, nil } } return nil, ErrNoAvailableIPs } func registerIP(address *net.IPNet, ip *net.IP) error { var ( existing = allocatedIPs[address.String()] available = availableIPS[address.String()] pos = getPosition(address, ip) ) if existing.Exists(int(pos)) { return ErrIPAlreadyAllocated } available.Remove(int(pos)) return nil } // Converts a 4 bytes IP into a 32 bit integer func ipToInt(ip *net.IP) int32 { return int32(binary.BigEndian.Uint32(ip.To4())) } // Converts 32 bit integer into a 4 bytes IP address func intToIP(n int32) *net.IP { b := make([]byte, 4) binary.BigEndian.PutUint32(b, uint32(n)) ip := net.IP(b) return &ip } func checkAddress(address *net.IPNet) { key := address.String() if _, exists := allocatedIPs[key]; !exists { allocatedIPs[key] = collections.NewOrderedIntSet() availableIPS[key] = collections.NewOrderedIntSet() } } docker-0.9.1/networkdriver/portmapper/0000755000175000017500000000000012314376205016201 5ustar tagtagdocker-0.9.1/networkdriver/portmapper/mapper.go0000644000175000017500000000556212314376205020024 0ustar tagtagpackage portmapper import ( "errors" "fmt" "github.com/dotcloud/docker/pkg/iptables" "github.com/dotcloud/docker/pkg/proxy" "net" "sync" ) type mapping struct { proto string userlandProxy proxy.Proxy host net.Addr 
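// container is the backend address inside the container; traffic arriving
// on host is forwarded to it via iptables and the userland proxy.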
container net.Addr } var ( chain *iptables.Chain lock sync.Mutex // udp:ip:port currentMappings = make(map[string]*mapping) newProxy = proxy.NewProxy ) var ( ErrUnknownBackendAddressType = errors.New("unknown container address type not supported") ErrPortMappedForIP = errors.New("port is already mapped to ip") ErrPortNotMapped = errors.New("port is not mapped") ) func SetIptablesChain(c *iptables.Chain) { chain = c } func Map(container net.Addr, hostIP net.IP, hostPort int) error { lock.Lock() defer lock.Unlock() var m *mapping switch container.(type) { case *net.TCPAddr: m = &mapping{ proto: "tcp", host: &net.TCPAddr{IP: hostIP, Port: hostPort}, container: container, } case *net.UDPAddr: m = &mapping{ proto: "udp", host: &net.UDPAddr{IP: hostIP, Port: hostPort}, container: container, } default: return ErrUnknownBackendAddressType } key := getKey(m.host) if _, exists := currentMappings[key]; exists { return ErrPortMappedForIP } containerIP, containerPort := getIPAndPort(m.container) if err := forward(iptables.Add, m.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil { return err } p, err := newProxy(m.host, m.container) if err != nil { // need to undo the iptables rules before we reutrn forward(iptables.Delete, m.proto, hostIP, hostPort, containerIP.String(), containerPort) return err } m.userlandProxy = p currentMappings[key] = m go p.Run() return nil } func Unmap(host net.Addr) error { lock.Lock() defer lock.Unlock() key := getKey(host) data, exists := currentMappings[key] if !exists { return ErrPortNotMapped } data.userlandProxy.Close() delete(currentMappings, key) containerIP, containerPort := getIPAndPort(data.container) hostIP, hostPort := getIPAndPort(data.host) if err := forward(iptables.Delete, data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil { return err } return nil } func getKey(a net.Addr) string { switch t := a.(type) { case *net.TCPAddr: return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "tcp") case *net.UDPAddr: return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "udp") } return "" } func getIPAndPort(a net.Addr) (net.IP, int) { switch t := a.(type) { case *net.TCPAddr: return t.IP, t.Port case *net.UDPAddr: return t.IP, t.Port } return nil, 0 } func forward(action iptables.Action, proto string, sourceIP net.IP, sourcePort int, containerIP string, containerPort int) error { if chain == nil { return nil } return chain.Forward(action, sourceIP, sourcePort, proto, containerIP, containerPort) } docker-0.9.1/networkdriver/portmapper/mapper_test.go0000644000175000017500000000451512314376205021060 0ustar tagtagpackage portmapper import ( "github.com/dotcloud/docker/pkg/iptables" "github.com/dotcloud/docker/pkg/proxy" "net" "testing" ) func init() { // override this func to mock out the proxy server newProxy = proxy.NewStubProxy } func reset() { chain = nil currentMappings = make(map[string]*mapping) } func TestSetIptablesChain(t *testing.T) { defer reset() c := &iptables.Chain{ Name: "TEST", Bridge: "192.168.1.1", } if chain != nil { t.Fatal("chain should be nil at init") } SetIptablesChain(c) if chain == nil { t.Fatal("chain should not be nil after set") } } func TestMapPorts(t *testing.T) { dstIp1 := net.ParseIP("192.168.0.1") dstIp2 := net.ParseIP("192.168.0.2") dstAddr1 := &net.TCPAddr{IP: dstIp1, Port: 80} dstAddr2 := &net.TCPAddr{IP: dstIp2, Port: 80} srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")} srcAddr2 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.2")} if err := Map(srcAddr1, dstIp1, 
80); err != nil { t.Fatalf("Failed to allocate port: %s", err) } if Map(srcAddr1, dstIp1, 80) == nil { t.Fatalf("Port is in use - mapping should have failed") } if Map(srcAddr2, dstIp1, 80) == nil { t.Fatalf("Port is in use - mapping should have failed") } if err := Map(srcAddr2, dstIp2, 80); err != nil { t.Fatalf("Failed to allocate port: %s", err) } if Unmap(dstAddr1) != nil { t.Fatalf("Failed to release port") } if Unmap(dstAddr2) != nil { t.Fatalf("Failed to release port") } if Unmap(dstAddr2) == nil { t.Fatalf("Port already released, but no error reported") } } func TestGetUDPKey(t *testing.T) { addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53} key := getKey(addr) if expected := "192.168.1.5:53/udp"; key != expected { t.Fatalf("expected key %s got %s", expected, key) } } func TestGetTCPKey(t *testing.T) { addr := &net.TCPAddr{IP: net.ParseIP("192.168.1.5"), Port: 80} key := getKey(addr) if expected := "192.168.1.5:80/tcp"; key != expected { t.Fatalf("expected key %s got %s", expected, key) } } func TestGetUDPIPAndPort(t *testing.T) { addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53} ip, port := getIPAndPort(addr) if expected := "192.168.1.5"; ip.String() != expected { t.Fatalf("expected ip %s got %s", expected, ip) } if ep := 53; port != ep { t.Fatalf("expected port %d got %d", ep, port) } } docker-0.9.1/version.go0000644000175000017500000000155112314376205013121 0ustar tagtagpackage docker import ( "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/utils" "runtime" ) func GetVersion(job *engine.Job) engine.Status { if _, err := dockerVersion().WriteTo(job.Stdout); err != nil { job.Errorf("%s", err) return engine.StatusErr } return engine.StatusOK } // dockerVersion returns detailed version information in the form of a queriable // environment. func dockerVersion() *engine.Env { v := &engine.Env{} v.Set("Version", dockerversion.VERSION) v.Set("GitCommit", dockerversion.GITCOMMIT) v.Set("GoVersion", runtime.Version()) v.Set("Os", runtime.GOOS) v.Set("Arch", runtime.GOARCH) // FIXME:utils.GetKernelVersion should only be needed here if kernelVersion, err := utils.GetKernelVersion(); err == nil { v.Set("KernelVersion", kernelVersion.String()) } return v } docker-0.9.1/buildfile.go0000644000175000017500000005167412314376205013406 0ustar tagtagpackage docker import ( "crypto/sha256" "encoding/hex" "encoding/json" "errors" "fmt" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/auth" "github.com/dotcloud/docker/registry" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "io" "io/ioutil" "net/url" "os" "path" "path/filepath" "reflect" "regexp" "sort" "strings" ) var ( ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty") ) type BuildFile interface { Build(io.Reader) (string, error) CmdFrom(string) error CmdRun(string) error } type buildFile struct { runtime *Runtime srv *Server image string maintainer string config *runconfig.Config contextPath string context *utils.TarSum verbose bool utilizeCache bool rm bool authConfig *auth.AuthConfig configFile *auth.ConfigFile tmpContainers map[string]struct{} tmpImages map[string]struct{} outStream io.Writer errStream io.Writer // Deprecated, original writer used for ImagePull. To be removed. 
outOld io.Writer sf *utils.StreamFormatter } func (b *buildFile) clearTmp(containers map[string]struct{}) { for c := range containers { tmp := b.runtime.Get(c) if err := b.runtime.Destroy(tmp); err != nil { fmt.Fprintf(b.outStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error()) } else { fmt.Fprintf(b.outStream, "Removing intermediate container %s\n", utils.TruncateID(c)) } } } func (b *buildFile) CmdFrom(name string) error { image, err := b.runtime.repositories.LookupImage(name) if err != nil { if b.runtime.graph.IsNotExist(err) { remote, tag := utils.ParseRepositoryTag(name) pullRegistryAuth := b.authConfig if len(b.configFile.Configs) > 0 { // The request came with a full auth config file, we prefer to use that endpoint, _, err := registry.ResolveRepositoryName(remote) if err != nil { return err } resolvedAuth := b.configFile.ResolveAuthConfig(endpoint) pullRegistryAuth = &resolvedAuth } job := b.srv.Eng.Job("pull", remote, tag) job.SetenvBool("json", b.sf.Json()) job.SetenvBool("parallel", true) job.SetenvJson("authConfig", pullRegistryAuth) job.Stdout.Add(b.outOld) if err := job.Run(); err != nil { return err } image, err = b.runtime.repositories.LookupImage(name) if err != nil { return err } } else { return err } } b.image = image.ID b.config = &runconfig.Config{} if image.Config != nil { b.config = image.Config } if b.config.Env == nil || len(b.config.Env) == 0 { b.config.Env = append(b.config.Env, "HOME=/", "PATH="+defaultPathEnv) } // Process ONBUILD triggers if they exist if nTriggers := len(b.config.OnBuild); nTriggers != 0 { fmt.Fprintf(b.errStream, "# Executing %d build triggers\n", nTriggers) } for n, step := range b.config.OnBuild { splitStep := strings.Split(step, " ") stepInstruction := strings.ToUpper(strings.Trim(splitStep[0], " ")) switch stepInstruction { case "ONBUILD": return fmt.Errorf("Source image contains forbidden chained `ONBUILD ONBUILD` trigger: %s", step) case "MAINTAINER", "FROM": return fmt.Errorf("Source image contains forbidden %s trigger: %s", stepInstruction, step) } if err := b.BuildStep(fmt.Sprintf("onbuild-%d", n), step); err != nil { return err } } b.config.OnBuild = []string{} return nil } // The ONBUILD command declares a build instruction to be executed in any future build // using the current image as a base. func (b *buildFile) CmdOnbuild(trigger string) error { splitTrigger := strings.Split(trigger, " ") triggerInstruction := strings.ToUpper(strings.Trim(splitTrigger[0], " ")) switch triggerInstruction { case "ONBUILD": return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") case "MAINTAINER", "FROM": return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) } b.config.OnBuild = append(b.config.OnBuild, trigger) return b.commit("", b.config.Cmd, fmt.Sprintf("ONBUILD %s", trigger)) } func (b *buildFile) CmdMaintainer(name string) error { b.maintainer = name return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", name)) } // probeCache checks to see if image-caching is enabled (`b.utilizeCache`) // and if so attempts to look up the current `b.image` and `b.config` pair // in the current server `b.srv`. If an image is found, probeCache returns // `(true, nil)`. If no image is found, it returns `(false, nil)`. If there // is any error, it returns `(false, err)`. 
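// A typical call site mirrors CmdRun below:
//
//	if hit, err := b.probeCache(); err != nil {
//		return err
//	} else if hit {
//		return nil // cached image reused, nothing to rebuild
//	}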
func (b *buildFile) probeCache() (bool, error) { if b.utilizeCache { if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil { return false, err } else if cache != nil { fmt.Fprintf(b.outStream, " ---> Using cache\n") utils.Debugf("[BUILDER] Use cached version") b.image = cache.ID return true, nil } else { utils.Debugf("[BUILDER] Cache miss") } } return false, nil } func (b *buildFile) CmdRun(args string) error { if b.image == "" { return fmt.Errorf("Please provide a source image with `from` prior to run") } config, _, _, err := runconfig.Parse(append([]string{b.image}, b.buildCmdFromJson(args)...), nil) if err != nil { return err } cmd := b.config.Cmd b.config.Cmd = nil runconfig.Merge(b.config, config) defer func(cmd []string) { b.config.Cmd = cmd }(cmd) utils.Debugf("Command to be executed: %v", b.config.Cmd) hit, err := b.probeCache() if err != nil { return err } if hit { return nil } c, err := b.create() if err != nil { return err } // Ensure that we keep the container mounted until the commit // to avoid unmounting and then mounting directly again c.Mount() defer c.Unmount() err = b.run(c) if err != nil { return err } if err := b.commit(c.ID, cmd, "run"); err != nil { return err } return nil } func (b *buildFile) FindEnvKey(key string) int { for k, envVar := range b.config.Env { envParts := strings.SplitN(envVar, "=", 2) if key == envParts[0] { return k } } return -1 } func (b *buildFile) ReplaceEnvMatches(value string) (string, error) { exp, err := regexp.Compile("(\\\\\\\\+|[^\\\\]|\\b|\\A)\\$({?)([[:alnum:]_]+)(}?)") if err != nil { return value, err } matches := exp.FindAllString(value, -1) for _, match := range matches { match = match[strings.Index(match, "$"):] matchKey := strings.Trim(match, "${}") for _, envVar := range b.config.Env { envParts := strings.SplitN(envVar, "=", 2) envKey := envParts[0] envValue := envParts[1] if envKey == matchKey { value = strings.Replace(value, match, envValue, -1) break } } } return value, nil } func (b *buildFile) CmdEnv(args string) error { tmp := strings.SplitN(args, " ", 2) if len(tmp) != 2 { return fmt.Errorf("Invalid ENV format") } key := strings.Trim(tmp[0], " \t") value := strings.Trim(tmp[1], " \t") envKey := b.FindEnvKey(key) replacedValue, err := b.ReplaceEnvMatches(value) if err != nil { return err } replacedVar := fmt.Sprintf("%s=%s", key, replacedValue) if envKey >= 0 { b.config.Env[envKey] = replacedVar } else { b.config.Env = append(b.config.Env, replacedVar) } return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s", replacedVar)) } func (b *buildFile) buildCmdFromJson(args string) []string { var cmd []string if err := json.Unmarshal([]byte(args), &cmd); err != nil { utils.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err) cmd = []string{"/bin/sh", "-c", args} } return cmd } func (b *buildFile) CmdCmd(args string) error { cmd := b.buildCmdFromJson(args) b.config.Cmd = cmd if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil { return err } return nil } func (b *buildFile) CmdEntrypoint(args string) error { entrypoint := b.buildCmdFromJson(args) b.config.Entrypoint = entrypoint if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil { return err } return nil } func (b *buildFile) CmdExpose(args string) error { ports := strings.Split(args, " ") b.config.PortSpecs = append(ports, b.config.PortSpecs...) 
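// Note the ordering: ports from this EXPOSE line are prepended to any
// port specs inherited from the base image.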
return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports)) } func (b *buildFile) CmdUser(args string) error { b.config.User = args return b.commit("", b.config.Cmd, fmt.Sprintf("USER %v", args)) } func (b *buildFile) CmdInsert(args string) error { return fmt.Errorf("INSERT has been deprecated. Please use ADD instead") } func (b *buildFile) CmdCopy(args string) error { return fmt.Errorf("COPY has been deprecated. Please use ADD instead") } func (b *buildFile) CmdWorkdir(workdir string) error { b.config.WorkingDir = workdir return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir)) } func (b *buildFile) CmdVolume(args string) error { if args == "" { return fmt.Errorf("Volume cannot be empty") } var volume []string if err := json.Unmarshal([]byte(args), &volume); err != nil { volume = []string{args} } if b.config.Volumes == nil { b.config.Volumes = map[string]struct{}{} } for _, v := range volume { b.config.Volumes[v] = struct{}{} } if err := b.commit("", b.config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil { return err } return nil } func (b *buildFile) checkPathForAddition(orig string) error { origPath := path.Join(b.contextPath, orig) if p, err := filepath.EvalSymlinks(origPath); err != nil { if os.IsNotExist(err) { return fmt.Errorf("%s: no such file or directory", orig) } return err } else { origPath = p } if !strings.HasPrefix(origPath, b.contextPath) { return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath) } _, err := os.Stat(origPath) if err != nil { if os.IsNotExist(err) { return fmt.Errorf("%s: no such file or directory", orig) } return err } return nil } func (b *buildFile) addContext(container *Container, orig, dest string, remote bool) error { var ( origPath = path.Join(b.contextPath, orig) destPath = path.Join(container.BasefsPath(), dest) ) // Preserve the trailing '/' if strings.HasSuffix(dest, "/") { destPath = destPath + "/" } fi, err := os.Stat(origPath) if err != nil { if os.IsNotExist(err) { return fmt.Errorf("%s: no such file or directory", orig) } return err } if fi.IsDir() { if err := archive.CopyWithTar(origPath, destPath); err != nil { return err } return nil } // First try to unpack the source as an archive // to support the untar feature we need to clean up the path a little bit // because tar is very forgiving. First we need to strip off the archive's // filename from the path but this is only added if it does not end in / . tarDest := destPath if strings.HasSuffix(tarDest, "/") { tarDest = filepath.Dir(destPath) } // If we are adding a remote file, do not try to untar it if !remote { // try to successfully untar the orig if err := archive.UntarPath(origPath, tarDest); err == nil { return nil } utils.Debugf("Couldn't untar %s to %s: %s", origPath, destPath, err) } // If that fails, just copy it as a regular file // but do not use all the magic path handling for the tar path if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil { return err } if err := archive.CopyWithTar(origPath, destPath); err != nil { return err } return nil } func (b *buildFile) CmdAdd(args string) error { if b.context == nil { return fmt.Errorf("No context given. 
Impossible to use ADD") } tmp := strings.SplitN(args, " ", 2) if len(tmp) != 2 { return fmt.Errorf("Invalid ADD format") } orig, err := b.ReplaceEnvMatches(strings.Trim(tmp[0], " \t")) if err != nil { return err } dest, err := b.ReplaceEnvMatches(strings.Trim(tmp[1], " \t")) if err != nil { return err } cmd := b.config.Cmd b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", orig, dest)} b.config.Image = b.image var ( origPath = orig destPath = dest remoteHash string isRemote bool ) if utils.IsURL(orig) { isRemote = true resp, err := utils.Download(orig) if err != nil { return err } tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote") if err != nil { return err } tmpFileName := path.Join(tmpDirName, "tmp") tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) if err != nil { return err } defer os.RemoveAll(tmpDirName) if _, err = io.Copy(tmpFile, resp.Body); err != nil { tmpFile.Close() return err } origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName)) tmpFile.Close() // Process the checksum r, err := archive.Tar(tmpFileName, archive.Uncompressed) if err != nil { return err } tarSum := utils.TarSum{Reader: r, DisableCompression: true} remoteHash = tarSum.Sum(nil) r.Close() // If the destination is a directory, figure out the filename. if strings.HasSuffix(dest, "/") { u, err := url.Parse(orig) if err != nil { return err } path := u.Path if strings.HasSuffix(path, "/") { path = path[:len(path)-1] } parts := strings.Split(path, "/") filename := parts[len(parts)-1] if filename == "" { return fmt.Errorf("cannot determine filename from url: %s", u) } destPath = dest + filename } } if err := b.checkPathForAddition(origPath); err != nil { return err } // Hash path and check the cache if b.utilizeCache { var ( hash string sums = b.context.GetSums() ) if remoteHash != "" { hash = remoteHash } else if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil { return err } else if fi.IsDir() { var subfiles []string for file, sum := range sums { absFile := path.Join(b.contextPath, file) absOrigPath := path.Join(b.contextPath, origPath) if strings.HasPrefix(absFile, absOrigPath) { subfiles = append(subfiles, sum) } } sort.Strings(subfiles) hasher := sha256.New() hasher.Write([]byte(strings.Join(subfiles, ","))) hash = "dir:" + hex.EncodeToString(hasher.Sum(nil)) } else { if origPath[0] == '/' && len(origPath) > 1 { origPath = origPath[1:] } origPath = strings.TrimPrefix(origPath, "./") if h, ok := sums[origPath]; ok { hash = "file:" + h } } b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", hash, dest)} hit, err := b.probeCache() if err != nil { return err } // If we do not have a hash, never use the cache if hit && hash != "" { return nil } } // Create the container and start it container, _, err := b.runtime.Create(b.config, "") if err != nil { return err } b.tmpContainers[container.ID] = struct{}{} if err := container.Mount(); err != nil { return err } defer container.Unmount() if err := b.addContext(container, origPath, destPath, isRemote); err != nil { return err } if err := b.commit(container.ID, cmd, fmt.Sprintf("ADD %s in %s", orig, dest)); err != nil { return err } b.config.Cmd = cmd return nil } type StdoutFormater struct { io.Writer *utils.StreamFormatter } func (sf *StdoutFormater) Write(buf []byte) (int, error) { formattedBuf := sf.StreamFormatter.FormatStream(string(buf)) n, err := sf.Writer.Write(formattedBuf) if n != len(formattedBuf) { return n, io.ErrShortWrite } 
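// Note on the return value below: the stream formatter may expand the chunk (for
// example when wrapping it as a JSON message), so n counts formatted bytes rather
// than input bytes. Reporting len(buf) keeps the io.Writer contract from the
// caller's point of view: every byte the caller handed in was consumed.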
return len(buf), err } type StderrFormater struct { io.Writer *utils.StreamFormatter } func (sf *StderrFormater) Write(buf []byte) (int, error) { formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m") n, err := sf.Writer.Write(formattedBuf) if n != len(formattedBuf) { return n, io.ErrShortWrite } return len(buf), err } func (b *buildFile) create() (*Container, error) { if b.image == "" { return nil, fmt.Errorf("Please provide a source image with `from` prior to run") } b.config.Image = b.image // Create the container and start it c, _, err := b.runtime.Create(b.config, "") if err != nil { return nil, err } b.tmpContainers[c.ID] = struct{}{} fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(c.ID)) // override the entry point that may have been picked up from the base image c.Path = b.config.Cmd[0] c.Args = b.config.Cmd[1:] return c, nil } func (b *buildFile) run(c *Container) error { var errCh chan error if b.verbose { errCh = utils.Go(func() error { return <-c.Attach(nil, nil, b.outStream, b.errStream) }) } //start the container if err := c.Start(); err != nil { return err } if errCh != nil { if err := <-errCh; err != nil { return err } } // Wait for it to finish if ret := c.Wait(); ret != 0 { err := &utils.JSONError{ Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret), Code: ret, } return err } return nil } // Commit the container with the autorun command func (b *buildFile) commit(id string, autoCmd []string, comment string) error { if b.image == "" { return fmt.Errorf("Please provide a source image with `from` prior to commit") } b.config.Image = b.image if id == "" { cmd := b.config.Cmd b.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment} defer func(cmd []string) { b.config.Cmd = cmd }(cmd) hit, err := b.probeCache() if err != nil { return err } if hit { return nil } container, warnings, err := b.runtime.Create(b.config, "") if err != nil { return err } for _, warning := range warnings { fmt.Fprintf(b.outStream, " ---> [Warning] %s\n", warning) } b.tmpContainers[container.ID] = struct{}{} fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID)) id = container.ID if err := container.Mount(); err != nil { return err } defer container.Unmount() } container := b.runtime.Get(id) if container == nil { return fmt.Errorf("An error occured while creating the container") } // Note: Actually copy the struct autoConfig := *b.config autoConfig.Cmd = autoCmd // Commit the container image, err := b.runtime.Commit(container, "", "", "", b.maintainer, &autoConfig) if err != nil { return err } b.tmpImages[image.ID] = struct{}{} b.image = image.ID return nil } // Long lines can be split with a backslash var lineContinuation = regexp.MustCompile(`\s*\\\s*\n`) func (b *buildFile) Build(context io.Reader) (string, error) { tmpdirPath, err := ioutil.TempDir("", "docker-build") if err != nil { return "", err } decompressedStream, err := archive.DecompressStream(context) if err != nil { return "", err } b.context = &utils.TarSum{Reader: decompressedStream, DisableCompression: true} if err := archive.Untar(b.context, tmpdirPath, nil); err != nil { return "", err } defer os.RemoveAll(tmpdirPath) b.contextPath = tmpdirPath filename := path.Join(tmpdirPath, "Dockerfile") if _, err := os.Stat(filename); os.IsNotExist(err) { return "", fmt.Errorf("Can't build a directory with no Dockerfile") } fileBytes, err := ioutil.ReadFile(filename) if err != nil { return "", err } if len(fileBytes) == 0 { return 
"", ErrDockerfileEmpty } dockerfile := string(fileBytes) dockerfile = lineContinuation.ReplaceAllString(dockerfile, "") stepN := 0 for _, line := range strings.Split(dockerfile, "\n") { line = strings.Trim(strings.Replace(line, "\t", " ", -1), " \t\r\n") // Skip comments and empty line if len(line) == 0 || line[0] == '#' { continue } if err := b.BuildStep(fmt.Sprintf("%d", stepN), line); err != nil { return "", err } stepN += 1 } if b.image != "" { fmt.Fprintf(b.outStream, "Successfully built %s\n", utils.TruncateID(b.image)) if b.rm { b.clearTmp(b.tmpContainers) } return b.image, nil } return "", fmt.Errorf("No image was generated. This may be because the Dockerfile does not, like, do anything.\n") } // BuildStep parses a single build step from `instruction` and executes it in the current context. func (b *buildFile) BuildStep(name, expression string) error { fmt.Fprintf(b.outStream, "Step %s : %s\n", name, expression) tmp := strings.SplitN(expression, " ", 2) if len(tmp) != 2 { return fmt.Errorf("Invalid Dockerfile format") } instruction := strings.ToLower(strings.Trim(tmp[0], " ")) arguments := strings.Trim(tmp[1], " ") method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:])) if !exists { fmt.Fprintf(b.errStream, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction)) return nil } ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface() if ret != nil { return ret.(error) } fmt.Fprintf(b.outStream, " ---> %s\n", utils.TruncateID(b.image)) return nil } func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *auth.AuthConfig, authConfigFile *auth.ConfigFile) BuildFile { return &buildFile{ runtime: srv.runtime, srv: srv, config: &runconfig.Config{}, outStream: outStream, errStream: errStream, tmpContainers: make(map[string]struct{}), tmpImages: make(map[string]struct{}), verbose: verbose, utilizeCache: utilizeCache, rm: rm, sf: sf, authConfig: auth, configFile: authConfigFile, outOld: outOld, } } docker-0.9.1/AUTHORS0000644000175000017500000003117312314376205012160 0ustar tagtag# This file lists all individuals having contributed content to the repository. # If you're submitting a patch, please add your name here in alphabetical order as part of the patch. # # For a list of active project maintainers, see the MAINTAINERS file. # Aanand Prasad Aaron Feng Abel Muiño Alexander Larsson Alexey Shamrin Alex Gaynor Alexis THOMAS Al Tobey Andrea Luzzardi Andreas Savvides Andreas Tiefenthaler Andrew Duckworth Andrew Macgregor Andrew Munsell Andrews Medina Andy Chambers andy diller Andy Rothfusz Andy Smith Anthony Bishopric Anton Nikitin Antony Messerli apocas Asbjørn Enge Barry Allard Bartłomiej Piotrowski Benoit Chesneau Ben Sargent Ben Toews Ben Wiklund Bhiraj Butala Bouke Haarsma Brandon Liu Brandon Philips Brian Dorsey Brian Goff Brian McCallister Brian Olsen Brian Shumate Briehan Lombaard Bruno Bigras Caleb Spare Calen Pennington Carl X. Su Charles Hooper Charles Lindsay Chia-liang Kao Chris St. 
Pierre Christopher Currie Christopher Rigor Christophe Troestler Clayton Coleman Colin Dunklau Colin Rice Cory Forsyth cressie176 Dan Buch Dan Hirsch Daniel Exner Daniel Garcia Daniel Gasienica Daniel Mizyrycki Daniel Norberg Daniel Nordberg Daniel Robinson Daniel Von Fange Daniel YC Lin Danny Yates Darren Coxall David Anderson David Calavera David Mcanulty David Sissitka Deni Bertovic Dinesh Subhraveti dkumor Dmitry Demeshchuk Dominik Honnef Don Spaulding Dražen Lučanin Dr Nic Williams Dustin Sallings Edmund Wagner Elias Probst Emil Hernvall Emily Rose Eric Hanchrow Eric Lee Eric Myhre Erno Hopearuoho eugenkrizo Evan Krall Evan Phoenix Evan Wies Eystein Måløy Stenberg ezbercih Fabio Falci Fabio Rehm Fabrizio Regini Faiz Khan Fareed Dudhia Fernando Flavio Castelli Francisco Souza Frank Macreery Frederick F. Kautz IV Frederik Loeffert Freek Kalter Gabe Rosenhouse Gabriel Monroy Galen Sampson Gareth Rushgrove Gereon Frey Gert van Valkenhoef Graydon Hoare Greg Thornton grunny Guillaume J. Charmes Gurjeet Singh Guruprasad Harley Laue Hector Castro Hunter Blanks inglesp Isaac Dupree Isao Jonas Jake Moshenko James Allen James Carr James Mills James Turnbull jaseg Jason McVetta Jean-Baptiste Barth Jean-Baptiste Dalido Jeff Lindsay Jeremy Grosser Jérôme Petazzoni Jesse Dubay Jim Alateras Jimmy Cuadra Joe Beda Joe Van Dyk Joffrey F Johan Euphrosine Johannes 'fish' Ziemke Johan Rydberg John Costa John Feminella John Gardiner Myers John Warwick Jonas Pfenniger Jonathan Mueller Jonathan Rudenberg Jon Wedaman Joost Cassee Jordan Arentsen Jordan Sissel Joseph Anthony Pasquale Holsten Joseph Hager Josh Hawn Josh Poimboeuf JP Julien Barbier Julien Dubois Justin Force Justin Plock Karan Lyons Karl Grzeszczak Kawsar Saiyeed Keli Hu Ken Cochrane Kevin Clark Kevin J. Lynagh Keyvan Fatehi kim0 Kim BKC Carlbacker Kimbro Staken Kiran Gangadharan Konstantin Pelykh Kyle Conroy Laurie Voss Liang-Chi Hsieh Lokesh Mandvekar Louis Opter lukaspustina Mahesh Tiyyagura Manuel Meurer Manuel Woelker Marc Kuo Marco Hennings Marcus Farkas Marcus Ramberg Marek Goldmann Mark Allen Mark McGranaghan Marko Mikulicic Markus Fix Martijn van Oosterhout Martin Redmond Mathieu Le Marec - Pasquet Matt Apperson Matt Bachmann Matt Haggard Matthew Mueller mattymo Maxime Petazzoni Maxim Treskin meejah Michael Crosby Michael Gorsuch Michael Stapelberg Miguel Angel Fernández Mike Gaffney Mike Naberezny Mikhail Sobolev Mohit Soni Morten Siebuhr Nan Monnand Deng Nate Jones Nathan Kleyn Nelson Chen Niall O'Higgins Nick Payne Nick Stenning Nick Stinemates Nicolas Dudebout Nicolas Kaiser Nolan Darilek odk- Oguz Bilgic Ole Reifschneider O.S.Tezer pandrew Pascal Borreli pattichen Paul Bowsher Paul Hammond Paul Lietar Paul Morie Paul Nasrat Paul Peter Braden Peter Waller Phil Spitler Piergiuliano Bossi Pierre-Alain RIVIERE Piotr Bogdan pysqz Quentin Brossard Rafal Jeczalik Ramkumar Ramachandra Ramon van Alteren Renato Riccieri Santos Zannon rgstephens Rhys Hiltner Richo Healey Rick Bradley Robert Obryk Roberto G. Hashioka Roberto Hashioka Rodrigo Vaz Roel Van Nyen Roger Peppe Ryan Fowler Ryan O'Donnell Ryan Seto Sam Alba Sam J Sharpe Samuel Andaya Scott Bessler Sean Cronin Sean P. 
Kane Shawn Landden Shawn Siefkas Shih-Yuan Lee shin- Silas Sewell Simon Taranto Sjoerd Langkemper Solomon Hykes Song Gao Sridatta Thatipamala Sridhar Ratnakumar Steeve Morin Stefan Praszalowicz sudosurootdev Sven Dowideit Sylvain Bellemare tang0th Tatsuki Sugiura Tehmasp Chaudhri Thatcher Peskens Thermionix Thijs Terlouw Thomas Bikeev Thomas Frössman Thomas Hansen Thomas LEVEIL Tianon Gravi Tim Bosse Tim Terhorst Tobias Bieniek Tobias Schmidt Tobias Schwab Todd Lunter Tom Hulihan Tommaso Visconti Travis Cline Tyler Brock Tzu-Jung Lee Ulysse Carion unclejack vgeta Victor Coisne Victor Lyuboslavsky Victor Vieux Vincent Batts Vincent Bernat Vincent Woo Vinod Kulkarni Vitor Monteiro Vivek Agarwal Vladimir Kirillov Vladimir Rutsky Walter Stanish WarheadsSE Wes Morgan Will Dietz William Delanoue Will Rouesnel Will Weaver Xiuming Chen Yang Bai Yurii Rashkovskii Zain Memon Zaiste! Zilin Du zimbatm docker-0.9.1/server.go0000644000175000017500000020444412314376205012750 0ustar tagtagpackage docker import ( "encoding/json" "fmt" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/auth" "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/pkg/graphdb" "github.com/dotcloud/docker/pkg/signal" "github.com/dotcloud/docker/registry" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "io" "io/ioutil" "log" "net/http" "net/url" "os" "os/exec" gosignal "os/signal" "path" "path/filepath" "runtime" "strconv" "strings" "sync" "syscall" "time" ) // jobInitApi runs the remote api server `srv` as a daemon, // Only one api server can run at the same time - this is enforced by a pidfile. // The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup. func InitServer(job *engine.Job) engine.Status { job.Logf("Creating server") srv, err := NewServer(job.Eng, DaemonConfigFromJob(job)) if err != nil { return job.Error(err) } if srv.runtime.config.Pidfile != "" { job.Logf("Creating pidfile") if err := utils.CreatePidFile(srv.runtime.config.Pidfile); err != nil { // FIXME: do we need fatal here instead of returning a job error? 
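// As written, failing to create the pidfile (typically because another daemon
// instance already holds it) terminates the process immediately instead of
// surfacing a job error, which is what the FIXME above is questioning.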
log.Fatal(err) } } job.Logf("Setting up signal traps") c := make(chan os.Signal, 1) gosignal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT) go func() { sig := <-c log.Printf("Received signal '%v', exiting\n", sig) utils.RemovePidFile(srv.runtime.config.Pidfile) srv.Close() os.Exit(0) }() job.Eng.Hack_SetGlobalVar("httpapi.server", srv) job.Eng.Hack_SetGlobalVar("httpapi.runtime", srv.runtime) for name, handler := range map[string]engine.Handler{ "export": srv.ContainerExport, "create": srv.ContainerCreate, "stop": srv.ContainerStop, "restart": srv.ContainerRestart, "start": srv.ContainerStart, "kill": srv.ContainerKill, "wait": srv.ContainerWait, "tag": srv.ImageTag, "resize": srv.ContainerResize, "commit": srv.ContainerCommit, "info": srv.DockerInfo, "container_delete": srv.ContainerDestroy, "image_export": srv.ImageExport, "images": srv.Images, "history": srv.ImageHistory, "viz": srv.ImagesViz, "container_copy": srv.ContainerCopy, "insert": srv.ImageInsert, "attach": srv.ContainerAttach, "search": srv.ImagesSearch, "changes": srv.ContainerChanges, "top": srv.ContainerTop, "load": srv.ImageLoad, "build": srv.Build, "pull": srv.ImagePull, "import": srv.ImageImport, "image_delete": srv.ImageDelete, "inspect": srv.JobInspect, "events": srv.Events, "push": srv.ImagePush, "containers": srv.Containers, "auth": srv.Auth, } { if err := job.Eng.Register(name, handler); err != nil { return job.Error(err) } } return engine.StatusOK } // simpleVersionInfo is a simple implementation of // the interface VersionInfo, which is used // to provide version information for some product, // component, etc. It stores the product name and the version // in string and returns them on calls to Name() and Version(). type simpleVersionInfo struct { name string version string } func (v *simpleVersionInfo) Name() string { return v.name } func (v *simpleVersionInfo) Version() string { return v.version } // ContainerKill send signal to the container // If no signal is given (sig 0), then Kill with SIGKILL and wait // for the container to exit. // If a signal is given, then just send it to the container and return. func (srv *Server) ContainerKill(job *engine.Job) engine.Status { if n := len(job.Args); n < 1 || n > 2 { return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name) } var ( name = job.Args[0] sig uint64 err error ) // If we have a signal, look at it. 
Otherwise, do nothing if len(job.Args) == 2 && job.Args[1] != "" { // Check if we passed the singal as a number: // The largest legal signal is 31, so let's parse on 5 bits sig, err = strconv.ParseUint(job.Args[1], 10, 5) if err != nil { // The signal is not a number, treat it as a string sig = uint64(signal.SignalMap[job.Args[1]]) if sig == 0 { return job.Errorf("Invalid signal: %s", job.Args[1]) } } } if container := srv.runtime.Get(name); container != nil { // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { if err := container.Kill(); err != nil { return job.Errorf("Cannot kill container %s: %s", name, err) } srv.LogEvent("kill", container.ID, srv.runtime.repositories.ImageName(container.Image)) } else { // Otherwise, just send the requested signal if err := container.kill(int(sig)); err != nil { return job.Errorf("Cannot kill container %s: %s", name, err) } // FIXME: Add event for signals } } else { return job.Errorf("No such container: %s", name) } return engine.StatusOK } func (srv *Server) Auth(job *engine.Job) engine.Status { var ( err error authConfig = &auth.AuthConfig{} ) job.GetenvJson("authConfig", authConfig) // TODO: this is only done here because auth and registry need to be merged into one pkg if addr := authConfig.ServerAddress; addr != "" && addr != auth.IndexServerAddress() { addr, err = registry.ExpandAndVerifyRegistryUrl(addr) if err != nil { return job.Error(err) } authConfig.ServerAddress = addr } status, err := auth.Login(authConfig, srv.HTTPRequestFactory(nil)) if err != nil { return job.Error(err) } job.Printf("%s\n", status) return engine.StatusOK } func (srv *Server) Events(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("Usage: %s FROM", job.Name) } var ( from = job.Args[0] since = job.GetenvInt64("since") ) sendEvent := func(event *utils.JSONMessage) error { b, err := json.Marshal(event) if err != nil { return fmt.Errorf("JSON error") } _, err = job.Stdout.Write(b) if err != nil { // On error, evict the listener utils.Errorf("%s", err) srv.Lock() delete(srv.listeners, from) srv.Unlock() return err } return nil } listener := make(chan utils.JSONMessage) srv.Lock() srv.listeners[from] = listener srv.Unlock() job.Stdout.Write(nil) // flush if since != 0 { // If since, send previous events that happened after the timestamp for _, event := range srv.GetEvents() { if event.Time >= since { err := sendEvent(&event) if err != nil && err.Error() == "JSON error" { continue } if err != nil { job.Error(err) return engine.StatusErr } } } } for event := range listener { err := sendEvent(&event) if err != nil && err.Error() == "JSON error" { continue } if err != nil { return job.Error(err) } } return engine.StatusOK } func (srv *Server) ContainerExport(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("Usage: %s container_id", job.Name) } name := job.Args[0] if container := srv.runtime.Get(name); container != nil { data, err := container.Export() if err != nil { return job.Errorf("%s: %s", name, err) } defer data.Close() // Stream the entire contents of the container (basically a volatile snapshot) if _, err := io.Copy(job.Stdout, data); err != nil { return job.Errorf("%s: %s", name, err) } // FIXME: factor job-specific LogEvent to engine.Job.Run() srv.LogEvent("export", container.ID, srv.runtime.repositories.ImageName(container.Image)) return engine.StatusOK } return job.Errorf("No such container: %s", name) } // ImageExport exports all 
images with the given tag. All versions // containing the same tag are exported. The resulting output is an // uncompressed tar ball. // name is the set of tags to export. // out is the writer where the images are written to. func (srv *Server) ImageExport(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("Usage: %s CONTAINER\n", job.Name) } name := job.Args[0] // get image json tempdir, err := ioutil.TempDir("", "docker-export-") if err != nil { return job.Error(err) } defer os.RemoveAll(tempdir) utils.Debugf("Serializing %s", name) rootRepo, err := srv.runtime.repositories.Get(name) if err != nil { return job.Error(err) } if rootRepo != nil { for _, id := range rootRepo { image, err := srv.ImageInspect(id) if err != nil { return job.Error(err) } if err := srv.exportImage(image, tempdir); err != nil { return job.Error(err) } } // write repositories rootRepoMap := map[string]Repository{} rootRepoMap[name] = rootRepo rootRepoJson, _ := json.Marshal(rootRepoMap) if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.ModeAppend); err != nil { return job.Error(err) } } else { image, err := srv.ImageInspect(name) if err != nil { return job.Error(err) } if err := srv.exportImage(image, tempdir); err != nil { return job.Error(err) } } fs, err := archive.Tar(tempdir, archive.Uncompressed) if err != nil { return job.Error(err) } defer fs.Close() if _, err := io.Copy(job.Stdout, fs); err != nil { return job.Error(err) } return engine.StatusOK } func (srv *Server) exportImage(image *Image, tempdir string) error { for i := image; i != nil; { // temporary directory tmpImageDir := path.Join(tempdir, i.ID) if err := os.Mkdir(tmpImageDir, os.ModeDir); err != nil { if os.IsExist(err) { return nil } return err } var version = "1.0" var versionBuf = []byte(version) if err := ioutil.WriteFile(path.Join(tmpImageDir, "VERSION"), versionBuf, os.ModeAppend); err != nil { return err } // serialize json b, err := json.Marshal(i) if err != nil { return err } if err := ioutil.WriteFile(path.Join(tmpImageDir, "json"), b, os.ModeAppend); err != nil { return err } // serialize filesystem fs, err := i.TarLayer() if err != nil { return err } defer fs.Close() fsTar, err := os.Create(path.Join(tmpImageDir, "layer.tar")) if err != nil { return err } if _, err = io.Copy(fsTar, fs); err != nil { return err } fsTar.Close() // find parent if i.Parent != "" { i, err = srv.ImageInspect(i.Parent) if err != nil { return err } } else { i = nil } } return nil } func (srv *Server) Build(job *engine.Job) engine.Status { if len(job.Args) != 0 { return job.Errorf("Usage: %s\n", job.Name) } var ( remoteURL = job.Getenv("remote") repoName = job.Getenv("t") suppressOutput = job.GetenvBool("q") noCache = job.GetenvBool("nocache") rm = job.GetenvBool("rm") authConfig = &auth.AuthConfig{} configFile = &auth.ConfigFile{} tag string context io.ReadCloser ) job.GetenvJson("authConfig", authConfig) job.GetenvJson("configFile", configFile) repoName, tag = utils.ParseRepositoryTag(repoName) if remoteURL == "" { context = ioutil.NopCloser(job.Stdin) } else if utils.IsGIT(remoteURL) { if !strings.HasPrefix(remoteURL, "git://") { remoteURL = "https://" + remoteURL } root, err := ioutil.TempDir("", "docker-build-git") if err != nil { return job.Error(err) } defer os.RemoveAll(root) if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil { return job.Errorf("Error trying to use git: %s (%s)", err, output) } c, err := archive.Tar(root, 
archive.Uncompressed) if err != nil { return job.Error(err) } context = c } else if utils.IsURL(remoteURL) { f, err := utils.Download(remoteURL) if err != nil { return job.Error(err) } defer f.Body.Close() dockerFile, err := ioutil.ReadAll(f.Body) if err != nil { return job.Error(err) } c, err := archive.Generate("Dockerfile", string(dockerFile)) if err != nil { return job.Error(err) } context = c } defer context.Close() sf := utils.NewStreamFormatter(job.GetenvBool("json")) b := NewBuildFile(srv, &StdoutFormater{ Writer: job.Stdout, StreamFormatter: sf, }, &StderrFormater{ Writer: job.Stdout, StreamFormatter: sf, }, !suppressOutput, !noCache, rm, job.Stdout, sf, authConfig, configFile) id, err := b.Build(context) if err != nil { return job.Error(err) } if repoName != "" { srv.runtime.repositories.Set(repoName, tag, id, false) } return engine.StatusOK } // Loads a set of images into the repository. This is the complementary of ImageExport. // The input stream is an uncompressed tar ball containing images and metadata. func (srv *Server) ImageLoad(job *engine.Job) engine.Status { tmpImageDir, err := ioutil.TempDir("", "docker-import-") if err != nil { return job.Error(err) } defer os.RemoveAll(tmpImageDir) var ( repoTarFile = path.Join(tmpImageDir, "repo.tar") repoDir = path.Join(tmpImageDir, "repo") ) tarFile, err := os.Create(repoTarFile) if err != nil { return job.Error(err) } if _, err := io.Copy(tarFile, job.Stdin); err != nil { return job.Error(err) } tarFile.Close() repoFile, err := os.Open(repoTarFile) if err != nil { return job.Error(err) } if err := os.Mkdir(repoDir, os.ModeDir); err != nil { return job.Error(err) } if err := archive.Untar(repoFile, repoDir, nil); err != nil { return job.Error(err) } dirs, err := ioutil.ReadDir(repoDir) if err != nil { return job.Error(err) } for _, d := range dirs { if d.IsDir() { if err := srv.recursiveLoad(d.Name(), tmpImageDir); err != nil { return job.Error(err) } } } repositoriesJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", "repositories")) if err == nil { repositories := map[string]Repository{} if err := json.Unmarshal(repositoriesJson, &repositories); err != nil { return job.Error(err) } for imageName, tagMap := range repositories { for tag, address := range tagMap { if err := srv.runtime.repositories.Set(imageName, tag, address, true); err != nil { return job.Error(err) } } } } else if !os.IsNotExist(err) { return job.Error(err) } return engine.StatusOK } func (srv *Server) recursiveLoad(address, tmpImageDir string) error { if _, err := srv.ImageInspect(address); err != nil { utils.Debugf("Loading %s", address) imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json")) if err != nil { utils.Debugf("Error reading json", err) return err } layer, err := os.Open(path.Join(tmpImageDir, "repo", address, "layer.tar")) if err != nil { utils.Debugf("Error reading embedded tar", err) return err } img, err := NewImgJSON(imageJson) if err != nil { utils.Debugf("Error unmarshalling json", err) return err } if img.Parent != "" { if !srv.runtime.graph.Exists(img.Parent) { if err := srv.recursiveLoad(img.Parent, tmpImageDir); err != nil { return err } } } if err := srv.runtime.graph.Register(imageJson, layer, img); err != nil { return err } } utils.Debugf("Completed processing %s", address) return nil } func (srv *Server) ImagesSearch(job *engine.Job) engine.Status { if n := len(job.Args); n != 1 { return job.Errorf("Usage: %s TERM", job.Name) } var ( term = job.Args[0] metaHeaders = map[string][]string{} authConfig 
= &auth.AuthConfig{} ) job.GetenvJson("authConfig", authConfig) job.GetenvJson("metaHeaders", metaHeaders) r, err := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), auth.IndexServerAddress()) if err != nil { return job.Error(err) } results, err := r.SearchRepositories(term) if err != nil { return job.Error(err) } outs := engine.NewTable("star_count", 0) for _, result := range results.Results { out := &engine.Env{} out.Import(result) outs.Add(out) } outs.ReverseSort() if _, err := outs.WriteListTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK } func (srv *Server) ImageInsert(job *engine.Job) engine.Status { if len(job.Args) != 3 { return job.Errorf("Usage: %s IMAGE URL PATH\n", job.Name) } var ( name = job.Args[0] url = job.Args[1] path = job.Args[2] ) sf := utils.NewStreamFormatter(job.GetenvBool("json")) out := utils.NewWriteFlusher(job.Stdout) img, err := srv.runtime.repositories.LookupImage(name) if err != nil { return job.Error(err) } file, err := utils.Download(url) if err != nil { return job.Error(err) } defer file.Body.Close() config, _, _, err := runconfig.Parse([]string{img.ID, "echo", "insert", url, path}, srv.runtime.sysInfo) if err != nil { return job.Error(err) } c, _, err := srv.runtime.Create(config, "") if err != nil { return job.Error(err) } if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, sf, false, utils.TruncateID(img.ID), "Downloading"), path); err != nil { return job.Error(err) } // FIXME: Handle custom repo, tag comment, author img, err = srv.runtime.Commit(c, "", "", img.Comment, img.Author, nil) if err != nil { out.Write(sf.FormatError(err)) return engine.StatusErr } out.Write(sf.FormatStatus("", img.ID)) return engine.StatusOK } func (srv *Server) ImagesViz(job *engine.Job) engine.Status { images, _ := srv.runtime.graph.Map() if images == nil { return engine.StatusOK } job.Stdout.Write([]byte("digraph docker {\n")) var ( parentImage *Image err error ) for _, image := range images { parentImage, err = image.GetParent() if err != nil { return job.Errorf("Error while getting parent image: %v", err) } if parentImage != nil { job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n")) } else { job.Stdout.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n")) } } reporefs := make(map[string][]string) for name, repository := range srv.runtime.repositories.Repositories { for tag, id := range repository { reporefs[utils.TruncateID(id)] = append(reporefs[utils.TruncateID(id)], fmt.Sprintf("%s:%s", name, tag)) } } for id, repos := range reporefs { job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n")) } job.Stdout.Write([]byte(" base [style=invisible]\n}\n")) return engine.StatusOK } func (srv *Server) Images(job *engine.Job) engine.Status { var ( allImages map[string]*Image err error ) if job.GetenvBool("all") { allImages, err = srv.runtime.graph.Map() } else { allImages, err = srv.runtime.graph.Heads() } if err != nil { return job.Error(err) } lookup := make(map[string]*engine.Env) for name, repository := range srv.runtime.repositories.Repositories { if job.Getenv("filter") != "" { if match, _ := path.Match(job.Getenv("filter"), name); !match { continue } } for tag, id := range repository { image, err := srv.runtime.graph.Get(id) if err != nil { log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err) continue } if out, exists := 
lookup[id]; exists { out.SetList("RepoTags", append(out.GetList("RepoTags"), fmt.Sprintf("%s:%s", name, tag))) } else { out := &engine.Env{} delete(allImages, id) out.Set("ParentId", image.Parent) out.SetList("RepoTags", []string{fmt.Sprintf("%s:%s", name, tag)}) out.Set("Id", image.ID) out.SetInt64("Created", image.Created.Unix()) out.SetInt64("Size", image.Size) out.SetInt64("VirtualSize", image.getParentsSize(0)+image.Size) lookup[id] = out } } } outs := engine.NewTable("Created", len(lookup)) for _, value := range lookup { outs.Add(value) } // Display images which aren't part of a repository/tag if job.Getenv("filter") == "" { for _, image := range allImages { out := &engine.Env{} out.Set("ParentId", image.Parent) out.SetList("RepoTags", []string{":"}) out.Set("Id", image.ID) out.SetInt64("Created", image.Created.Unix()) out.SetInt64("Size", image.Size) out.SetInt64("VirtualSize", image.getParentsSize(0)+image.Size) outs.Add(out) } } outs.ReverseSort() if _, err := outs.WriteListTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK } func (srv *Server) DockerInfo(job *engine.Job) engine.Status { images, _ := srv.runtime.graph.Map() var imgcount int if images == nil { imgcount = 0 } else { imgcount = len(images) } kernelVersion := "" if kv, err := utils.GetKernelVersion(); err == nil { kernelVersion = kv.String() } // if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION) initPath := utils.DockerInitPath("") if initPath == "" { // if that fails, we'll just return the path from the runtime initPath = srv.runtime.sysInitPath } v := &engine.Env{} v.SetInt("Containers", len(srv.runtime.List())) v.SetInt("Images", imgcount) v.Set("Driver", srv.runtime.driver.String()) v.SetJson("DriverStatus", srv.runtime.driver.Status()) v.SetBool("MemoryLimit", srv.runtime.sysInfo.MemoryLimit) v.SetBool("SwapLimit", srv.runtime.sysInfo.SwapLimit) v.SetBool("IPv4Forwarding", !srv.runtime.sysInfo.IPv4ForwardingDisabled) v.SetBool("Debug", os.Getenv("DEBUG") != "") v.SetInt("NFd", utils.GetTotalUsedFds()) v.SetInt("NGoroutines", runtime.NumGoroutine()) v.Set("ExecutionDriver", srv.runtime.execDriver.Name()) v.SetInt("NEventsListener", len(srv.listeners)) v.Set("KernelVersion", kernelVersion) v.Set("IndexServerAddress", auth.IndexServerAddress()) v.Set("InitSha1", dockerversion.INITSHA1) v.Set("InitPath", initPath) if _, err := v.WriteTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK } func (srv *Server) ImageHistory(job *engine.Job) engine.Status { if n := len(job.Args); n != 1 { return job.Errorf("Usage: %s IMAGE", job.Name) } name := job.Args[0] image, err := srv.runtime.repositories.LookupImage(name) if err != nil { return job.Error(err) } lookupMap := make(map[string][]string) for name, repository := range srv.runtime.repositories.Repositories { for tag, id := range repository { // If the ID already has a reverse lookup, do not update it unless for "latest" if _, exists := lookupMap[id]; !exists { lookupMap[id] = []string{} } lookupMap[id] = append(lookupMap[id], name+":"+tag) } } outs := engine.NewTable("Created", 0) err = image.WalkHistory(func(img *Image) error { out := &engine.Env{} out.Set("Id", img.ID) out.SetInt64("Created", img.Created.Unix()) out.Set("CreatedBy", strings.Join(img.ContainerConfig.Cmd, " ")) out.SetList("Tags", lookupMap[img.ID]) out.SetInt64("Size", img.Size) outs.Add(out) return nil }) 
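// Illustration with hypothetical values: given Repositories == {"base": {"latest":
// "abc123", "v1": "abc123"}}, the loop above yields lookupMap["abc123"] ==
// []string{"base:latest", "base:v1"}, so every history row emitted by WalkHistory
// below is annotated with all of the repo:tag names that point at it.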
outs.ReverseSort() if _, err := outs.WriteListTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK } func (srv *Server) ContainerTop(job *engine.Job) engine.Status { if len(job.Args) != 1 && len(job.Args) != 2 { return job.Errorf("Not enough arguments. Usage: %s CONTAINER [PS_ARGS]\n", job.Name) } var ( name = job.Args[0] psArgs = "-ef" ) if len(job.Args) == 2 && job.Args[1] != "" { psArgs = job.Args[1] } if container := srv.runtime.Get(name); container != nil { if !container.State.IsRunning() { return job.Errorf("Container %s is not running", name) } pids, err := srv.runtime.execDriver.GetPidsForContainer(container.ID) if err != nil { return job.Error(err) } output, err := exec.Command("ps", psArgs).Output() if err != nil { return job.Errorf("Error running ps: %s", err) } lines := strings.Split(string(output), "\n") header := strings.Fields(lines[0]) out := &engine.Env{} out.SetList("Titles", header) pidIndex := -1 for i, name := range header { if name == "PID" { pidIndex = i } } if pidIndex == -1 { return job.Errorf("Couldn't find PID field in ps output") } processes := [][]string{} for _, line := range lines[1:] { if len(line) == 0 { continue } fields := strings.Fields(line) p, err := strconv.Atoi(fields[pidIndex]) if err != nil { return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) } for _, pid := range pids { if pid == p { // Make sure number of fields equals number of header titles // merging "overhanging" fields process := fields[:len(header)-1] process = append(process, strings.Join(fields[len(header)-1:], " ")) processes = append(processes, process) } } } out.SetJson("Processes", processes) out.WriteTo(job.Stdout) return engine.StatusOK } return job.Errorf("No such container: %s", name) } func (srv *Server) ContainerChanges(job *engine.Job) engine.Status { if n := len(job.Args); n != 1 { return job.Errorf("Usage: %s CONTAINER", job.Name) } name := job.Args[0] if container := srv.runtime.Get(name); container != nil { outs := engine.NewTable("", 0) changes, err := container.Changes() if err != nil { return job.Error(err) } for _, change := range changes { out := &engine.Env{} if err := out.Import(change); err != nil { return job.Error(err) } outs.Add(out) } if _, err := outs.WriteListTo(job.Stdout); err != nil { return job.Error(err) } } else { return job.Errorf("No such container: %s", name) } return engine.StatusOK } func (srv *Server) Containers(job *engine.Job) engine.Status { var ( foundBefore bool displayed int all = job.GetenvBool("all") since = job.Getenv("since") before = job.Getenv("before") n = job.GetenvInt("limit") size = job.GetenvBool("size") ) outs := engine.NewTable("Created", 0) names := map[string][]string{} srv.runtime.containerGraph.Walk("/", func(p string, e *graphdb.Entity) error { names[e.ID()] = append(names[e.ID()], p) return nil }, -1) for _, container := range srv.runtime.List() { if !container.State.IsRunning() && !all && n <= 0 && since == "" && before == "" { continue } if before != "" && !foundBefore { if container.ID == before || utils.TruncateID(container.ID) == before { foundBefore = true } continue } if n > 0 && displayed == n { break } if container.ID == since || utils.TruncateID(container.ID) == since { break } displayed++ out := &engine.Env{} out.Set("Id", container.ID) out.SetList("Names", names[container.ID]) out.Set("Image", srv.runtime.repositories.ImageName(container.Image)) out.Set("Command", fmt.Sprintf("%s %s", container.Path, strings.Join(container.Args, " "))) out.SetInt64("Created", 
container.Created.Unix()) out.Set("Status", container.State.String()) str, err := container.NetworkSettings.PortMappingAPI().ToListString() if err != nil { return job.Error(err) } out.Set("Ports", str) if size { sizeRw, sizeRootFs := container.GetSize() out.SetInt64("SizeRw", sizeRw) out.SetInt64("SizeRootFs", sizeRootFs) } outs.Add(out) } outs.ReverseSort() if _, err := outs.WriteListTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK } func (srv *Server) ContainerCommit(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name) } name := job.Args[0] container := srv.runtime.Get(name) if container == nil { return job.Errorf("No such container: %s", name) } var config = container.Config var newConfig runconfig.Config if err := job.GetenvJson("config", &newConfig); err != nil { return job.Error(err) } if err := runconfig.Merge(&newConfig, config); err != nil { return job.Error(err) } img, err := srv.runtime.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), &newConfig) if err != nil { return job.Error(err) } job.Printf("%s\n", img.ID) return engine.StatusOK } func (srv *Server) ImageTag(job *engine.Job) engine.Status { if len(job.Args) != 2 && len(job.Args) != 3 { return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name) } var tag string if len(job.Args) == 3 { tag = job.Args[2] } if err := srv.runtime.repositories.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil { return job.Error(err) } return engine.StatusOK } func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoint string, token []string, sf *utils.StreamFormatter) error { history, err := r.GetRemoteHistory(imgID, endpoint, token) if err != nil { return err } out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling dependent layers", nil)) // FIXME: Try to stream the images? 
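// Sketch of the loop below, with hypothetical layer IDs: the remote history list
// is ordered newest layer first (e.g. ["top", "mid", "base"]), so iterating from
// the end pulls and registers "base", then "mid", then "top", ensuring a parent
// layer is always present in the graph before any child that references it.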
// FIXME: Launch the getRemoteImage() in goroutines for i := len(history) - 1; i >= 0; i-- { id := history[i] // ensure no two downloads of the same layer happen at the same time if c, err := srv.poolAdd("pull", "layer:"+id); err != nil { utils.Errorf("Image (id: %s) pull is already running, skipping: %v", id, err) <-c } defer srv.poolRemove("pull", "layer:"+id) if !srv.runtime.graph.Exists(id) { out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil)) var ( imgJSON []byte imgSize int err error img *Image ) retries := 5 for j := 1; j <= retries; j++ { imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token) if err != nil && j == retries { out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) return err } else if err != nil { time.Sleep(time.Duration(j) * 500 * time.Millisecond) continue } img, err = NewImgJSON(imgJSON) if err != nil && j == retries { out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) return fmt.Errorf("Failed to parse json: %s", err) } else if err != nil { time.Sleep(time.Duration(j) * 500 * time.Millisecond) continue } else { break } } // Get the layer out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling fs layer", nil)) layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token) if err != nil { out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) return err } defer layer.Close() if err := srv.runtime.graph.Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"), img); err != nil { out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil)) return err } } out.Write(sf.FormatProgress(utils.TruncateID(id), "Download complete", nil)) } return nil } func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName, remoteName, askedTag string, sf *utils.StreamFormatter, parallel bool) error { out.Write(sf.FormatStatus("", "Pulling repository %s", localName)) repoData, err := r.GetRepositoryData(remoteName) if err != nil { return err } utils.Debugf("Retrieving the tag list") tagsList, err := r.GetRemoteTags(repoData.Endpoints, remoteName, repoData.Tokens) if err != nil { utils.Errorf("%v", err) return err } for tag, id := range tagsList { repoData.ImgList[id] = &registry.ImgData{ ID: id, Tag: tag, Checksum: "", } } utils.Debugf("Registering tags") // If no tag has been specified, pull them all if askedTag == "" { for tag, id := range tagsList { repoData.ImgList[id].Tag = tag } } else { // Otherwise, check that the tag exists and use only that one id, exists := tagsList[askedTag] if !exists { return fmt.Errorf("Tag %s not found in repository %s", askedTag, localName) } repoData.ImgList[id].Tag = askedTag } errors := make(chan error) for _, image := range repoData.ImgList { downloadImage := func(img *registry.ImgData) { if askedTag != "" && img.Tag != askedTag { utils.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID) if parallel { errors <- nil } return } if img.Tag == "" { utils.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID) if parallel { errors <- nil } return } // ensure no two downloads of the same image happen at the same time if c, err := srv.poolAdd("pull", "img:"+img.ID); err != nil { if c != nil { out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. 
Waiting.", nil)) <-c out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) } else { utils.Errorf("Image (id: %s) pull is already running, skipping: %v", img.ID, err) } if parallel { errors <- nil } return } defer srv.poolRemove("pull", "img:"+img.ID) out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, localName), nil)) success := false var lastErr error for _, ep := range repoData.Endpoints { out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, localName, ep), nil)) if err := srv.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil { // Its not ideal that only the last error is returned, it would be better to concatenate the errors. // As the error is also given to the output stream the user will see the error. lastErr = err out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, localName, ep, err), nil)) continue } success = true break } if !success { out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, %s", img.Tag, localName, lastErr), nil)) if parallel { errors <- fmt.Errorf("Could not find repository on any of the indexed registries.") return } } out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) if parallel { errors <- nil } } if parallel { go downloadImage(image) } else { downloadImage(image) } } if parallel { var lastError error for i := 0; i < len(repoData.ImgList); i++ { if err := <-errors; err != nil { lastError = err } } if lastError != nil { return lastError } } for tag, id := range tagsList { if askedTag != "" && tag != askedTag { continue } if err := srv.runtime.repositories.Set(localName, tag, id, true); err != nil { return err } } if err := srv.runtime.repositories.Save(); err != nil { return err } return nil } func (srv *Server) poolAdd(kind, key string) (chan struct{}, error) { srv.Lock() defer srv.Unlock() if c, exists := srv.pullingPool[key]; exists { return c, fmt.Errorf("pull %s is already in progress", key) } if c, exists := srv.pushingPool[key]; exists { return c, fmt.Errorf("push %s is already in progress", key) } c := make(chan struct{}) switch kind { case "pull": srv.pullingPool[key] = c case "push": srv.pushingPool[key] = c default: return nil, fmt.Errorf("Unknown pool type") } return c, nil } func (srv *Server) poolRemove(kind, key string) error { srv.Lock() defer srv.Unlock() switch kind { case "pull": if c, exists := srv.pullingPool[key]; exists { close(c) delete(srv.pullingPool, key) } case "push": if c, exists := srv.pushingPool[key]; exists { close(c) delete(srv.pushingPool, key) } default: return fmt.Errorf("Unknown pool type") } return nil } func (srv *Server) ImagePull(job *engine.Job) engine.Status { if n := len(job.Args); n != 1 && n != 2 { return job.Errorf("Usage: %s IMAGE [TAG]", job.Name) } var ( localName = job.Args[0] tag string sf = utils.NewStreamFormatter(job.GetenvBool("json")) authConfig = &auth.AuthConfig{} metaHeaders map[string][]string ) if len(job.Args) > 1 { tag = job.Args[1] } job.GetenvJson("authConfig", authConfig) job.GetenvJson("metaHeaders", metaHeaders) c, err := srv.poolAdd("pull", localName+":"+tag) if err != nil { if c != nil { // Another pull of the same repository is already taking place; just wait for it to finish job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. 
Waiting.", localName)) <-c return engine.StatusOK } return job.Error(err) } defer srv.poolRemove("pull", localName+":"+tag) // Resolve the Repository name from fqn to endpoint + name hostname, remoteName, err := registry.ResolveRepositoryName(localName) if err != nil { return job.Error(err) } endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname) if err != nil { return job.Error(err) } r, err := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint) if err != nil { return job.Error(err) } if endpoint == auth.IndexServerAddress() { // If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar" localName = remoteName } if err = srv.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err != nil { return job.Error(err) } return engine.StatusOK } // Retrieve the all the images to be uploaded in the correct order func (srv *Server) getImageList(localRepo map[string]string) ([]string, map[string][]string, error) { var ( imageList []string imagesSeen map[string]bool = make(map[string]bool) tagsByImage map[string][]string = make(map[string][]string) ) for tag, id := range localRepo { var imageListForThisTag []string tagsByImage[id] = append(tagsByImage[id], tag) for img, err := srv.runtime.graph.Get(id); img != nil; img, err = img.GetParent() { if err != nil { return nil, nil, err } if imagesSeen[img.ID] { // This image is already on the list, we can ignore it and all its parents break } imagesSeen[img.ID] = true imageListForThisTag = append(imageListForThisTag, img.ID) } // reverse the image list for this tag (so the "most"-parent image is first) for i, j := 0, len(imageListForThisTag)-1; i < j; i, j = i+1, j-1 { imageListForThisTag[i], imageListForThisTag[j] = imageListForThisTag[j], imageListForThisTag[i] } // append to main image list imageList = append(imageList, imageListForThisTag...) 
} utils.Debugf("Image list: %v", imageList) utils.Debugf("Tags by image: %v", tagsByImage) return imageList, tagsByImage, nil } func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName, remoteName string, localRepo map[string]string, sf *utils.StreamFormatter) error { out = utils.NewWriteFlusher(out) utils.Debugf("Local repo: %s", localRepo) imgList, tagsByImage, err := srv.getImageList(localRepo) if err != nil { return err } out.Write(sf.FormatStatus("", "Sending image list")) var repoData *registry.RepositoryData var imageIndex []*registry.ImgData for _, imgId := range imgList { if tags, exists := tagsByImage[imgId]; exists { // If an image has tags you must add an entry in the image index // for each tag for _, tag := range tags { imageIndex = append(imageIndex, ®istry.ImgData{ ID: imgId, Tag: tag, }) } } else { // If the image does not have a tag it still needs to be sent to the // registry with an empty tag so that it is accociated with the repository imageIndex = append(imageIndex, ®istry.ImgData{ ID: imgId, Tag: "", }) } } utils.Debugf("Preparing to push %s with the following images and tags\n", localRepo) for _, data := range imageIndex { utils.Debugf("Pushing ID: %s with Tag: %s\n", data.ID, data.Tag) } // Register all the images in a repository with the registry // If an image is not in this list it will not be associated with the repository repoData, err = r.PushImageJSONIndex(remoteName, imageIndex, false, nil) if err != nil { return err } for _, ep := range repoData.Endpoints { out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, len(localRepo))) for _, imgId := range imgList { if r.LookupRemoteImage(imgId, ep, repoData.Tokens) { out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", utils.TruncateID(imgId))) } else { if _, err := srv.pushImage(r, out, remoteName, imgId, ep, repoData.Tokens, sf); err != nil { // FIXME: Continue on error? 
return err } } for _, tag := range tagsByImage[imgId] { out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", utils.TruncateID(imgId), ep+"repositories/"+remoteName+"/tags/"+tag)) if err := r.PushRegistryTag(remoteName, imgId, tag, ep, repoData.Tokens); err != nil { return err } } } } if _, err := r.PushImageJSONIndex(remoteName, imageIndex, true, repoData.Endpoints); err != nil { return err } return nil } func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) { out = utils.NewWriteFlusher(out) jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.graph.Root, imgID, "json")) if err != nil { return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err) } out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pushing", nil)) imgData := &registry.ImgData{ ID: imgID, } // Send the json if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil { if err == registry.ErrAlreadyExists { out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image already pushed, skipping", nil)) return "", nil } return "", err } layerData, err := srv.runtime.graph.TempLayerArchive(imgID, archive.Uncompressed, sf, out) if err != nil { return "", fmt.Errorf("Failed to generate layer archive: %s", err) } defer os.RemoveAll(layerData.Name()) // Send the layer checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw) if err != nil { return "", err } imgData.Checksum = checksum imgData.ChecksumPayload = checksumPayload // Send the checksum if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil { return "", err } out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image successfully pushed", nil)) return imgData.Checksum, nil } // FIXME: Allow to interrupt current push when new push of same image is done. 
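// Illustrative sketch (not part of the original source; the repository name is
// hypothetical): concurrent transfers of the same key are serialized through
// srv.poolAdd/srv.poolRemove. The first caller registers a channel and proceeds;
// a later caller gets that same channel back along with an error and waits for it
// to be closed, mirroring how ImagePull handles a duplicate pull:
//
//	if c, err := srv.poolAdd("pull", "busybox:latest"); err != nil {
//		if c != nil {
//			<-c // another pull of busybox:latest is in flight; wait until poolRemove closes the channel
//		}
//		return engine.StatusOK
//	}
//	defer srv.poolRemove("pull", "busybox:latest")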
func (srv *Server) ImagePush(job *engine.Job) engine.Status { if n := len(job.Args); n != 1 { return job.Errorf("Usage: %s IMAGE", job.Name) } var ( localName = job.Args[0] sf = utils.NewStreamFormatter(job.GetenvBool("json")) authConfig = &auth.AuthConfig{} metaHeaders map[string][]string ) job.GetenvJson("authConfig", authConfig) job.GetenvJson("metaHeaders", metaHeaders) if _, err := srv.poolAdd("push", localName); err != nil { return job.Error(err) } defer srv.poolRemove("push", localName) // Resolve the Repository name from fqn to endpoint + name hostname, remoteName, err := registry.ResolveRepositoryName(localName) if err != nil { return job.Error(err) } endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname) if err != nil { return job.Error(err) } img, err := srv.runtime.graph.Get(localName) r, err2 := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint) if err2 != nil { return job.Error(err2) } if err != nil { reposLen := len(srv.runtime.repositories.Repositories[localName]) job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen)) // If it fails, try to get the repository if localRepo, exists := srv.runtime.repositories.Repositories[localName]; exists { if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, sf); err != nil { return job.Error(err) } return engine.StatusOK } return job.Error(err) } var token []string job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", localName)) if _, err := srv.pushImage(r, job.Stdout, remoteName, img.ID, endpoint, token, sf); err != nil { return job.Error(err) } return engine.StatusOK } func (srv *Server) ImageImport(job *engine.Job) engine.Status { if n := len(job.Args); n != 2 && n != 3 { return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name) } var ( src = job.Args[0] repo = job.Args[1] tag string sf = utils.NewStreamFormatter(job.GetenvBool("json")) archive archive.ArchiveReader resp *http.Response ) if len(job.Args) > 2 { tag = job.Args[2] } if src == "-" { archive = job.Stdin } else { u, err := url.Parse(src) if err != nil { return job.Error(err) } if u.Scheme == "" { u.Scheme = "http" u.Host = src u.Path = "" } job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u)) // Download with curl (pretty progress bar) // If curl is not available, fallback to http.Get() resp, err = utils.Download(u.String()) if err != nil { return job.Error(err) } progressReader := utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing") defer progressReader.Close() archive = progressReader } img, err := srv.runtime.graph.Create(archive, nil, "Imported from "+src, "", nil) if err != nil { return job.Error(err) } // Optionally register the image at REPO/TAG if repo != "" { if err := srv.runtime.repositories.Set(repo, tag, img.ID, true); err != nil { return job.Error(err) } } job.Stdout.Write(sf.FormatStatus("", img.ID)) return engine.StatusOK } func (srv *Server) ContainerCreate(job *engine.Job) engine.Status { var name string if len(job.Args) == 1 { name = job.Args[0] } else if len(job.Args) > 1 { return job.Errorf("Usage: %s", job.Name) } config := runconfig.ContainerConfigFromJob(job) if config.Memory != 0 && config.Memory < 524288 { return job.Errorf("Minimum memory limit allowed is 512k") } if config.Memory > 0 && !srv.runtime.sysInfo.MemoryLimit { job.Errorf("Your kernel does not support memory limit capabilities. 
Limitation discarded.\n") config.Memory = 0 } if config.Memory > 0 && !srv.runtime.sysInfo.SwapLimit { job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n") config.MemorySwap = -1 } resolvConf, err := utils.GetResolvConf() if err != nil { return job.Error(err) } if !config.NetworkDisabled && len(config.Dns) == 0 && len(srv.runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) { job.Errorf("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v\n", defaultDns) config.Dns = defaultDns } container, buildWarnings, err := srv.runtime.Create(config, name) if err != nil { if srv.runtime.graph.IsNotExist(err) { _, tag := utils.ParseRepositoryTag(config.Image) if tag == "" { tag = DEFAULTTAG } return job.Errorf("No such image: %s (tag: %s)", config.Image, tag) } return job.Error(err) } if !container.Config.NetworkDisabled && srv.runtime.sysInfo.IPv4ForwardingDisabled { job.Errorf("IPv4 forwarding is disabled.\n") } srv.LogEvent("create", container.ID, srv.runtime.repositories.ImageName(container.Image)) // FIXME: this is necessary because runtime.Create might return a nil container // with a non-nil error. This should not happen! Once it's fixed we // can remove this workaround. if container != nil { job.Printf("%s\n", container.ID) } for _, warning := range buildWarnings { job.Errorf("%s\n", warning) } return engine.StatusOK } func (srv *Server) ContainerRestart(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("Usage: %s CONTAINER\n", job.Name) } var ( name = job.Args[0] t = 10 ) if job.EnvExists("t") { t = job.GetenvInt("t") } if container := srv.runtime.Get(name); container != nil { if err := container.Restart(int(t)); err != nil { return job.Errorf("Cannot restart container %s: %s\n", name, err) } srv.LogEvent("restart", container.ID, srv.runtime.repositories.ImageName(container.Image)) } else { return job.Errorf("No such container: %s\n", name) } return engine.StatusOK } func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("Not enough arguments. 
Usage: %s CONTAINER\n", job.Name) } name := job.Args[0] removeVolume := job.GetenvBool("removeVolume") removeLink := job.GetenvBool("removeLink") forceRemove := job.GetenvBool("forceRemove") container := srv.runtime.Get(name) if removeLink { if container == nil { return job.Errorf("No such link: %s", name) } name, err := getFullName(name) if err != nil { return job.Error(err) } parent, n := path.Split(name) if parent == "/" { return job.Errorf("Conflict, cannot remove the default name of the container") } pe := srv.runtime.containerGraph.Get(parent) if pe == nil { return job.Errorf("Cannot get parent %s for name %s", parent, name) } parentContainer := srv.runtime.Get(pe.ID()) if parentContainer != nil && parentContainer.activeLinks != nil { if link, exists := parentContainer.activeLinks[n]; exists { link.Disable() } else { utils.Debugf("Could not find active link for %s", name) } } if err := srv.runtime.containerGraph.Delete(name); err != nil { return job.Error(err) } return engine.StatusOK } if container != nil { if container.State.IsRunning() { if forceRemove { if err := container.Stop(5); err != nil { return job.Errorf("Could not stop running container, cannot remove - %v", err) } } else { return job.Errorf("Impossible to remove a running container, please stop it first or use -f") } } if err := srv.runtime.Destroy(container); err != nil { return job.Errorf("Cannot destroy container %s: %s", name, err) } srv.LogEvent("destroy", container.ID, srv.runtime.repositories.ImageName(container.Image)) if removeVolume { var ( volumes = make(map[string]struct{}) binds = make(map[string]struct{}) usedVolumes = make(map[string]*Container) ) // the volume id is always the base of the path getVolumeId := func(p string) string { return filepath.Base(strings.TrimSuffix(p, "/layer")) } // populate bind map so that they can be skipped and not removed for _, bind := range container.hostConfig.Binds { source := strings.Split(bind, ":")[0] // TODO: refactor all volume stuff, all of it // it is very important that we eval the link // or comparing the keys to container.Volumes will not work p, err := filepath.EvalSymlinks(source) if err != nil { return job.Error(err) } source = p binds[source] = struct{}{} } // Store all the deleted container's volumes for _, volumeId := range container.Volumes { // Skip the volumes mounted from external bind mounts; // the binds map above holds their symlink-resolved sources if _, exists := binds[volumeId]; exists { continue } volumeId = getVolumeId(volumeId) volumes[volumeId] = struct{}{} } // Retrieve all volumes from all remaining containers for _, container := range srv.runtime.List() { for _, containerVolumeId := range container.Volumes { containerVolumeId = getVolumeId(containerVolumeId) usedVolumes[containerVolumeId] = container } } for volumeId := range volumes { // If the requested volume is still used by another container, skip it if c, exists := usedVolumes[volumeId]; exists { log.Printf("The volume %s is used by the container %s. Impossible to remove it. 
Skipping.\n", volumeId, c.ID) continue } if err := srv.runtime.volumes.Delete(volumeId); err != nil { return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err) } } } } else { return job.Errorf("No such container: %s", name) } return engine.StatusOK } func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force bool) error { var ( repoName, tag string tags = []string{} ) repoName, tag = utils.ParseRepositoryTag(name) if tag == "" { tag = DEFAULTTAG } img, err := srv.runtime.repositories.LookupImage(name) if err != nil { if r, _ := srv.runtime.repositories.Get(repoName); r != nil { return fmt.Errorf("No such image: %s:%s", repoName, tag) } return fmt.Errorf("No such image: %s", name) } if strings.Contains(img.ID, name) { repoName = "" tag = "" } byParents, err := srv.runtime.graph.ByParent() if err != nil { return err } //If delete by id, see if the id belong only to one repository if repoName == "" { for _, repoAndTag := range srv.runtime.repositories.ByID()[img.ID] { parsedRepo, parsedTag := utils.ParseRepositoryTag(repoAndTag) if repoName == "" || repoName == parsedRepo { repoName = parsedRepo if parsedTag != "" { tags = append(tags, parsedTag) } } else if repoName != parsedRepo && !force { // the id belongs to multiple repos, like base:latest and user:test, // in that case return conflict return fmt.Errorf("Conflict, cannot delete image %s because it is tagged in multiple repositories, use -f to force", name) } } } else { tags = append(tags, tag) } if !first && len(tags) > 0 { return nil } //Untag the current image for _, tag := range tags { tagDeleted, err := srv.runtime.repositories.Delete(repoName, tag) if err != nil { return err } if tagDeleted { out := &engine.Env{} out.Set("Untagged", repoName+":"+tag) imgs.Add(out) srv.LogEvent("untag", img.ID, "") } } tags = srv.runtime.repositories.ByID()[img.ID] if (len(tags) <= 1 && repoName == "") || len(tags) == 0 { if len(byParents[img.ID]) == 0 { if err := srv.canDeleteImage(img.ID); err != nil { return err } if err := srv.runtime.repositories.DeleteAll(img.ID); err != nil { return err } if err := srv.runtime.graph.Delete(img.ID); err != nil { return err } out := &engine.Env{} out.Set("Deleted", img.ID) imgs.Add(out) srv.LogEvent("delete", img.ID, "") if img.Parent != "" { err := srv.DeleteImage(img.Parent, imgs, false, force) if first { return err } } } } return nil } func (srv *Server) ImageDelete(job *engine.Job) engine.Status { if n := len(job.Args); n != 1 { return job.Errorf("Usage: %s IMAGE", job.Name) } imgs := engine.NewTable("", 0) if err := srv.DeleteImage(job.Args[0], imgs, true, job.GetenvBool("force")); err != nil { return job.Error(err) } if len(imgs.Data) == 0 { return job.Errorf("Conflict, %s wasn't deleted", job.Args[0]) } if _, err := imgs.WriteListTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK } func (srv *Server) canDeleteImage(imgID string) error { for _, container := range srv.runtime.List() { parent, err := srv.runtime.repositories.LookupImage(container.Image) if err != nil { return err } if err := parent.WalkHistory(func(p *Image) error { if imgID == p.ID { return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it", utils.TruncateID(imgID), utils.TruncateID(container.ID)) } return nil }); err != nil { return err } } return nil } func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*Image, error) { // Retrieve all images images, err := srv.runtime.graph.Map() if err != nil { return nil, err } // Store the tree 
in a map of map (map[parentId][childId]) imageMap := make(map[string]map[string]struct{}) for _, img := range images { if _, exists := imageMap[img.Parent]; !exists { imageMap[img.Parent] = make(map[string]struct{}) } imageMap[img.Parent][img.ID] = struct{}{} } // Loop on the children of the given image and check the config var match *Image for elem := range imageMap[imgID] { img, err := srv.runtime.graph.Get(elem) if err != nil { return nil, err } if runconfig.Compare(&img.ContainerConfig, config) { if match == nil || match.Created.Before(img.Created) { match = img } } } return match, nil } func (srv *Server) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error { runtime := srv.runtime if hostConfig != nil && hostConfig.Links != nil { for _, l := range hostConfig.Links { parts, err := parseLink(l) if err != nil { return err } child, err := srv.runtime.GetByName(parts["name"]) if err != nil { return err } if child == nil { return fmt.Errorf("Could not get container for %s", parts["name"]) } if err := runtime.RegisterLink(container, child, parts["alias"]); err != nil { return err } } // After we load all the links into the runtime // set them to nil on the hostconfig hostConfig.Links = nil if err := container.writeHostConfig(); err != nil { return err } } return nil } func (srv *Server) ContainerStart(job *engine.Job) engine.Status { if len(job.Args) < 1 { return job.Errorf("Usage: %s container_id", job.Name) } name := job.Args[0] runtime := srv.runtime container := runtime.Get(name) if container == nil { return job.Errorf("No such container: %s", name) } // If no environment was set, then no hostconfig was passed. if len(job.Environ()) > 0 { hostConfig := runconfig.ContainerHostConfigFromJob(job) // Validate the HostConfig binds. Make sure that: // 1) the source of a bind mount isn't / // The bind mount "/:/foo" isn't allowed. // 2) Check that the source exists // The source to be bind mounted must exist. 
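// For example (illustrative only, not part of the original source):
//	"/:/data"        -> rejected below because its source is "/"
//	"/srv/app:/data" -> accepted; /srv/app is created on the host with
//	                    mode 0755 if it does not already exist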
for _, bind := range hostConfig.Binds { splitBind := strings.Split(bind, ":") source := splitBind[0] // refuse to bind mount "/" to the container if source == "/" { return job.Errorf("Invalid bind mount '%s' : source can't be '/'", bind) } // ensure the source exists on the host _, err := os.Stat(source) if err != nil && os.IsNotExist(err) { err = os.MkdirAll(source, 0755) if err != nil { return job.Errorf("Could not create local directory '%s' for bind mount: %s!", source, err.Error()) } } } // Register any links from the host config before starting the container if err := srv.RegisterLinks(container, hostConfig); err != nil { return job.Error(err) } container.hostConfig = hostConfig container.ToDisk() } if err := container.Start(); err != nil { return job.Errorf("Cannot start container %s: %s", name, err) } srv.LogEvent("start", container.ID, runtime.repositories.ImageName(container.Image)) return engine.StatusOK } func (srv *Server) ContainerStop(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("Usage: %s CONTAINER\n", job.Name) } var ( name = job.Args[0] t = 10 ) if job.EnvExists("t") { t = job.GetenvInt("t") } if container := srv.runtime.Get(name); container != nil { if err := container.Stop(int(t)); err != nil { return job.Errorf("Cannot stop container %s: %s\n", name, err) } srv.LogEvent("stop", container.ID, srv.runtime.repositories.ImageName(container.Image)) } else { return job.Errorf("No such container: %s\n", name) } return engine.StatusOK } func (srv *Server) ContainerWait(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("Usage: %s", job.Name) } name := job.Args[0] if container := srv.runtime.Get(name); container != nil { status := container.Wait() job.Printf("%d\n", status) return engine.StatusOK } return job.Errorf("%s: no such container: %s", job.Name, name) } func (srv *Server) ContainerResize(job *engine.Job) engine.Status { if len(job.Args) != 3 { return job.Errorf("Not enough arguments. 
Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name) } name := job.Args[0] height, err := strconv.Atoi(job.Args[1]) if err != nil { return job.Error(err) } width, err := strconv.Atoi(job.Args[2]) if err != nil { return job.Error(err) } if container := srv.runtime.Get(name); container != nil { if err := container.Resize(height, width); err != nil { return job.Error(err) } return engine.StatusOK } return job.Errorf("No such container: %s", name) } func (srv *Server) ContainerAttach(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("Usage: %s CONTAINER\n", job.Name) } var ( name = job.Args[0] logs = job.GetenvBool("logs") stream = job.GetenvBool("stream") stdin = job.GetenvBool("stdin") stdout = job.GetenvBool("stdout") stderr = job.GetenvBool("stderr") ) container := srv.runtime.Get(name) if container == nil { return job.Errorf("No such container: %s", name) } //logs if logs { cLog, err := container.ReadLog("json") if err != nil && os.IsNotExist(err) { // Legacy logs utils.Debugf("Old logs format") if stdout { cLog, err := container.ReadLog("stdout") if err != nil { utils.Errorf("Error reading logs (stdout): %s", err) } else if _, err := io.Copy(job.Stdout, cLog); err != nil { utils.Errorf("Error streaming logs (stdout): %s", err) } } if stderr { cLog, err := container.ReadLog("stderr") if err != nil { utils.Errorf("Error reading logs (stderr): %s", err) } else if _, err := io.Copy(job.Stderr, cLog); err != nil { utils.Errorf("Error streaming logs (stderr): %s", err) } } } else if err != nil { utils.Errorf("Error reading logs (json): %s", err) } else { dec := json.NewDecoder(cLog) for { l := &utils.JSONLog{} if err := dec.Decode(l); err == io.EOF { break } else if err != nil { utils.Errorf("Error streaming logs: %s", err) break } if l.Stream == "stdout" && stdout { fmt.Fprintf(job.Stdout, "%s", l.Log) } if l.Stream == "stderr" && stderr { fmt.Fprintf(job.Stderr, "%s", l.Log) } } } } //stream if stream { if container.State.IsGhost() { return job.Errorf("Impossible to attach to a ghost container") } var ( cStdin io.ReadCloser cStdout, cStderr io.Writer cStdinCloser io.Closer ) if stdin { r, w := io.Pipe() go func() { defer w.Close() defer utils.Debugf("Closing buffered stdin pipe") io.Copy(w, job.Stdin) }() cStdin = r cStdinCloser = job.Stdin } if stdout { cStdout = job.Stdout } if stderr { cStderr = job.Stderr } <-container.Attach(cStdin, cStdinCloser, cStdout, cStderr) // If we are in stdinonce mode, wait for the process to end // otherwise, simply return if container.Config.StdinOnce && !container.Config.Tty { container.Wait() } } return engine.StatusOK } func (srv *Server) ContainerInspect(name string) (*Container, error) { if container := srv.runtime.Get(name); container != nil { return container, nil } return nil, fmt.Errorf("No such container: %s", name) } func (srv *Server) ImageInspect(name string) (*Image, error) { if image, err := srv.runtime.repositories.LookupImage(name); err == nil && image != nil { return image, nil } return nil, fmt.Errorf("No such image: %s", name) } func (srv *Server) JobInspect(job *engine.Job) engine.Status { // TODO: deprecate KIND/conflict if n := len(job.Args); n != 2 { return job.Errorf("Usage: %s CONTAINER|IMAGE KIND", job.Name) } var ( name = job.Args[0] kind = job.Args[1] object interface{} conflict = job.GetenvBool("conflict") //should the job detect conflict between containers and images image, errImage = srv.ImageInspect(name) container, errContainer = srv.ContainerInspect(name) ) if conflict && image != nil && container != nil 
{ return job.Errorf("Conflict between containers and images") } switch kind { case "image": if errImage != nil { return job.Error(errImage) } object = image case "container": if errContainer != nil { return job.Error(errContainer) } object = &struct { *Container HostConfig *runconfig.HostConfig }{container, container.hostConfig} default: return job.Errorf("Unknown kind: %s", kind) } b, err := json.Marshal(object) if err != nil { return job.Error(err) } job.Stdout.Write(b) return engine.StatusOK } func (srv *Server) ContainerCopy(job *engine.Job) engine.Status { if len(job.Args) != 2 { return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name) } var ( name = job.Args[0] resource = job.Args[1] ) if container := srv.runtime.Get(name); container != nil { data, err := container.Copy(resource) if err != nil { return job.Error(err) } defer data.Close() if _, err := io.Copy(job.Stdout, data); err != nil { return job.Error(err) } return engine.StatusOK } return job.Errorf("No such container: %s", name) } func NewServer(eng *engine.Engine, config *DaemonConfig) (*Server, error) { runtime, err := NewRuntime(config, eng) if err != nil { return nil, err } srv := &Server{ Eng: eng, runtime: runtime, pullingPool: make(map[string]chan struct{}), pushingPool: make(map[string]chan struct{}), events: make([]utils.JSONMessage, 0, 64), //only keeps the 64 last events listeners: make(map[string]chan utils.JSONMessage), running: true, } runtime.srv = srv return srv, nil } func (srv *Server) HTTPRequestFactory(metaHeaders map[string][]string) *utils.HTTPRequestFactory { srv.Lock() defer srv.Unlock() v := dockerVersion() httpVersion := make([]utils.VersionInfo, 0, 4) httpVersion = append(httpVersion, &simpleVersionInfo{"docker", v.Get("Version")}) httpVersion = append(httpVersion, &simpleVersionInfo{"go", v.Get("GoVersion")}) httpVersion = append(httpVersion, &simpleVersionInfo{"git-commit", v.Get("GitCommit")}) httpVersion = append(httpVersion, &simpleVersionInfo{"kernel", v.Get("KernelVersion")}) httpVersion = append(httpVersion, &simpleVersionInfo{"os", v.Get("Os")}) httpVersion = append(httpVersion, &simpleVersionInfo{"arch", v.Get("Arch")}) ud := utils.NewHTTPUserAgentDecorator(httpVersion...) 
md := &utils.HTTPMetaHeadersDecorator{ Headers: metaHeaders, } factory := utils.NewHTTPRequestFactory(ud, md) return factory } func (srv *Server) LogEvent(action, id, from string) *utils.JSONMessage { now := time.Now().UTC().Unix() jm := utils.JSONMessage{Status: action, ID: id, From: from, Time: now} srv.AddEvent(jm) for _, c := range srv.listeners { select { // non blocking channel case c <- jm: default: } } return &jm } func (srv *Server) AddEvent(jm utils.JSONMessage) { srv.Lock() defer srv.Unlock() srv.events = append(srv.events, jm) } func (srv *Server) GetEvents() []utils.JSONMessage { srv.RLock() defer srv.RUnlock() return srv.events } func (srv *Server) SetRunning(status bool) { srv.Lock() defer srv.Unlock() srv.running = status } func (srv *Server) IsRunning() bool { srv.RLock() defer srv.RUnlock() return srv.running } func (srv *Server) Close() error { if srv == nil { return nil } srv.SetRunning(false) if srv.runtime == nil { return nil } return srv.runtime.Close() } type Server struct { sync.RWMutex runtime *Runtime pullingPool map[string]chan struct{} pushingPool map[string]chan struct{} events []utils.JSONMessage listeners map[string]chan utils.JSONMessage Eng *engine.Engine running bool } docker-0.9.1/graphdriver/0000755000175000017500000000000012314376205013420 5ustar tagtagdocker-0.9.1/graphdriver/btrfs/0000755000175000017500000000000012314376205014540 5ustar tagtagdocker-0.9.1/graphdriver/btrfs/btrfs.go0000644000175000017500000000777512314376205016227 0ustar tagtag// +build linux,amd64 package btrfs /* #include #include #include */ import "C" import ( "fmt" "github.com/dotcloud/docker/graphdriver" "os" "path" "syscall" "unsafe" ) func init() { graphdriver.Register("btrfs", Init) } func Init(home string) (graphdriver.Driver, error) { rootdir := path.Dir(home) var buf syscall.Statfs_t if err := syscall.Statfs(rootdir, &buf); err != nil { return nil, err } if buf.Type != 0x9123683E { return nil, fmt.Errorf("%s is not a btrfs filesystem", rootdir) } return &Driver{ home: home, }, nil } type Driver struct { home string } func (d *Driver) String() string { return "btrfs" } func (d *Driver) Status() [][2]string { return nil } func (d *Driver) Cleanup() error { return nil } func free(p *C.char) { C.free(unsafe.Pointer(p)) } func openDir(path string) (*C.DIR, error) { Cpath := C.CString(path) defer free(Cpath) dir := C.opendir(Cpath) if dir == nil { return nil, fmt.Errorf("Can't open dir") } return dir, nil } func closeDir(dir *C.DIR) { if dir != nil { C.closedir(dir) } } func getDirFd(dir *C.DIR) uintptr { return uintptr(C.dirfd(dir)) } func subvolCreate(path, name string) error { dir, err := openDir(path) if err != nil { return err } defer closeDir(dir) var args C.struct_btrfs_ioctl_vol_args for i, c := range []byte(name) { args.name[i] = C.char(c) } _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) } return nil } func subvolSnapshot(src, dest, name string) error { srcDir, err := openDir(src) if err != nil { return err } defer closeDir(srcDir) destDir, err := openDir(dest) if err != nil { return err } defer closeDir(destDir) var args C.struct_btrfs_ioctl_vol_args_v2 args.fd = C.__s64(getDirFd(srcDir)) for i, c := range []byte(name) { args.name[i] = C.char(c) } _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, uintptr(unsafe.Pointer(&args))) if errno != 0 { return 
fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) } return nil } func subvolDelete(path, name string) error { dir, err := openDir(path) if err != nil { return err } defer closeDir(dir) var args C.struct_btrfs_ioctl_vol_args for i, c := range []byte(name) { args.name[i] = C.char(c) } _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to destroy btrfs snapshot: %v", errno.Error()) } return nil } func (d *Driver) subvolumesDir() string { return path.Join(d.home, "subvolumes") } func (d *Driver) subvolumesDirId(id string) string { return path.Join(d.subvolumesDir(), id) } func (d *Driver) Create(id string, parent string) error { subvolumes := path.Join(d.home, "subvolumes") if err := os.MkdirAll(subvolumes, 0700); err != nil { return err } if parent == "" { if err := subvolCreate(subvolumes, id); err != nil { return err } } else { parentDir, err := d.Get(parent) if err != nil { return err } if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { return err } } return nil } func (d *Driver) Remove(id string) error { dir := d.subvolumesDirId(id) if _, err := os.Stat(dir); err != nil { return err } if err := subvolDelete(d.subvolumesDir(), id); err != nil { return err } return os.RemoveAll(dir) } func (d *Driver) Get(id string) (string, error) { dir := d.subvolumesDirId(id) st, err := os.Stat(dir) if err != nil { return "", err } if !st.IsDir() { return "", fmt.Errorf("%s: not a directory", dir) } return dir, nil } func (d *Driver) Put(id string) { // Get() creates no runtime resources (like e.g. mounts) // so this doesn't need to do anything. } func (d *Driver) Exists(id string) bool { dir := d.subvolumesDirId(id) _, err := os.Stat(dir) return err == nil } docker-0.9.1/graphdriver/btrfs/dummy_unsupported.go0000644000175000017500000000004712314376205020673 0ustar tagtag// +build !linux !amd64 package btrfs docker-0.9.1/graphdriver/driver.go0000644000175000017500000000363612314376205015252 0ustar tagtagpackage graphdriver import ( "fmt" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/utils" "os" "path" ) type InitFunc func(root string) (Driver, error) type Driver interface { String() string Create(id, parent string) error Remove(id string) error Get(id string) (dir string, err error) Put(id string) Exists(id string) bool Status() [][2]string Cleanup() error } type Differ interface { Diff(id string) (archive.Archive, error) Changes(id string) ([]archive.Change, error) ApplyDiff(id string, diff archive.ArchiveReader) error DiffSize(id string) (bytes int64, err error) } var ( DefaultDriver string // All registred drivers drivers map[string]InitFunc // Slice of drivers that should be used in an order priority = []string{ "aufs", "devicemapper", "vfs", // experimental, has to be enabled manually for now "btrfs", } ) func init() { drivers = make(map[string]InitFunc) } func Register(name string, initFunc InitFunc) error { if _, exists := drivers[name]; exists { return fmt.Errorf("Name already registered %s", name) } drivers[name] = initFunc return nil } func GetDriver(name, home string) (Driver, error) { if initFunc, exists := drivers[name]; exists { return initFunc(path.Join(home, name)) } return nil, fmt.Errorf("No such driver: %s", name) } func New(root string) (driver Driver, err error) { for _, name := range []string{os.Getenv("DOCKER_DRIVER"), DefaultDriver} { if name != "" { return GetDriver(name, root) } } // Check for priority drivers first 
for _, name := range priority { if driver, err = GetDriver(name, root); err != nil { utils.Debugf("Error loading driver %s: %s", name, err) continue } return driver, nil } // Check all registered drivers if no priority driver is found for _, initFunc := range drivers { if driver, err = initFunc(root); err != nil { continue } return driver, nil } return nil, err } docker-0.9.1/graphdriver/devmapper/0000755000175000017500000000000012314376205015403 5ustar tagtagdocker-0.9.1/graphdriver/devmapper/driver_test.go0000644000175000017500000005434312314376205020275 0ustar tagtag// +build linux,amd64 package devmapper import ( "fmt" "github.com/dotcloud/docker/graphdriver" "io/ioutil" "path" "runtime" "strings" "syscall" "testing" ) func init() { // Reduce the size the the base fs and loopback for the tests DefaultDataLoopbackSize = 300 * 1024 * 1024 DefaultMetaDataLoopbackSize = 200 * 1024 * 1024 DefaultBaseFsSize = 300 * 1024 * 1024 } // denyAllDevmapper mocks all calls to libdevmapper in the unit tests, and denies them by default func denyAllDevmapper() { // Hijack all calls to libdevmapper with default panics. // Authorized calls are selectively hijacked in each tests. DmTaskCreate = func(t int) *CDmTask { panic("DmTaskCreate: this method should not be called here") } DmTaskRun = func(task *CDmTask) int { panic("DmTaskRun: this method should not be called here") } DmTaskSetName = func(task *CDmTask, name string) int { panic("DmTaskSetName: this method should not be called here") } DmTaskSetMessage = func(task *CDmTask, message string) int { panic("DmTaskSetMessage: this method should not be called here") } DmTaskSetSector = func(task *CDmTask, sector uint64) int { panic("DmTaskSetSector: this method should not be called here") } DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int { panic("DmTaskSetCookie: this method should not be called here") } DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int { panic("DmTaskSetAddNode: this method should not be called here") } DmTaskSetRo = func(task *CDmTask) int { panic("DmTaskSetRo: this method should not be called here") } DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int { panic("DmTaskAddTarget: this method should not be called here") } DmTaskGetInfo = func(task *CDmTask, info *Info) int { panic("DmTaskGetInfo: this method should not be called here") } DmGetNextTarget = func(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr { panic("DmGetNextTarget: this method should not be called here") } DmUdevWait = func(cookie uint) int { panic("DmUdevWait: this method should not be called here") } DmSetDevDir = func(dir string) int { panic("DmSetDevDir: this method should not be called here") } DmGetLibraryVersion = func(version *string) int { panic("DmGetLibraryVersion: this method should not be called here") } DmLogInitVerbose = func(level int) { panic("DmLogInitVerbose: this method should not be called here") } DmTaskDestroy = func(task *CDmTask) { panic("DmTaskDestroy: this method should not be called here") } LogWithErrnoInit = func() { panic("LogWithErrnoInit: this method should not be called here") } } func denyAllSyscall() { sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) { panic("sysMount: this method should not be called here") } sysUnmount = func(target string, flags int) (err error) { panic("sysUnmount: this method should not be called here") } sysCloseOnExec = func(fd int) { panic("sysCloseOnExec: this method 
should not be called here") } sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { panic("sysSyscall: this method should not be called here") } // Not a syscall, but forbidding it here anyway Mounted = func(mnt string) (bool, error) { panic("devmapper.Mounted: this method should not be called here") } // osOpenFile = os.OpenFile // osNewFile = os.NewFile // osCreate = os.Create // osStat = os.Stat // osIsNotExist = os.IsNotExist // osIsExist = os.IsExist // osMkdirAll = os.MkdirAll // osRemoveAll = os.RemoveAll // osRename = os.Rename // osReadlink = os.Readlink // execRun = func(name string, args ...string) error { // return exec.Command(name, args...).Run() // } } func mkTestDirectory(t *testing.T) string { dir, err := ioutil.TempDir("", "docker-test-devmapper-") if err != nil { t.Fatal(err) } return dir } func newDriver(t *testing.T) *Driver { home := mkTestDirectory(t) d, err := Init(home) if err != nil { t.Fatal(err) } return d.(*Driver) } func cleanup(d *Driver) { d.Cleanup() osRemoveAll(d.home) } type Set map[string]bool func (r Set) Assert(t *testing.T, names ...string) { for _, key := range names { required := true if strings.HasPrefix(key, "?") { key = key[1:] required = false } if _, exists := r[key]; !exists && required { t.Fatalf("Key not set: %s", key) } delete(r, key) } if len(r) != 0 { t.Fatalf("Unexpected keys: %v", r) } } func TestInit(t *testing.T) { var ( calls = make(Set) taskMessages = make(Set) taskTypes = make(Set) home = mkTestDirectory(t) ) defer osRemoveAll(home) func() { denyAllDevmapper() DmSetDevDir = func(dir string) int { calls["DmSetDevDir"] = true expectedDir := "/dev" if dir != expectedDir { t.Fatalf("Wrong libdevmapper call\nExpected: DmSetDevDir(%v)\nReceived: DmSetDevDir(%v)\n", expectedDir, dir) } return 0 } LogWithErrnoInit = func() { calls["DmLogWithErrnoInit"] = true } var task1 CDmTask DmTaskCreate = func(taskType int) *CDmTask { calls["DmTaskCreate"] = true taskTypes[fmt.Sprintf("%d", taskType)] = true return &task1 } DmTaskSetName = func(task *CDmTask, name string) int { calls["DmTaskSetName"] = true expectedTask := &task1 if task != expectedTask { t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetName(%v)\nReceived: DmTaskSetName(%v)\n", expectedTask, task) } // FIXME: use Set.AssertRegexp() if !strings.HasPrefix(name, "docker-") && !strings.HasPrefix(name, "/dev/mapper/docker-") || !strings.HasSuffix(name, "-pool") && !strings.HasSuffix(name, "-base") { t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetName(%v)\nReceived: DmTaskSetName(%v)\n", "docker-...-pool", name) } return 1 } DmTaskRun = func(task *CDmTask) int { calls["DmTaskRun"] = true expectedTask := &task1 if task != expectedTask { t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskRun(%v)\nReceived: DmTaskRun(%v)\n", expectedTask, task) } return 1 } DmTaskGetInfo = func(task *CDmTask, info *Info) int { calls["DmTaskGetInfo"] = true expectedTask := &task1 if task != expectedTask { t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskGetInfo(%v)\nReceived: DmTaskGetInfo(%v)\n", expectedTask, task) } // This will crash if info is not dereferenceable info.Exists = 0 return 1 } DmTaskSetSector = func(task *CDmTask, sector uint64) int { calls["DmTaskSetSector"] = true expectedTask := &task1 if task != expectedTask { t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetSector(%v)\nReceived: DmTaskSetSector(%v)\n", expectedTask, task) } if expectedSector := uint64(0); sector != expectedSector { t.Fatalf("Wrong libdevmapper call to 
DmTaskSetSector\nExpected: %v\nReceived: %v\n", expectedSector, sector) } return 1 } DmTaskSetMessage = func(task *CDmTask, message string) int { calls["DmTaskSetMessage"] = true expectedTask := &task1 if task != expectedTask { t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetSector(%v)\nReceived: DmTaskSetSector(%v)\n", expectedTask, task) } taskMessages[message] = true return 1 } DmTaskDestroy = func(task *CDmTask) { calls["DmTaskDestroy"] = true expectedTask := &task1 if task != expectedTask { t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task) } } DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int { calls["DmTaskSetTarget"] = true expectedTask := &task1 if task != expectedTask { t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task) } if start != 0 { t.Fatalf("Wrong start: %d != %d", start, 0) } if ttype != "thin" && ttype != "thin-pool" { t.Fatalf("Wrong ttype: %s", ttype) } // Quick smoke test if params == "" { t.Fatalf("Params should not be empty") } return 1 } fakeCookie := uint(4321) DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int { calls["DmTaskSetCookie"] = true expectedTask := &task1 if task != expectedTask { t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task) } if flags != 0 { t.Fatalf("Cookie flags should be 0 (not %x)", flags) } *cookie = fakeCookie return 1 } DmUdevWait = func(cookie uint) int { calls["DmUdevWait"] = true if cookie != fakeCookie { t.Fatalf("Wrong cookie: %d != %d", cookie, fakeCookie) } return 1 } DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int { if addNode != AddNodeOnCreate { t.Fatalf("Wrong AddNoteType: %v (expected %v)", addNode, AddNodeOnCreate) } calls["DmTaskSetAddNode"] = true return 1 } execRun = func(name string, args ...string) error { calls["execRun"] = true if name != "mkfs.ext4" { t.Fatalf("Expected %s to be executed, not %s", "mkfs.ext4", name) } return nil } driver, err := Init(home) if err != nil { t.Fatal(err) } defer func() { if err := driver.Cleanup(); err != nil { t.Fatal(err) } }() }() // Put all tests in a function to make sure the garbage collection will // occur. 
// Call GC to cleanup runtime.Finalizers runtime.GC() calls.Assert(t, "DmSetDevDir", "DmLogWithErrnoInit", "DmTaskSetName", "DmTaskRun", "DmTaskGetInfo", "DmTaskDestroy", "execRun", "DmTaskCreate", "DmTaskSetTarget", "DmTaskSetCookie", "DmUdevWait", "DmTaskSetSector", "DmTaskSetMessage", "DmTaskSetAddNode", ) taskTypes.Assert(t, "0", "6", "17") taskMessages.Assert(t, "create_thin 0", "set_transaction_id 0 1") } func fakeInit() func(home string) (graphdriver.Driver, error) { oldInit := Init Init = func(home string) (graphdriver.Driver, error) { return &Driver{ home: home, }, nil } return oldInit } func restoreInit(init func(home string) (graphdriver.Driver, error)) { Init = init } func mockAllDevmapper(calls Set) { DmSetDevDir = func(dir string) int { calls["DmSetDevDir"] = true return 0 } LogWithErrnoInit = func() { calls["DmLogWithErrnoInit"] = true } DmTaskCreate = func(taskType int) *CDmTask { calls["DmTaskCreate"] = true return &CDmTask{} } DmTaskSetName = func(task *CDmTask, name string) int { calls["DmTaskSetName"] = true return 1 } DmTaskRun = func(task *CDmTask) int { calls["DmTaskRun"] = true return 1 } DmTaskGetInfo = func(task *CDmTask, info *Info) int { calls["DmTaskGetInfo"] = true return 1 } DmTaskSetSector = func(task *CDmTask, sector uint64) int { calls["DmTaskSetSector"] = true return 1 } DmTaskSetMessage = func(task *CDmTask, message string) int { calls["DmTaskSetMessage"] = true return 1 } DmTaskDestroy = func(task *CDmTask) { calls["DmTaskDestroy"] = true } DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int { calls["DmTaskSetTarget"] = true return 1 } DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int { calls["DmTaskSetCookie"] = true return 1 } DmUdevWait = func(cookie uint) int { calls["DmUdevWait"] = true return 1 } DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int { calls["DmTaskSetAddNode"] = true return 1 } execRun = func(name string, args ...string) error { calls["execRun"] = true return nil } } func TestDriverName(t *testing.T) { denyAllDevmapper() defer denyAllDevmapper() oldInit := fakeInit() defer restoreInit(oldInit) d := newDriver(t) if d.String() != "devicemapper" { t.Fatalf("Expected driver name to be devicemapper got %s", d.String()) } } func TestDriverCreate(t *testing.T) { denyAllDevmapper() denyAllSyscall() defer denyAllSyscall() defer denyAllDevmapper() calls := make(Set) mockAllDevmapper(calls) sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) { calls["sysMount"] = true // FIXME: compare the exact source and target strings (inodes + devname) if expectedSource := "/dev/mapper/docker-"; !strings.HasPrefix(source, expectedSource) { t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedSource, source) } if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) { t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target) } if expectedFstype := "ext4"; fstype != expectedFstype { t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFstype, fstype) } if expectedFlags := uintptr(3236757504); flags != expectedFlags { t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFlags, flags) } return nil } Mounted = func(mnt string) (bool, error) { calls["Mounted"] = true if !strings.HasPrefix(mnt, "/tmp/docker-test-devmapper-") || !strings.HasSuffix(mnt, "/mnt/1") { t.Fatalf("Wrong mounted 
call\nExpected: Mounted(%v)\nReceived: Mounted(%v)\n", "/tmp/docker-test-devmapper-.../mnt/1", mnt) } return false, nil } sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { calls["sysSyscall"] = true if trap != sysSysIoctl { t.Fatalf("Unexpected syscall. Expecting SYS_IOCTL, received: %d", trap) } switch a2 { case LoopSetFd: calls["ioctl.loopsetfd"] = true case LoopCtlGetFree: calls["ioctl.loopctlgetfree"] = true case LoopGetStatus64: calls["ioctl.loopgetstatus"] = true case LoopSetStatus64: calls["ioctl.loopsetstatus"] = true case LoopClrFd: calls["ioctl.loopclrfd"] = true case LoopSetCapacity: calls["ioctl.loopsetcapacity"] = true case BlkGetSize64: calls["ioctl.blkgetsize"] = true default: t.Fatalf("Unexpected IOCTL. Received %d", a2) } return 0, 0, 0 } func() { d := newDriver(t) calls.Assert(t, "DmSetDevDir", "DmLogWithErrnoInit", "DmTaskSetName", "DmTaskRun", "DmTaskGetInfo", "execRun", "DmTaskCreate", "DmTaskSetTarget", "DmTaskSetCookie", "DmUdevWait", "DmTaskSetSector", "DmTaskSetMessage", "DmTaskSetAddNode", "sysSyscall", "ioctl.blkgetsize", "ioctl.loopsetfd", "ioctl.loopsetstatus", "?ioctl.loopctlgetfree", ) if err := d.Create("1", ""); err != nil { t.Fatal(err) } calls.Assert(t, "DmTaskCreate", "DmTaskGetInfo", "sysMount", "DmTaskRun", "DmTaskSetTarget", "DmTaskSetSector", "DmTaskSetCookie", "DmUdevWait", "DmTaskSetName", "DmTaskSetMessage", "DmTaskSetAddNode", ) }() runtime.GC() calls.Assert(t, "DmTaskDestroy", ) } func TestDriverRemove(t *testing.T) { denyAllDevmapper() denyAllSyscall() defer denyAllSyscall() defer denyAllDevmapper() calls := make(Set) mockAllDevmapper(calls) sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) { calls["sysMount"] = true // FIXME: compare the exact source and target strings (inodes + devname) if expectedSource := "/dev/mapper/docker-"; !strings.HasPrefix(source, expectedSource) { t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedSource, source) } if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) { t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target) } if expectedFstype := "ext4"; fstype != expectedFstype { t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFstype, fstype) } if expectedFlags := uintptr(3236757504); flags != expectedFlags { t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFlags, flags) } return nil } sysUnmount = func(target string, flags int) (err error) { calls["sysUnmount"] = true // FIXME: compare the exact source and target strings (inodes + devname) if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) { t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target) } if expectedFlags := 0; flags != expectedFlags { t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFlags, flags) } return nil } Mounted = func(mnt string) (bool, error) { calls["Mounted"] = true return false, nil } sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { calls["sysSyscall"] = true if trap != sysSysIoctl { t.Fatalf("Unexpected syscall. 
Expecting SYS_IOCTL, received: %d", trap) } switch a2 { case LoopSetFd: calls["ioctl.loopsetfd"] = true case LoopCtlGetFree: calls["ioctl.loopctlgetfree"] = true case LoopGetStatus64: calls["ioctl.loopgetstatus"] = true case LoopSetStatus64: calls["ioctl.loopsetstatus"] = true case LoopClrFd: calls["ioctl.loopclrfd"] = true case LoopSetCapacity: calls["ioctl.loopsetcapacity"] = true case BlkGetSize64: calls["ioctl.blkgetsize"] = true default: t.Fatalf("Unexpected IOCTL. Received %d", a2) } return 0, 0, 0 } func() { d := newDriver(t) calls.Assert(t, "DmSetDevDir", "DmLogWithErrnoInit", "DmTaskSetName", "DmTaskRun", "DmTaskGetInfo", "execRun", "DmTaskCreate", "DmTaskSetTarget", "DmTaskSetCookie", "DmUdevWait", "DmTaskSetSector", "DmTaskSetMessage", "DmTaskSetAddNode", "sysSyscall", "ioctl.blkgetsize", "ioctl.loopsetfd", "ioctl.loopsetstatus", "?ioctl.loopctlgetfree", ) if err := d.Create("1", ""); err != nil { t.Fatal(err) } calls.Assert(t, "DmTaskCreate", "DmTaskGetInfo", "sysMount", "DmTaskRun", "DmTaskSetTarget", "DmTaskSetSector", "DmTaskSetCookie", "DmUdevWait", "DmTaskSetName", "DmTaskSetMessage", "DmTaskSetAddNode", ) Mounted = func(mnt string) (bool, error) { calls["Mounted"] = true return true, nil } if err := d.Remove("1"); err != nil { t.Fatal(err) } calls.Assert(t, "DmTaskRun", "DmTaskSetSector", "DmTaskSetName", "DmTaskSetMessage", "DmTaskCreate", "DmTaskGetInfo", "DmTaskSetCookie", "DmTaskSetTarget", "DmTaskSetAddNode", "DmUdevWait", "sysUnmount", ) }() runtime.GC() calls.Assert(t, "DmTaskDestroy", ) } func TestCleanup(t *testing.T) { t.Skip("FIXME: not a unit test") t.Skip("Unimplemented") d := newDriver(t) defer osRemoveAll(d.home) mountPoints := make([]string, 2) if err := d.Create("1", ""); err != nil { t.Fatal(err) } // Mount the id p, err := d.Get("1") if err != nil { t.Fatal(err) } mountPoints[0] = p if err := d.Create("2", "1"); err != nil { t.Fatal(err) } p, err = d.Get("2") if err != nil { t.Fatal(err) } mountPoints[1] = p // Ensure that all the mount points are currently mounted for _, p := range mountPoints { if mounted, err := Mounted(p); err != nil { t.Fatal(err) } else if !mounted { t.Fatalf("Expected %s to be mounted", p) } } // Ensure that devices are active for _, p := range []string{"1", "2"} { if !d.HasActivatedDevice(p) { t.Fatalf("Expected %s to have an active device", p) } } if err := d.Cleanup(); err != nil { t.Fatal(err) } // Ensure that all the mount points are no longer mounted for _, p := range mountPoints { if mounted, err := Mounted(p); err != nil { t.Fatal(err) } else if mounted { t.Fatalf("Expected %s to not be mounted", p) } } // Ensure that devices are no longer activated for _, p := range []string{"1", "2"} { if d.HasActivatedDevice(p) { t.Fatalf("Expected %s not be an active device", p) } } } func TestNotMounted(t *testing.T) { t.Skip("FIXME: not a unit test") t.Skip("Not implemented") d := newDriver(t) defer cleanup(d) if err := d.Create("1", ""); err != nil { t.Fatal(err) } mounted, err := Mounted(path.Join(d.home, "mnt", "1")) if err != nil { t.Fatal(err) } if mounted { t.Fatal("Id 1 should not be mounted") } } func TestMounted(t *testing.T) { t.Skip("FIXME: not a unit test") d := newDriver(t) defer cleanup(d) if err := d.Create("1", ""); err != nil { t.Fatal(err) } if _, err := d.Get("1"); err != nil { t.Fatal(err) } mounted, err := Mounted(path.Join(d.home, "mnt", "1")) if err != nil { t.Fatal(err) } if !mounted { t.Fatal("Id 1 should be mounted") } } func TestInitCleanedDriver(t *testing.T) { t.Skip("FIXME: not a unit test") d := 
newDriver(t) if err := d.Create("1", ""); err != nil { t.Fatal(err) } if _, err := d.Get("1"); err != nil { t.Fatal(err) } if err := d.Cleanup(); err != nil { t.Fatal(err) } driver, err := Init(d.home) if err != nil { t.Fatal(err) } d = driver.(*Driver) defer cleanup(d) if _, err := d.Get("1"); err != nil { t.Fatal(err) } } func TestMountMountedDriver(t *testing.T) { t.Skip("FIXME: not a unit test") d := newDriver(t) defer cleanup(d) if err := d.Create("1", ""); err != nil { t.Fatal(err) } // Perform get on same id to ensure that it will // not be mounted twice if _, err := d.Get("1"); err != nil { t.Fatal(err) } if _, err := d.Get("1"); err != nil { t.Fatal(err) } } func TestGetReturnsValidDevice(t *testing.T) { t.Skip("FIXME: not a unit test") d := newDriver(t) defer cleanup(d) if err := d.Create("1", ""); err != nil { t.Fatal(err) } if !d.HasDevice("1") { t.Fatalf("Expected id 1 to be in device set") } if _, err := d.Get("1"); err != nil { t.Fatal(err) } if !d.HasActivatedDevice("1") { t.Fatalf("Expected id 1 to be activated") } if !d.HasInitializedDevice("1") { t.Fatalf("Expected id 1 to be initialized") } } func TestDriverGetSize(t *testing.T) { t.Skip("FIXME: not a unit test") t.Skipf("Size is currently not implemented") d := newDriver(t) defer cleanup(d) if err := d.Create("1", ""); err != nil { t.Fatal(err) } mountPoint, err := d.Get("1") if err != nil { t.Fatal(err) } size := int64(1024) f, err := osCreate(path.Join(mountPoint, "test_file")) if err != nil { t.Fatal(err) } if err := f.Truncate(size); err != nil { t.Fatal(err) } f.Close() // diffSize, err := d.DiffSize("1") // if err != nil { // t.Fatal(err) // } // if diffSize != size { // t.Fatalf("Expected size %d got %d", size, diffSize) // } } func assertMap(t *testing.T, m map[string]bool, keys ...string) { for _, key := range keys { if _, exists := m[key]; !exists { t.Fatalf("Key not set: %s", key) } delete(m, key) } if len(m) != 0 { t.Fatalf("Unexpected keys: %v", m) } } docker-0.9.1/graphdriver/devmapper/ioctl.go0000644000175000017500000000320312314376205017042 0ustar tagtag// +build linux,amd64 package devmapper import ( "unsafe" ) func ioctlLoopCtlGetFree(fd uintptr) (int, error) { index, _, err := sysSyscall(sysSysIoctl, fd, LoopCtlGetFree, 0) if err != 0 { return 0, err } return int(index), nil } func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetFd, sparseFd); err != 0 { return err } return nil } func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *LoopInfo64) error { if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { return err } return nil } func ioctlLoopClrFd(loopFd uintptr) error { if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopClrFd, 0); err != 0 { return err } return nil } func ioctlLoopGetStatus64(loopFd uintptr) (*LoopInfo64, error) { loopInfo := &LoopInfo64{} if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { return nil, err } return loopInfo, nil } func ioctlLoopSetCapacity(loopFd uintptr, value int) error { if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetCapacity, uintptr(value)); err != 0 { return err } return nil } func ioctlBlkGetSize64(fd uintptr) (int64, error) { var size int64 if _, _, err := sysSyscall(sysSysIoctl, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { return 0, err } return size, nil } func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { var r [2]uint64 r[0] = offset r[1] = 
length if _, _, err := sysSyscall(sysSysIoctl, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { return err } return nil } docker-0.9.1/graphdriver/devmapper/driver.go0000644000175000017500000000655212314376205017235 0ustar tagtag// +build linux,amd64 package devmapper import ( "fmt" "github.com/dotcloud/docker/graphdriver" "github.com/dotcloud/docker/utils" "io/ioutil" "os" "path" ) func init() { graphdriver.Register("devicemapper", Init) } // Placeholder interfaces, to be replaced // at integration. // End of placeholder interfaces. type Driver struct { *DeviceSet home string } var Init = func(home string) (graphdriver.Driver, error) { deviceSet, err := NewDeviceSet(home, true) if err != nil { return nil, err } d := &Driver{ DeviceSet: deviceSet, home: home, } return d, nil } func (d *Driver) String() string { return "devicemapper" } func (d *Driver) Status() [][2]string { s := d.DeviceSet.Status() status := [][2]string{ {"Pool Name", s.PoolName}, {"Data file", s.DataLoopback}, {"Metadata file", s.MetadataLoopback}, {"Data Space Used", fmt.Sprintf("%.1f Mb", float64(s.Data.Used)/(1024*1024))}, {"Data Space Total", fmt.Sprintf("%.1f Mb", float64(s.Data.Total)/(1024*1024))}, {"Metadata Space Used", fmt.Sprintf("%.1f Mb", float64(s.Metadata.Used)/(1024*1024))}, {"Metadata Space Total", fmt.Sprintf("%.1f Mb", float64(s.Metadata.Total)/(1024*1024))}, } return status } func (d *Driver) Cleanup() error { return d.DeviceSet.Shutdown() } func (d *Driver) Create(id, parent string) error { if err := d.DeviceSet.AddDevice(id, parent); err != nil { return err } mp := path.Join(d.home, "mnt", id) if err := d.mount(id, mp); err != nil { return err } if err := osMkdirAll(path.Join(mp, "rootfs"), 0755); err != nil && !osIsExist(err) { return err } // Create an "id" file with the container/image id in it to help reconscruct this in case // of later problems if err := ioutil.WriteFile(path.Join(mp, "id"), []byte(id), 0600); err != nil { return err } // We float this reference so that the next Get call can // steal it, so we don't have to unmount if err := d.DeviceSet.UnmountDevice(id, UnmountFloat); err != nil { return err } return nil } func (d *Driver) Remove(id string) error { if !d.DeviceSet.HasDevice(id) { // Consider removing a non-existing device a no-op // This is useful to be able to progress on container removal // if the underlying device has gone away due to earlier errors return nil } // Sink the float from create in case no Get() call was made if err := d.DeviceSet.UnmountDevice(id, UnmountSink); err != nil { return err } // This assumes the device has been properly Get/Put:ed and thus is unmounted if err := d.DeviceSet.DeleteDevice(id); err != nil { return err } mp := path.Join(d.home, "mnt", id) if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) { return err } return nil } func (d *Driver) Get(id string) (string, error) { mp := path.Join(d.home, "mnt", id) if err := d.mount(id, mp); err != nil { return "", err } return path.Join(mp, "rootfs"), nil } func (d *Driver) Put(id string) { if err := d.DeviceSet.UnmountDevice(id, UnmountRegular); err != nil { utils.Errorf("Warning: error unmounting device %s: %s\n", id, err) } } func (d *Driver) mount(id, mountPoint string) error { // Create the target directories if they don't exist if err := osMkdirAll(mountPoint, 0755); err != nil && !osIsExist(err) { return err } // Mount the device return d.DeviceSet.MountDevice(id, mountPoint) } func (d *Driver) Exists(id string) bool { return d.Devices[id] != nil } 
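// NOTE: illustrative sketch only, not part of the original driver. Create, Get
// and Remove above all rely on the same on-disk layout under the driver home:
// the thin device for <id> is mounted at <home>/mnt/<id>, and Get hands back
// the "rootfs" directory inside that mount point.
func exampleRootfsPath(home, id string) string {
	return path.Join(home, "mnt", id, "rootfs")
}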
docker-0.9.1/graphdriver/devmapper/sys.go0000644000175000017500000000241412314376205016551 0ustar tagtag// +build linux,amd64 package devmapper import ( "os" "os/exec" "syscall" ) type ( sysStatT syscall.Stat_t sysErrno syscall.Errno osFile struct{ *os.File } ) var ( sysMount = syscall.Mount sysUnmount = syscall.Unmount sysCloseOnExec = syscall.CloseOnExec sysSyscall = syscall.Syscall osOpenFile = func(name string, flag int, perm os.FileMode) (*osFile, error) { f, err := os.OpenFile(name, flag, perm) return &osFile{File: f}, err } osOpen = func(name string) (*osFile, error) { f, err := os.Open(name); return &osFile{File: f}, err } osNewFile = os.NewFile osCreate = os.Create osStat = os.Stat osIsNotExist = os.IsNotExist osIsExist = os.IsExist osMkdirAll = os.MkdirAll osRemoveAll = os.RemoveAll osRename = os.Rename osReadlink = os.Readlink execRun = func(name string, args ...string) error { return exec.Command(name, args...).Run() } ) const ( sysMsMgcVal = syscall.MS_MGC_VAL sysMsRdOnly = syscall.MS_RDONLY sysEInval = syscall.EINVAL sysSysIoctl = syscall.SYS_IOCTL sysEBusy = syscall.EBUSY osORdOnly = os.O_RDONLY osORdWr = os.O_RDWR osOCreate = os.O_CREATE osModeDevice = os.ModeDevice ) func toSysStatT(i interface{}) *sysStatT { return (*sysStatT)(i.(*syscall.Stat_t)) } docker-0.9.1/graphdriver/devmapper/devmapper_log.go0000644000175000017500000000074212314376205020561 0ustar tagtag// +build linux,amd64 package devmapper import "C" // Due to the way cgo works this has to be in a separate file, as devmapper.go has // definitions in the cgo block, which is incompatible with using "//export" //export DevmapperLogCallback func DevmapperLogCallback(level C.int, file *C.char, line C.int, dm_errno_or_class C.int, message *C.char) { if dmLogger != nil { dmLogger.log(int(level), C.GoString(file), int(line), int(dm_errno_or_class), C.GoString(message)) } } docker-0.9.1/graphdriver/devmapper/devmapper_doc.go0000644000175000017500000000534512314376205020551 0ustar tagtagpackage devmapper // Definition of struct dm_task and sub structures (from lvm2) // // struct dm_ioctl { // /* // * The version number is made up of three parts: // * major - no backward or forward compatibility, // * minor - only backwards compatible, // * patch - both backwards and forwards compatible. // * // * All clients of the ioctl interface should fill in the // * version number of the interface that they were // * compiled with. // * // * All recognised ioctl commands (ie. those that don't // * return -ENOTTY) fill out this field, even if the // * command failed. // */ // uint32_t version[3]; /* in/out */ // uint32_t data_size; /* total size of data passed in // * including this struct */ // uint32_t data_start; /* offset to start of data // * relative to start of this struct */ // uint32_t target_count; /* in/out */ // int32_t open_count; /* out */ // uint32_t flags; /* in/out */ // /* // * event_nr holds either the event number (input and output) or the // * udev cookie value (input only). // * The DM_DEV_WAIT ioctl takes an event number as input. // * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls // * use the field as a cookie to return in the DM_COOKIE // * variable with the uevents they issue. // * For output, the ioctls return the event number, not the cookie. 
// */ // uint32_t event_nr; /* in/out */ // uint32_t padding; // uint64_t dev; /* in/out */ // char name[DM_NAME_LEN]; /* device name */ // char uuid[DM_UUID_LEN]; /* unique identifier for // * the block device */ // char data[7]; /* padding or data */ // }; // struct target { // uint64_t start; // uint64_t length; // char *type; // char *params; // struct target *next; // }; // typedef enum { // DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ // DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ // } dm_add_node_t; // struct dm_task { // int type; // char *dev_name; // char *mangled_dev_name; // struct target *head, *tail; // int read_only; // uint32_t event_nr; // int major; // int minor; // int allow_default_major_fallback; // uid_t uid; // gid_t gid; // mode_t mode; // uint32_t read_ahead; // uint32_t read_ahead_flags; // union { // struct dm_ioctl *v4; // } dmi; // char *newname; // char *message; // char *geometry; // uint64_t sector; // int no_flush; // int no_open_count; // int skip_lockfs; // int query_inactive_table; // int suppress_identical_reload; // dm_add_node_t add_node; // uint64_t existing_table_size; // int cookie_set; // int new_uuid; // int secure_data; // int retry_remove; // int enable_checks; // int expected_errno; // char *uuid; // char *mangled_uuid; // }; // docker-0.9.1/graphdriver/devmapper/deviceset.go0000644000175000017500000006652712314376205017725 0ustar tagtag// +build linux,amd64 package devmapper import ( "encoding/json" "errors" "fmt" "github.com/dotcloud/docker/utils" "io" "io/ioutil" "path" "path/filepath" "strconv" "strings" "sync" "time" ) var ( DefaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 DefaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 DefaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 ) type DevInfo struct { Hash string `json:"-"` DeviceId int `json:"device_id"` Size uint64 `json:"size"` TransactionId uint64 `json:"transaction_id"` Initialized bool `json:"initialized"` devices *DeviceSet `json:"-"` mountCount int `json:"-"` mountPath string `json:"-"` // A floating mount means one reference is not owned and // will be stolen by the next mount. This allows us to // avoid unmounting directly after creation before the // first get (since we need to mount to set up the device // a bit first). floating bool `json:"-"` // The global DeviceSet lock guarantees that we serialize all // the calls to libdevmapper (which is not threadsafe), but we // sometimes release that lock while sleeping. In that case // this per-device lock is still held, protecting against // other accesses to the device that we're doing the wait on. 
lock sync.Mutex `json:"-"` } type MetaData struct { Devices map[string]*DevInfo `json:devices` } type DeviceSet struct { MetaData sync.Mutex // Protects Devices map and serializes calls into libdevmapper root string devicePrefix string TransactionId uint64 NewTransactionId uint64 nextFreeDevice int sawBusy bool } type DiskUsage struct { Used uint64 Total uint64 } type Status struct { PoolName string DataLoopback string MetadataLoopback string Data DiskUsage Metadata DiskUsage SectorSize uint64 } type DevStatus struct { DeviceId int Size uint64 TransactionId uint64 SizeInSectors uint64 MappedSectors uint64 HighestMappedSector uint64 } type UnmountMode int const ( UnmountRegular UnmountMode = iota UnmountFloat UnmountSink ) func getDevName(name string) string { return "/dev/mapper/" + name } func (info *DevInfo) Name() string { hash := info.Hash if hash == "" { hash = "base" } return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash) } func (info *DevInfo) DevName() string { return getDevName(info.Name()) } func (devices *DeviceSet) loopbackDir() string { return path.Join(devices.root, "devicemapper") } func (devices *DeviceSet) jsonFile() string { return path.Join(devices.loopbackDir(), "json") } func (devices *DeviceSet) getPoolName() string { return devices.devicePrefix + "-pool" } func (devices *DeviceSet) getPoolDevName() string { return getDevName(devices.getPoolName()) } func (devices *DeviceSet) hasImage(name string) bool { dirname := devices.loopbackDir() filename := path.Join(dirname, name) _, err := osStat(filename) return err == nil } // ensureImage creates a sparse file of bytes at the path // /devicemapper/. // If the file already exists, it does nothing. // Either way it returns the full path. func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { dirname := devices.loopbackDir() filename := path.Join(dirname, name) if err := osMkdirAll(dirname, 0700); err != nil && !osIsExist(err) { return "", err } if _, err := osStat(filename); err != nil { if !osIsNotExist(err) { return "", err } utils.Debugf("Creating loopback file %s for device-manage use", filename) file, err := osOpenFile(filename, osORdWr|osOCreate, 0600) if err != nil { return "", err } defer file.Close() if err = file.Truncate(size); err != nil { return "", err } } return filename, nil } func (devices *DeviceSet) allocateDeviceId() int { // TODO: Add smarter reuse of deleted devices id := devices.nextFreeDevice devices.nextFreeDevice = devices.nextFreeDevice + 1 return id } func (devices *DeviceSet) allocateTransactionId() uint64 { devices.NewTransactionId = devices.NewTransactionId + 1 return devices.NewTransactionId } func (devices *DeviceSet) saveMetadata() error { jsonData, err := json.Marshal(devices.MetaData) if err != nil { return fmt.Errorf("Error encoding metadata to json: %s", err) } tmpFile, err := ioutil.TempFile(filepath.Dir(devices.jsonFile()), ".json") if err != nil { return fmt.Errorf("Error creating metadata file: %s", err) } n, err := tmpFile.Write(jsonData) if err != nil { return fmt.Errorf("Error writing metadata to %s: %s", tmpFile.Name(), err) } if n < len(jsonData) { return io.ErrShortWrite } if err := tmpFile.Sync(); err != nil { return fmt.Errorf("Error syncing metadata file %s: %s", tmpFile.Name(), err) } if err := tmpFile.Close(); err != nil { return fmt.Errorf("Error closing metadata file %s: %s", tmpFile.Name(), err) } if err := osRename(tmpFile.Name(), devices.jsonFile()); err != nil { return fmt.Errorf("Error committing metadata file %s: %s", 
tmpFile.Name(), err) } if devices.NewTransactionId != devices.TransactionId { if err = setTransactionId(devices.getPoolDevName(), devices.TransactionId, devices.NewTransactionId); err != nil { return fmt.Errorf("Error setting devmapper transition ID: %s", err) } devices.TransactionId = devices.NewTransactionId } return nil } func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*DevInfo, error) { utils.Debugf("registerDevice(%v, %v)", id, hash) info := &DevInfo{ Hash: hash, DeviceId: id, Size: size, TransactionId: devices.allocateTransactionId(), Initialized: false, devices: devices, } devices.Devices[hash] = info if err := devices.saveMetadata(); err != nil { // Try to remove unused device delete(devices.Devices, hash) return nil, err } return info, nil } func (devices *DeviceSet) activateDeviceIfNeeded(hash string) error { utils.Debugf("activateDeviceIfNeeded(%v)", hash) info := devices.Devices[hash] if info == nil { return fmt.Errorf("Unknown device %s", hash) } if devinfo, _ := getInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { return nil } return activateDevice(devices.getPoolDevName(), info.Name(), info.DeviceId, info.Size) } func (devices *DeviceSet) createFilesystem(info *DevInfo) error { devname := info.DevName() err := execRun("mkfs.ext4", "-E", "discard,lazy_itable_init=0,lazy_journal_init=0", devname) if err != nil { err = execRun("mkfs.ext4", "-E", "discard,lazy_itable_init=0", devname) } if err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } return nil } func (devices *DeviceSet) loadMetaData() error { utils.Debugf("loadMetadata()") defer utils.Debugf("loadMetadata END") _, _, _, params, err := getStatus(devices.getPoolName()) if err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } if _, err := fmt.Sscanf(params, "%d", &devices.TransactionId); err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } devices.NewTransactionId = devices.TransactionId jsonData, err := ioutil.ReadFile(devices.jsonFile()) if err != nil && !osIsNotExist(err) { utils.Debugf("\n--->Err: %s\n", err) return err } devices.MetaData.Devices = make(map[string]*DevInfo) if jsonData != nil { if err := json.Unmarshal(jsonData, &devices.MetaData); err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } } for hash, d := range devices.Devices { d.Hash = hash d.devices = devices if d.DeviceId >= devices.nextFreeDevice { devices.nextFreeDevice = d.DeviceId + 1 } // If the transaction id is larger than the actual one we lost the device due to some crash if d.TransactionId > devices.TransactionId { utils.Debugf("Removing lost device %s with id %d", hash, d.TransactionId) delete(devices.Devices, hash) } } return nil } func (devices *DeviceSet) setupBaseImage() error { oldInfo := devices.Devices[""] if oldInfo != nil && oldInfo.Initialized { return nil } if oldInfo != nil && !oldInfo.Initialized { utils.Debugf("Removing uninitialized base image") if err := devices.deleteDevice(""); err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } } utils.Debugf("Initializing base device-manager snapshot") id := devices.allocateDeviceId() // Create initial device if err := createDevice(devices.getPoolDevName(), id); err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } utils.Debugf("Registering base device (id %v) with FS size %v", id, DefaultBaseFsSize) info, err := devices.registerDevice(id, "", DefaultBaseFsSize) if err != nil { _ = deleteDevice(devices.getPoolDevName(), id) utils.Debugf("\n--->Err: %s\n", err) return err } 
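// At this point the base thin device exists and is recorded in the metadata
// file. What remains is to activate it, create an ext4 filesystem on it, and
// persist Initialized=true so later daemon starts can reuse it as-is.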
utils.Debugf("Creating filesystem on base device-manager snapshot") if err = devices.activateDeviceIfNeeded(""); err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } if err := devices.createFilesystem(info); err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } info.Initialized = true if err = devices.saveMetadata(); err != nil { info.Initialized = false utils.Debugf("\n--->Err: %s\n", err) return err } return nil } func setCloseOnExec(name string) { if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil { for _, i := range fileInfos { link, _ := osReadlink(filepath.Join("/proc/self/fd", i.Name())) if link == name { fd, err := strconv.Atoi(i.Name()) if err == nil { sysCloseOnExec(fd) } } } } } func (devices *DeviceSet) log(level int, file string, line int, dmError int, message string) { if level >= 7 { return // Ignore _LOG_DEBUG } if strings.Contains(message, "busy") { devices.sawBusy = true } utils.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) } func major(device uint64) uint64 { return (device >> 8) & 0xfff } func minor(device uint64) uint64 { return (device & 0xff) | ((device >> 12) & 0xfff00) } func (devices *DeviceSet) ResizePool(size int64) error { dirname := devices.loopbackDir() datafilename := path.Join(dirname, "data") metadatafilename := path.Join(dirname, "metadata") datafile, err := osOpenFile(datafilename, osORdWr, 0) if datafile == nil { return err } defer datafile.Close() fi, err := datafile.Stat() if fi == nil { return err } if fi.Size() > size { return fmt.Errorf("Can't shrink file") } dataloopback := FindLoopDeviceFor(datafile) if dataloopback == nil { return fmt.Errorf("Unable to find loopback mount for: %s", datafilename) } defer dataloopback.Close() metadatafile, err := osOpenFile(metadatafilename, osORdWr, 0) if metadatafile == nil { return err } defer metadatafile.Close() metadataloopback := FindLoopDeviceFor(metadatafile) if metadataloopback == nil { return fmt.Errorf("Unable to find loopback mount for: %s", metadatafilename) } defer metadataloopback.Close() // Grow loopback file if err := datafile.Truncate(size); err != nil { return fmt.Errorf("Unable to grow loopback file: %s", err) } // Reload size for loopback device if err := LoopbackSetCapacity(dataloopback); err != nil { return fmt.Errorf("Unable to update loopback capacity: %s", err) } // Suspend the pool if err := suspendDevice(devices.getPoolName()); err != nil { return fmt.Errorf("Unable to suspend pool: %s", err) } // Reload with the new block sizes if err := reloadPool(devices.getPoolName(), dataloopback, metadataloopback); err != nil { return fmt.Errorf("Unable to reload pool: %s", err) } // Resume the pool if err := resumeDevice(devices.getPoolName()); err != nil { return fmt.Errorf("Unable to resume pool: %s", err) } return nil } func (devices *DeviceSet) initDevmapper(doInit bool) error { logInit(devices) // Make sure the sparse images exist in /devicemapper/data and // /devicemapper/metadata hasData := devices.hasImage("data") hasMetadata := devices.hasImage("metadata") if !doInit && !hasData { return errors.New("Loopback data file not found") } if !doInit && !hasMetadata { return errors.New("Loopback metadata file not found") } createdLoopback := !hasData || !hasMetadata data, err := devices.ensureImage("data", DefaultDataLoopbackSize) if err != nil { utils.Debugf("Error device ensureImage (data): %s\n", err) return err } metadata, err := devices.ensureImage("metadata", DefaultMetaDataLoopbackSize) if err != nil { 
utils.Debugf("Error device ensureImage (metadata): %s\n", err) return err } // Set the device prefix from the device id and inode of the docker root dir st, err := osStat(devices.root) if err != nil { return fmt.Errorf("Error looking up dir %s: %s", devices.root, err) } sysSt := toSysStatT(st.Sys()) // "reg-" stands for "regular file". // In the future we might use "dev-" for "device file", etc. // docker-maj,min[-inode] stands for: // - Managed by docker // - The target of this device is at major and minor // - If is defined, use that file inside the device as a loopback image. Otherwise use the device itself. devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino) utils.Debugf("Generated prefix: %s", devices.devicePrefix) // Check for the existence of the device -pool utils.Debugf("Checking for existence of the pool '%s'", devices.getPoolName()) info, err := getInfo(devices.getPoolName()) if info == nil { utils.Debugf("Error device getInfo: %s", err) return err } // It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files // that are not Close-on-exec, and lxc-start will die if it inherits any unexpected files, // so we add this badhack to make sure it closes itself setCloseOnExec("/dev/mapper/control") // If the pool doesn't exist, create it if info.Exists == 0 { utils.Debugf("Pool doesn't exist. Creating it.") dataFile, err := attachLoopDevice(data) if err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } defer dataFile.Close() metadataFile, err := attachLoopDevice(metadata) if err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } defer metadataFile.Close() if err := createPool(devices.getPoolName(), dataFile, metadataFile); err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } } // If we didn't just create the data or metadata image, we need to // load the metadata from the existing file. 
if !createdLoopback { if err = devices.loadMetaData(); err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } } // Setup the base image if doInit { if err := devices.setupBaseImage(); err != nil { utils.Debugf("Error device setupBaseImage: %s\n", err) return err } } return nil } func (devices *DeviceSet) AddDevice(hash, baseHash string) error { devices.Lock() defer devices.Unlock() if devices.Devices[hash] != nil { return fmt.Errorf("hash %s already exists", hash) } baseInfo := devices.Devices[baseHash] if baseInfo == nil { return fmt.Errorf("Error adding device for '%s': can't find device for parent '%s'", hash, baseHash) } baseInfo.lock.Lock() defer baseInfo.lock.Unlock() deviceId := devices.allocateDeviceId() if err := devices.createSnapDevice(devices.getPoolDevName(), deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { utils.Debugf("Error creating snap device: %s\n", err) return err } if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size); err != nil { deleteDevice(devices.getPoolDevName(), deviceId) utils.Debugf("Error registering device: %s\n", err) return err } return nil } func (devices *DeviceSet) deleteDevice(hash string) error { info := devices.Devices[hash] if info == nil { return fmt.Errorf("hash %s doesn't exists", hash) } // This is a workaround for the kernel not discarding block so // on the thin pool when we remove a thinp device, so we do it // manually if err := devices.activateDeviceIfNeeded(hash); err == nil { if err := BlockDeviceDiscard(info.DevName()); err != nil { utils.Debugf("Error discarding block on device: %s (ignoring)\n", err) } } devinfo, _ := getInfo(info.Name()) if devinfo != nil && devinfo.Exists != 0 { if err := devices.removeDeviceAndWait(info.Name()); err != nil { utils.Debugf("Error removing device: %s\n", err) return err } } if info.Initialized { info.Initialized = false if err := devices.saveMetadata(); err != nil { utils.Debugf("Error saving meta data: %s\n", err) return err } } if err := deleteDevice(devices.getPoolDevName(), info.DeviceId); err != nil { utils.Debugf("Error deleting device: %s\n", err) return err } devices.allocateTransactionId() delete(devices.Devices, info.Hash) if err := devices.saveMetadata(); err != nil { devices.Devices[info.Hash] = info utils.Debugf("Error saving meta data: %s\n", err) return err } return nil } func (devices *DeviceSet) DeleteDevice(hash string) error { devices.Lock() defer devices.Unlock() info := devices.Devices[hash] if info == nil { return fmt.Errorf("Unknown device %s", hash) } info.lock.Lock() defer info.lock.Unlock() return devices.deleteDevice(hash) } func (devices *DeviceSet) deactivatePool() error { utils.Debugf("[devmapper] deactivatePool()") defer utils.Debugf("[devmapper] deactivatePool END") devname := devices.getPoolDevName() devinfo, err := getInfo(devname) if err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } if devinfo.Exists != 0 { return removeDevice(devname) } return nil } func (devices *DeviceSet) deactivateDevice(hash string) error { utils.Debugf("[devmapper] deactivateDevice(%s)", hash) defer utils.Debugf("[devmapper] deactivateDevice END") info := devices.Devices[hash] if info == nil { return fmt.Errorf("Unknown device %s", hash) } devinfo, err := getInfo(info.Name()) if err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } if devinfo.Exists != 0 { if err := devices.removeDeviceAndWait(info.Name()); err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } } return nil } // Issues the underlying dm remove operation and then 
waits // for it to finish. func (devices *DeviceSet) removeDeviceAndWait(devname string) error { var err error for i := 0; i < 1000; i++ { devices.sawBusy = false err = removeDevice(devname) if err == nil { break } if !devices.sawBusy { return err } // If we see EBUSY it may be a transient error, // sleep a bit a retry a few times. devices.Unlock() time.Sleep(10 * time.Millisecond) devices.Lock() } if err != nil { return err } if err := devices.waitRemove(devname); err != nil { return err } return nil } // waitRemove blocks until either: // a) the device registered at - is removed, // or b) the 10 second timeout expires. func (devices *DeviceSet) waitRemove(devname string) error { utils.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname) defer utils.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname) i := 0 for ; i < 1000; i += 1 { devinfo, err := getInfo(devname) if err != nil { // If there is an error we assume the device doesn't exist. // The error might actually be something else, but we can't differentiate. return nil } if i%100 == 0 { utils.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists) } if devinfo.Exists == 0 { break } devices.Unlock() time.Sleep(10 * time.Millisecond) devices.Lock() } if i == 1000 { return fmt.Errorf("Timeout while waiting for device %s to be removed", devname) } return nil } // waitClose blocks until either: // a) the device registered at - is closed, // or b) the 10 second timeout expires. func (devices *DeviceSet) waitClose(hash string) error { info := devices.Devices[hash] if info == nil { return fmt.Errorf("Unknown device %s", hash) } i := 0 for ; i < 1000; i += 1 { devinfo, err := getInfo(info.Name()) if err != nil { return err } if i%100 == 0 { utils.Debugf("Waiting for unmount of %s: opencount=%d", hash, devinfo.OpenCount) } if devinfo.OpenCount == 0 { break } devices.Unlock() time.Sleep(10 * time.Millisecond) devices.Lock() } if i == 1000 { return fmt.Errorf("Timeout while waiting for device %s to close", hash) } return nil } func (devices *DeviceSet) Shutdown() error { devices.Lock() defer devices.Unlock() utils.Debugf("[deviceset %s] shutdown()", devices.devicePrefix) utils.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root) defer utils.Debugf("[deviceset %s] shutdown END", devices.devicePrefix) for _, info := range devices.Devices { info.lock.Lock() if info.mountCount > 0 { if err := sysUnmount(info.mountPath, 0); err != nil { utils.Debugf("Shutdown unmounting %s, error: %s\n", info.mountPath, err) } } info.lock.Unlock() } for _, d := range devices.Devices { d.lock.Lock() if err := devices.waitClose(d.Hash); err != nil { utils.Errorf("Warning: error waiting for device %s to unmount: %s\n", d.Hash, err) } if err := devices.deactivateDevice(d.Hash); err != nil { utils.Debugf("Shutdown deactivate %s , error: %s\n", d.Hash, err) } d.lock.Unlock() } if err := devices.deactivatePool(); err != nil { utils.Debugf("Shutdown deactivate pool , error: %s\n", err) } return nil } func (devices *DeviceSet) MountDevice(hash, path string) error { devices.Lock() defer devices.Unlock() info := devices.Devices[hash] if info == nil { return fmt.Errorf("Unknown device %s", hash) } info.lock.Lock() defer info.lock.Unlock() if info.mountCount > 0 { if path != info.mountPath { return fmt.Errorf("Trying to mount devmapper device in multple places (%s, %s)", info.mountPath, path) } if info.floating { // Steal floating ref info.floating = false } else { info.mountCount++ } return nil } if err := 
devices.activateDeviceIfNeeded(hash); err != nil { return fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err) } var flags uintptr = sysMsMgcVal err := sysMount(info.DevName(), path, "ext4", flags, "discard") if err != nil && err == sysEInval { err = sysMount(info.DevName(), path, "ext4", flags, "") } if err != nil { return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), path, err) } info.mountCount = 1 info.mountPath = path info.floating = false return devices.setInitialized(hash) } func (devices *DeviceSet) UnmountDevice(hash string, mode UnmountMode) error { utils.Debugf("[devmapper] UnmountDevice(hash=%s, mode=%d)", hash, mode) defer utils.Debugf("[devmapper] UnmountDevice END") devices.Lock() defer devices.Unlock() info := devices.Devices[hash] if info == nil { return fmt.Errorf("UnmountDevice: no such device %s\n", hash) } info.lock.Lock() defer info.lock.Unlock() if mode == UnmountFloat { if info.floating { return fmt.Errorf("UnmountDevice: can't float floating reference %s\n", hash) } // Leave this reference floating info.floating = true return nil } if mode == UnmountSink { if !info.floating { // Someone already sunk this return nil } // Otherwise, treat this as a regular unmount } if info.mountCount == 0 { return fmt.Errorf("UnmountDevice: device not-mounted id %s\n", hash) } info.mountCount-- if info.mountCount > 0 { return nil } utils.Debugf("[devmapper] Unmount(%s)", info.mountPath) if err := sysUnmount(info.mountPath, 0); err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } utils.Debugf("[devmapper] Unmount done") // Wait for the unmount to be effective, // by watching the value of Info.OpenCount for the device if err := devices.waitClose(hash); err != nil { return err } devices.deactivateDevice(hash) info.mountPath = "" return nil } func (devices *DeviceSet) HasDevice(hash string) bool { devices.Lock() defer devices.Unlock() return devices.Devices[hash] != nil } func (devices *DeviceSet) HasInitializedDevice(hash string) bool { devices.Lock() defer devices.Unlock() info := devices.Devices[hash] return info != nil && info.Initialized } func (devices *DeviceSet) HasActivatedDevice(hash string) bool { devices.Lock() defer devices.Unlock() info := devices.Devices[hash] if info == nil { return false } info.lock.Lock() defer info.lock.Unlock() devinfo, _ := getInfo(info.Name()) return devinfo != nil && devinfo.Exists != 0 } func (devices *DeviceSet) setInitialized(hash string) error { info := devices.Devices[hash] if info == nil { return fmt.Errorf("Unknown device %s", hash) } info.Initialized = true if err := devices.saveMetadata(); err != nil { info.Initialized = false utils.Debugf("\n--->Err: %s\n", err) return err } return nil } func (devices *DeviceSet) List() []string { devices.Lock() defer devices.Unlock() ids := make([]string, len(devices.Devices)) i := 0 for k := range devices.Devices { ids[i] = k i++ } return ids } func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { var params string _, sizeInSectors, _, params, err = getStatus(devName) if err != nil { return } if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { return } return } func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { devices.Lock() defer devices.Unlock() info := devices.Devices[hash] if info == nil { return nil, fmt.Errorf("No device %s", hash) } info.lock.Lock() defer info.lock.Unlock() status := &DevStatus{ DeviceId: 
info.DeviceId, Size: info.Size, TransactionId: info.TransactionId, } if err := devices.activateDeviceIfNeeded(hash); err != nil { return nil, fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err) } if sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()); err != nil { return nil, err } else { status.SizeInSectors = sizeInSectors status.MappedSectors = mappedSectors status.HighestMappedSector = highestMappedSector } return status, nil } func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionId, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { var params string if _, totalSizeInSectors, _, params, err = getStatus(devices.getPoolName()); err == nil { _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionId, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) } return } func (devices *DeviceSet) Status() *Status { devices.Lock() defer devices.Unlock() status := &Status{} status.PoolName = devices.getPoolName() status.DataLoopback = path.Join(devices.loopbackDir(), "data") status.MetadataLoopback = path.Join(devices.loopbackDir(), "metadata") totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() if err == nil { // Convert from blocks to bytes blockSizeInSectors := totalSizeInSectors / dataTotal status.Data.Used = dataUsed * blockSizeInSectors * 512 status.Data.Total = dataTotal * blockSizeInSectors * 512 // metadata blocks are always 4k status.Metadata.Used = metadataUsed * 4096 status.Metadata.Total = metadataTotal * 4096 status.SectorSize = blockSizeInSectors * 512 } return status } func NewDeviceSet(root string, doInit bool) (*DeviceSet, error) { SetDevDir("/dev") devices := &DeviceSet{ root: root, MetaData: MetaData{Devices: make(map[string]*DevInfo)}, } if err := devices.initDevmapper(doInit); err != nil { return nil, err } return devices, nil } docker-0.9.1/graphdriver/devmapper/devmapper_wrapper.go0000644000175000017500000001374712314376205021471 0ustar tagtag// +build linux,amd64 package devmapper /* #cgo LDFLAGS: -L. -ldevmapper #include #include // FIXME: present only for defines, maybe we can remove it? #include // FIXME: present only for BLKGETSIZE64, maybe we can remove it? #ifndef LOOP_CTL_GET_FREE #define LOOP_CTL_GET_FREE 0x4C82 #endif #ifndef LO_FLAGS_PARTSCAN #define LO_FLAGS_PARTSCAN 8 #endif // FIXME: Can't we find a way to do the logging in pure Go? extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str); static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) 
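/* log_cb formats the varargs message into a fixed 256-byte buffer and hands
   it to the exported Go function DevmapperLogCallback (devmapper_log.go),
   which forwards it to the DeviceSet logger registered via logInit. */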
{ char buffer[256]; va_list ap; va_start(ap, f); vsnprintf(buffer, 256, f, ap); va_end(ap); DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); } static void log_with_errno_init() { dm_log_with_errno_init(log_cb); } */ import "C" import ( "unsafe" ) type ( CDmTask C.struct_dm_task CLoopInfo64 C.struct_loop_info64 LoopInfo64 struct { loDevice uint64 /* ioctl r/o */ loInode uint64 /* ioctl r/o */ loRdevice uint64 /* ioctl r/o */ loOffset uint64 loSizelimit uint64 /* bytes, 0 == max available */ loNumber uint32 /* ioctl r/o */ loEncrypt_type uint32 loEncrypt_key_size uint32 /* ioctl w/o */ loFlags uint32 /* ioctl r/o */ loFileName [LoNameSize]uint8 loCryptName [LoNameSize]uint8 loEncryptKey [LoKeySize]uint8 /* ioctl w/o */ loInit [2]uint64 } ) // IOCTL consts const ( BlkGetSize64 = C.BLKGETSIZE64 BlkDiscard = C.BLKDISCARD LoopSetFd = C.LOOP_SET_FD LoopCtlGetFree = C.LOOP_CTL_GET_FREE LoopGetStatus64 = C.LOOP_GET_STATUS64 LoopSetStatus64 = C.LOOP_SET_STATUS64 LoopClrFd = C.LOOP_CLR_FD LoopSetCapacity = C.LOOP_SET_CAPACITY ) const ( LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY LoFlagsPartScan = C.LO_FLAGS_PARTSCAN LoKeySize = C.LO_KEY_SIZE LoNameSize = C.LO_NAME_SIZE ) var ( DmGetLibraryVersion = dmGetLibraryVersionFct DmGetNextTarget = dmGetNextTargetFct DmLogInitVerbose = dmLogInitVerboseFct DmSetDevDir = dmSetDevDirFct DmTaskAddTarget = dmTaskAddTargetFct DmTaskCreate = dmTaskCreateFct DmTaskDestroy = dmTaskDestroyFct DmTaskGetInfo = dmTaskGetInfoFct DmTaskRun = dmTaskRunFct DmTaskSetAddNode = dmTaskSetAddNodeFct DmTaskSetCookie = dmTaskSetCookieFct DmTaskSetMessage = dmTaskSetMessageFct DmTaskSetName = dmTaskSetNameFct DmTaskSetRo = dmTaskSetRoFct DmTaskSetSector = dmTaskSetSectorFct DmUdevWait = dmUdevWaitFct LogWithErrnoInit = logWithErrnoInitFct ) func free(p *C.char) { C.free(unsafe.Pointer(p)) } func dmTaskDestroyFct(task *CDmTask) { C.dm_task_destroy((*C.struct_dm_task)(task)) } func dmTaskCreateFct(taskType int) *CDmTask { return (*CDmTask)(C.dm_task_create(C.int(taskType))) } func dmTaskRunFct(task *CDmTask) int { ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) return int(ret) } func dmTaskSetNameFct(task *CDmTask, name string) int { Cname := C.CString(name) defer free(Cname) return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) } func dmTaskSetMessageFct(task *CDmTask, message string) int { Cmessage := C.CString(message) defer free(Cmessage) return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) } func dmTaskSetSectorFct(task *CDmTask, sector uint64) int { return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) } func dmTaskSetCookieFct(task *CDmTask, cookie *uint, flags uint16) int { cCookie := C.uint32_t(*cookie) defer func() { *cookie = uint(cCookie) }() return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) } func dmTaskSetAddNodeFct(task *CDmTask, addNode AddNodeType) int { return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) } func dmTaskSetRoFct(task *CDmTask) int { return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) } func dmTaskAddTargetFct(task *CDmTask, start, size uint64, ttype, params string) int { Cttype := C.CString(ttype) defer free(Cttype) Cparams := C.CString(params) defer free(Cparams) return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) } func dmTaskGetInfoFct(task *CDmTask, info *Info) int { Cinfo := C.struct_dm_info{} 
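// dm_task_get_info fills Cinfo; the deferred copy below translates each C
// field into the caller's Go Info struct once the call returns.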
defer func() { info.Exists = int(Cinfo.exists) info.Suspended = int(Cinfo.suspended) info.LiveTable = int(Cinfo.live_table) info.InactiveTable = int(Cinfo.inactive_table) info.OpenCount = int32(Cinfo.open_count) info.EventNr = uint32(Cinfo.event_nr) info.Major = uint32(Cinfo.major) info.Minor = uint32(Cinfo.minor) info.ReadOnly = int(Cinfo.read_only) info.TargetCount = int32(Cinfo.target_count) }() return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) } func dmGetNextTargetFct(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr { var ( Cstart, Clength C.uint64_t CtargetType, Cparams *C.char ) defer func() { *start = uint64(Cstart) *length = uint64(Clength) *target = C.GoString(CtargetType) *params = C.GoString(Cparams) }() nextp := C.dm_get_next_target((*C.struct_dm_task)(task), unsafe.Pointer(next), &Cstart, &Clength, &CtargetType, &Cparams) return uintptr(nextp) } func dmUdevWaitFct(cookie uint) int { return int(C.dm_udev_wait(C.uint32_t(cookie))) } func dmLogInitVerboseFct(level int) { C.dm_log_init_verbose(C.int(level)) } func logWithErrnoInitFct() { C.log_with_errno_init() } func dmSetDevDirFct(dir string) int { Cdir := C.CString(dir) defer free(Cdir) return int(C.dm_set_dev_dir(Cdir)) } func dmGetLibraryVersionFct(version *string) int { buffer := C.CString(string(make([]byte, 128))) defer free(buffer) defer func() { *version = C.GoString(buffer) }() return int(C.dm_get_library_version(buffer, 128)) } docker-0.9.1/graphdriver/devmapper/attach_loopback.go0000644000175000017500000000631512314376205021055 0ustar tagtag// +build linux,amd64 package devmapper import ( "fmt" "github.com/dotcloud/docker/utils" ) func stringToLoopName(src string) [LoNameSize]uint8 { var dst [LoNameSize]uint8 copy(dst[:], src[:]) return dst } func getNextFreeLoopbackIndex() (int, error) { f, err := osOpenFile("/dev/loop-control", osORdOnly, 0644) if err != nil { return 0, err } defer f.Close() index, err := ioctlLoopCtlGetFree(f.Fd()) if index < 0 { index = 0 } return index, err } func openNextAvailableLoopback(index int, sparseFile *osFile) (loopFile *osFile, err error) { // Start looking for a free /dev/loop for { target := fmt.Sprintf("/dev/loop%d", index) index++ fi, err := osStat(target) if err != nil { if osIsNotExist(err) { utils.Errorf("There are no more loopback device available.") } return nil, ErrAttachLoopbackDevice } if fi.Mode()&osModeDevice != osModeDevice { utils.Errorf("Loopback device %s is not a block device.", target) continue } // OpenFile adds O_CLOEXEC loopFile, err = osOpenFile(target, osORdWr, 0644) if err != nil { utils.Errorf("Error openning loopback device: %s", err) return nil, ErrAttachLoopbackDevice } // Try to attach to the loop file if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { loopFile.Close() // If the error is EBUSY, then try the next loopback if err != sysEBusy { utils.Errorf("Cannot set up loopback device %s: %s", target, err) return nil, ErrAttachLoopbackDevice } // Otherwise, we keep going with the loop continue } // In case of success, we finished. Break the loop. break } // This can't happen, but let's be sure if loopFile == nil { utils.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) return nil, ErrAttachLoopbackDevice } return loopFile, nil } // attachLoopDevice attaches the given sparse file to the next // available loopback device. It returns an opened *osFile. 
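// The flow is: query /dev/loop-control for a free index (LOOP_CTL_GET_FREE),
// open /dev/loop<n> and bind it to the sparse file with LOOP_SET_FD (retrying
// the next index on EBUSY), then set LO_FLAGS_AUTOCLEAR through
// LOOP_SET_STATUS64 so the kernel detaches the device once the last open
// file descriptor to it is closed.
//
// Illustrative sketch only (not part of the original source): how a caller
// might attach a sparse image and rely on the autoclear flag for cleanup.
// The helper name exampleAttachDetach is hypothetical.
func exampleAttachDetach(sparsePath string) error {
	loopFile, err := attachLoopDevice(sparsePath)
	if err != nil {
		return err
	}
	// Closing the last descriptor lets LO_FLAGS_AUTOCLEAR detach /dev/loopN.
	defer loopFile.Close()
	// ... use loopFile.Name() as an ordinary block device here ...
	return nil
}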
func attachLoopDevice(sparseName string) (loop *osFile, err error) { // Try to retrieve the next available loopback device via syscall. // If it fails, we discard error and start loopking for a // loopback from index 0. startIndex, err := getNextFreeLoopbackIndex() if err != nil { utils.Debugf("Error retrieving the next available loopback: %s", err) } // OpenFile adds O_CLOEXEC sparseFile, err := osOpenFile(sparseName, osORdWr, 0644) if err != nil { utils.Errorf("Error openning sparse file %s: %s", sparseName, err) return nil, ErrAttachLoopbackDevice } defer sparseFile.Close() loopFile, err := openNextAvailableLoopback(startIndex, sparseFile) if err != nil { return nil, err } // Set the status of the loopback device loopInfo := &LoopInfo64{ loFileName: stringToLoopName(loopFile.Name()), loOffset: 0, loFlags: LoFlagsAutoClear, } if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil { utils.Errorf("Cannot set up loopback device info: %s", err) // If the call failed, then free the loopback device if err := ioctlLoopClrFd(loopFile.Fd()); err != nil { utils.Errorf("Error while cleaning up the loopback device") } loopFile.Close() return nil, ErrAttachLoopbackDevice } return loopFile, nil } docker-0.9.1/graphdriver/devmapper/mount.go0000644000175000017500000000106412314376205017075 0ustar tagtag// +build linux,amd64 package devmapper import ( "path/filepath" ) // FIXME: this is copy-pasted from the aufs driver. // It should be moved into the core. var Mounted = func(mountpoint string) (bool, error) { mntpoint, err := osStat(mountpoint) if err != nil { if osIsNotExist(err) { return false, nil } return false, err } parent, err := osStat(filepath.Join(mountpoint, "..")) if err != nil { return false, err } mntpointSt := toSysStatT(mntpoint.Sys()) parentSt := toSysStatT(parent.Sys()) return mntpointSt.Dev != parentSt.Dev, nil } docker-0.9.1/graphdriver/devmapper/devmapper.go0000644000175000017500000003314512314376205017723 0ustar tagtag// +build linux,amd64 package devmapper import ( "errors" "fmt" "github.com/dotcloud/docker/utils" "runtime" "syscall" ) type DevmapperLogger interface { log(level int, file string, line int, dmError int, message string) } const ( DeviceCreate TaskType = iota DeviceReload DeviceRemove DeviceRemoveAll DeviceSuspend DeviceResume DeviceInfo DeviceDeps DeviceRename DeviceVersion DeviceStatus DeviceTable DeviceWaitevent DeviceList DeviceClear DeviceMknodes DeviceListVersions DeviceTargetMsg DeviceSetGeometry ) const ( AddNodeOnResume AddNodeType = iota AddNodeOnCreate ) var ( ErrTaskRun = errors.New("dm_task_run failed") ErrTaskSetName = errors.New("dm_task_set_name failed") ErrTaskSetMessage = errors.New("dm_task_set_message failed") ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed") ErrTaskSetRo = errors.New("dm_task_set_ro failed") ErrTaskAddTarget = errors.New("dm_task_add_target failed") ErrTaskSetSector = errors.New("dm_task_set_sector failed") ErrTaskGetInfo = errors.New("dm_task_get_info failed") ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") ErrNilCookie = errors.New("cookie ptr can't be nil") ErrAttachLoopbackDevice = errors.New("loopback mounting failed") ErrGetBlockSize = errors.New("Can't get block size") ErrUdevWait = errors.New("wait on udev cookie failed") ErrSetDevDir = errors.New("dm_set_dev_dir failed") ErrGetLibraryVersion = errors.New("dm_get_library_version failed") ErrCreateRemoveTask = errors.New("Can't create task of type DeviceRemove") ErrRunRemoveDevice = errors.New("running removeDevice failed") 
ErrInvalidAddNode = errors.New("Invalide AddNoce type") ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") ErrLoopbackSetCapacity = errors.New("Unable set loopback capacity") ) type ( Task struct { unmanaged *CDmTask } Info struct { Exists int Suspended int LiveTable int InactiveTable int OpenCount int32 EventNr uint32 Major uint32 Minor uint32 ReadOnly int TargetCount int32 } TaskType int AddNodeType int ) func (t *Task) destroy() { if t != nil { DmTaskDestroy(t.unmanaged) runtime.SetFinalizer(t, nil) } } func TaskCreate(tasktype TaskType) *Task { Ctask := DmTaskCreate(int(tasktype)) if Ctask == nil { return nil } task := &Task{unmanaged: Ctask} runtime.SetFinalizer(task, (*Task).destroy) return task } func (t *Task) Run() error { if res := DmTaskRun(t.unmanaged); res != 1 { return ErrTaskRun } return nil } func (t *Task) SetName(name string) error { if res := DmTaskSetName(t.unmanaged, name); res != 1 { return ErrTaskSetName } return nil } func (t *Task) SetMessage(message string) error { if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { return ErrTaskSetMessage } return nil } func (t *Task) SetSector(sector uint64) error { if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { return ErrTaskSetSector } return nil } func (t *Task) SetCookie(cookie *uint, flags uint16) error { if cookie == nil { return ErrNilCookie } if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { return ErrTaskSetCookie } return nil } func (t *Task) SetAddNode(addNode AddNodeType) error { if addNode != AddNodeOnResume && addNode != AddNodeOnCreate { return ErrInvalidAddNode } if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { return ErrTaskSetAddNode } return nil } func (t *Task) SetRo() error { if res := DmTaskSetRo(t.unmanaged); res != 1 { return ErrTaskSetRo } return nil } func (t *Task) AddTarget(start, size uint64, ttype, params string) error { if res := DmTaskAddTarget(t.unmanaged, start, size, ttype, params); res != 1 { return ErrTaskAddTarget } return nil } func (t *Task) GetInfo() (*Info, error) { info := &Info{} if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { return nil, ErrTaskGetInfo } return info, nil } func (t *Task) GetNextTarget(next uintptr) (nextPtr uintptr, start uint64, length uint64, targetType string, params string) { return DmGetNextTarget(t.unmanaged, next, &start, &length, &targetType, ¶ms), start, length, targetType, params } func getLoopbackBackingFile(file *osFile) (uint64, uint64, error) { loopInfo, err := ioctlLoopGetStatus64(file.Fd()) if err != nil { utils.Errorf("Error get loopback backing file: %s\n", err) return 0, 0, ErrGetLoopbackBackingFile } return loopInfo.loDevice, loopInfo.loInode, nil } func LoopbackSetCapacity(file *osFile) error { if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil { utils.Errorf("Error loopbackSetCapacity: %s", err) return ErrLoopbackSetCapacity } return nil } func FindLoopDeviceFor(file *osFile) *osFile { stat, err := file.Stat() if err != nil { return nil } targetInode := stat.Sys().(*sysStatT).Ino targetDevice := stat.Sys().(*sysStatT).Dev for i := 0; true; i++ { path := fmt.Sprintf("/dev/loop%d", i) file, err := osOpenFile(path, osORdWr, 0) if err != nil { if osIsNotExist(err) { return nil } // Ignore all errors until the first not-exist // we want to continue looking for the file continue } dev, inode, err := getLoopbackBackingFile(file) if err == nil && dev == targetDevice && inode == targetInode { return file } file.Close() } return nil } func UdevWait(cookie uint) error 
{ if res := DmUdevWait(cookie); res != 1 { utils.Debugf("Failed to wait on udev cookie %d", cookie) return ErrUdevWait } return nil } func LogInitVerbose(level int) { DmLogInitVerbose(level) } var dmLogger DevmapperLogger = nil func logInit(logger DevmapperLogger) { dmLogger = logger LogWithErrnoInit() } func SetDevDir(dir string) error { if res := DmSetDevDir(dir); res != 1 { utils.Debugf("Error dm_set_dev_dir") return ErrSetDevDir } return nil } func GetLibraryVersion() (string, error) { var version string if res := DmGetLibraryVersion(&version); res != 1 { return "", ErrGetLibraryVersion } return version, nil } // Useful helper for cleanup func RemoveDevice(name string) error { task := TaskCreate(DeviceRemove) if task == nil { return ErrCreateRemoveTask } if err := task.SetName(name); err != nil { utils.Debugf("Can't set task name %s", name) return err } if err := task.Run(); err != nil { return ErrRunRemoveDevice } return nil } func GetBlockDeviceSize(file *osFile) (uint64, error) { size, err := ioctlBlkGetSize64(file.Fd()) if err != nil { utils.Errorf("Error getblockdevicesize: %s", err) return 0, ErrGetBlockSize } return uint64(size), nil } func BlockDeviceDiscard(path string) error { file, err := osOpenFile(path, osORdWr, 0) if err != nil { return err } defer file.Close() size, err := GetBlockDeviceSize(file) if err != nil { return err } if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil { return err } // Without this sometimes the remove of the device that happens after // discard fails with EBUSY. syscall.Sync() return nil } // This is the programmatic example of "dmsetup create" func createPool(poolName string, dataFile, metadataFile *osFile) error { task, err := createTask(DeviceCreate, poolName) if task == nil { return err } size, err := GetBlockDeviceSize(dataFile) if err != nil { return fmt.Errorf("Can't get data size") } params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768 1 skip_block_zeroing" if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { return fmt.Errorf("Can't add target") } var cookie uint = 0 if err := task.SetCookie(&cookie, 0); err != nil { return fmt.Errorf("Can't set cookie") } if err := task.Run(); err != nil { return fmt.Errorf("Error running DeviceCreate (createPool)") } UdevWait(cookie) return nil } func reloadPool(poolName string, dataFile, metadataFile *osFile) error { task, err := createTask(DeviceReload, poolName) if task == nil { return err } size, err := GetBlockDeviceSize(dataFile) if err != nil { return fmt.Errorf("Can't get data size") } params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768" if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { return fmt.Errorf("Can't add target") } if err := task.Run(); err != nil { return fmt.Errorf("Error running DeviceCreate") } return nil } func createTask(t TaskType, name string) (*Task, error) { task := TaskCreate(t) if task == nil { return nil, fmt.Errorf("Can't create task of type %d", int(t)) } if err := task.SetName(name); err != nil { return nil, fmt.Errorf("Can't set task name %s", name) } return task, nil } func getInfo(name string) (*Info, error) { task, err := createTask(DeviceInfo, name) if task == nil { return nil, err } if err := task.Run(); err != nil { return nil, err } return task.GetInfo() } func getStatus(name string) (uint64, uint64, string, string, error) { task, err := createTask(DeviceStatus, name) if task == nil { utils.Debugf("getStatus: Error createTask: %s", err) return 0, 0, "", "", err } if err := 
task.Run(); err != nil { utils.Debugf("getStatus: Error Run: %s", err) return 0, 0, "", "", err } devinfo, err := task.GetInfo() if err != nil { utils.Debugf("getStatus: Error GetInfo: %s", err) return 0, 0, "", "", err } if devinfo.Exists == 0 { utils.Debugf("getStatus: Non existing device %s", name) return 0, 0, "", "", fmt.Errorf("Non existing device %s", name) } _, start, length, targetType, params := task.GetNextTarget(0) return start, length, targetType, params, nil } func setTransactionId(poolName string, oldId uint64, newId uint64) error { task, err := createTask(DeviceTargetMsg, poolName) if task == nil { return err } if err := task.SetSector(0); err != nil { return fmt.Errorf("Can't set sector") } if err := task.SetMessage(fmt.Sprintf("set_transaction_id %d %d", oldId, newId)); err != nil { return fmt.Errorf("Can't set message") } if err := task.Run(); err != nil { return fmt.Errorf("Error running setTransactionId") } return nil } func suspendDevice(name string) error { task, err := createTask(DeviceSuspend, name) if task == nil { return err } if err := task.Run(); err != nil { return fmt.Errorf("Error running DeviceSuspend: %s", err) } return nil } func resumeDevice(name string) error { task, err := createTask(DeviceResume, name) if task == nil { return err } var cookie uint = 0 if err := task.SetCookie(&cookie, 0); err != nil { return fmt.Errorf("Can't set cookie") } if err := task.Run(); err != nil { return fmt.Errorf("Error running DeviceResume") } UdevWait(cookie) return nil } func createDevice(poolName string, deviceId int) error { utils.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, deviceId) task, err := createTask(DeviceTargetMsg, poolName) if task == nil { return err } if err := task.SetSector(0); err != nil { return fmt.Errorf("Can't set sector") } if err := task.SetMessage(fmt.Sprintf("create_thin %d", deviceId)); err != nil { return fmt.Errorf("Can't set message") } if err := task.Run(); err != nil { return fmt.Errorf("Error running createDevice") } return nil } func deleteDevice(poolName string, deviceId int) error { task, err := createTask(DeviceTargetMsg, poolName) if task == nil { return err } if err := task.SetSector(0); err != nil { return fmt.Errorf("Can't set sector") } if err := task.SetMessage(fmt.Sprintf("delete %d", deviceId)); err != nil { return fmt.Errorf("Can't set message") } if err := task.Run(); err != nil { return fmt.Errorf("Error running deleteDevice") } return nil } func removeDevice(name string) error { utils.Debugf("[devmapper] removeDevice START") defer utils.Debugf("[devmapper] removeDevice END") task, err := createTask(DeviceRemove, name) if task == nil { return err } if err = task.Run(); err != nil { return fmt.Errorf("Error running removeDevice") } return nil } func activateDevice(poolName string, name string, deviceId int, size uint64) error { task, err := createTask(DeviceCreate, name) if task == nil { return err } params := fmt.Sprintf("%s %d", poolName, deviceId) if err := task.AddTarget(0, size/512, "thin", params); err != nil { return fmt.Errorf("Can't add target") } if err := task.SetAddNode(AddNodeOnCreate); err != nil { return fmt.Errorf("Can't add node") } var cookie uint = 0 if err := task.SetCookie(&cookie, 0); err != nil { return fmt.Errorf("Can't set cookie") } if err := task.Run(); err != nil { return fmt.Errorf("Error running DeviceCreate (activateDevice)") } UdevWait(cookie) return nil } func (devices *DeviceSet) createSnapDevice(poolName string, deviceId int, baseName string, baseDeviceId int) 
error { devinfo, _ := getInfo(baseName) doSuspend := devinfo != nil && devinfo.Exists != 0 if doSuspend { if err := suspendDevice(baseName); err != nil { return err } } task, err := createTask(DeviceTargetMsg, poolName) if task == nil { if doSuspend { resumeDevice(baseName) } return err } if err := task.SetSector(0); err != nil { if doSuspend { resumeDevice(baseName) } return fmt.Errorf("Can't set sector") } if err := task.SetMessage(fmt.Sprintf("create_snap %d %d", deviceId, baseDeviceId)); err != nil { if doSuspend { resumeDevice(baseName) } return fmt.Errorf("Can't set message") } if err := task.Run(); err != nil { if doSuspend { resumeDevice(baseName) } return fmt.Errorf("Error running DeviceCreate (createSnapDevice)") } if doSuspend { if err := resumeDevice(baseName); err != nil { return err } } return nil } docker-0.9.1/graphdriver/devmapper/devmapper_test.go0000644000175000017500000001452112314376205020757 0ustar tagtag// +build linux,amd64 package devmapper import ( "testing" ) func TestTaskCreate(t *testing.T) { t.Skip("FIXME: not a unit test") // Test success taskCreate(t, DeviceInfo) // Test Failure DmTaskCreate = dmTaskCreateFail defer func() { DmTaskCreate = dmTaskCreateFct }() if task := TaskCreate(-1); task != nil { t.Fatalf("An error should have occured while creating an invalid task.") } } func TestTaskRun(t *testing.T) { t.Skip("FIXME: not a unit test") task := taskCreate(t, DeviceInfo) // Test success // Perform the RUN if err := task.Run(); err != nil { t.Fatal(err) } // Make sure we don't have error with GetInfo if _, err := task.GetInfo(); err != nil { t.Fatal(err) } // Test failure DmTaskRun = dmTaskRunFail defer func() { DmTaskRun = dmTaskRunFct }() task = taskCreate(t, DeviceInfo) // Perform the RUN if err := task.Run(); err != ErrTaskRun { t.Fatalf("An error should have occured while running task.") } // Make sure GetInfo also fails if _, err := task.GetInfo(); err != ErrTaskGetInfo { t.Fatalf("GetInfo should fail if task.Run() failed.") } } func TestTaskSetName(t *testing.T) { t.Skip("FIXME: not a unit test") task := taskCreate(t, DeviceInfo) // Test success if err := task.SetName("test"); err != nil { t.Fatal(err) } // Test failure DmTaskSetName = dmTaskSetNameFail defer func() { DmTaskSetName = dmTaskSetNameFct }() if err := task.SetName("test"); err != ErrTaskSetName { t.Fatalf("An error should have occured while runnign SetName.") } } func TestTaskSetMessage(t *testing.T) { t.Skip("FIXME: not a unit test") task := taskCreate(t, DeviceInfo) // Test success if err := task.SetMessage("test"); err != nil { t.Fatal(err) } // Test failure DmTaskSetMessage = dmTaskSetMessageFail defer func() { DmTaskSetMessage = dmTaskSetMessageFct }() if err := task.SetMessage("test"); err != ErrTaskSetMessage { t.Fatalf("An error should have occured while runnign SetMessage.") } } func TestTaskSetSector(t *testing.T) { t.Skip("FIXME: not a unit test") task := taskCreate(t, DeviceInfo) // Test success if err := task.SetSector(128); err != nil { t.Fatal(err) } DmTaskSetSector = dmTaskSetSectorFail defer func() { DmTaskSetSector = dmTaskSetSectorFct }() // Test failure if err := task.SetSector(0); err != ErrTaskSetSector { t.Fatalf("An error should have occured while running SetSector.") } } func TestTaskSetCookie(t *testing.T) { t.Skip("FIXME: not a unit test") var ( cookie uint = 0 task = taskCreate(t, DeviceInfo) ) // Test success if err := task.SetCookie(&cookie, 0); err != nil { t.Fatal(err) } // Test failure if err := task.SetCookie(nil, 0); err != ErrNilCookie { t.Fatalf("An 
error should have occured while running SetCookie with nil cookie.") } DmTaskSetCookie = dmTaskSetCookieFail defer func() { DmTaskSetCookie = dmTaskSetCookieFct }() if err := task.SetCookie(&cookie, 0); err != ErrTaskSetCookie { t.Fatalf("An error should have occured while running SetCookie.") } } func TestTaskSetAddNode(t *testing.T) { t.Skip("FIXME: not a unit test") task := taskCreate(t, DeviceInfo) // Test success if err := task.SetAddNode(0); err != nil { t.Fatal(err) } // Test failure if err := task.SetAddNode(-1); err != ErrInvalidAddNode { t.Fatalf("An error should have occured running SetAddNode with wrong node.") } DmTaskSetAddNode = dmTaskSetAddNodeFail defer func() { DmTaskSetAddNode = dmTaskSetAddNodeFct }() if err := task.SetAddNode(0); err != ErrTaskSetAddNode { t.Fatalf("An error should have occured running SetAddNode.") } } func TestTaskSetRo(t *testing.T) { t.Skip("FIXME: not a unit test") task := taskCreate(t, DeviceInfo) // Test success if err := task.SetRo(); err != nil { t.Fatal(err) } // Test failure DmTaskSetRo = dmTaskSetRoFail defer func() { DmTaskSetRo = dmTaskSetRoFct }() if err := task.SetRo(); err != ErrTaskSetRo { t.Fatalf("An error should have occured running SetRo.") } } func TestTaskAddTarget(t *testing.T) { t.Skip("FIXME: not a unit test") task := taskCreate(t, DeviceInfo) // Test success if err := task.AddTarget(0, 128, "thinp", ""); err != nil { t.Fatal(err) } // Test failure DmTaskAddTarget = dmTaskAddTargetFail defer func() { DmTaskAddTarget = dmTaskAddTargetFct }() if err := task.AddTarget(0, 128, "thinp", ""); err != ErrTaskAddTarget { t.Fatalf("An error should have occured running AddTarget.") } } // func TestTaskGetInfo(t *testing.T) { // task := taskCreate(t, DeviceInfo) // // Test success // if _, err := task.GetInfo(); err != nil { // t.Fatal(err) // } // // Test failure // DmTaskGetInfo = dmTaskGetInfoFail // defer func() { DmTaskGetInfo = dmTaskGetInfoFct }() // if _, err := task.GetInfo(); err != ErrTaskGetInfo { // t.Fatalf("An error should have occured running GetInfo.") // } // } // func TestTaskGetNextTarget(t *testing.T) { // task := taskCreate(t, DeviceInfo) // if next, _, _, _, _ := task.GetNextTarget(0); next == 0 { // t.Fatalf("The next target should not be 0.") // } // } /// Utils func taskCreate(t *testing.T, taskType TaskType) *Task { task := TaskCreate(taskType) if task == nil { t.Fatalf("Error creating task") } return task } /// Failure function replacement func dmTaskCreateFail(t int) *CDmTask { return nil } func dmTaskRunFail(task *CDmTask) int { return -1 } func dmTaskSetNameFail(task *CDmTask, name string) int { return -1 } func dmTaskSetMessageFail(task *CDmTask, message string) int { return -1 } func dmTaskSetSectorFail(task *CDmTask, sector uint64) int { return -1 } func dmTaskSetCookieFail(task *CDmTask, cookie *uint, flags uint16) int { return -1 } func dmTaskSetAddNodeFail(task *CDmTask, addNode AddNodeType) int { return -1 } func dmTaskSetRoFail(task *CDmTask) int { return -1 } func dmTaskAddTargetFail(task *CDmTask, start, size uint64, ttype, params string) int { return -1 } func dmTaskGetInfoFail(task *CDmTask, info *Info) int { return -1 } func dmGetNextTargetFail(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr { return 0 } func dmAttachLoopDeviceFail(filename string, fd *int) string { return "" } func sysGetBlockSizeFail(fd uintptr, size *uint64) sysErrno { return 1 } func dmUdevWaitFail(cookie uint) int { return -1 } func dmSetDevDirFail(dir string) int { return -1 } func 
dmGetLibraryVersionFail(version *string) int { return -1 } docker-0.9.1/graphdriver/aufs/0000755000175000017500000000000012314376205014356 5ustar tagtagdocker-0.9.1/graphdriver/aufs/mount_unsupported.go0000644000175000017500000000035212314376205020517 0ustar tagtag// +build !linux !amd64 package aufs import "errors" const MsRemount = 0 func mount(source string, target string, fstype string, flags uintptr, data string) (err error) { return errors.New("mount is not implemented on darwin") } docker-0.9.1/graphdriver/aufs/aufs_test.go0000644000175000017500000003026712314376205016712 0ustar tagtagpackage aufs import ( "crypto/sha256" "encoding/hex" "fmt" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/graphdriver" "io/ioutil" "os" "path" "testing" ) var ( tmp = path.Join(os.TempDir(), "aufs-tests", "aufs") ) func testInit(dir string, t *testing.T) graphdriver.Driver { d, err := Init(dir) if err != nil { if err == ErrAufsNotSupported { t.Skip(err) } else { t.Fatal(err) } } return d } func newDriver(t *testing.T) *Driver { if err := os.MkdirAll(tmp, 0755); err != nil { t.Fatal(err) } d := testInit(tmp, t) return d.(*Driver) } func TestNewDriver(t *testing.T) { if err := os.MkdirAll(tmp, 0755); err != nil { t.Fatal(err) } d := testInit(tmp, t) defer os.RemoveAll(tmp) if d == nil { t.Fatalf("Driver should not be nil") } } func TestAufsString(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if d.String() != "aufs" { t.Fatalf("Expected aufs got %s", d.String()) } } func TestCreateDirStructure(t *testing.T) { newDriver(t) defer os.RemoveAll(tmp) paths := []string{ "mnt", "layers", "diff", } for _, p := range paths { if _, err := os.Stat(path.Join(tmp, p)); err != nil { t.Fatal(err) } } } // We should be able to create two drivers with the same dir structure func TestNewDriverFromExistingDir(t *testing.T) { if err := os.MkdirAll(tmp, 0755); err != nil { t.Fatal(err) } testInit(tmp, t) testInit(tmp, t) os.RemoveAll(tmp) } func TestCreateNewDir(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", ""); err != nil { t.Fatal(err) } } func TestCreateNewDirStructure(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", ""); err != nil { t.Fatal(err) } paths := []string{ "mnt", "diff", "layers", } for _, p := range paths { if _, err := os.Stat(path.Join(tmp, p, "1")); err != nil { t.Fatal(err) } } } func TestRemoveImage(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", ""); err != nil { t.Fatal(err) } if err := d.Remove("1"); err != nil { t.Fatal(err) } paths := []string{ "mnt", "diff", "layers", } for _, p := range paths { if _, err := os.Stat(path.Join(tmp, p, "1")); err == nil { t.Fatalf("Error should not be nil because dirs with id 1 should be delted: %s", p) } } } func TestGetWithoutParent(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", ""); err != nil { t.Fatal(err) } diffPath, err := d.Get("1") if err != nil { t.Fatal(err) } expected := path.Join(tmp, "diff", "1") if diffPath != expected { t.Fatalf("Expected path %s got %s", expected, diffPath) } } func TestCleanupWithNoDirs(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Cleanup(); err != nil { t.Fatal(err) } } func TestCleanupWithDir(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", ""); err != nil { t.Fatal(err) } if err := d.Cleanup(); err != nil { t.Fatal(err) } } func TestMountedFalseResponse(t *testing.T) { d := newDriver(t) defer 
os.RemoveAll(tmp) if err := d.Create("1", ""); err != nil { t.Fatal(err) } response, err := d.mounted("1") if err != nil { t.Fatal(err) } if response != false { t.Fatalf("Response if dir id 1 is mounted should be false") } } func TestMountedTrueReponse(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) defer d.Cleanup() if err := d.Create("1", ""); err != nil { t.Fatal(err) } if err := d.Create("2", "1"); err != nil { t.Fatal(err) } _, err := d.Get("2") if err != nil { t.Fatal(err) } response, err := d.mounted("2") if err != nil { t.Fatal(err) } if response != true { t.Fatalf("Response if dir id 2 is mounted should be true") } } func TestMountWithParent(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", ""); err != nil { t.Fatal(err) } if err := d.Create("2", "1"); err != nil { t.Fatal(err) } defer func() { if err := d.Cleanup(); err != nil { t.Fatal(err) } }() mntPath, err := d.Get("2") if err != nil { t.Fatal(err) } if mntPath == "" { t.Fatal("mntPath should not be empty string") } expected := path.Join(tmp, "mnt", "2") if mntPath != expected { t.Fatalf("Expected %s got %s", expected, mntPath) } } func TestRemoveMountedDir(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", ""); err != nil { t.Fatal(err) } if err := d.Create("2", "1"); err != nil { t.Fatal(err) } defer func() { if err := d.Cleanup(); err != nil { t.Fatal(err) } }() mntPath, err := d.Get("2") if err != nil { t.Fatal(err) } if mntPath == "" { t.Fatal("mntPath should not be empty string") } mounted, err := d.mounted("2") if err != nil { t.Fatal(err) } if !mounted { t.Fatalf("Dir id 2 should be mounted") } if err := d.Remove("2"); err != nil { t.Fatal(err) } } func TestCreateWithInvalidParent(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", "docker"); err == nil { t.Fatalf("Error should not be nil with parent does not exist") } } func TestGetDiff(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", ""); err != nil { t.Fatal(err) } diffPath, err := d.Get("1") if err != nil { t.Fatal(err) } // Add a file to the diff path with a fixed size size := int64(1024) f, err := os.Create(path.Join(diffPath, "test_file")) if err != nil { t.Fatal(err) } if err := f.Truncate(size); err != nil { t.Fatal(err) } f.Close() a, err := d.Diff("1") if err != nil { t.Fatal(err) } if a == nil { t.Fatalf("Archive should not be nil") } } func TestChanges(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", ""); err != nil { t.Fatal(err) } if err := d.Create("2", "1"); err != nil { t.Fatal(err) } defer func() { if err := d.Cleanup(); err != nil { t.Fatal(err) } }() mntPoint, err := d.Get("2") if err != nil { t.Fatal(err) } // Create a file to save in the mountpoint f, err := os.Create(path.Join(mntPoint, "test.txt")) if err != nil { t.Fatal(err) } if _, err := f.WriteString("testline"); err != nil { t.Fatal(err) } if err := f.Close(); err != nil { t.Fatal(err) } changes, err := d.Changes("2") if err != nil { t.Fatal(err) } if len(changes) != 1 { t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) } change := changes[0] expectedPath := "/test.txt" if change.Path != expectedPath { t.Fatalf("Expected path %s got %s", expectedPath, change.Path) } if change.Kind != archive.ChangeAdd { t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) } if err := d.Create("3", "2"); err != nil { t.Fatal(err) } mntPoint, err = d.Get("3") if err != nil { t.Fatal(err) } // Create a file 
to save in the mountpoint f, err = os.Create(path.Join(mntPoint, "test2.txt")) if err != nil { t.Fatal(err) } if _, err := f.WriteString("testline"); err != nil { t.Fatal(err) } if err := f.Close(); err != nil { t.Fatal(err) } changes, err = d.Changes("3") if err != nil { t.Fatal(err) } if len(changes) != 1 { t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) } change = changes[0] expectedPath = "/test2.txt" if change.Path != expectedPath { t.Fatalf("Expected path %s got %s", expectedPath, change.Path) } if change.Kind != archive.ChangeAdd { t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) } } func TestDiffSize(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", ""); err != nil { t.Fatal(err) } diffPath, err := d.Get("1") if err != nil { t.Fatal(err) } // Add a file to the diff path with a fixed size size := int64(1024) f, err := os.Create(path.Join(diffPath, "test_file")) if err != nil { t.Fatal(err) } if err := f.Truncate(size); err != nil { t.Fatal(err) } s, err := f.Stat() if err != nil { t.Fatal(err) } size = s.Size() if err := f.Close(); err != nil { t.Fatal(err) } diffSize, err := d.DiffSize("1") if err != nil { t.Fatal(err) } if diffSize != size { t.Fatalf("Expected size to be %d got %d", size, diffSize) } } func TestChildDiffSize(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) defer d.Cleanup() if err := d.Create("1", ""); err != nil { t.Fatal(err) } diffPath, err := d.Get("1") if err != nil { t.Fatal(err) } // Add a file to the diff path with a fixed size size := int64(1024) f, err := os.Create(path.Join(diffPath, "test_file")) if err != nil { t.Fatal(err) } if err := f.Truncate(size); err != nil { t.Fatal(err) } s, err := f.Stat() if err != nil { t.Fatal(err) } size = s.Size() if err := f.Close(); err != nil { t.Fatal(err) } diffSize, err := d.DiffSize("1") if err != nil { t.Fatal(err) } if diffSize != size { t.Fatalf("Expected size to be %d got %d", size, diffSize) } if err := d.Create("2", "1"); err != nil { t.Fatal(err) } diffSize, err = d.DiffSize("2") if err != nil { t.Fatal(err) } // The diff size for the child should be zero if diffSize != 0 { t.Fatalf("Expected size to be %d got %d", 0, diffSize) } } func TestExists(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) defer d.Cleanup() if err := d.Create("1", ""); err != nil { t.Fatal(err) } if d.Exists("none") { t.Fatal("id name should not exist in the driver") } if !d.Exists("1") { t.Fatal("id 1 should exist in the driver") } } func TestStatus(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) defer d.Cleanup() if err := d.Create("1", ""); err != nil { t.Fatal(err) } status := d.Status() if status == nil || len(status) == 0 { t.Fatal("Status should not be nil or empty") } rootDir := status[0] dirs := status[1] if rootDir[0] != "Root Dir" { t.Fatalf("Expected Root Dir got %s", rootDir[0]) } if rootDir[1] != d.rootPath() { t.Fatalf("Expected %s got %s", d.rootPath(), rootDir[1]) } if dirs[0] != "Dirs" { t.Fatalf("Expected Dirs got %s", dirs[0]) } if dirs[1] != "1" { t.Fatalf("Expected 1 got %s", dirs[1]) } } func TestApplyDiff(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) defer d.Cleanup() if err := d.Create("1", ""); err != nil { t.Fatal(err) } diffPath, err := d.Get("1") if err != nil { t.Fatal(err) } // Add a file to the diff path with a fixed size size := int64(1024) f, err := os.Create(path.Join(diffPath, "test_file")) if err != nil { t.Fatal(err) } if err := f.Truncate(size); err != nil { t.Fatal(err) } f.Close() 
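// Export layer "1" as an uncompressed tar stream, then replay it onto the
// freshly created branch "3" below; this checks that Diff followed by
// ApplyDiff round-trips the file created above.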
diff, err := d.Diff("1") if err != nil { t.Fatal(err) } if err := d.Create("2", ""); err != nil { t.Fatal(err) } if err := d.Create("3", "2"); err != nil { t.Fatal(err) } if err := d.ApplyDiff("3", diff); err != nil { t.Fatal(err) } // Ensure that the file is in the mount point for id 3 mountPoint, err := d.Get("3") if err != nil { t.Fatal(err) } if _, err := os.Stat(path.Join(mountPoint, "test_file")); err != nil { t.Fatal(err) } } func hash(c string) string { h := sha256.New() fmt.Fprint(h, c) return hex.EncodeToString(h.Sum(nil)) } func TestMountMoreThan42Layers(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) defer d.Cleanup() var last string var expected int for i := 1; i < 127; i++ { expected++ var ( parent = fmt.Sprintf("%d", i-1) current = fmt.Sprintf("%d", i) ) if parent == "0" { parent = "" } else { parent = hash(parent) } current = hash(current) if err := d.Create(current, parent); err != nil { t.Logf("Current layer %d", i) t.Fatal(err) } point, err := d.Get(current) if err != nil { t.Logf("Current layer %d", i) t.Fatal(err) } f, err := os.Create(path.Join(point, current)) if err != nil { t.Logf("Current layer %d", i) t.Fatal(err) } f.Close() if i%10 == 0 { if err := os.Remove(path.Join(point, parent)); err != nil { t.Logf("Current layer %d", i) t.Fatal(err) } expected-- } last = current } // Perform the actual mount for the top most image point, err := d.Get(last) if err != nil { t.Fatal(err) } files, err := ioutil.ReadDir(point) if err != nil { t.Fatal(err) } if len(files) != expected { t.Fatalf("Expected %d got %d", expected, len(files)) } } docker-0.9.1/graphdriver/aufs/aufs.go0000644000175000017500000002141612314376205015647 0ustar tagtag/* aufs driver directory structure . ├── layers // Metadata of layers │   ├── 1 │   ├── 2 │   └── 3 ├── diffs // Content of the layer │   ├── 1 // Contains layers that need to be mounted for the id │   ├── 2 │   └── 3 └── mnt // Mount points for the rw layers to be mounted ├── 1 ├── 2 └── 3 */ package aufs import ( "bufio" "fmt" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/graphdriver" mountpk "github.com/dotcloud/docker/pkg/mount" "github.com/dotcloud/docker/utils" "os" "os/exec" "path" "strings" "sync" ) var ( ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") ) func init() { graphdriver.Register("aufs", Init) } type Driver struct { root string sync.Mutex // Protects concurrent modification to active active map[string]int } // New returns a new AUFS driver. // An error is returned if AUFS is not supported. 
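// The driver keeps its state under root in three subdirectories: "layers"
// holds the parent-chain metadata, "diff" the per-layer contents, and "mnt"
// the mount points for assembled filesystems, matching the layout sketched
// at the top of this file.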
func Init(root string) (graphdriver.Driver, error) { // Try to load the aufs kernel module if err := supportsAufs(); err != nil { return nil, err } paths := []string{ "mnt", "diff", "layers", } a := &Driver{ root: root, active: make(map[string]int), } // Create the root aufs driver dir and return // if it already exists // If not populate the dir structure if err := os.MkdirAll(root, 0755); err != nil { if os.IsExist(err) { return a, nil } return nil, err } for _, p := range paths { if err := os.MkdirAll(path.Join(root, p), 0755); err != nil { return nil, err } } return a, nil } // Return a nil error if the kernel supports aufs // We cannot modprobe because inside dind modprobe fails // to run func supportsAufs() error { // We can try to modprobe aufs first before looking at // proc/filesystems for when aufs is supported exec.Command("modprobe", "aufs").Run() f, err := os.Open("/proc/filesystems") if err != nil { return err } defer f.Close() s := bufio.NewScanner(f) for s.Scan() { if strings.Contains(s.Text(), "aufs") { return nil } } return ErrAufsNotSupported } func (a Driver) rootPath() string { return a.root } func (Driver) String() string { return "aufs" } func (a Driver) Status() [][2]string { ids, _ := loadIds(path.Join(a.rootPath(), "layers")) return [][2]string{ {"Root Dir", a.rootPath()}, {"Dirs", fmt.Sprintf("%d", len(ids))}, } } // Exists returns true if the given id is registered with // this driver func (a Driver) Exists(id string) bool { if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { return false } return true } // Three folders are created for each id // mnt, layers, and diff func (a *Driver) Create(id, parent string) error { if err := a.createDirsFor(id); err != nil { return err } // Write the layers metadata f, err := os.Create(path.Join(a.rootPath(), "layers", id)) if err != nil { return err } defer f.Close() if parent != "" { ids, err := getParentIds(a.rootPath(), parent) if err != nil { return err } if _, err := fmt.Fprintln(f, parent); err != nil { return err } for _, i := range ids { if _, err := fmt.Fprintln(f, i); err != nil { return err } } } return nil } func (a *Driver) createDirsFor(id string) error { paths := []string{ "mnt", "diff", } for _, p := range paths { if err := os.MkdirAll(path.Join(a.rootPath(), p, id), 0755); err != nil { return err } } return nil } // Unmount and remove the dir information func (a *Driver) Remove(id string) error { // Protect the a.active from concurrent access a.Lock() defer a.Unlock() if a.active[id] != 0 { utils.Errorf("Warning: removing active id %s\n", id) } // Make sure the dir is umounted first if err := a.unmount(id); err != nil { return err } tmpDirs := []string{ "mnt", "diff", } // Atomically remove each directory in turn by first moving it out of the // way (so that docker doesn't find it anymore) before doing removal of // the whole tree. 
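// The rename is what provides the atomicity: as soon as mnt/<id> or diff/<id>
// has been moved aside to its "<id>-removing" name, lookups no longer see the
// layer, and the deferred RemoveAll of the temporary path can proceed at its
// own pace.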
for _, p := range tmpDirs { realPath := path.Join(a.rootPath(), p, id) tmpPath := path.Join(a.rootPath(), p, fmt.Sprintf("%s-removing", id)) if err := os.Rename(realPath, tmpPath); err != nil && !os.IsNotExist(err) { return err } defer os.RemoveAll(tmpPath) } // Remove the layers file for the id if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { return err } return nil } // Return the rootfs path for the id // This will mount the dir at it's given path func (a *Driver) Get(id string) (string, error) { ids, err := getParentIds(a.rootPath(), id) if err != nil { if !os.IsNotExist(err) { return "", err } ids = []string{} } // Protect the a.active from concurrent access a.Lock() defer a.Unlock() count := a.active[id] // If a dir does not have a parent ( no layers )do not try to mount // just return the diff path to the data out := path.Join(a.rootPath(), "diff", id) if len(ids) > 0 { out = path.Join(a.rootPath(), "mnt", id) if count == 0 { if err := a.mount(id); err != nil { return "", err } } } a.active[id] = count + 1 return out, nil } func (a *Driver) Put(id string) { // Protect the a.active from concurrent access a.Lock() defer a.Unlock() if count := a.active[id]; count > 1 { a.active[id] = count - 1 } else { ids, _ := getParentIds(a.rootPath(), id) // We only mounted if there are any parents if ids != nil && len(ids) > 0 { a.unmount(id) } delete(a.active, id) } } // Returns an archive of the contents for the id func (a *Driver) Diff(id string) (archive.Archive, error) { return archive.TarFilter(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ Compression: archive.Uncompressed, }) } func (a *Driver) ApplyDiff(id string, diff archive.ArchiveReader) error { return archive.Untar(diff, path.Join(a.rootPath(), "diff", id), nil) } // Returns the size of the contents for the id func (a *Driver) DiffSize(id string) (int64, error) { return utils.TreeSize(path.Join(a.rootPath(), "diff", id)) } func (a *Driver) Changes(id string) ([]archive.Change, error) { layers, err := a.getParentLayerPaths(id) if err != nil { return nil, err } return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) } func (a *Driver) getParentLayerPaths(id string) ([]string, error) { parentIds, err := getParentIds(a.rootPath(), id) if err != nil { return nil, err } if len(parentIds) == 0 { return nil, fmt.Errorf("Dir %s does not have any parent layers", id) } layers := make([]string, len(parentIds)) // Get the diff paths for all the parent ids for i, p := range parentIds { layers[i] = path.Join(a.rootPath(), "diff", p) } return layers, nil } func (a *Driver) mount(id string) error { // If the id is mounted or we get an error return if mounted, err := a.mounted(id); err != nil || mounted { return err } var ( target = path.Join(a.rootPath(), "mnt", id) rw = path.Join(a.rootPath(), "diff", id) ) layers, err := a.getParentLayerPaths(id) if err != nil { return err } if err := a.aufsMount(layers, rw, target); err != nil { return err } return nil } func (a *Driver) unmount(id string) error { if mounted, err := a.mounted(id); err != nil || !mounted { return err } target := path.Join(a.rootPath(), "mnt", id) return Unmount(target) } func (a *Driver) mounted(id string) (bool, error) { target := path.Join(a.rootPath(), "mnt", id) return mountpk.Mounted(target) } // During cleanup aufs needs to unmount all mountpoints func (a *Driver) Cleanup() error { ids, err := loadIds(path.Join(a.rootPath(), "layers")) if err != nil { return err } for _, id := range ids { if err := 
a.unmount(id); err != nil { utils.Errorf("Unmounting %s: %s", utils.TruncateID(id), err) } } return nil } func (a *Driver) aufsMount(ro []string, rw, target string) (err error) { defer func() { if err != nil { Unmount(target) } }() if err = a.tryMount(ro, rw, target); err != nil { if err = a.mountRw(rw, target); err != nil { return } for _, layer := range ro { branch := fmt.Sprintf("append:%s=ro+wh", layer) if err = mount("none", target, "aufs", MsRemount, branch); err != nil { return } } } return } // Try to mount using the aufs fast path, if this fails then // append ro layers. func (a *Driver) tryMount(ro []string, rw, target string) (err error) { var ( rwBranch = fmt.Sprintf("%s=rw", rw) roBranches = fmt.Sprintf("%s=ro+wh:", strings.Join(ro, "=ro+wh:")) ) return mount("none", target, "aufs", 0, fmt.Sprintf("br:%v:%v,xino=/dev/shm/aufs.xino", rwBranch, roBranches)) } func (a *Driver) mountRw(rw, target string) error { return mount("none", target, "aufs", 0, fmt.Sprintf("br:%s,xino=/dev/shm/aufs.xino", rw)) } func rollbackMount(target string, err error) { if err != nil { Unmount(target) } } docker-0.9.1/graphdriver/aufs/mount_linux.go0000644000175000017500000000036012314376205017265 0ustar tagtag// +build amd64 package aufs import "syscall" const MsRemount = syscall.MS_REMOUNT func mount(source string, target string, fstype string, flags uintptr, data string) error { return syscall.Mount(source, target, fstype, flags, data) } docker-0.9.1/graphdriver/aufs/dirs.go0000644000175000017500000000153012314376205015645 0ustar tagtagpackage aufs import ( "bufio" "io/ioutil" "os" "path" ) // Return all the directories func loadIds(root string) ([]string, error) { dirs, err := ioutil.ReadDir(root) if err != nil { return nil, err } out := []string{} for _, d := range dirs { if !d.IsDir() { out = append(out, d.Name()) } } return out, nil } // Read the layers file for the current id and return all the // layers represented by new lines in the file // // If there are no lines in the file then the id has no parent // and an empty slice is returned. func getParentIds(root, id string) ([]string, error) { f, err := os.Open(path.Join(root, "layers", id)) if err != nil { return nil, err } defer f.Close() out := []string{} s := bufio.NewScanner(f) for s.Scan() { if t := s.Text(); t != "" { out = append(out, s.Text()) } } return out, s.Err() } docker-0.9.1/graphdriver/aufs/migrate.go0000644000175000017500000001106712314376205016342 0ustar tagtagpackage aufs import ( "encoding/json" "fmt" "io/ioutil" "os" "path" ) type metadata struct { ID string `json:"id"` ParentID string `json:"parent,omitempty"` Image string `json:"Image,omitempty"` parent *metadata } func pathExists(pth string) bool { if _, err := os.Stat(pth); err != nil { return false } return true } // Migrate existing images and containers from docker < 0.7.x // // The format pre 0.7 is for docker to store the metadata and filesystem // content in the same directory. For the migration to work we need to move Image layer // data from /var/lib/docker/graph//layers to the diff of the registered id. // // Next we need to migrate the container's rw layer to diff of the driver. After the // contents are migrated we need to register the image and container ids with the // driver. // // For the migration we try to move the folder containing the layer files, if that // fails because the data is currently mounted we will fallback to creating a // symlink. 
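// Migrate therefore runs in three steps: rename the old "repositories" file
// out of the way, relocate every image layer found under graph/, and finally
// relocate each container's rw layer, registering the ids with this driver
// as it goes.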
func (a *Driver) Migrate(pth string, setupInit func(p string) error) error { if pathExists(path.Join(pth, "graph")) { if err := a.migrateRepositories(pth); err != nil { return err } if err := a.migrateImages(path.Join(pth, "graph")); err != nil { return err } return a.migrateContainers(path.Join(pth, "containers"), setupInit) } return nil } func (a *Driver) migrateRepositories(pth string) error { name := path.Join(pth, "repositories") if err := os.Rename(name, name+"-aufs"); err != nil && !os.IsNotExist(err) { return err } return nil } func (a *Driver) migrateContainers(pth string, setupInit func(p string) error) error { fis, err := ioutil.ReadDir(pth) if err != nil { return err } for _, fi := range fis { if id := fi.Name(); fi.IsDir() && pathExists(path.Join(pth, id, "rw")) { if err := tryRelocate(path.Join(pth, id, "rw"), path.Join(a.rootPath(), "diff", id)); err != nil { return err } if !a.Exists(id) { metadata, err := loadMetadata(path.Join(pth, id, "config.json")) if err != nil { return err } initID := fmt.Sprintf("%s-init", id) if err := a.Create(initID, metadata.Image); err != nil { return err } initPath, err := a.Get(initID) if err != nil { return err } // setup init layer if err := setupInit(initPath); err != nil { return err } if err := a.Create(id, initID); err != nil { return err } } } } return nil } func (a *Driver) migrateImages(pth string) error { fis, err := ioutil.ReadDir(pth) if err != nil { return err } var ( m = make(map[string]*metadata) current *metadata exists bool ) for _, fi := range fis { if id := fi.Name(); fi.IsDir() && pathExists(path.Join(pth, id, "layer")) { if current, exists = m[id]; !exists { current, err = loadMetadata(path.Join(pth, id, "json")) if err != nil { return err } m[id] = current } } } for _, v := range m { v.parent = m[v.ParentID] } migrated := make(map[string]bool) for _, v := range m { if err := a.migrateImage(v, pth, migrated); err != nil { return err } } return nil } func (a *Driver) migrateImage(m *metadata, pth string, migrated map[string]bool) error { if !migrated[m.ID] { if m.parent != nil { a.migrateImage(m.parent, pth, migrated) } if err := tryRelocate(path.Join(pth, m.ID, "layer"), path.Join(a.rootPath(), "diff", m.ID)); err != nil { return err } if !a.Exists(m.ID) { if err := a.Create(m.ID, m.ParentID); err != nil { return err } } migrated[m.ID] = true } return nil } // tryRelocate will try to rename the old path to the new pack and if // the operation fails, it will fallback to a symlink func tryRelocate(oldPath, newPath string) error { s, err := os.Lstat(newPath) if err != nil && !os.IsNotExist(err) { return err } // If the destination is a symlink then we already tried to relocate once before // and it failed so we delete it and try to remove if s != nil && s.Mode()&os.ModeSymlink == os.ModeSymlink { if err := os.RemoveAll(newPath); err != nil { return err } } if err := os.Rename(oldPath, newPath); err != nil { if sErr := os.Symlink(oldPath, newPath); sErr != nil { return fmt.Errorf("Unable to relocate %s to %s: Rename err %s Symlink err %s", oldPath, newPath, err, sErr) } } return nil } func loadMetadata(pth string) (*metadata, error) { f, err := os.Open(pth) if err != nil { return nil, err } defer f.Close() var ( out = &metadata{} dec = json.NewDecoder(f) ) if err := dec.Decode(out); err != nil { return nil, err } return out, nil } docker-0.9.1/graphdriver/aufs/mount.go0000644000175000017500000000053712314376205016054 0ustar tagtagpackage aufs import ( "github.com/dotcloud/docker/utils" "os/exec" "syscall" ) func 
Unmount(target string) error { if err := exec.Command("auplink", target, "flush").Run(); err != nil { utils.Errorf("[warning]: couldn't run auplink before unmount: %s", err) } if err := syscall.Unmount(target, 0); err != nil { return err } return nil } docker-0.9.1/graphdriver/vfs/0000755000175000017500000000000012314376205014216 5ustar tagtagdocker-0.9.1/graphdriver/vfs/driver.go0000644000175000017500000000340712314376205016044 0ustar tagtagpackage vfs import ( "fmt" "github.com/dotcloud/docker/graphdriver" "os" "os/exec" "path" ) func init() { graphdriver.Register("vfs", Init) } func Init(home string) (graphdriver.Driver, error) { d := &Driver{ home: home, } return d, nil } type Driver struct { home string } func (d *Driver) String() string { return "vfs" } func (d *Driver) Status() [][2]string { return nil } func (d *Driver) Cleanup() error { return nil } func copyDir(src, dst string) error { if output, err := exec.Command("cp", "-aT", "--reflink=auto", src, dst).CombinedOutput(); err != nil { return fmt.Errorf("Error VFS copying directory: %s (%s)", err, output) } return nil } func (d *Driver) Create(id string, parent string) error { dir := d.dir(id) if err := os.MkdirAll(path.Dir(dir), 0700); err != nil { return err } if err := os.Mkdir(dir, 0700); err != nil { return err } if parent == "" { return nil } parentDir, err := d.Get(parent) if err != nil { return fmt.Errorf("%s: %s", parent, err) } if err := copyDir(parentDir, dir); err != nil { return err } return nil } func (d *Driver) dir(id string) string { return path.Join(d.home, "dir", path.Base(id)) } func (d *Driver) Remove(id string) error { if _, err := os.Stat(d.dir(id)); err != nil { return err } return os.RemoveAll(d.dir(id)) } func (d *Driver) Get(id string) (string, error) { dir := d.dir(id) if st, err := os.Stat(dir); err != nil { return "", err } else if !st.IsDir() { return "", fmt.Errorf("%s: not a directory", dir) } return dir, nil } func (d *Driver) Put(id string) { // The vfs driver has no runtime resources (e.g. 
mounts) // to clean up, so we don't need anything here } func (d *Driver) Exists(id string) bool { _, err := os.Stat(d.dir(id)) return err == nil } docker-0.9.1/state.go0000644000175000017500000000231512314376205012553 0ustar tagtagpackage docker import ( "fmt" "github.com/dotcloud/docker/utils" "sync" "time" ) type State struct { sync.RWMutex Running bool Pid int ExitCode int StartedAt time.Time FinishedAt time.Time Ghost bool } // String returns a human-readable description of the state func (s *State) String() string { s.RLock() defer s.RUnlock() if s.Running { if s.Ghost { return fmt.Sprintf("Ghost") } return fmt.Sprintf("Up %s", utils.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) } return fmt.Sprintf("Exit %d", s.ExitCode) } func (s *State) IsRunning() bool { s.RLock() defer s.RUnlock() return s.Running } func (s *State) IsGhost() bool { s.RLock() defer s.RUnlock() return s.Ghost } func (s *State) GetExitCode() int { s.RLock() defer s.RUnlock() return s.ExitCode } func (s *State) SetGhost(val bool) { s.Lock() defer s.Unlock() s.Ghost = val } func (s *State) SetRunning(pid int) { s.Lock() defer s.Unlock() s.Running = true s.Ghost = false s.ExitCode = 0 s.Pid = pid s.StartedAt = time.Now().UTC() } func (s *State) SetStopped(exitCode int) { s.Lock() defer s.Unlock() s.Running = false s.Pid = 0 s.FinishedAt = time.Now().UTC() s.ExitCode = exitCode } docker-0.9.1/tags_unit_test.go0000644000175000017500000000416012314376205014467 0ustar tagtagpackage docker import ( "github.com/dotcloud/docker/graphdriver" "github.com/dotcloud/docker/utils" "os" "path" "testing" ) const ( testImageName = "myapp" testImageID = "foo" ) func mkTestTagStore(root string, t *testing.T) *TagStore { driver, err := graphdriver.New(root) if err != nil { t.Fatal(err) } graph, err := NewGraph(root, driver) if err != nil { t.Fatal(err) } store, err := NewTagStore(path.Join(root, "tags"), graph) if err != nil { t.Fatal(err) } archive, err := fakeTar() if err != nil { t.Fatal(err) } img := &Image{ID: testImageID} // FIXME: this fails on Darwin with: // tags_unit_test.go:36: mkdir /var/folders/7g/b3ydb5gx4t94ndr_cljffbt80000gq/T/docker-test569b-tRunner-075013689/vfs/dir/foo/etc/postgres: permission denied if err := graph.Register(nil, archive, img); err != nil { t.Fatal(err) } if err := store.Set(testImageName, "", testImageID, false); err != nil { t.Fatal(err) } return store } func TestLookupImage(t *testing.T) { tmp, err := utils.TestDirectory("") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) store := mkTestTagStore(tmp, t) defer store.graph.driver.Cleanup() if img, err := store.LookupImage(testImageName); err != nil { t.Fatal(err) } else if img == nil { t.Errorf("Expected 1 image, none found") } if img, err := store.LookupImage(testImageName + ":" + DEFAULTTAG); err != nil { t.Fatal(err) } else if img == nil { t.Errorf("Expected 1 image, none found") } if img, err := store.LookupImage(testImageName + ":" + "fail"); err == nil { t.Errorf("Expected error, none found") } else if img != nil { t.Errorf("Expected 0 image, 1 found") } if img, err := store.LookupImage("fail:fail"); err == nil { t.Errorf("Expected error, none found") } else if img != nil { t.Errorf("Expected 0 image, 1 found") } if img, err := store.LookupImage(testImageID); err != nil { t.Fatal(err) } else if img == nil { t.Errorf("Expected 1 image, none found") } if img, err := store.LookupImage(testImageName + ":" + testImageID); err != nil { t.Fatal(err) } else if img == nil { t.Errorf("Expected 1 image, none found") } } 
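The State type above is small enough that its intended use is easiest to see in a short, self-contained sketch. The following is illustrative only and is not part of the tree; it assumes the package is imported under its github.com/dotcloud/docker path and that utils.HumanDuration renders sub-second uptimes roughly as shown in the comments.

package main

import (
	"fmt"

	"github.com/dotcloud/docker"
)

func main() {
	s := &docker.State{}

	// SetRunning records the pid, resets ExitCode and stamps StartedAt.
	s.SetRunning(1234)
	fmt.Println(s.IsRunning(), s.String()) // true "Up Less than a second" (approximately)

	// SetStopped clears Running, stamps FinishedAt and stores the exit code,
	// which String() then reports.
	s.SetStopped(2)
	fmt.Println(s.IsRunning(), s.GetExitCode(), s.String()) // false 2 "Exit 2"
}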
docker-0.9.1/contrib/0000755000175000017500000000000012314376205012543 5ustar tagtagdocker-0.9.1/contrib/mkimage-unittest.sh0000755000175000017500000000255212314376205016375 0ustar tagtag#!/usr/bin/env bash # Generate a very minimal filesystem based on busybox-static, # and load it into the local docker under the name "docker-ut". missing_pkg() { echo "Sorry, I could not locate $1" echo "Try 'apt-get install ${2:-$1}'?" exit 1 } BUSYBOX=$(which busybox) [ "$BUSYBOX" ] || missing_pkg busybox busybox-static SOCAT=$(which socat) [ "$SOCAT" ] || missing_pkg socat shopt -s extglob set -ex ROOTFS=`mktemp -d /tmp/rootfs-busybox.XXXXXXXXXX` trap "rm -rf $ROOTFS" INT QUIT TERM cd $ROOTFS mkdir bin etc dev dev/pts lib proc sys tmp touch etc/resolv.conf cp /etc/nsswitch.conf etc/nsswitch.conf echo root:x:0:0:root:/:/bin/sh > etc/passwd echo daemon:x:1:1:daemon:/usr/sbin:/bin/sh >> etc/passwd echo root:x:0: > etc/group echo daemon:x:1: >> etc/group ln -s lib lib64 ln -s bin sbin cp $BUSYBOX $SOCAT bin for X in $(busybox --list) do ln -s busybox bin/$X done rm bin/init ln bin/busybox bin/init cp -P /lib/x86_64-linux-gnu/lib{pthread*,c*(-*),dl*(-*),nsl*(-*),nss_*,util*(-*),wrap,z}.so* lib cp /lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 lib cp -P /usr/lib/x86_64-linux-gnu/lib{crypto,ssl}.so* lib for X in console null ptmx random stdin stdout stderr tty urandom zero do cp -a /dev/$X dev done chmod 0755 $ROOTFS # See #486 tar --numeric-owner -cf- . | docker import - docker-ut docker run -i -u root docker-ut /bin/echo Success. rm -rf $ROOTFS docker-0.9.1/contrib/mkseccomp.pl0000755000175000017500000000422612314376205015070 0ustar tagtag#!/usr/bin/perl # # A simple helper script to help people build seccomp profiles for # Docker/LXC. The goal is mostly to reduce the attack surface to the # kernel, by restricting access to rarely used, recently added or not used # syscalls. # # This script processes one or more files which contain the list of system # calls to be allowed. See mkseccomp.sample for more information how you # can configure the list of syscalls. When run, this script produces output # which, when stored in a file, can be passed to docker as follows: # # docker run -lxc-conf="lxc.seccomp=$file" # # The included sample file shows how to cut about a quarter of all syscalls, # which affecting most applications. # # For specific situations it is possible to reduce the list further. By # reducing the list to just those syscalls required by a certain application # you can make it difficult for unknown/unexpected code to run. # # Run this script as follows: # # ./mkseccomp.pl < mkseccomp.sample >syscalls.list # or # ./mkseccomp.pl mkseccomp.sample >syscalls.list # # Multiple files can be specified, in which case the lists of syscalls are # combined. # # By Martijn van Oosterhout Nov 2013 # How it works: # # This program basically spawns two processes to form a chain like: # # | cpp | use strict; use warnings; if( -t ) { print STDERR "Helper script to make seccomp filters for Docker/LXC.\n"; print STDERR "Usage: mkseccomp.pl < [files...]\n"; exit 1; } my $pid = open(my $in, "-|") // die "Couldn't fork1 ($!)\n"; if($pid == 0) { # Child $pid = open(my $out, "|-") // die "Couldn't fork2 ($!)\n"; if($pid == 0) { # Child, which execs cpp exec "cpp" or die "Couldn't exec cpp ($!)\n"; exit 1; } # Process the DATA section and output to cpp print $out "#include \n"; while(<>) { if(/^\w/) { print $out "__NR_$_"; } } close $out; exit 0; } # Print header and then process output from cpp. 
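# The two literal lines below are the policy header LXC expects (a version
# line followed by the policy type); every numeric line that cpp hands back
# after that is an allowed syscall number.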
print "1\n"; print "whitelist\n"; while(<$in>) { print if( /^[0-9]/ ); } docker-0.9.1/contrib/mkimage-rinse.sh0000755000175000017500000000643612314376205015643 0ustar tagtag#!/usr/bin/env bash # # Create a base CentOS Docker image. # This script is useful on systems with rinse available (e.g., # building a CentOS image on Debian). See contrib/mkimage-yum.sh for # a way to build CentOS images on systems with yum installed. set -e repo="$1" distro="$2" mirror="$3" if [ ! "$repo" ] || [ ! "$distro" ]; then self="$(basename $0)" echo >&2 "usage: $self repo distro [mirror]" echo >&2 echo >&2 " ie: $self username/centos centos-5" echo >&2 " $self username/centos centos-6" echo >&2 echo >&2 " ie: $self username/slc slc-5" echo >&2 " $self username/slc slc-6" echo >&2 echo >&2 " ie: $self username/centos centos-5 http://vault.centos.org/5.8/os/x86_64/CentOS/" echo >&2 " $self username/centos centos-6 http://vault.centos.org/6.3/os/x86_64/Packages/" echo >&2 echo >&2 'See /etc/rinse for supported values of "distro" and for examples of' echo >&2 ' expected values of "mirror".' echo >&2 echo >&2 'This script is tested to work with the original upstream version of rinse,' echo >&2 ' found at http://www.steve.org.uk/Software/rinse/ and also in Debian at' echo >&2 ' http://packages.debian.org/wheezy/rinse -- as always, YMMV.' echo >&2 exit 1 fi target="/tmp/docker-rootfs-rinse-$distro-$$-$RANDOM" cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" returnTo="$(pwd -P)" rinseArgs=( --arch amd64 --distribution "$distro" --directory "$target" ) if [ "$mirror" ]; then rinseArgs+=( --mirror "$mirror" ) fi set -x mkdir -p "$target" sudo rinse "${rinseArgs[@]}" cd "$target" # rinse fails a little at setting up /dev, so we'll just wipe it out and create our own sudo rm -rf dev sudo mkdir -m 755 dev ( cd dev sudo ln -sf /proc/self/fd ./ sudo mkdir -m 755 pts sudo mkdir -m 1777 shm sudo mknod -m 600 console c 5 1 sudo mknod -m 600 initctl p sudo mknod -m 666 full c 1 7 sudo mknod -m 666 null c 1 3 sudo mknod -m 666 ptmx c 5 2 sudo mknod -m 666 random c 1 8 sudo mknod -m 666 tty c 5 0 sudo mknod -m 666 tty0 c 4 0 sudo mknod -m 666 urandom c 1 9 sudo mknod -m 666 zero c 1 5 ) # effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target" # locales sudo rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} # docs sudo rm -rf usr/share/{man,doc,info,gnome/help} # cracklib sudo rm -rf usr/share/cracklib # i18n sudo rm -rf usr/share/i18n # yum cache sudo rm -rf var/cache/yum sudo mkdir -p --mode=0755 var/cache/yum # sln sudo rm -rf sbin/sln # ldconfig #sudo rm -rf sbin/ldconfig sudo rm -rf etc/ld.so.cache var/cache/ldconfig sudo mkdir -p --mode=0755 var/cache/ldconfig # allow networking init scripts inside the container to work without extra steps echo 'NETWORKING=yes' | sudo tee etc/sysconfig/network > /dev/null # to restore locales later: # yum reinstall glibc-common version= if [ -r etc/redhat-release ]; then version="$(sed -E 's/^[^0-9.]*([0-9.]+).*$/\1/' etc/redhat-release)" elif [ -r etc/SuSE-release ]; then version="$(awk '/^VERSION/ { print $3 }' etc/SuSE-release)" fi if [ -z "$version" ]; then echo >&2 "warning: cannot autodetect OS version, using $distro as tag" sleep 20 version="$distro" fi sudo tar --numeric-owner -c . 
| docker import - $repo:$version docker run -i -t $repo:$version echo success cd "$returnTo" sudo rm -rf "$target" docker-0.9.1/contrib/host-integration/0000755000175000017500000000000012314376205016041 5ustar tagtagdocker-0.9.1/contrib/host-integration/Dockerfile.dev0000644000175000017500000000135512314376205020614 0ustar tagtag# # This Dockerfile will create an image that allows to generate upstart and # systemd scripts (more to come) # # docker-version 0.6.2 # FROM ubuntu:12.10 MAINTAINER Guillaume J. Charmes RUN apt-get update && apt-get install -y wget git mercurial # Install Go RUN wget --no-check-certificate https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz -O go-1.1.2.tar.gz RUN tar -xzvf go-1.1.2.tar.gz && mv /go /goroot RUN mkdir /go ENV GOROOT /goroot ENV GOPATH /go ENV PATH $GOROOT/bin:$PATH RUN go get github.com/dotcloud/docker && cd /go/src/github.com/dotcloud/docker && git checkout v0.6.3 ADD manager.go /manager/ RUN cd /manager && go build -o /usr/bin/manager ENTRYPOINT ["/usr/bin/manager"] docker-0.9.1/contrib/host-integration/manager.sh0000755000175000017500000000224112314376205020011 0ustar tagtag#!/bin/sh set -e usage() { echo >&2 "usage: $0 [-a author] [-d description] container [manager]" echo >&2 " ie: $0 -a 'John Smith' 4ec9612a37cd systemd" echo >&2 " ie: $0 -d 'Super Cool System' 4ec9612a37cd # defaults to upstart" exit 1 } auth='' desc='' have_auth= have_desc= while getopts a:d: opt; do case "$opt" in a) auth="$OPTARG" have_auth=1 ;; d) desc="$OPTARG" have_desc=1 ;; esac done shift $(($OPTIND - 1)) [ $# -ge 1 -a $# -le 2 ] || usage cid="$1" script="${2:-upstart}" if [ ! -e "manager/$script" ]; then echo >&2 "Error: manager type '$script' is unknown (PRs always welcome!)." echo >&2 'The currently supported types are:' echo >&2 " $(cd manager && echo *)" exit 1 fi # TODO https://github.com/dotcloud/docker/issues/734 (docker inspect formatting) #if command -v docker > /dev/null 2>&1; then # image="$(docker inspect -f '{{.Image}}' "$cid")" # if [ "$image" ]; then # if [ -z "$have_auth" ]; then # auth="$(docker inspect -f '{{.Author}}' "$image")" # fi # if [ -z "$have_desc" ]; then # desc="$(docker inspect -f '{{.Comment}}' "$image")" # fi # fi #fi exec "manager/$script" "$cid" "$auth" "$desc" docker-0.9.1/contrib/host-integration/Dockerfile.min0000644000175000017500000000020012314376205020605 0ustar tagtagFROM busybox MAINTAINER Guillaume J. 
Charmes ADD manager /usr/bin/ ENTRYPOINT ["/usr/bin/manager"] docker-0.9.1/contrib/host-integration/manager/0000755000175000017500000000000012314376205017453 5ustar tagtagdocker-0.9.1/contrib/host-integration/manager/systemd0000755000175000017500000000040112314376205021064 0ustar tagtag#!/bin/sh set -e cid="$1" auth="$2" desc="$3" cat <<-EOF [Unit] Description=$desc Author=$auth After=docker.service [Service] ExecStart=/usr/bin/docker start -a $cid ExecStop=/usr/bin/docker stop -t 2 $cid [Install] WantedBy=local.target EOF docker-0.9.1/contrib/host-integration/manager/upstart0000755000175000017500000000044112314376205021102 0ustar tagtag#!/bin/sh set -e cid="$1" auth="$2" desc="$3" cat <<-EOF description "$(echo "$desc" | sed 's/"/\\"/g')" author "$(echo "$auth" | sed 's/"/\\"/g')" start on filesystem and started lxc-net and started docker stop on runlevel [!2345] respawn exec /usr/bin/docker start -a "$cid" EOF docker-0.9.1/contrib/host-integration/manager.go0000644000175000017500000000643112314376205020006 0ustar tagtagpackage main import ( "bytes" "encoding/json" "flag" "fmt" "github.com/dotcloud/docker" "os" "strings" "text/template" ) var templates = map[string]string{ "upstart": `description "{{.description}}" author "{{.author}}" start on filesystem and started lxc-net and started docker stop on runlevel [!2345] respawn exec /home/vagrant/goroot/bin/docker start -a {{.container_id}} `, "systemd": `[Unit] Description={{.description}} Author={{.author}} After=docker.service [Service] Restart=always ExecStart=/usr/bin/docker start -a {{.container_id}} ExecStop=/usr/bin/docker stop -t 2 {{.container_id}} [Install] WantedBy=local.target `, } func main() { // Parse command line for custom options kind := flag.String("t", "upstart", "Type of manager requested") author := flag.String("a", "", "Author of the image") description := flag.String("d", "", "Description of the image") flag.Usage = func() { fmt.Fprintf(os.Stderr, "\nUsage: manager \n\n") flag.PrintDefaults() } flag.Parse() // We require at least the container ID if flag.NArg() != 1 { println(flag.NArg()) flag.Usage() return } // Check that the requested process manager is supported if _, exists := templates[*kind]; !exists { panic("Unknown script template") } // Load the requested template tpl, err := template.New("processManager").Parse(templates[*kind]) if err != nil { panic(err) } // Create stdout/stderr buffers bufOut := bytes.NewBuffer(nil) bufErr := bytes.NewBuffer(nil) // Instanciate the Docker CLI cli := docker.NewDockerCli(nil, bufOut, bufErr, "unix", "/var/run/docker.sock") // Retrieve the container info if err := cli.CmdInspect(flag.Arg(0)); err != nil { // As of docker v0.6.3, CmdInspect always returns nil panic(err) } // If there is nothing in the error buffer, then the Docker daemon is there and the container has been found if bufErr.Len() == 0 { // Unmarshall the resulting container data c := []*docker.Container{{}} if err := json.Unmarshal(bufOut.Bytes(), &c); err != nil { panic(err) } // Reset the buffers bufOut.Reset() bufErr.Reset() // Retrieve the info of the linked image if err := cli.CmdInspect(c[0].Image); err != nil { panic(err) } // If there is nothing in the error buffer, then the image has been found. 
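// The Author and Comment recorded in the image only serve as defaults: they
// are copied into *author and *description below when the corresponding -a
// and -d flags were left empty.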
if bufErr.Len() == 0 { // Unmarshall the resulting image data img := []*docker.Image{{}} if err := json.Unmarshal(bufOut.Bytes(), &img); err != nil { panic(err) } // If no author has been set, use the one from the image if *author == "" && img[0].Author != "" { *author = strings.Replace(img[0].Author, "\"", "", -1) } // If no description has been set, use the comment from the image if *description == "" && img[0].Comment != "" { *description = strings.Replace(img[0].Comment, "\"", "", -1) } } } /// Old version: Wrtie the resulting script to file // f, err := os.OpenFile(kind, os.O_CREATE|os.O_WRONLY, 0755) // if err != nil { // panic(err) // } // defer f.Close() // Create a map with needed data data := map[string]string{ "author": *author, "description": *description, "container_id": flag.Arg(0), } // Process the template and output it on Stdout if err := tpl.Execute(os.Stdout, data); err != nil { panic(err) } } docker-0.9.1/contrib/mkimage-arch-pacman.conf0000644000175000017500000000521612314376205017200 0ustar tagtag# # /etc/pacman.conf # # See the pacman.conf(5) manpage for option and repository directives # # GENERAL OPTIONS # [options] # The following paths are commented out with their default values listed. # If you wish to use different paths, uncomment and update the paths. #RootDir = / #DBPath = /var/lib/pacman/ #CacheDir = /var/cache/pacman/pkg/ #LogFile = /var/log/pacman.log #GPGDir = /etc/pacman.d/gnupg/ HoldPkg = pacman glibc #XferCommand = /usr/bin/curl -C - -f %u > %o #XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u #CleanMethod = KeepInstalled #UseDelta = 0.7 Architecture = auto # Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup #IgnorePkg = #IgnoreGroup = #NoUpgrade = #NoExtract = # Misc options #UseSyslog #Color #TotalDownload # We cannot check disk space from within a chroot environment #CheckSpace #VerbosePkgLists # By default, pacman accepts packages signed by keys that its local keyring # trusts (see pacman-key and its man page), as well as unsigned packages. SigLevel = Required DatabaseOptional LocalFileSigLevel = Optional #RemoteFileSigLevel = Required # NOTE: You must run `pacman-key --init` before first using pacman; the local # keyring can then be populated with the keys of all official Arch Linux # packagers with `pacman-key --populate archlinux`. # # REPOSITORIES # - can be defined here or included from another file # - pacman will search repositories in the order defined here # - local/custom mirrors can be added here or in separate files # - repositories listed first will take precedence when packages # have identical names, regardless of version number # - URLs will have $repo replaced by the name of the current repo # - URLs will have $arch replaced by the name of the architecture # # Repository entries are of the format: # [repo-name] # Server = ServerName # Include = IncludePath # # The header [repo-name] is crucial - it must be present and # uncommented to enable the repo. # # The testing repositories are disabled by default. To enable, uncomment the # repo name header and Include lines. You can add preferred servers immediately # after the header, and they will be used before the default mirrors. #[testing] #Include = /etc/pacman.d/mirrorlist [core] Include = /etc/pacman.d/mirrorlist [extra] Include = /etc/pacman.d/mirrorlist #[community-testing] #Include = /etc/pacman.d/mirrorlist [community] Include = /etc/pacman.d/mirrorlist # An example of a custom package repository. 
See the pacman manpage for # tips on creating your own repositories. #[custom] #SigLevel = Optional TrustAll #Server = file:///home/custompkgs docker-0.9.1/contrib/prepare-commit-msg.hook0000644000175000017500000000061012314376205017132 0ustar tagtag#!/bin/sh # Auto sign all commits to allow them to be used by the Docker project. # see https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md#sign-your-work # GH_USER=$(git config --get github.user) SOB=$(git var GIT_AUTHOR_IDENT | sed -n "s/^\(.*>\).*$/Docker-DCO-1.1-Signed-off-by: \1 \(github: $GH_USER\)/p") grep -qs "^$SOB" "$1" || { echo echo "$SOB" } >> "$1" docker-0.9.1/contrib/mkimage-yum.sh0000755000175000017500000000470012314376205015325 0ustar tagtag#!/usr/bin/env bash # # Create a base CentOS Docker image. # # This script is useful on systems with yum installed (e.g., building # a CentOS image on CentOS). See contrib/mkimage-rinse.sh for a way # to build CentOS images on other systems. usage() { cat < OPTIONS: -y The path to the yum config to install packages from. The default is /etc/yum.conf. EOOPTS exit 1 } # option defaults yum_config=/etc/yum.conf while getopts ":y:h" opt; do case $opt in y) yum_config=$OPTARG ;; h) usage ;; \?) echo "Invalid option: -$OPTARG" usage ;; esac done shift $((OPTIND - 1)) name=$1 if [[ -z $name ]]; then usage fi #-------------------- target=$(mktemp -d --tmpdir $(basename $0).XXXXXX) set -x mkdir -m 755 "$target"/dev mknod -m 600 "$target"/dev/console c 5 1 mknod -m 600 "$target"/dev/initctl p mknod -m 666 "$target"/dev/full c 1 7 mknod -m 666 "$target"/dev/null c 1 3 mknod -m 666 "$target"/dev/ptmx c 5 2 mknod -m 666 "$target"/dev/random c 1 8 mknod -m 666 "$target"/dev/tty c 5 0 mknod -m 666 "$target"/dev/tty0 c 4 0 mknod -m 666 "$target"/dev/urandom c 1 9 mknod -m 666 "$target"/dev/zero c 1 5 yum -c "$yum_config" --installroot="$target" --setopt=tsflags=nodocs \ --setopt=group_package_types=mandatory -y groupinstall Core yum -c "$yum_config" --installroot="$target" -y clean all cat > "$target"/etc/sysconfig/network <&2 "warning: cannot autodetect OS version, using '$name' as tag" version=$name fi tar --numeric-owner -c -C "$target" . | docker import - $name:$version docker run -i -t $name:$version echo success rm -rf "$target" docker-0.9.1/contrib/MAINTAINERS0000644000175000017500000000005512314376205014240 0ustar tagtagTianon Gravi (@tianon) docker-0.9.1/contrib/desktop-integration/0000755000175000017500000000000012314376205016535 5ustar tagtagdocker-0.9.1/contrib/desktop-integration/iceweasel/0000755000175000017500000000000012314376205020476 5ustar tagtagdocker-0.9.1/contrib/desktop-integration/iceweasel/Dockerfile0000644000175000017500000000333012314376205022467 0ustar tagtag# VERSION: 0.7 # DESCRIPTION: Create iceweasel container with its dependencies # AUTHOR: Daniel Mizyrycki # COMMENTS: # This file describes how to build a Iceweasel container with all # dependencies installed. It uses native X11 unix socket and alsa # sound devices. Tested on Debian 7.2 # USAGE: # # Download Iceweasel Dockerfile # wget http://raw.github.com/dotcloud/docker/master/contrib/desktop-integration/iceweasel/Dockerfile # # # Build iceweasel image # docker build -t iceweasel -rm . # # # Run stateful data-on-host iceweasel. 
For ephemeral, remove -v /data/iceweasel:/data # docker run -v /data/iceweasel:/data -v /tmp/.X11-unix:/tmp/.X11-unix \ # -v /dev/snd:/dev/snd -lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \ # -e DISPLAY=unix$DISPLAY iceweasel # # # To run stateful dockerized data containers # docker run -volumes-from iceweasel-data -v /tmp/.X11-unix:/tmp/.X11-unix \ # -v /dev/snd:/dev/snd -lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \ # -e DISPLAY=unix$DISPLAY iceweasel docker-version 0.6.5 # Base docker image FROM debian:wheezy MAINTAINER Daniel Mizyrycki # Install Iceweasel and "sudo" RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq iceweasel sudo # create sysadmin account RUN useradd -m -d /data -p saIVpsc0EVTwA sysadmin RUN sed -Ei 's/sudo:x:27:/sudo:x:27:sysadmin/' /etc/group RUN sed -Ei 's/(\%sudo\s+ALL=\(ALL\:ALL\) )ALL/\1 NOPASSWD:ALL/' /etc/sudoers # Autorun iceweasel. -no-remote is necessary to create a new container, as # iceweasel appears to communicate with itself through X11. CMD ["/usr/bin/sudo", "-u", "sysadmin", "-H", "-E", "/usr/bin/iceweasel", "-no-remote"] docker-0.9.1/contrib/desktop-integration/data/0000755000175000017500000000000012314376205017446 5ustar tagtagdocker-0.9.1/contrib/desktop-integration/data/Dockerfile0000644000175000017500000000211212314376205021434 0ustar tagtag# VERSION: 0.1 # DESCRIPTION: Create data image sharing /data volume # AUTHOR: Daniel Mizyrycki # COMMENTS: # This image is used as base for all data containers. # /data volume is owned by sysadmin. # USAGE: # # Download data Dockerfile # wget http://raw.github.com/dotcloud/docker/master/contrib/desktop-integration/data/Dockerfile # # # Build data image # docker build -t data -rm . # # # Create a data container. (eg: iceweasel-data) # docker run -name iceweasel-data data true # # # List data from it # docker run -volumes-from iceweasel-data busybox ls -al /data docker-version 0.6.5 # Smallest base image, just to launch a container FROM busybox MAINTAINER Daniel Mizyrycki # Create a regular user RUN echo 'sysadmin:x:1000:1000::/data:/bin/sh' >> /etc/passwd RUN echo 'sysadmin:x:1000:' >> /etc/group # Create directory for that user RUN mkdir /data RUN chown sysadmin.sysadmin /data # Add content to /data. This will keep sysadmin ownership RUN touch /data/init_volume # Create /data volume VOLUME /data docker-0.9.1/contrib/desktop-integration/README.md0000644000175000017500000000051012314376205020010 0ustar tagtagDesktop Integration =================== The ./contrib/desktop-integration contains examples of typical dockerized desktop applications. 
Examples ======== * Data container: ./data/Dockerfile creates a data image sharing /data volume * Iceweasel: ./iceweasel/Dockerfile shows a way to dockerize a common multimedia application docker-0.9.1/contrib/udev/0000755000175000017500000000000012314376205013506 5ustar tagtagdocker-0.9.1/contrib/udev/80-docker.rules0000644000175000017500000000052712314376205016262 0ustar tagtag# hide docker's loopback devices from udisks, and thus from user desktops SUBSYSTEM=="block", ENV{DM_NAME}=="docker-*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1" SUBSYSTEM=="block", DEVPATH=="/devices/virtual/block/loop*", ATTR{loop/backing_file}=="/var/lib/docker/*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1" docker-0.9.1/contrib/mkseccomp.sample0000644000175000017500000001631112314376205015731 0ustar tagtag/* This sample file is an example for mkseccomp.pl to produce a seccomp file * which restricts syscalls that are only useful for an admin but allows the * vast majority of normal userspace programs to run normally. * * The format of this file is one line per syscall. This is then processed * and passed to 'cpp' to convert the names to numbers using whatever is * correct for your platform. As such C-style comments are permitted. Note * this also means that C preprocessor macros are also allowed. So it is * possible to create groups surrounded by #ifdef/#endif and control their * inclusion via #define (not #include). * * Syscalls that don't exist on your architecture are silently filtered out. * Syscalls marked with (*) are required for a container to spawn a bash * shell successfully (not necessarily full featured). Listing the same * syscall multiple times is no problem. * * If you want to make a list specifically for one application the easiest * way is to run the application under strace, like so: * * $ strace -f -q -c -o strace.out application args... * * Once you have a reasonable sample of the execution of the program, exit * it. The file strace.out will have a summary of the syscalls used. Copy * that list into this file, comment out everything else except the starred * syscalls (which you need for the container to start) and you're done. * * To get the list of syscalls from the strace output this works well for * me * * $ cut -c52 < strace.out * * This sample list was compiled as a combination of all the syscalls * available on i386 and amd64 on Ubuntu Precise, as such it may not contain * everything and not everything may be relevent for your system. This * shouldn't be a problem. 
*/ // Filesystem/File descriptor related access // (*) chdir // (*) chmod chown chown32 close // (*) creat dup // (*) dup2 // (*) dup3 epoll_create epoll_create1 epoll_ctl epoll_ctl_old epoll_pwait epoll_wait epoll_wait_old eventfd eventfd2 faccessat // (*) fadvise64 fadvise64_64 fallocate fanotify_init fanotify_mark ioctl // (*) fchdir fchmod fchmodat fchown fchown32 fchownat fcntl // (*) fcntl64 fdatasync fgetxattr flistxattr flock fremovexattr fsetxattr fstat // (*) fstat64 fstatat64 fstatfs fstatfs64 fsync ftruncate ftruncate64 getcwd // (*) getdents // (*) getdents64 getxattr inotify_add_watch inotify_init inotify_init1 inotify_rm_watch io_cancel io_destroy io_getevents io_setup io_submit lchown lchown32 lgetxattr link linkat listxattr llistxattr llseek _llseek lremovexattr lseek // (*) lsetxattr lstat lstat64 mkdir mkdirat mknod mknodat newfstatat _newselect oldfstat oldlstat oldolduname oldstat olduname oldwait4 open // (*) openat // (*) pipe // (*) pipe2 poll ppoll pread64 preadv futimesat pselect6 pwrite64 pwritev read // (*) readahead readdir readlink readlinkat readv removexattr rename renameat rmdir select sendfile sendfile64 setxattr splice stat // (*) stat64 statfs // (*) statfs64 symlink symlinkat sync sync_file_range sync_file_range2 syncfs tee truncate truncate64 umask unlink unlinkat ustat utime utimensat utimes write // (*) writev // Network related accept accept4 bind // (*) connect // (*) getpeername getsockname // (*) getsockopt listen recv recvfrom // (*) recvmmsg recvmsg send sendmmsg sendmsg sendto // (*) setsockopt shutdown socket // (*) socketcall socketpair sethostname // (*) // Signal related pause rt_sigaction // (*) rt_sigpending rt_sigprocmask // (*) rt_sigqueueinfo rt_sigreturn // (*) rt_sigsuspend rt_sigtimedwait rt_tgsigqueueinfo sigaction sigaltstack // (*) signal signalfd signalfd4 sigpending sigprocmask sigreturn sigsuspend // Other needed POSIX alarm brk // (*) clock_adjtime clock_getres clock_gettime clock_nanosleep //clock_settime gettimeofday nanosleep nice sysinfo syslog time timer_create timer_delete timerfd_create timerfd_gettime timerfd_settime timer_getoverrun timer_gettime timer_settime times uname // (*) // Memory control madvise mbind mincore mlock mlockall mmap // (*) mmap2 mprotect // (*) mremap msync munlock munlockall munmap // (*) remap_file_pages set_mempolicy vmsplice // Process control capget capset // (*) clone // (*) execve // (*) exit // (*) exit_group // (*) fork getcpu getpgid getpgrp // (*) getpid // (*) getppid // (*) getpriority getresgid getresgid32 getresuid getresuid32 getrlimit // (*) getrusage getsid getuid // (*) getuid32 getegid // (*) getegid32 geteuid // (*) geteuid32 getgid // (*) getgid32 getgroups getgroups32 getitimer get_mempolicy kill //personality prctl prlimit64 sched_getaffinity sched_getparam sched_get_priority_max sched_get_priority_min sched_getscheduler sched_rr_get_interval //sched_setaffinity //sched_setparam //sched_setscheduler sched_yield setfsgid setfsgid32 setfsuid setfsuid32 setgid setgid32 setgroups setgroups32 setitimer setpgid // (*) setpriority setregid setregid32 setresgid setresgid32 setresuid setresuid32 setreuid setreuid32 setrlimit setsid setuid setuid32 ugetrlimit vfork wait4 // (*) waitid waitpid // IPC ipc mq_getsetattr mq_notify mq_open mq_timedreceive mq_timedsend mq_unlink msgctl msgget msgrcv msgsnd semctl semget semop semtimedop shmat shmctl shmdt shmget // Linux specific, mostly needed for thread-related stuff arch_prctl // (*) get_robust_list get_thread_area gettid futex // 
(*) restart_syscall // (*) set_robust_list // (*) set_thread_area set_tid_address // (*) tgkill tkill // Admin syscalls, these are blocked //acct //adjtimex //bdflush //chroot //create_module //delete_module //get_kernel_syms // Obsolete //idle // Obsolete //init_module //ioperm //iopl //ioprio_get //ioprio_set //kexec_load //lookup_dcookie // oprofile only? //migrate_pages // NUMA //modify_ldt //mount //move_pages // NUMA //name_to_handle_at // NFS server //nfsservctl // NFS server //open_by_handle_at // NFS server //perf_event_open //pivot_root //process_vm_readv // For debugger //process_vm_writev // For debugger //ptrace // For debugger //query_module //quotactl //reboot //setdomainname //setns //settimeofday //sgetmask // Obsolete //ssetmask // Obsolete //stime //swapoff //swapon //_sysctl //sysfs //sys_setaltroot //umount //umount2 //unshare //uselib //vhangup //vm86 //vm86old // Kernel key management //add_key //keyctl //request_key // Unimplemented //afs_syscall //break //ftime //getpmsg //gtty //lock //madvise1 //mpx //prof //profil //putpmsg //security //stty //tuxcall //ulimit //vserver docker-0.9.1/contrib/README0000644000175000017500000000036712314376205013431 0ustar tagtagThe `contrib` directory contains scripts, images, and other helpful things which are not part of the core docker distribution. Please note that they could be out of date, since they do not receive the same attention as the rest of the repository. docker-0.9.1/contrib/mkimage-arch.sh0000755000175000017500000000401412314376205015426 0ustar tagtag#!/usr/bin/env bash # Generate a minimal filesystem for archlinux and load it into the local # docker as "archlinux" # requires root set -e hash pacstrap &>/dev/null || { echo "Could not find pacstrap. Run pacman -S arch-install-scripts" exit 1 } hash expect &>/dev/null || { echo "Could not find expect. Run pacman -S expect" exit 1 } ROOTFS=$(mktemp -d /tmp/rootfs-archlinux-XXXXXXXXXX) chmod 755 $ROOTFS # packages to ignore for space savings PKGIGNORE=linux,jfsutils,lvm2,cryptsetup,groff,man-db,man-pages,mdadm,pciutils,pcmciautils,reiserfsprogs,s-nail,xfsprogs expect < $ROOTFS/etc/locale.gen arch-chroot $ROOTFS locale-gen arch-chroot $ROOTFS /bin/sh -c 'echo "Server = https://mirrors.kernel.org/archlinux/\$repo/os/\$arch" > /etc/pacman.d/mirrorlist' # udev doesn't work in containers, rebuild /dev DEV=$ROOTFS/dev rm -rf $DEV mkdir -p $DEV mknod -m 666 $DEV/null c 1 3 mknod -m 666 $DEV/zero c 1 5 mknod -m 666 $DEV/random c 1 8 mknod -m 666 $DEV/urandom c 1 9 mkdir -m 755 $DEV/pts mkdir -m 1777 $DEV/shm mknod -m 666 $DEV/tty c 5 0 mknod -m 600 $DEV/console c 5 1 mknod -m 666 $DEV/tty0 c 4 0 mknod -m 666 $DEV/full c 1 7 mknod -m 600 $DEV/initctl p mknod -m 666 $DEV/ptmx c 5 2 tar --numeric-owner -C $ROOTFS -c . | docker import - archlinux docker run -i -t archlinux echo Success. rm -rf $ROOTFS docker-0.9.1/contrib/zfs/0000755000175000017500000000000012314376205013345 5ustar tagtagdocker-0.9.1/contrib/zfs/MAINTAINERS0000644000175000017500000000006412314376205015042 0ustar tagtagGurjeet Singh (gurjeet.singh.im) docker-0.9.1/contrib/zfs/README.md0000644000175000017500000000112412314376205014622 0ustar tagtag# ZFS Storage Driver This is a placeholder to declare the presence and status of ZFS storage driver for containers. The current development is done in Gurjeet Singh's fork of Docker, under the branch named [zfs_driver]. 
[zfs_driver]: https://github.com/gurjeet/docker/tree/zfs_driver # Status Alpha: The code is now capable of creating, running and destroying containers and images. The code is under development. Contributions in the form of suggestions, code-reviews, and patches are welcome. Please send the communication to gurjeet@singh.im and CC at least one Docker mailing list. docker-0.9.1/contrib/mkimage-debootstrap.sh0000755000175000017500000002154312314376205017045 0ustar tagtag#!/usr/bin/env bash set -e variant='minbase' include='iproute,iputils-ping' arch='amd64' # intentionally undocumented for now skipDetection= strictDebootstrap= justTar= usage() { echo >&2 echo >&2 "usage: $0 [options] repo suite [mirror]" echo >&2 echo >&2 'options: (not recommended)' echo >&2 " -p set an http_proxy for debootstrap" echo >&2 " -v $variant # change default debootstrap variant" echo >&2 " -i $include # change default package includes" echo >&2 " -d # strict debootstrap (do not apply any docker-specific tweaks)" echo >&2 " -s # skip version detection and tagging (ie, precise also tagged as 12.04)" echo >&2 " # note that this will also skip adding universe and/or security/updates to sources.list" echo >&2 " -t # just create a tarball, especially for dockerbrew (uses repo as tarball name)" echo >&2 echo >&2 " ie: $0 username/debian squeeze" echo >&2 " $0 username/debian squeeze http://ftp.uk.debian.org/debian/" echo >&2 echo >&2 " ie: $0 username/ubuntu precise" echo >&2 " $0 username/ubuntu precise http://mirrors.melbourne.co.uk/ubuntu/" echo >&2 echo >&2 " ie: $0 -t precise.tar.bz2 precise" echo >&2 " $0 -t wheezy.tgz wheezy" echo >&2 " $0 -t wheezy-uk.tar.xz wheezy http://ftp.uk.debian.org/debian/" echo >&2 } # these should match the names found at http://www.debian.org/releases/ debianStable=wheezy debianUnstable=sid # this should match the name found at http://releases.ubuntu.com/ ubuntuLatestLTS=precise # this should match the name found at http://releases.tanglu.org/ tangluLatest=aequorea while getopts v:i:a:p:dst name; do case "$name" in p) http_proxy="$OPTARG" ;; v) variant="$OPTARG" ;; i) include="$OPTARG" ;; a) arch="$OPTARG" ;; d) strictDebootstrap=1 ;; s) skipDetection=1 ;; t) justTar=1 ;; ?) usage exit 0 ;; esac done shift $(($OPTIND - 1)) repo="$1" suite="$2" mirror="${3:-}" # stick to the default debootstrap mirror if one is not provided if [ ! "$repo" ] || [ ! "$suite" ]; then usage exit 1 fi # some rudimentary detection for whether we need to "sudo" our docker calls docker='' if docker version > /dev/null 2>&1; then docker='docker' elif sudo docker version > /dev/null 2>&1; then docker='sudo docker' elif command -v docker > /dev/null 2>&1; then docker='docker' else echo >&2 "warning: either docker isn't installed, or your current user cannot run it;" echo >&2 " this script is not likely to work as expected" sleep 3 docker='docker' # give us a command-not-found later fi # make sure we have an absolute path to our final tarball so we can still reference it properly after we change directory if [ "$justTar" ]; then if [ ! 
-d "$(dirname "$repo")" ]; then echo >&2 "error: $(dirname "$repo") does not exist" exit 1 fi repo="$(cd "$(dirname "$repo")" && pwd -P)/$(basename "$repo")" fi # will be filled in later, if [ -z "$skipDetection" ] lsbDist='' target="/tmp/docker-rootfs-debootstrap-$suite-$$-$RANDOM" cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" returnTo="$(pwd -P)" if [ "$suite" = 'lucid' ]; then # lucid fails and doesn't include gpgv in minbase; "apt-get update" fails include+=',gpgv' fi set -x # bootstrap mkdir -p "$target" sudo http_proxy=$http_proxy debootstrap --verbose --variant="$variant" --include="$include" --arch="$arch" "$suite" "$target" "$mirror" cd "$target" if [ -z "$strictDebootstrap" ]; then # prevent init scripts from running during install/update # policy-rc.d (for most scripts) echo $'#!/bin/sh\nexit 101' | sudo tee usr/sbin/policy-rc.d > /dev/null sudo chmod +x usr/sbin/policy-rc.d # initctl (for some pesky upstart scripts) sudo chroot . dpkg-divert --local --rename --add /sbin/initctl sudo ln -sf /bin/true sbin/initctl # see https://github.com/dotcloud/docker/issues/446#issuecomment-16953173 # shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB) sudo chroot . apt-get clean if strings usr/bin/dpkg | grep -q unsafe-io; then # while we're at it, apt is unnecessarily slow inside containers # this forces dpkg not to call sync() after package extraction and speeds up install # the benefit is huge on spinning disks, and the penalty is nonexistent on SSD or decent server virtualization echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null # we have this wrapped up in an "if" because the "force-unsafe-io" # option was added in dpkg 1.15.8.6 # (see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=584254#82), # and ubuntu lucid/10.04 only has 1.15.5.6 fi # we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context) { aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' echo "DPkg::Post-Invoke { ${aptGetClean} };" echo "APT::Update::Post-Invoke { ${aptGetClean} };" echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";' } | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null # and remove the translations, too echo 'Acquire::Languages "none";' | sudo tee etc/apt/apt.conf.d/no-languages > /dev/null # helpful undo lines for each the above tweaks (for lack of a better home to keep track of them): # rm /usr/sbin/policy-rc.d # rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl # rm /etc/dpkg/dpkg.cfg.d/02apt-speedup # rm /etc/apt/apt.conf.d/no-cache # rm /etc/apt/apt.conf.d/no-languages if [ -z "$skipDetection" ]; then # see also rudimentary platform detection in hack/install.sh lsbDist='' if [ -r etc/lsb-release ]; then lsbDist="$(. 
etc/lsb-release && echo "$DISTRIB_ID")" fi if [ -z "$lsbDist" ] && [ -r etc/debian_version ]; then lsbDist='Debian' fi case "$lsbDist" in Debian) # add the updates and security repositories if [ "$suite" != "$debianUnstable" -a "$suite" != 'unstable' ]; then # ${suite}-updates only applies to non-unstable sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list # same for security updates echo "deb http://security.debian.org/ $suite/updates main" | sudo tee -a etc/apt/sources.list > /dev/null fi ;; Ubuntu) # add the universe, updates, and security repositories sudo sed -i " s/ $suite main$/ $suite main universe/; p; s/ $suite main/ ${suite}-updates main/; p; s/ $suite-updates main/ ${suite}-security main/ " etc/apt/sources.list ;; Tanglu) # add the updates repository if [ "$suite" = "$tangluLatest" ]; then # ${suite}-updates only applies to stable Tanglu versions sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list fi ;; SteamOS) # add contrib and non-free sudo sed -i "s/ $suite main$/ $suite main contrib non-free/" etc/apt/sources.list ;; esac fi # make sure our packages lists are as up to date as we can get them sudo chroot . apt-get update fi if [ "$justTar" ]; then # create the tarball file so it has the right permissions (ie, not root) touch "$repo" # fill the tarball sudo tar --numeric-owner -caf "$repo" . else # create the image (and tag $repo:$suite) sudo tar --numeric-owner -c . | $docker import - $repo:$suite # test the image $docker run -i -t $repo:$suite echo success if [ -z "$skipDetection" ]; then case "$lsbDist" in Debian) if [ "$suite" = "$debianStable" -o "$suite" = 'stable' ] && [ -r etc/debian_version ]; then # tag latest $docker tag $repo:$suite $repo:latest if [ -r etc/debian_version ]; then # tag the specific debian release version (which is only reasonable to tag on debian stable) ver=$(cat etc/debian_version) $docker tag $repo:$suite $repo:$ver fi fi ;; Ubuntu) if [ "$suite" = "$ubuntuLatestLTS" ]; then # tag latest $docker tag $repo:$suite $repo:latest fi if [ -r etc/lsb-release ]; then lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" if [ "$lsbRelease" ]; then # tag specific Ubuntu version number, if available (12.04, etc.) $docker tag $repo:$suite $repo:$lsbRelease fi fi ;; Tanglu) if [ "$suite" = "$tangluLatest" ]; then # tag latest $docker tag $repo:$suite $repo:latest fi if [ -r etc/lsb-release ]; then lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" if [ "$lsbRelease" ]; then # tag specific Tanglu version number, if available (1.0, 2.0, etc.) $docker tag $repo:$suite $repo:$lsbRelease fi fi ;; SteamOS) if [ -r etc/lsb-release ]; then lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" if [ "$lsbRelease" ]; then # tag specific SteamOS version number, if available (1.0, 2.0, etc.) $docker tag $repo:$suite $repo:$lsbRelease fi fi ;; esac fi fi # cleanup cd "$returnTo" sudo rm -rf "$target" docker-0.9.1/contrib/vagrant-docker/0000755000175000017500000000000012314376205015452 5ustar tagtagdocker-0.9.1/contrib/vagrant-docker/README.md0000644000175000017500000000402012314376205016725 0ustar tagtag# Vagrant integration Currently there are at least 4 different projects that we are aware of that deals with integration with [Vagrant](http://vagrantup.com/) at different levels. 
One approach is to use Docker as a [provisioner](http://docs.vagrantup.com/v2/provisioning/index.html) which means you can create containers and pull base images on VMs using Docker's CLI and the other is to use Docker as a [provider](http://docs.vagrantup.com/v2/providers/index.html), meaning you can use Vagrant to control Docker containers. ### Provisioners * [Vocker](https://github.com/fgrehm/vocker) * [Ventriloquist](https://github.com/fgrehm/ventriloquist) ### Providers * [docker-provider](https://github.com/fgrehm/docker-provider) * [vagrant-shell](https://github.com/destructuring/vagrant-shell) ## Setting up Vagrant-docker with the Remote API The initial Docker upstart script will not work because it runs on `127.0.0.1`, which is not accessible to the host machine. Instead, we need to change the script to connect to `0.0.0.0`. To do this, modify `/etc/init/docker.conf` to look like this: ``` description "Docker daemon" start on filesystem and started lxc-net stop on runlevel [!2345] respawn script /usr/bin/docker -d -H=tcp://0.0.0.0:4243 end script ``` Once that's done, you need to set up a SSH tunnel between your host machine and the vagrant machine that's running Docker. This can be done by running the following command in a host terminal: ``` ssh -L 4243:localhost:4243 -p 2222 vagrant@localhost ``` (The first 4243 is what your host can connect to, the second 4243 is what port Docker is running on in the vagrant machine, and the 2222 is the port Vagrant is providing for SSH. If VirtualBox is the VM you're using, you can see what value "2222" should be by going to: Network > Adapter 1 > Advanced > Port Forwarding in the VirtualBox GUI.) Note that because the port has been changed, to run docker commands from within the command line you must run them like this: ``` sudo docker -H 0.0.0.0:4243 < commands for docker > ``` docker-0.9.1/contrib/init/0000755000175000017500000000000012314376205013506 5ustar tagtagdocker-0.9.1/contrib/init/sysvinit-debian/0000755000175000017500000000000012314376205016616 5ustar tagtagdocker-0.9.1/contrib/init/sysvinit-debian/docker.default0000644000175000017500000000075312314376205021440 0ustar tagtag# Docker Upstart and SysVinit configuration file # Customize location of Docker binary (especially for development testing). #DOCKER="/usr/local/bin/docker" # Use DOCKER_OPTS to modify the daemon startup options. #DOCKER_OPTS="-dns 8.8.8.8 -dns 8.8.4.4" # If you need Docker to use an HTTP proxy, it can also be specified here. #export http_proxy="http://127.0.0.1:3128/" # This is also a handy place to tweak where Docker's temporary files go. #export TMPDIR="/mnt/bigdrive/docker-tmp" docker-0.9.1/contrib/init/sysvinit-debian/docker0000755000175000017500000000573012314376205020020 0ustar tagtag#!/bin/sh ### BEGIN INIT INFO # Provides: docker # Required-Start: $syslog $remote_fs # Required-Stop: $syslog $remote_fs # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Create lightweight, portable, self-sufficient containers. # Description: # Docker is an open-source project to easily create lightweight, portable, # self-sufficient containers from any application. The same container that a # developer builds and tests on a laptop can run at scale, in production, on # VMs, bare metal, OpenStack clusters, public clouds and more. 
### END INIT INFO export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin BASE=$(basename $0) # modify these in /etc/default/$BASE (/etc/default/docker) DOCKER=/usr/bin/$BASE DOCKER_PIDFILE=/var/run/$BASE.pid DOCKER_LOGFILE=/var/log/$BASE.log DOCKER_OPTS= DOCKER_DESC="Docker" # Get lsb functions . /lib/lsb/init-functions if [ -f /etc/default/$BASE ]; then . /etc/default/$BASE fi # see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it) if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then log_failure_msg "$DOCKER_DESC is managed via upstart, try using service $BASE $1" exit 1 fi # Check docker is present if [ ! -x $DOCKER ]; then log_failure_msg "$DOCKER not present or not executable" exit 1 fi fail_unless_root() { if [ "$(id -u)" != '0' ]; then log_failure_msg "$DOCKER_DESC must be run as root" exit 1 fi } cgroupfs_mount() { # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount if grep -v '^#' /etc/fstab | grep -q cgroup \ || [ ! -e /proc/cgroups ] \ || [ ! -d /sys/fs/cgroup ]; then return fi if ! mountpoint -q /sys/fs/cgroup; then mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup fi ( cd /sys/fs/cgroup for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do mkdir -p $sys if ! mountpoint -q $sys; then if ! mount -n -t cgroup -o $sys cgroup $sys; then rmdir $sys || true fi fi done ) } case "$1" in start) fail_unless_root cgroupfs_mount touch "$DOCKER_LOGFILE" chgrp docker "$DOCKER_LOGFILE" log_begin_msg "Starting $DOCKER_DESC: $BASE" start-stop-daemon --start --background \ --no-close \ --exec "$DOCKER" \ --pidfile "$DOCKER_PIDFILE" \ -- \ -d -p "$DOCKER_PIDFILE" \ $DOCKER_OPTS \ >> "$DOCKER_LOGFILE" 2>&1 log_end_msg $? ;; stop) fail_unless_root log_begin_msg "Stopping $DOCKER_DESC: $BASE" start-stop-daemon --stop --pidfile "$DOCKER_PIDFILE" log_end_msg $? 
;; restart) fail_unless_root docker_pid=`cat "$DOCKER_PIDFILE" 2>/dev/null` [ -n "$docker_pid" ] \ && ps -p $docker_pid > /dev/null 2>&1 \ && $0 stop $0 start ;; force-reload) fail_unless_root $0 restart ;; status) status_of_proc -p "$DOCKER_PIDFILE" "$DOCKER" docker ;; *) echo "Usage: $0 {start|stop|restart|status}" exit 1 ;; esac exit 0 docker-0.9.1/contrib/init/systemd/0000755000175000017500000000000012314376205015176 5ustar tagtagdocker-0.9.1/contrib/init/systemd/docker.service0000644000175000017500000000037012314376205020027 0ustar tagtag[Unit] Description=Docker Application Container Engine Documentation=http://docs.docker.io After=network.target [Service] ExecStart=/usr/bin/docker -d Restart=on-failure LimitNOFILE=1048576 LimitNPROC=1048576 [Install] WantedBy=multi-user.target docker-0.9.1/contrib/init/systemd/socket-activation/0000755000175000017500000000000012314376205020625 5ustar tagtagdocker-0.9.1/contrib/init/systemd/socket-activation/docker.service0000644000175000017500000000040112314376205023451 0ustar tagtag[Unit] Description=Docker Application Container Engine Documentation=http://docs.docker.io After=network.target [Service] ExecStart=/usr/bin/docker -d -H fd:// Restart=on-failure LimitNOFILE=1048576 LimitNPROC=1048576 [Install] WantedBy=multi-user.target docker-0.9.1/contrib/init/systemd/socket-activation/docker.socket0000644000175000017500000000017412314376205023310 0ustar tagtag[Unit] Description=Docker Socket for the API [Socket] ListenStream=/var/run/docker.sock [Install] WantedBy=sockets.target docker-0.9.1/contrib/init/openrc/0000755000175000017500000000000012314376205014774 5ustar tagtagdocker-0.9.1/contrib/init/openrc/docker.initd0000755000175000017500000000142512314376205017301 0ustar tagtag#!/sbin/runscript # Copyright 1999-2013 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 # $Header: $ DOCKER_LOGFILE=${DOCKER_LOGFILE:-/var/log/${SVCNAME}.log} DOCKER_PIDFILE=${DOCKER_PIDFILE:-/run/${SVCNAME}.pid} DOCKER_BINARY=${DOCKER_BINARY:-/usr/bin/docker} DOCKER_OPTS=${DOCKER_OPTS:-} start() { checkpath -f -m 0644 -o root:docker "$DOCKER_LOGFILE" ebegin "Starting docker daemon" start-stop-daemon --start --background \ --exec "$DOCKER_BINARY" \ --pidfile "$DOCKER_PIDFILE" \ --stdout "$DOCKER_LOGFILE" \ --stderr "$DOCKER_LOGFILE" \ -- -d -p "$DOCKER_PIDFILE" \ $DOCKER_OPTS eend $? } stop() { ebegin "Stopping docker daemon" start-stop-daemon --stop \ --exec "$DOCKER_BINARY" \ --pidfile "$DOCKER_PIDFILE" eend $? } docker-0.9.1/contrib/init/openrc/docker.confd0000644000175000017500000000054412314376205017261 0ustar tagtag# /etc/conf.d/docker: config file for /etc/init.d/docker # where the docker daemon output gets piped #DOCKER_LOGFILE="/var/log/docker.log" # where docker's pid get stored #DOCKER_PIDFILE="/run/docker.pid" # where the docker daemon itself is run from #DOCKER_BINARY="/usr/bin/docker" # any other random options you want to pass to docker DOCKER_OPTS="" docker-0.9.1/contrib/init/upstart/0000755000175000017500000000000012314376205015210 5ustar tagtagdocker-0.9.1/contrib/init/upstart/docker.conf0000644000175000017500000000161212314376205017326 0ustar tagtagdescription "Docker daemon" start on filesystem stop on runlevel [!2345] respawn pre-start script # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount if grep -v '^#' /etc/fstab | grep -q cgroup \ || [ ! -e /proc/cgroups ] \ || [ ! -d /sys/fs/cgroup ]; then exit 0 fi if ! 
mountpoint -q /sys/fs/cgroup; then mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup fi ( cd /sys/fs/cgroup for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do mkdir -p $sys if ! mountpoint -q $sys; then if ! mount -n -t cgroup -o $sys cgroup $sys; then rmdir $sys || true fi fi done ) end script script # modify these in /etc/default/$UPSTART_JOB (/etc/default/docker) DOCKER=/usr/bin/$UPSTART_JOB DOCKER_OPTS= if [ -f /etc/default/$UPSTART_JOB ]; then . /etc/default/$UPSTART_JOB fi "$DOCKER" -d $DOCKER_OPTS end script docker-0.9.1/contrib/init/sysvinit-redhat/0000755000175000017500000000000012314376205016643 5ustar tagtagdocker-0.9.1/contrib/init/sysvinit-redhat/docker0000755000175000017500000000400612314376205020040 0ustar tagtag#!/bin/sh # # /etc/rc.d/init.d/docker # # Daemon for docker.io # # chkconfig: 2345 95 95 # description: Daemon for docker.io ### BEGIN INIT INFO # Provides: docker # Required-Start: $network cgconfig # Required-Stop: # Should-Start: # Should-Stop: # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: start and stop docker # Description: Daemon for docker.io ### END INIT INFO # Source function library. . /etc/rc.d/init.d/functions prog="docker" exec="/usr/bin/$prog" pidfile="/var/run/$prog.pid" lockfile="/var/lock/subsys/$prog" logfile="/var/log/$prog" [ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog prestart() { service cgconfig status > /dev/null if [[ $? != 0 ]]; then service cgconfig start fi } start() { [ -x $exec ] || exit 5 if ! [ -f $pidfile ]; then prestart printf "Starting $prog:\t" echo "\n$(date)\n" >> $logfile $exec -d $other_args &>> $logfile & pid=$! touch $lockfile success echo else failure echo printf "$pidfile still exists...\n" exit 7 fi } stop() { echo -n $"Stopping $prog: " killproc -p $pidfile $prog retval=$? echo [ $retval -eq 0 ] && rm -f $lockfile return $retval } restart() { stop start } reload() { restart } force_reload() { restart } rh_status() { status -p $pidfile $prog } rh_status_q() { rh_status >/dev/null 2>&1 } case "$1" in start) rh_status_q && exit 0 $1 ;; stop) rh_status_q || exit 0 $1 ;; restart) $1 ;; reload) rh_status_q || exit 7 $1 ;; force-reload) force_reload ;; status) rh_status ;; condrestart|try-restart) rh_status_q || exit 0 restart ;; *) echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" exit 2 esac exit $? 
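# Illustrative usage note (not part of the upstream script): on a Red Hat style
# system this file would typically be installed as /etc/rc.d/init.d/docker and
# enabled with something like:
#   chkconfig --add docker
#   chkconfig docker on
#   service docker start
# Extra daemon flags are picked up from /etc/sysconfig/docker via $other_args,
# as shown in the sysconfig file that follows.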
docker-0.9.1/contrib/init/sysvinit-redhat/docker.sysconfig0000644000175000017500000000031012314376205022032 0ustar tagtag# /etc/sysconfig/docker # # Other arguments to pass to the docker daemon process # These will be parsed by the sysv initscript and appended # to the arguments list passed to docker -d other_args="" docker-0.9.1/contrib/completion/0000755000175000017500000000000012314376205014714 5ustar tagtagdocker-0.9.1/contrib/completion/fish/0000755000175000017500000000000012314376205015645 5ustar tagtagdocker-0.9.1/contrib/completion/fish/docker.fish0000644000175000017500000004547412314376205020005 0ustar tagtag# docker.fish - docker completions for fish shell # # This file is generated by gen_docker_fish_completions.py from: # https://github.com/barnybug/docker-fish-completion # # To install the completions: # mkdir -p ~/.config/fish/completions # cp docker.fish ~/.config/fish/completions # # Completion supported: # - parameters # - commands # - containers # - images # - repositories function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand' for i in (commandline -opc) if contains -- $i attach build commit cp diff events export history images import info insert inspect kill load login logs port ps pull push restart rm rmi run save search start stop tag top version wait return 1 end end return 0 end function __fish_print_docker_containers --description 'Print a list of docker containers' -a select switch $select case running docker ps -a --no-trunc | awk 'NR>1' | awk 'BEGIN {FS=" +"}; $5 ~ "^Up" {print $1 "\n" $(NF-1)}' | tr ',' '\n' case stopped docker ps -a --no-trunc | awk 'NR>1' | awk 'BEGIN {FS=" +"}; $5 ~ "^Exit" {print $1 "\n" $(NF-1)}' | tr ',' '\n' case all docker ps -a --no-trunc | awk 'NR>1' | awk 'BEGIN {FS=" +"}; {print $1 "\n" $(NF-1)}' | tr ',' '\n' end end function __fish_print_docker_images --description 'Print a list of docker images' docker images | awk 'NR>1' | grep -v '' | awk '{print $1":"$2}' end function __fish_print_docker_repositories --description 'Print a list of docker repositories' docker images | awk 'NR>1' | grep -v '' | awk '{print $1}' | sort | uniq end # common options complete -c docker -f -n '__fish_docker_no_subcommand' -s D -l debug -d 'Enable debug mode' complete -c docker -f -n '__fish_docker_no_subcommand' -s H -l host -d 'tcp://host:port, unix://path/to/socket, fd://* or fd://socketfd to use in daemon mode. 
Multiple sockets can be specified' complete -c docker -f -n '__fish_docker_no_subcommand' -l api-enable-cors -d 'Enable CORS headers in the remote API' complete -c docker -f -n '__fish_docker_no_subcommand' -s b -l bridge -d "Attach containers to a pre-existing network bridge; use 'none' to disable container networking" complete -c docker -f -n '__fish_docker_no_subcommand' -l bip -d "Use this CIDR notation address for the network bridge's IP, not compatible with -b" complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable daemon mode' complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force docker to use specific DNS servers' complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the docker runtime' complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Enable inter-container communication' complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports' complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Disable enabling of net.ipv4.ip_forward' complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Disable docker's addition of iptables rules" complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if not default route is available' complete -c docker -f -n '__fish_docker_no_subcommand' -s p -l pidfile -d 'Path to use for daemon PID file' complete -c docker -f -n '__fish_docker_no_subcommand' -s r -l restart -d 'Restart previously running containers' complete -c docker -f -n '__fish_docker_no_subcommand' -s s -l storage-driver -d 'Force the docker runtime to use a specific storage driver' complete -c docker -f -n '__fish_docker_no_subcommand' -s v -l version -d 'Print version information and quit' # subcommands # attach complete -c docker -f -n '__fish_docker_no_subcommand' -a attach -d 'Attach to a running container' complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l no-stdin -d 'Do not attach stdin' complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 'Proxify all received signal to the process (even in non-tty mode)' complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_print_docker_containers running)' -d "Container" # build complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build a container from a Dockerfile' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l no-cache -d 'Do not use cache when building the image' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress verbose build output' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l rm -d 'Remove intermediate containers after a successful build' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d 'Repository name (and optionally a tag) to be applied to the resulting image in case of success' # commit complete -c docker -f -n '__fish_docker_no_subcommand' -a commit -d "Create a new image from a container's changes" complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (eg. "John Hannibal Smith "' complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s m -l message -d 'Commit message' complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l run -d 'Config automatically applied when the image is run. 
(ex: -run=\'{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}\')' complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers all)' -d "Container" # cp complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d 'Copy files/folders from the containers filesystem to the host path' # diff complete -c docker -f -n '__fish_docker_no_subcommand' -a diff -d "Inspect changes on a container's filesystem" complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -a '(__fish_print_docker_containers all)' -d "Container" # events complete -c docker -f -n '__fish_docker_no_subcommand' -a events -d 'Get real time events from the server' complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l since -d 'Show previously created events and then stream.' # export complete -c docker -f -n '__fish_docker_no_subcommand' -a export -d 'Stream the contents of a container as a tar archive' complete -c docker -A -f -n '__fish_seen_subcommand_from export' -a '(__fish_print_docker_containers all)' -d "Container" # history complete -c docker -f -n '__fish_docker_no_subcommand' -a history -d 'Show the history of an image' complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l no-trunc -d "Don't truncate output" complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'only show numeric IDs' complete -c docker -A -f -n '__fish_seen_subcommand_from history' -a '(__fish_print_docker_images)' -d "Image" # images complete -c docker -f -n '__fish_docker_no_subcommand' -a images -d 'List images' complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'show all images (by default filter out the intermediate images used to build)' complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l no-trunc -d "Don't truncate output" complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'only show numeric IDs' complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s t -l tree -d 'output graph in tree format' complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s v -l viz -d 'output graph in graphviz format' complete -c docker -A -f -n '__fish_seen_subcommand_from images' -a '(__fish_print_docker_repositories)' -d "Repository" # import complete -c docker -f -n '__fish_docker_no_subcommand' -a import -d 'Create a new filesystem image from the contents of a tarball' # info complete -c docker -f -n '__fish_docker_no_subcommand' -a info -d 'Display system-wide information' # insert complete -c docker -f -n '__fish_docker_no_subcommand' -a insert -d 'Insert a file in an image' complete -c docker -A -f -n '__fish_seen_subcommand_from insert' -a '(__fish_print_docker_images)' -d "Image" # inspect complete -c docker -f -n '__fish_docker_no_subcommand' -a inspect -d 'Return low-level information on a container' complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s f -l format -d 'Format the output using the given go template.' 
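# inspect accepts either an image or a container, so both lists are offered below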
complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_images)' -d "Image" complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers running)' -d "Container" # kill complete -c docker -f -n '__fish_docker_no_subcommand' -a kill -d 'Kill a running container' complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -s s -l signal -d 'Signal to send to the container' complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -a '(__fish_print_docker_containers running)' -d "Container" # load complete -c docker -f -n '__fish_docker_no_subcommand' -a load -d 'Load an image from a tar archive' # login complete -c docker -f -n '__fish_docker_no_subcommand' -a login -d 'Register or Login to the docker registry server' complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s e -l email -d 'email' complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'password' complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u -l username -d 'username' # logs complete -c docker -f -n '__fish_docker_no_subcommand' -a logs -d 'Fetch the logs of a container' complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s f -l follow -d 'Follow log output' complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -a '(__fish_print_docker_containers running)' -d "Container" # port complete -c docker -f -n '__fish_docker_no_subcommand' -a port -d 'Lookup the public-facing port which is NAT-ed to PRIVATE_PORT' complete -c docker -A -f -n '__fish_seen_subcommand_from port' -a '(__fish_print_docker_containers running)' -d "Container" # ps complete -c docker -f -n '__fish_docker_no_subcommand' -a ps -d 'List containers' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s a -l all -d 'Show all containers. Only running containers are shown by default.' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before-id -d 'Show only container created before Id, include non-running ones.' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s l -l latest -d 'Show only the latest created container, include non-running ones.' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s n -d 'Show n last created containers, include non-running ones.' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l no-trunc -d "Don't truncate output" complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s q -l quiet -d 'Only display numeric IDs' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display sizes' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since-id -d 'Show only containers created since Id, include non-running ones.' 
# pull complete -c docker -f -n '__fish_docker_no_subcommand' -a pull -d 'Pull an image or a repository from the docker registry server' complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -s t -l tag -d 'Download tagged image in repository' complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_images)' -d "Image" complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_repositories)' -d "Repository" # push complete -c docker -f -n '__fish_docker_no_subcommand' -a push -d 'Push an image or a repository to the docker registry server' complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_images)' -d "Image" complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_repositories)' -d "Repository" # restart complete -c docker -f -n '__fish_docker_no_subcommand' -a restart -d 'Restart a running container' complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -s t -l time -d 'Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10' complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -a '(__fish_print_docker_containers running)' -d "Container" # rm complete -c docker -f -n '__fish_docker_no_subcommand' -a rm -d 'Remove one or more containers' complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s l -l link -d 'Remove the specified link and not the underlying container' complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s v -l volumes -d 'Remove the volumes associated to the container' complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -a '(__fish_print_docker_containers stopped)' -d "Container" # rmi complete -c docker -f -n '__fish_docker_no_subcommand' -a rmi -d 'Remove one or more images' complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -a '(__fish_print_docker_images)' -d "Image" # run complete -c docker -f -n '__fish_docker_no_subcommand' -a run -d 'Run a command in a new container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s P -l publish-all -d 'Publish all exposed ports to the host interfaces' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s a -l attach -d 'Attach to stdin, stdout or stderr.' 
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s c -l cpu-shares -d 'CPU shares (relative weight)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cidfile -d 'Write the container ID to the file' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s d -l detach -d 'Detached mode: Run container in the background, print new container id' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns -d 'Set custom dns servers' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s e -l env -d 'Set environment variables' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l entrypoint -d 'Overwrite the default entrypoint of the image' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l expose -d 'Expose a port from the container without publishing it to your host' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s h -l hostname -d 'Container host name' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s i -l interactive -d 'Keep stdin open even if not attached' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l link -d 'Add link to another container (name:alias)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l lxc-conf -d 'Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s m -l memory -d 'Memory limit (format: , where unit = b, k, m or g)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s n -l networking -d 'Enable networking for this container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign a name to the container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s p -l publish -d "Publish a container's port to the host (format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort) (use 'docker port' to see the actual mapping)" complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l privileged -d 'Give extended privileges to this container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l rm -d 'Automatically remove the container when it exits (incompatible with -d)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxify all received signal to the process (even in non-tty mode)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s t -l tty -d 'Allocate a pseudo-tty' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s u -l user -d 'Username or UID' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s v -l volume -d 'Bind mount a volume (e.g. 
from the host: -v /host:/container, from docker: -v /container)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l volumes-from -d 'Mount volumes from the specified container(s)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s w -l workdir -d 'Working directory inside the container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -a '(__fish_print_docker_images)' -d "Image" # save complete -c docker -f -n '__fish_docker_no_subcommand' -a save -d 'Save an image to a tar archive' complete -c docker -A -f -n '__fish_seen_subcommand_from save' -a '(__fish_print_docker_images)' -d "Image" # search complete -c docker -f -n '__fish_docker_no_subcommand' -a search -d 'Search for an image in the docker index' complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l no-trunc -d "Don't truncate output" complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s s -l stars -d 'Only displays with at least xxx stars' complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s t -l trusted -d 'Only show trusted builds' # start complete -c docker -f -n '__fish_docker_no_subcommand' -a start -d 'Start a stopped container' complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s a -l attach -d "Attach container's stdout/stderr and forward all signals to the process" complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s i -l interactive -d "Attach container's stdin" complete -c docker -A -f -n '__fish_seen_subcommand_from start' -a '(__fish_print_docker_containers stopped)' -d "Container" # stop complete -c docker -f -n '__fish_docker_no_subcommand' -a stop -d 'Stop a running container' complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -s t -l time -d 'Number of seconds to wait for the container to stop before killing it.' 
complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -a '(__fish_print_docker_containers running)' -d "Container" # tag complete -c docker -f -n '__fish_docker_no_subcommand' -a tag -d 'Tag an image into a repository' complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -s f -l force -d 'Force' complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -a '(__fish_print_docker_images)' -d "Image" # top complete -c docker -f -n '__fish_docker_no_subcommand' -a top -d 'Lookup the running processes of a container' complete -c docker -A -f -n '__fish_seen_subcommand_from top' -a '(__fish_print_docker_containers running)' -d "Container" # version complete -c docker -f -n '__fish_docker_no_subcommand' -a version -d 'Show the docker version information' # wait complete -c docker -f -n '__fish_docker_no_subcommand' -a wait -d 'Block until a container stops, then print its exit code' complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -a '(__fish_print_docker_containers running)' -d "Container" docker-0.9.1/contrib/completion/zsh/0000755000175000017500000000000012314376205015520 5ustar tagtagdocker-0.9.1/contrib/completion/zsh/_docker0000755000175000017500000001774712314376205017074 0ustar tagtag#compdef docker # # zsh completion for docker (http://docker.io) # # version: 0.2.2 # author: Felix Riedel # license: BSD License # github: https://github.com/felixr/docker-zsh-completion # __parse_docker_list() { sed -e '/^ID/d' -e 's/[ ]\{2,\}/|/g' -e 's/ \([hdwm]\)\(inutes\|ays\|ours\|eeks\)/\1/' | awk ' BEGIN {FS="|"} { printf("%s:%7s, %s\n", $1, $4, $2)}' } __docker_stoppedcontainers() { local expl declare -a stoppedcontainers stoppedcontainers=(${(f)"$(docker ps -a | grep --color=never 'Exit' | __parse_docker_list )"}) _describe -t containers-stopped "Stopped Containers" stoppedcontainers } __docker_runningcontainers() { local expl declare -a containers containers=(${(f)"$(docker ps | __parse_docker_list)"}) _describe -t containers-active "Running Containers" containers } __docker_containers () { __docker_stoppedcontainers __docker_runningcontainers } __docker_images () { local expl declare -a images images=(${(f)"$(docker images | awk '(NR > 1){printf("%s\\:%s\n", $1,$2)}')"}) images=($images ${(f)"$(docker images | awk '(NR > 1){printf("%s:%-15s in %s\n", $3,$2,$1)}')"}) _describe -t docker-images "Images" images } __docker_tags() { local expl declare -a tags tags=(${(f)"$(docker images | awk '(NR>1){print $2}'| sort | uniq)"}) _describe -t docker-tags "tags" tags } __docker_search() { # declare -a dockersearch local cache_policy zstyle -s ":completion:${curcontext}:" cache-policy cache_policy if [[ -z "$cache_policy" ]]; then zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy fi local searchterm cachename searchterm="${words[$CURRENT]%/}" cachename=_docker-search-$searchterm local expl local -a result if ( [[ ${(P)+cachename} -eq 0 ]] || _cache_invalid ${cachename#_} ) \ && ! _retrieve_cache ${cachename#_}; then _message "Searching for ${searchterm}..." 
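# populate the completion list from `docker search` output and cache it for later invocations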
result=(${(f)"$(docker search ${searchterm} | awk '(NR>2){print $1}')"}) _store_cache ${cachename#_} result fi _wanted dockersearch expl 'Available images' compadd -a result } __docker_caching_policy() { # oldp=( "$1"(Nmh+24) ) # 24 hour oldp=( "$1"(Nmh+1) ) # 24 hour (( $#oldp )) } __docker_repositories () { local expl declare -a repos repos=(${(f)"$(docker images | sed -e '1d' -e 's/[ ].*//' | sort | uniq)"}) _describe -t docker-repos "Repositories" repos } __docker_commands () { # local -a _docker_subcommands local cache_policy zstyle -s ":completion:${curcontext}:" cache-policy cache_policy if [[ -z "$cache_policy" ]]; then zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy fi if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands) \ && ! _retrieve_cache docker_subcommands; then _docker_subcommands=(${${(f)"$(_call_program commands docker 2>&1 | sed -e '1,6d' -e '/^[ ]*$/d' -e 's/[ ]*\([^ ]\+\)\s*\([^ ].*\)/\1:\2/' )"}}) _docker_subcommands=($_docker_subcommands 'help:Show help for a command') _store_cache docker_subcommands _docker_subcommands fi _describe -t docker-commands "docker command" _docker_subcommands } __docker_subcommand () { local -a _command_args case "$words[1]" in (attach|wait) _arguments ':containers:__docker_runningcontainers' ;; (build) _arguments \ '-t=-:repository:__docker_repositories' \ ':path or URL:_directories' ;; (commit) _arguments \ ':container:__docker_containers' \ ':repository:__docker_repositories' \ ':tag: ' ;; (diff|export|logs) _arguments '*:containers:__docker_containers' ;; (history) _arguments '*:images:__docker_images' ;; (images) _arguments \ '-a[Show all images]' \ ':repository:__docker_repositories' ;; (inspect) _arguments '*:containers:__docker_containers' ;; (history) _arguments ':images:__docker_images' ;; (insert) _arguments '1:containers:__docker_containers' \ '2:URL:(http:// file://)' \ '3:file:_files' ;; (kill) _arguments '*:containers:__docker_runningcontainers' ;; (port) _arguments '1:containers:__docker_runningcontainers' ;; (start) _arguments '*:containers:__docker_stoppedcontainers' ;; (rm) _arguments '-v[Remove the volumes associated to the container]' \ '*:containers:__docker_stoppedcontainers' ;; (rmi) _arguments '-v[Remove the volumes associated to the container]' \ '*:images:__docker_images' ;; (top) _arguments '1:containers:__docker_runningcontainers' ;; (restart|stop) _arguments '-t=-[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)' \ '*:containers:__docker_runningcontainers' ;; (top) _arguments ':containers:__docker_runningcontainers' ;; (ps) _arguments '-a[Show all containers. 
Only running containers are shown by default]' \ '-h[Show help]' \ '-before-id=-[Show only container created before Id, include non-running one]:containers:__docker_containers' \ '-n=-[Show n last created containers, include non-running one]:n:(1 5 10 25 50)' ;; (tag) _arguments \ '-f[force]'\ ':image:__docker_images'\ ':repository:__docker_repositories' \ ':tag:__docker_tags' ;; (run) _arguments \ '-a=-[Attach to stdin, stdout or stderr]:toggle:(true false)' \ '-c=-[CPU shares (relative weight)]:CPU shares: ' \ '-d[Detached mode: leave the container running in the background]' \ '*--dns=[Set custom dns servers]:dns server: ' \ '*-e=[Set environment variables]:environment variable: ' \ '--entrypoint=-[Overwrite the default entrypoint of the image]:entry point: ' \ '-h=-[Container host name]:hostname:_hosts' \ '-i[Keep stdin open even if not attached]' \ '-m=-[Memory limit (in bytes)]:limit: ' \ '*-p=-[Expose a container''s port to the host]:port:_ports' \ '-t=-[Allocate a pseudo-tty]:toggle:(true false)' \ '-u=-[Username or UID]:user:_users' \ '*-v=-[Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)]:volume: '\ '--volumes-from=-[Mount volumes from the specified container]:volume: ' \ '(-):images:__docker_images' \ '(-):command: _command_names -e' \ '*::arguments: _normal' ;; (pull|search) _arguments ':name:__docker_search' ;; (help) _arguments ':subcommand:__docker_commands' ;; (*) _message 'Unknown sub command' esac } _docker () { local curcontext="$curcontext" state line typeset -A opt_args _arguments -C \ '-H=-[tcp://host:port to bind/connect to]:socket: ' \ '(-): :->command' \ '(-)*:: :->option-or-argument' if (( CURRENT == 1 )); then fi case $state in (command) __docker_commands ;; (option-or-argument) curcontext=${curcontext%:*:*}:docker-$words[1]: __docker_subcommand ;; esac } _docker "$@" docker-0.9.1/contrib/completion/bash/0000755000175000017500000000000012314376205015631 5ustar tagtagdocker-0.9.1/contrib/completion/bash/docker0000755000175000017500000002745012314376205017036 0ustar tagtag#!bash # # bash completion file for core docker commands # # This script provides supports completion of: # - commands and their options # - container ids and names # - image repos and tags # - filepaths # # To enable the completions either: # - place this file in /etc/bash_completion.d # or # - copy this file and add the line below to your .bashrc after # bash completion features are loaded # . 
docker.bash # # Note: # Currently, the completions will not work if the docker daemon is not # bound to the default communication port/socket # If the docker daemon is using a unix socket for communication your user # must have access to the socket for the completions to function correctly __docker_q() { docker 2>/dev/null "$@" } __docker_containers_all() { local containers="$( __docker_q ps -a -q )" local names="$( __docker_q inspect --format '{{.Name}}' $containers | sed 's,^/,,' )" COMPREPLY=( $( compgen -W "$names $containers" -- "$cur" ) ) } __docker_containers_running() { local containers="$( __docker_q ps -q )" local names="$( __docker_q inspect --format '{{.Name}}' $containers | sed 's,^/,,' )" COMPREPLY=( $( compgen -W "$names $containers" -- "$cur" ) ) } __docker_containers_stopped() { local containers="$( { __docker_q ps -a -q; __docker_q ps -q; } | sort | uniq -u )" local names="$( __docker_q inspect --format '{{.Name}}' $containers | sed 's,^/,,' )" COMPREPLY=( $( compgen -W "$names $containers" -- "$cur" ) ) } __docker_image_repos() { local repos="$( __docker_q images | awk 'NR>1{print $1}' | grep -v '^$' )" COMPREPLY=( $( compgen -W "$repos" -- "$cur" ) ) } __docker_image_repos_and_tags() { local repos="$( __docker_q images | awk 'NR>1{print $1}' | grep -v '^$' )" local images="$( __docker_q images | awk 'NR>1{print $1":"$2}' | grep -v '^:' )" COMPREPLY=( $( compgen -W "$repos $images" -- "$cur" ) ) __ltrim_colon_completions "$cur" } __docker_image_repos_and_tags_and_ids() { local repos="$( __docker_q images | awk 'NR>1{print $1}' | grep -v '^$' )" local images="$( __docker_q images | awk 'NR>1{print $1":"$2}' | grep -v '^:' )" local ids="$( __docker_q images -a -q )" COMPREPLY=( $( compgen -W "$repos $images $ids" -- "$cur" ) ) __ltrim_colon_completions "$cur" } __docker_containers_and_images() { local containers="$( __docker_q ps -a -q )" local names="$( __docker_q inspect --format '{{.Name}}' $containers | sed 's,^/,,' )" local repos="$( __docker_q images | awk 'NR>1{print $1}' | grep -v '^$' )" local images="$( __docker_q images | awk 'NR>1{print $1":"$2}' | grep -v '^:' )" local ids="$( __docker_q images -a -q )" COMPREPLY=( $( compgen -W "$containers $names $repos $images $ids" -- "$cur" ) ) __ltrim_colon_completions "$cur" } __docker_pos_first_nonflag() { local argument_flags=$1 local counter=$cpos while [ $counter -le $cword ]; do if [ -n "$argument_flags" ] && eval "case '${words[$counter]}' in $argument_flags) true ;; *) false ;; esac"; then (( counter++ )) else case "${words[$counter]}" in -*) ;; *) break ;; esac fi (( counter++ )) done echo $counter } _docker_docker() { case "$prev" in -H) return ;; *) ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "-H" -- "$cur" ) ) ;; *) COMPREPLY=( $( compgen -W "$commands help" -- "$cur" ) ) ;; esac } _docker_attach() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--no-stdin --sig-proxy" -- "$cur" ) ) ;; *) local counter="$(__docker_pos_first_nonflag)" if [ $cword -eq $counter ]; then __docker_containers_running fi ;; esac } _docker_build() { case "$prev" in -t|--tag) __docker_image_repos_and_tags return ;; *) ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "-t --tag -q --quiet --no-cache --rm" -- "$cur" ) ) ;; *) local counter="$(__docker_pos_first_nonflag '-t|--tag')" if [ $cword -eq $counter ]; then _filedir fi ;; esac } _docker_commit() { case "$prev" in -m|--message|-a|--author|--run) return ;; *) ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "-m --message -a --author --run" -- "$cur" ) ) ;; *) local 
counter=$(__docker_pos_first_nonflag '-m|--message|-a|--author|--run') if [ $cword -eq $counter ]; then __docker_containers_all return fi (( counter++ )) if [ $cword -eq $counter ]; then __docker_image_repos_and_tags return fi ;; esac } _docker_cp() { local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then case "$cur" in *:) return ;; *) __docker_containers_all COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) compopt -o nospace return ;; esac fi (( counter++ )) if [ $cword -eq $counter ]; then _filedir return fi } _docker_diff() { local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_containers_all fi } _docker_events() { case "$prev" in --since) return ;; *) ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--since" -- "$cur" ) ) ;; *) ;; esac } _docker_export() { local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_containers_all fi } _docker_help() { local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then COMPREPLY=( $( compgen -W "$commands" -- "$cur" ) ) fi } _docker_history() { case "$cur" in -*) COMPREPLY=( $( compgen -W "-q --quiet --no-trunc" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_image_repos_and_tags_and_ids fi ;; esac } _docker_images() { case "$cur" in -*) COMPREPLY=( $( compgen -W "-q --quiet -a --all --no-trunc -v --viz -t --tree" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_image_repos fi ;; esac } _docker_import() { local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then return fi (( counter++ )) if [ $cword -eq $counter ]; then __docker_image_repos_and_tags return fi } _docker_info() { return } _docker_insert() { local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_image_repos_and_tags_and_ids fi } _docker_inspect() { case "$prev" in -f|--format) return ;; *) ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "-f --format" -- "$cur" ) ) ;; *) __docker_containers_and_images ;; esac } _docker_kill() { __docker_containers_running } _docker_load() { return } _docker_login() { case "$prev" in -u|--username|-p|--password|-e|--email) return ;; *) ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "-u --username -p --password -e --email" -- "$cur" ) ) ;; *) ;; esac } _docker_logs() { case "$cur" in -*) COMPREPLY=( $( compgen -W "-f --follow" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_containers_all fi ;; esac } _docker_port() { local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_containers_all fi } _docker_ps() { case "$prev" in --since-id|--before-id) COMPREPLY=( $( compgen -W "$( __docker_q ps -a -q )" -- "$cur" ) ) # TODO replace this with __docker_containers_all # see https://github.com/dotcloud/docker/issues/3565 return ;; -n) return ;; *) ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "-q --quiet -s --size -a --all --no-trunc -l --latest --since-id --before-id -n" -- "$cur" ) ) ;; *) ;; esac } _docker_pull() { case "$prev" in -t|--tag) return ;; *) ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "-t --tag" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag '-t|--tag') if [ $cword -eq $counter ]; then __docker_image_repos_and_tags fi ;; esac } _docker_push() { local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_image_repos # 
TODO replace this with __docker_image_repos_and_tags # see https://github.com/dotcloud/docker/issues/3411 fi } _docker_restart() { case "$prev" in -t|--time) return ;; *) ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "-t --time" -- "$cur" ) ) ;; *) __docker_containers_all ;; esac } _docker_rm() { case "$cur" in -*) COMPREPLY=( $( compgen -W "-v --volumes -l --link" -- "$cur" ) ) ;; *) __docker_containers_stopped ;; esac } _docker_rmi() { __docker_image_repos_and_tags_and_ids } _docker_run() { case "$prev" in --cidfile) _filedir ;; --volumes-from) __docker_containers_all ;; -v|--volume) # TODO something magical with colons and _filedir ? return ;; -e|--env) COMPREPLY=( $( compgen -e -- "$cur" ) ) return ;; --entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf) return ;; *) ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--rm -d --detach -n --networking --privileged -P --publish-all -i --interactive -t --tty --cidfile --entrypoint -h --hostname -m --memory -u --user -w --workdir -c --cpu-shares --sig-proxy --name -a --attach -v --volume --link -e --env -p --publish --expose --dns --volumes-from --lxc-conf" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag '--cidfile|--volumes-from|-v|--volume|-e|--env|--entrypoint|-h|--hostname|-m|--memory|-u|--user|-w|--workdir|-c|--cpu-shares|-n|--name|-a|--attach|--link|-p|--publish|--expose|--dns|--lxc-conf') if [ $cword -eq $counter ]; then __docker_image_repos_and_tags_and_ids fi ;; esac } _docker_save() { local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_image_repos_and_tags_and_ids fi } _docker_search() { case "$prev" in -s|--stars) return ;; *) ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--no-trunc -t --trusted -s --stars" -- "$cur" ) ) ;; *) ;; esac } _docker_start() { case "$cur" in -*) COMPREPLY=( $( compgen -W "-a --attach -i --interactive" -- "$cur" ) ) ;; *) __docker_containers_stopped ;; esac } _docker_stop() { case "$prev" in -t|--time) return ;; *) ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "-t --time" -- "$cur" ) ) ;; *) __docker_containers_running ;; esac } _docker_tag() { case "$cur" in -*) COMPREPLY=( $( compgen -W "-f --force" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_image_repos_and_tags return fi (( counter++ )) if [ $cword -eq $counter ]; then __docker_image_repos_and_tags return fi ;; esac } _docker_top() { local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_containers_running fi } _docker_version() { return } _docker_wait() { __docker_containers_all } _docker() { local commands=" attach build commit cp diff events export history images import info insert inspect kill load login logs port ps pull push restart rm rmi run save search start stop tag top version wait " COMPREPLY=() local cur prev words cword _get_comp_words_by_ref -n : cur prev words cword local command='docker' local counter=1 while [ $counter -lt $cword ]; do case "${words[$counter]}" in -H) (( counter++ )) ;; -*) ;; *) command="${words[$counter]}" cpos=$counter (( cpos++ )) break ;; esac (( counter++ )) done local completions_func=_docker_${command} declare -F $completions_func >/dev/null && $completions_func return 0 } complete -F _docker docker docker-0.9.1/contrib/syntax/0000755000175000017500000000000012314376205014071 5ustar 
tagtagdocker-0.9.1/contrib/syntax/textmate/0000755000175000017500000000000012314376205015724 5ustar tagtagdocker-0.9.1/contrib/syntax/textmate/MAINTAINERS0000644000175000017500000000006512314376205017422 0ustar tagtagAsbjorn Enge (@asbjornenge) docker-0.9.1/contrib/syntax/textmate/Docker.tmbundle/0000755000175000017500000000000012314376205020744 5ustar tagtagdocker-0.9.1/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/0000755000175000017500000000000012314376205022562 5ustar tagtagdocker-0.9.1/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage0000644000175000017500000000416112314376205026641 0ustar tagtag name Dockerfile fileTypes Dockerfile patterns match ^\s*(ONBUILD\s+)?(FROM|MAINTAINER|RUN|EXPOSE|ENV|ADD|VOLUME|USER|WORKDIR)\s captures 0 name keyword.control.dockerfile 1 name keyword.other.special-method.dockerfile match ^\s*(ONBUILD\s+)?(CMD|ENTRYPOINT)\s captures 0 name keyword.operator.dockerfile 1 name keyword.other.special-method.dockerfile begin " end " name string.quoted.double.dockerfile patterns match \\. name constant.character.escaped.dockerfile begin ' end ' name string.quoted.single.dockerfile patterns match \\. name constant.character.escaped.dockerfile match ^\s*#.*$ name comment.block.dockerfile scopeName source.dockerfile uuid a39d8795-59d2-49af-aa00-fe74ee29576e docker-0.9.1/contrib/syntax/textmate/Docker.tmbundle/Preferences/0000755000175000017500000000000012314376205023205 5ustar tagtagdocker-0.9.1/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences0000644000175000017500000000110212314376205027772 0ustar tagtag name Comments scope source.dockerfile settings shellVariables name TM_COMMENT_START value # uuid 2B215AC0-A7F3-4090-9FF6-F4842BD56CA7 docker-0.9.1/contrib/syntax/textmate/Docker.tmbundle/info.plist0000644000175000017500000000075112314376205022757 0ustar tagtag contactEmailRot13 germ@andz.com.ar contactName GermanDZ description Helpers for Docker. name Docker uuid 8B9DDBAF-E65C-4E12-FFA7-467D4AA535B1 docker-0.9.1/contrib/syntax/textmate/README.md0000644000175000017500000000065712314376205017213 0ustar tagtag# Docker.tmbundle Dockerfile syntaxt highlighting for TextMate and Sublime Text. ## Install ### Sublime Text Available for Sublime Text under [package control](https://sublime.wbond.net/packages/Dockerfile%20Syntax%20Highlighting). Search for *Dockerfile Syntax Highlighting* ### TextMate 2 Copy the directory `Docker.tmbundle` (showed as a Package in OSX) to `~/Library/Application Support/TextMate/Managed/Bundles` enjoy. docker-0.9.1/contrib/syntax/vim/0000755000175000017500000000000012314376205014664 5ustar tagtagdocker-0.9.1/contrib/syntax/vim/LICENSE0000644000175000017500000000242212314376205015671 0ustar tagtagCopyright (c) 2013 Honza Pokorny All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. docker-0.9.1/contrib/syntax/vim/ftdetect/0000755000175000017500000000000012314376205016466 5ustar tagtagdocker-0.9.1/contrib/syntax/vim/ftdetect/dockerfile.vim0000644000175000017500000000007112314376205021310 0ustar tagtagau BufNewFile,BufRead Dockerfile set filetype=dockerfile docker-0.9.1/contrib/syntax/vim/doc/0000755000175000017500000000000012314376205015431 5ustar tagtagdocker-0.9.1/contrib/syntax/vim/doc/dockerfile.txt0000644000175000017500000000066412314376205020307 0ustar tagtag*dockerfile.txt* Syntax highlighting for Dockerfiles Author: Honza Pokorny License: BSD INSTALLATION *installation* Drop it on your Pathogen path and you're all set. FEATURES *features* The syntax highlighting includes: * The directives (e.g. FROM) * Strings * Comments vim:tw=78:et:ft=help:norl: docker-0.9.1/contrib/syntax/vim/syntax/0000755000175000017500000000000012314376205016212 5ustar tagtagdocker-0.9.1/contrib/syntax/vim/syntax/dockerfile.vim0000644000175000017500000000114612314376205021040 0ustar tagtag" dockerfile.vim - Syntax highlighting for Dockerfiles " Maintainer: Honza Pokorny " Version: 0.5 if exists("b:current_syntax") finish endif let b:current_syntax = "dockerfile" syntax case ignore syntax match dockerfileKeyword /\v^\s*(ONBUILD\s+)?(ADD|CMD|ENTRYPOINT|ENV|EXPOSE|FROM|MAINTAINER|RUN|USER|VOLUME|WORKDIR)\s/ highlight link dockerfileKeyword Keyword syntax region dockerfileString start=/\v"/ skip=/\v\\./ end=/\v"/ highlight link dockerfileString String syntax match dockerfileComment "\v^\s*#.*$" highlight link dockerfileComment Comment set commentstring=#\ %s docker-0.9.1/contrib/syntax/vim/README.md0000644000175000017500000000042012314376205016137 0ustar tagtagdockerfile.vim ============== Syntax highlighting for Dockerfiles Installation ------------ Via pathogen, the usual way... Features -------- The syntax highlighting includes: * The directives (e.g. 
`FROM`) * Strings * Comments License ------- BSD, short and sweet docker-0.9.1/contrib/crashTest.go0000644000175000017500000000453412314376205015040 0ustar tagtagpackage main import ( "fmt" "io" "log" "net" "os" "os/exec" "path" "time" ) var DOCKERPATH = path.Join(os.Getenv("DOCKERPATH"), "docker") // WARNING: this crashTest will 1) crash your host, 2) remove all containers func runDaemon() (*exec.Cmd, error) { os.Remove("/var/run/docker.pid") exec.Command("rm", "-rf", "/var/lib/docker/containers").Run() cmd := exec.Command(DOCKERPATH, "-d") outPipe, err := cmd.StdoutPipe() if err != nil { return nil, err } errPipe, err := cmd.StderrPipe() if err != nil { return nil, err } if err := cmd.Start(); err != nil { return nil, err } go func() { io.Copy(os.Stdout, outPipe) }() go func() { io.Copy(os.Stderr, errPipe) }() return cmd, nil } func crashTest() error { if err := exec.Command("/bin/bash", "-c", "while true; do true; done").Start(); err != nil { return err } var endpoint string if ep := os.Getenv("TEST_ENDPOINT"); ep == "" { endpoint = "192.168.56.1:7979" } else { endpoint = ep } c := make(chan bool) var conn io.Writer go func() { conn, _ = net.Dial("tcp", endpoint) c <- false }() go func() { time.Sleep(2 * time.Second) c <- true }() <-c restartCount := 0 totalTestCount := 1 for { daemon, err := runDaemon() if err != nil { return err } restartCount++ // time.Sleep(5000 * time.Millisecond) var stop bool go func() error { stop = false for i := 0; i < 100 && !stop; { func() error { cmd := exec.Command(DOCKERPATH, "run", "ubuntu", "echo", fmt.Sprintf("%d", totalTestCount)) i++ totalTestCount++ outPipe, err := cmd.StdoutPipe() if err != nil { return err } inPipe, err := cmd.StdinPipe() if err != nil { return err } if err := cmd.Start(); err != nil { return err } if conn != nil { go io.Copy(conn, outPipe) } // Expecting error, do not check inPipe.Write([]byte("hello world!!!!!\n")) go inPipe.Write([]byte("hello world!!!!!\n")) go inPipe.Write([]byte("hello world!!!!!\n")) inPipe.Close() if err := cmd.Wait(); err != nil { return err } outPipe.Close() return nil }() } return nil }() time.Sleep(20 * time.Second) stop = true if err := daemon.Process.Kill(); err != nil { return err } } } func main() { if err := crashTest(); err != nil { log.Println(err) } } docker-0.9.1/contrib/docker-device-tool/0000755000175000017500000000000012314376205016222 5ustar tagtagdocker-0.9.1/contrib/docker-device-tool/device_tool.go0000644000175000017500000000717612314376205021060 0ustar tagtagpackage main import ( "flag" "fmt" "github.com/dotcloud/docker/graphdriver/devmapper" "os" "path" "sort" "strconv" "strings" ) func usage() { fmt.Fprintf(os.Stderr, "Usage: %s [status] | [list] | [device id] | [resize new-pool-size] | [snap new-id base-id] | [remove id] | [mount id mountpoint]\n", os.Args[0]) flag.PrintDefaults() os.Exit(1) } func byteSizeFromString(arg string) (int64, error) { digits := "" rest := "" last := strings.LastIndexAny(arg, "0123456789") if last >= 0 { digits = arg[:last+1] rest = arg[last+1:] } val, err := strconv.ParseInt(digits, 10, 64) if err != nil { return val, err } rest = strings.ToLower(strings.TrimSpace(rest)) var multiplier int64 = 1 switch rest { case "": multiplier = 1 case "k", "kb": multiplier = 1024 case "m", "mb": multiplier = 1024 * 1024 case "g", "gb": multiplier = 1024 * 1024 * 1024 case "t", "tb": multiplier = 1024 * 1024 * 1024 * 1024 default: return 0, fmt.Errorf("Unknown size unit: %s", rest) } return val * multiplier, nil } func main() { root := flag.String("r", 
"/var/lib/docker", "Docker root dir") flDebug := flag.Bool("D", false, "Debug mode") flag.Parse() if *flDebug { os.Setenv("DEBUG", "1") } if flag.NArg() < 1 { usage() } args := flag.Args() home := path.Join(*root, "devicemapper") devices, err := devmapper.NewDeviceSet(home, false) if err != nil { fmt.Println("Can't initialize device mapper: ", err) os.Exit(1) } switch args[0] { case "status": status := devices.Status() fmt.Printf("Pool name: %s\n", status.PoolName) fmt.Printf("Data Loopback file: %s\n", status.DataLoopback) fmt.Printf("Metadata Loopback file: %s\n", status.MetadataLoopback) fmt.Printf("Sector size: %d\n", status.SectorSize) fmt.Printf("Data use: %d of %d (%.1f %%)\n", status.Data.Used, status.Data.Total, 100.0*float64(status.Data.Used)/float64(status.Data.Total)) fmt.Printf("Metadata use: %d of %d (%.1f %%)\n", status.Metadata.Used, status.Metadata.Total, 100.0*float64(status.Metadata.Used)/float64(status.Metadata.Total)) break case "list": ids := devices.List() sort.Strings(ids) for _, id := range ids { fmt.Println(id) } break case "device": if flag.NArg() < 2 { usage() } status, err := devices.GetDeviceStatus(args[1]) if err != nil { fmt.Println("Can't get device info: ", err) os.Exit(1) } fmt.Printf("Id: %d\n", status.DeviceId) fmt.Printf("Size: %d\n", status.Size) fmt.Printf("Transaction Id: %d\n", status.TransactionId) fmt.Printf("Size in Sectors: %d\n", status.SizeInSectors) fmt.Printf("Mapped Sectors: %d\n", status.MappedSectors) fmt.Printf("Highest Mapped Sector: %d\n", status.HighestMappedSector) break case "resize": if flag.NArg() < 2 { usage() } size, err := byteSizeFromString(args[1]) if err != nil { fmt.Println("Invalid size: ", err) os.Exit(1) } err = devices.ResizePool(size) if err != nil { fmt.Println("Error resizeing pool: ", err) os.Exit(1) } break case "snap": if flag.NArg() < 3 { usage() } err := devices.AddDevice(args[1], args[2]) if err != nil { fmt.Println("Can't create snap device: ", err) os.Exit(1) } break case "remove": if flag.NArg() < 2 { usage() } err := devices.RemoveDevice(args[1]) if err != nil { fmt.Println("Can't remove device: ", err) os.Exit(1) } break case "mount": if flag.NArg() < 3 { usage() } err := devices.MountDevice(args[1], args[2], false) if err != nil { fmt.Println("Can't create snap device: ", err) os.Exit(1) } break default: fmt.Printf("Unknown command %s\n", args[0]) usage() os.Exit(1) } return } docker-0.9.1/contrib/mkimage-crux.sh0000755000175000017500000000351212314376205015474 0ustar tagtag#!/usr/bin/env bash # Generate a minimal filesystem for CRUX/Linux and load it into the local # docker as "cruxlinux" # requires root and the crux iso (http://crux.nu) set -e die () { echo >&2 "$@" exit 1 } [ "$#" -eq 1 ] || die "1 argument(s) required, $# provided. Usage: ./mkimage-crux.sh /path/to/iso" ISO=${1} ROOTFS=$(mktemp -d /tmp/rootfs-crux-XXXXXXXXXX) CRUX=$(mktemp -d /tmp/crux-XXXXXXXXXX) TMP=$(mktemp -d /tmp/XXXXXXXXXX) VERSION=$(basename --suffix=.iso $ISO | sed 's/[^0-9.]*\([0-9.]*\).*/\1/') # Mount the ISO mount -o ro,loop $ISO $CRUX # Extract pkgutils tar -C $TMP -xf $CRUX/tools/pkgutils#*.pkg.tar.gz # Put pkgadd in the $PATH export PATH="$TMP/usr/bin:$PATH" # Install core packages mkdir -p $ROOTFS/var/lib/pkg touch $ROOTFS/var/lib/pkg/db for pkg in $CRUX/crux/core/*; do pkgadd -r $ROOTFS $pkg done # Remove agetty and inittab config if (grep agetty ${ROOTFS}/etc/inittab 2>&1 > /dev/null); then echo "Removing agetty from /etc/inittab ..." 
chroot ${ROOTFS} sed -i -e "/agetty/d" /etc/inittab chroot ${ROOTFS} sed -i -e "/shutdown/d" /etc/inittab chroot ${ROOTFS} sed -i -e "/^$/N;/^\n$/d" /etc/inittab fi # Remove kernel source rm -rf $ROOTFS/usr/src/* # udev doesn't work in containers, rebuild /dev DEV=$ROOTFS/dev rm -rf $DEV mkdir -p $DEV mknod -m 666 $DEV/null c 1 3 mknod -m 666 $DEV/zero c 1 5 mknod -m 666 $DEV/random c 1 8 mknod -m 666 $DEV/urandom c 1 9 mkdir -m 755 $DEV/pts mkdir -m 1777 $DEV/shm mknod -m 666 $DEV/tty c 5 0 mknod -m 600 $DEV/console c 5 1 mknod -m 666 $DEV/tty0 c 4 0 mknod -m 666 $DEV/full c 1 7 mknod -m 600 $DEV/initctl p mknod -m 666 $DEV/ptmx c 5 2 IMAGE_ID=$(tar --numeric-owner -C $ROOTFS -c . | docker import - crux:$VERSION) docker tag $IMAGE_ID crux:latest docker run -i -t crux echo Success. # Cleanup umount $CRUX rm -rf $ROOTFS rm -rf $CRUX rm -rf $TMP docker-0.9.1/contrib/mkimage-busybox.sh0000755000175000017500000000175512314376205016215 0ustar tagtag#!/usr/bin/env bash # Generate a very minimal filesystem based on busybox-static, # and load it into the local docker under the name "busybox". BUSYBOX=$(which busybox) [ "$BUSYBOX" ] || { echo "Sorry, I could not locate busybox." echo "Try 'apt-get install busybox-static'?" exit 1 } set -e ROOTFS=/tmp/rootfs-busybox-$$-$RANDOM mkdir $ROOTFS cd $ROOTFS mkdir bin etc dev dev/pts lib proc sys tmp touch etc/resolv.conf cp /etc/nsswitch.conf etc/nsswitch.conf echo root:x:0:0:root:/:/bin/sh > etc/passwd echo root:x:0: > etc/group ln -s lib lib64 ln -s bin sbin cp $BUSYBOX bin for X in $(busybox --list) do ln -s busybox bin/$X done rm bin/init ln bin/busybox bin/init cp /lib/x86_64-linux-gnu/lib{pthread,c,dl,nsl,nss_*}.so.* lib cp /lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 lib for X in console null ptmx random stdin stdout stderr tty urandom zero do cp -a /dev/$X dev done tar --numeric-owner -cf- . | docker import - busybox docker run -i -u root busybox /bin/echo Success. docker-0.9.1/graph.go0000644000175000017500000002633512314376205012544 0ustar tagtagpackage docker import ( "fmt" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/graphdriver" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "io" "io/ioutil" "os" "path" "path/filepath" "runtime" "strings" "syscall" "time" ) // A Graph is a store for versioned filesystem images and the relationship between them. type Graph struct { Root string idIndex *utils.TruncIndex driver graphdriver.Driver } // NewGraph instantiates a new graph at the given root path in the filesystem. // `root` will be created if it doesn't exist. 
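// A minimal usage sketch (not taken from this file; the graphdriver.New
// constructor and the paths are assumptions used only for illustration):
//
//	driver, err := graphdriver.New("/var/lib/docker")
//	if err != nil {
//		// handle the error
//	}
//	graph, err := NewGraph("/var/lib/docker/graph", driver)
//	// graph can now Create, Register, Get and Delete images.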
func NewGraph(root string, driver graphdriver.Driver) (*Graph, error) { abspath, err := filepath.Abs(root) if err != nil { return nil, err } // Create the root directory if it doesn't exists if err := os.MkdirAll(root, 0700); err != nil && !os.IsExist(err) { return nil, err } graph := &Graph{ Root: abspath, idIndex: utils.NewTruncIndex(), driver: driver, } if err := graph.restore(); err != nil { return nil, err } return graph, nil } func (graph *Graph) restore() error { dir, err := ioutil.ReadDir(graph.Root) if err != nil { return err } for _, v := range dir { id := v.Name() if graph.driver.Exists(id) { graph.idIndex.Add(id) } } utils.Debugf("Restored %d elements", len(dir)) return nil } // FIXME: Implement error subclass instead of looking at the error text // Note: This is the way golang implements os.IsNotExists on Plan9 func (graph *Graph) IsNotExist(err error) bool { return err != nil && (strings.Contains(err.Error(), "does not exist") || strings.Contains(err.Error(), "No such")) } // Exists returns true if an image is registered at the given id. // If the image doesn't exist or if an error is encountered, false is returned. func (graph *Graph) Exists(id string) bool { if _, err := graph.Get(id); err != nil { return false } return true } // Get returns the image with the given id, or an error if the image doesn't exist. func (graph *Graph) Get(name string) (*Image, error) { id, err := graph.idIndex.Get(name) if err != nil { return nil, err } // FIXME: return nil when the image doesn't exist, instead of an error img, err := LoadImage(graph.imageRoot(id)) if err != nil { return nil, err } if img.ID != id { return nil, fmt.Errorf("Image stored at '%s' has wrong id '%s'", id, img.ID) } img.graph = graph if img.Size < 0 { rootfs, err := graph.driver.Get(img.ID) if err != nil { return nil, fmt.Errorf("Driver %s failed to get image rootfs %s: %s", graph.driver, img.ID, err) } defer graph.driver.Put(img.ID) var size int64 if img.Parent == "" { if size, err = utils.TreeSize(rootfs); err != nil { return nil, err } } else { parentFs, err := graph.driver.Get(img.Parent) if err != nil { return nil, err } changes, err := archive.ChangesDirs(rootfs, parentFs) if err != nil { return nil, err } size = archive.ChangesSize(rootfs, changes) } img.Size = size if err := img.SaveSize(graph.imageRoot(id)); err != nil { return nil, err } } return img, nil } // Create creates a new image and registers it in the graph. func (graph *Graph) Create(layerData archive.ArchiveReader, container *Container, comment, author string, config *runconfig.Config) (*Image, error) { img := &Image{ ID: GenerateID(), Comment: comment, Created: time.Now().UTC(), DockerVersion: dockerversion.VERSION, Author: author, Config: config, Architecture: runtime.GOARCH, OS: runtime.GOOS, } if container != nil { img.Parent = container.Image img.Container = container.ID img.ContainerConfig = *container.Config } if err := graph.Register(nil, layerData, img); err != nil { return nil, err } return img, nil } // Register imports a pre-existing image into the graph. // FIXME: pass img as first argument func (graph *Graph) Register(jsonData []byte, layerData archive.ArchiveReader, img *Image) (err error) { defer func() { // If any error occurs, remove the new dir from the driver. // Don't check for errors since the dir might not have been created. // FIXME: this leaves a possible race condition. if err != nil { graph.driver.Remove(img.ID) } }() if err := ValidateID(img.ID); err != nil { return err } // (This is a convenience to save time. 
Race conditions are taken care of by os.Rename) if graph.Exists(img.ID) { return fmt.Errorf("Image %s already exists", img.ID) } // Ensure that the image root does not exist on the filesystem // when it is not registered in the graph. // This is common when you switch from one graph driver to another if err := os.RemoveAll(graph.imageRoot(img.ID)); err != nil && !os.IsNotExist(err) { return err } // If the driver has this ID but the graph doesn't, remove it from the driver to start fresh. // (the graph is the source of truth). // Ignore errors, since we don't know if the driver correctly returns ErrNotExist. // (FIXME: make that mandatory for drivers). graph.driver.Remove(img.ID) tmp, err := graph.Mktemp("") defer os.RemoveAll(tmp) if err != nil { return fmt.Errorf("Mktemp failed: %s", err) } // Create root filesystem in the driver if err := graph.driver.Create(img.ID, img.Parent); err != nil { return fmt.Errorf("Driver %s failed to create image rootfs %s: %s", graph.driver, img.ID, err) } // Mount the root filesystem so we can apply the diff/layer rootfs, err := graph.driver.Get(img.ID) if err != nil { return fmt.Errorf("Driver %s failed to get image rootfs %s: %s", graph.driver, img.ID, err) } defer graph.driver.Put(img.ID) img.graph = graph if err := StoreImage(img, jsonData, layerData, tmp, rootfs); err != nil { return err } // Commit if err := os.Rename(tmp, graph.imageRoot(img.ID)); err != nil { return err } graph.idIndex.Add(img.ID) return nil } // TempLayerArchive creates a temporary archive of the given image's filesystem layer. // The archive is stored on disk and will be automatically deleted as soon as has been read. // If output is not nil, a human-readable progress bar will be written to it. // FIXME: does this belong in Graph? How about MktempFile, let the caller use it for archives? func (graph *Graph) TempLayerArchive(id string, compression archive.Compression, sf *utils.StreamFormatter, output io.Writer) (*archive.TempArchive, error) { image, err := graph.Get(id) if err != nil { return nil, err } tmp, err := graph.Mktemp("") if err != nil { return nil, err } a, err := image.TarLayer() if err != nil { return nil, err } progress := utils.ProgressReader(a, 0, output, sf, false, utils.TruncateID(id), "Buffering to disk") defer progress.Close() return archive.NewTempArchive(progress, tmp) } // Mktemp creates a temporary sub-directory inside the graph's filesystem. func (graph *Graph) Mktemp(id string) (string, error) { dir := path.Join(graph.Root, "_tmp", GenerateID()) if err := os.MkdirAll(dir, 0700); err != nil { return "", err } return dir, nil } // setupInitLayer populates a directory with mountpoints suitable // for bind-mounting dockerinit into the container. The mountpoint is simply an // empty file at /.dockerinit // // This extra layer is used by all containers as the top-most ro layer. It protects // the container from unwanted side-effects on the rw layer. 
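// The map below enumerates every placeholder that gets pre-created: directories
// such as /dev/pts, /dev/shm, /proc and /sys, and empty files such as
// /.dockerinit, /.dockerenv, /etc/resolv.conf, /etc/hosts, /etc/hostname and
// /dev/console, all of which serve as mount targets when the container starts.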
func setupInitLayer(initLayer string) error { for pth, typ := range map[string]string{ "/dev/pts": "dir", "/dev/shm": "dir", "/proc": "dir", "/sys": "dir", "/.dockerinit": "file", "/.dockerenv": "file", "/etc/resolv.conf": "file", "/etc/hosts": "file", "/etc/hostname": "file", "/dev/console": "file", // "var/run": "dir", // "var/lock": "dir", } { parts := strings.Split(pth, "/") prev := "/" for _, p := range parts[1:] { prev = path.Join(prev, p) syscall.Unlink(path.Join(initLayer, prev)) } if _, err := os.Stat(path.Join(initLayer, pth)); err != nil { if os.IsNotExist(err) { switch typ { case "dir": if err := os.MkdirAll(path.Join(initLayer, pth), 0755); err != nil { return err } case "file": if err := os.MkdirAll(path.Join(initLayer, path.Dir(pth)), 0755); err != nil { return err } f, err := os.OpenFile(path.Join(initLayer, pth), os.O_CREATE, 0755) if err != nil { return err } f.Close() } } else { return err } } } // Layer is ready to use, if it wasn't before. return nil } // Check if given error is "not empty". // Note: this is the way golang does it internally with os.IsNotExists. func isNotEmpty(err error) bool { switch pe := err.(type) { case nil: return false case *os.PathError: err = pe.Err case *os.LinkError: err = pe.Err } return strings.Contains(err.Error(), " not empty") } // Delete atomically removes an image from the graph. func (graph *Graph) Delete(name string) error { id, err := graph.idIndex.Get(name) if err != nil { return err } tmp, err := graph.Mktemp("") if err != nil { return err } graph.idIndex.Delete(id) err = os.Rename(graph.imageRoot(id), tmp) if err != nil { return err } // Remove rootfs data from the driver graph.driver.Remove(id) // Remove the trashed image directory return os.RemoveAll(tmp) } // Map returns a list of all images in the graph, addressable by ID. func (graph *Graph) Map() (map[string]*Image, error) { images := make(map[string]*Image) err := graph.walkAll(func(image *Image) { images[image.ID] = image }) if err != nil { return nil, err } return images, nil } // walkAll iterates over each image in the graph, and passes it to a handler. // The walking order is undetermined. func (graph *Graph) walkAll(handler func(*Image)) error { files, err := ioutil.ReadDir(graph.Root) if err != nil { return err } for _, st := range files { if img, err := graph.Get(st.Name()); err != nil { // Skip image continue } else if handler != nil { handler(img) } } return nil } // ByParent returns a lookup table of images by their parent. // If an image of id ID has 3 children images, then the value for key ID // will be a list of 3 images. // If an image has no children, it will not have an entry in the table. func (graph *Graph) ByParent() (map[string][]*Image, error) { byParent := make(map[string][]*Image) err := graph.walkAll(func(image *Image) { parent, err := graph.Get(image.Parent) if err != nil { return } if children, exists := byParent[parent.ID]; exists { byParent[parent.ID] = append(children, image) } else { byParent[parent.ID] = []*Image{image} } }) return byParent, err } // Heads returns all heads in the graph, keyed by id. // A head is an image which is not the parent of another image in the graph. func (graph *Graph) Heads() (map[string]*Image, error) { heads := make(map[string]*Image) byParent, err := graph.ByParent() if err != nil { return nil, err } err = graph.walkAll(func(image *Image) { // If it's not in the byParent lookup table, then // it's not a parent -> so it's a head! 
if _, exists := byParent[image.ID]; !exists { heads[image.ID] = image } }) return heads, err } func (graph *Graph) imageRoot(id string) string { return path.Join(graph.Root, id) } func (graph *Graph) Driver() graphdriver.Driver { return graph.driver } docker-0.9.1/commands_unit_test.go0000644000175000017500000002146412314376205015340 0ustar tagtagpackage docker import ( "github.com/dotcloud/docker/runconfig" "strings" "testing" ) func parse(t *testing.T, args string) (*runconfig.Config, *runconfig.HostConfig, error) { config, hostConfig, _, err := runconfig.Parse(strings.Split(args+" ubuntu bash", " "), nil) return config, hostConfig, err } func mustParse(t *testing.T, args string) (*runconfig.Config, *runconfig.HostConfig) { config, hostConfig, err := parse(t, args) if err != nil { t.Fatal(err) } return config, hostConfig } func TestParseRunLinks(t *testing.T) { if _, hostConfig := mustParse(t, "-link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links) } if _, hostConfig := mustParse(t, "-link a:b -link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" { t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links) } if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 { t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links) } if _, _, err := parse(t, "-link a"); err == nil { t.Fatalf("Error parsing links. `-link a` should be an error but is not") } if _, _, err := parse(t, "-link"); err == nil { t.Fatalf("Error parsing links. `-link` should be an error but is not") } } func TestParseRunAttach(t *testing.T) { if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr { t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) } if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr { t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) } if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) } if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr { t.Fatalf("Error parsing attach flags. Expect Stdin disabled. 
Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) } if _, _, err := parse(t, "-a"); err == nil { t.Fatalf("Error parsing attach flags, `-a` should be an error but is not") } if _, _, err := parse(t, "-a invalid"); err == nil { t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not") } if _, _, err := parse(t, "-a invalid -a stdout"); err == nil { t.Fatalf("Error parsing attach flags, `-a stdout -a invalid` should be an error but is not") } if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil { t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not") } if _, _, err := parse(t, "-a stdin -d"); err == nil { t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not") } if _, _, err := parse(t, "-a stdout -d"); err == nil { t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not") } if _, _, err := parse(t, "-a stderr -d"); err == nil { t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not") } if _, _, err := parse(t, "-d -rm"); err == nil { t.Fatalf("Error parsing attach flags, `-d -rm` should be an error but is not") } } func TestParseRunVolumes(t *testing.T) { if config, hostConfig := mustParse(t, "-v /tmp"); hostConfig.Binds != nil { t.Fatalf("Error parsing volume flags, `-v /tmp` should not mount-bind anything. Received %v", hostConfig.Binds) } else if _, exists := config.Volumes["/tmp"]; !exists { t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes) } if config, hostConfig := mustParse(t, "-v /tmp -v /var"); hostConfig.Binds != nil { t.Fatalf("Error parsing volume flags, `-v /tmp -v /var` should not mount-bind anything. Received %v", hostConfig.Binds) } else if _, exists := config.Volumes["/tmp"]; !exists { t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Recevied %v", config.Volumes) } else if _, exists := config.Volumes["/var"]; !exists { t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes. Received %v", config.Volumes) } if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp` should mount-bind /hostTmp into /containeTmp. Received %v", hostConfig.Binds) } else if _, exists := config.Volumes["/containerTmp"]; !exists { t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes) } if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" || hostConfig.Binds[1] != "/hostVar:/containerVar" { t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /hostVar:/containerVar` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) } else if _, exists := config.Volumes["/containerTmp"]; !exists { t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes) } else if _, exists := config.Volumes["/containerVar"]; !exists { t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. 
Received %v", config.Volumes) } if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp:ro" || hostConfig.Binds[1] != "/hostVar:/containerVar:rw" { t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) } else if _, exists := config.Volumes["/containerTmp"]; !exists { t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes) } else if _, exists := config.Volumes["/containerVar"]; !exists { t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes) } if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /containerVar"); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /containerVar` should mount-bind only /hostTmp into /containeTmp. Received %v", hostConfig.Binds) } else if _, exists := config.Volumes["/containerTmp"]; !exists { t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes) } else if _, exists := config.Volumes["/containerVar"]; !exists { t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes) } if config, hostConfig := mustParse(t, ""); hostConfig.Binds != nil { t.Fatalf("Error parsing volume flags, without volume, nothing should be mount-binded. Received %v", hostConfig.Binds) } else if len(config.Volumes) != 0 { t.Fatalf("Error parsing volume flags, without volume, no volume should be present. 
Received %v", config.Volumes) } if _, _, err := parse(t, "-v /"); err == nil { t.Fatalf("Expected error, but got none") } if _, _, err := parse(t, "-v /:/"); err == nil { t.Fatalf("Error parsing volume flags, `-v /:/` should fail but didn't") } if _, _, err := parse(t, "-v"); err == nil { t.Fatalf("Error parsing volume flags, `-v` should fail but didn't") } if _, _, err := parse(t, "-v /tmp:"); err == nil { t.Fatalf("Error parsing volume flags, `-v /tmp:` should fail but didn't") } if _, _, err := parse(t, "-v /tmp:ro"); err == nil { t.Fatalf("Error parsing volume flags, `-v /tmp:ro` should fail but didn't") } if _, _, err := parse(t, "-v /tmp::"); err == nil { t.Fatalf("Error parsing volume flags, `-v /tmp::` should fail but didn't") } if _, _, err := parse(t, "-v :"); err == nil { t.Fatalf("Error parsing volume flags, `-v :` should fail but didn't") } if _, _, err := parse(t, "-v ::"); err == nil { t.Fatalf("Error parsing volume flags, `-v ::` should fail but didn't") } if _, _, err := parse(t, "-v /tmp:/tmp:/tmp:/tmp"); err == nil { t.Fatalf("Error parsing volume flags, `-v /tmp:/tmp:/tmp:/tmp` should fail but didn't") } } docker-0.9.1/VERSION0000644000175000017500000000000612314376205012147 0ustar tagtag0.9.1 docker-0.9.1/MAINTAINERS0000644000175000017500000000056312314376205012604 0ustar tagtagSolomon Hykes (@shykes) Guillaume Charmes (@creack) Victor Vieux (@vieux) Michael Crosby (@crosbymichael) .travis.yml: Tianon Gravi (@tianon) Dockerfile: Tianon Gravi (@tianon) Makefile: Tianon Gravi (@tianon) docker-0.9.1/utils.go0000644000175000017500000000240412314376205012572 0ustar tagtagpackage docker import ( "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/pkg/namesgenerator" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" ) type Change struct { archive.Change } func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostConfig) error { if config.PortSpecs != nil { ports, bindings, err := nat.ParsePortSpecs(config.PortSpecs) if err != nil { return err } config.PortSpecs = nil if len(bindings) > 0 { if hostConfig == nil { hostConfig = &runconfig.HostConfig{} } hostConfig.PortBindings = bindings } if config.ExposedPorts == nil { config.ExposedPorts = make(nat.PortSet, len(ports)) } for k, v := range ports { config.ExposedPorts[k] = v } } return nil } // Links come in the format of // name:alias func parseLink(rawLink string) (map[string]string, error) { return utils.PartParser("name:alias", rawLink) } type checker struct { runtime *Runtime } func (c *checker) Exists(name string) bool { return c.runtime.containerGraph.Exists("/" + name) } // Generate a random and unique name func generateRandomName(runtime *Runtime) (string, error) { return namesgenerator.GenerateRandomName(&checker{runtime}) } docker-0.9.1/utils_test.go0000644000175000017500000000107312314376205013632 0ustar tagtagpackage docker import ( "bytes" "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" ) func fakeTar() (io.Reader, error) { content := []byte("Hello world!\n") buf := new(bytes.Buffer) tw := tar.NewWriter(buf) for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} { hdr := new(tar.Header) hdr.Size = int64(len(content)) hdr.Name = name if err := tw.WriteHeader(hdr); err != nil { return nil, err } tw.Write([]byte(content)) } tw.Close() return buf, nil } docker-0.9.1/volumes.go0000644000175000017500000002070012314376205013123 0ustar 
tagtagpackage docker import ( "fmt" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/pkg/mount" "github.com/dotcloud/docker/utils" "io/ioutil" "log" "os" "path/filepath" "strings" "syscall" ) type BindMap struct { SrcPath string DstPath string Mode string } func prepareVolumesForContainer(container *Container) error { if container.Volumes == nil || len(container.Volumes) == 0 { container.Volumes = make(map[string]string) container.VolumesRW = make(map[string]bool) if err := applyVolumesFrom(container); err != nil { return err } } if err := createVolumes(container); err != nil { return err } return nil } func mountVolumesForContainer(container *Container, envPath string) error { // Setup the root fs as a bind mount of the base fs var ( root = container.RootfsPath() runtime = container.runtime ) if err := os.MkdirAll(root, 0755); err != nil && !os.IsExist(err) { return nil } // Create a bind mount of the base fs as a place where we can add mounts // without affecting the ability to access the base fs if err := mount.Mount(container.basefs, root, "none", "bind,rw"); err != nil { return err } // Make sure the root fs is private so the mounts here don't propagate to basefs if err := mount.ForceMount(root, root, "none", "private"); err != nil { return err } // Mount docker specific files into the containers root fs if err := mount.Mount(runtime.sysInitPath, filepath.Join(root, "/.dockerinit"), "none", "bind,ro"); err != nil { return err } if err := mount.Mount(envPath, filepath.Join(root, "/.dockerenv"), "none", "bind,ro"); err != nil { return err } if err := mount.Mount(container.ResolvConfPath, filepath.Join(root, "/etc/resolv.conf"), "none", "bind,ro"); err != nil { return err } if container.HostnamePath != "" && container.HostsPath != "" { if err := mount.Mount(container.HostnamePath, filepath.Join(root, "/etc/hostname"), "none", "bind,ro"); err != nil { return err } if err := mount.Mount(container.HostsPath, filepath.Join(root, "/etc/hosts"), "none", "bind,ro"); err != nil { return err } } // Mount user specified volumes for r, v := range container.Volumes { mountAs := "ro" if container.VolumesRW[r] { mountAs = "rw" } r = filepath.Join(root, r) if p, err := utils.FollowSymlinkInScope(r, root); err != nil { return err } else { r = p } if err := mount.Mount(v, r, "none", fmt.Sprintf("bind,%s", mountAs)); err != nil { return err } } return nil } func unmountVolumesForContainer(container *Container) { var ( root = container.RootfsPath() mounts = []string{ root, filepath.Join(root, "/.dockerinit"), filepath.Join(root, "/.dockerenv"), filepath.Join(root, "/etc/resolv.conf"), } ) if container.HostnamePath != "" && container.HostsPath != "" { mounts = append(mounts, filepath.Join(root, "/etc/hostname"), filepath.Join(root, "/etc/hosts")) } for r := range container.Volumes { mounts = append(mounts, filepath.Join(root, r)) } for i := len(mounts) - 1; i >= 0; i-- { if lastError := mount.Unmount(mounts[i]); lastError != nil { log.Printf("Failed to umount %v: %v", mounts[i], lastError) } } } func applyVolumesFrom(container *Container) error { if container.Config.VolumesFrom != "" { for _, containerSpec := range strings.Split(container.Config.VolumesFrom, ",") { var ( mountRW = true specParts = strings.SplitN(containerSpec, ":", 2) ) switch len(specParts) { case 0: return fmt.Errorf("Malformed volumes-from specification: %s", container.Config.VolumesFrom) case 2: switch specParts[1] { case "ro": mountRW = false case "rw": // mountRW is already true default: return fmt.Errorf("Malformed 
volumes-from specification: %s", containerSpec) } } c := container.runtime.Get(specParts[0]) if c == nil { return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.ID) } for volPath, id := range c.Volumes { if _, exists := container.Volumes[volPath]; exists { continue } if err := os.MkdirAll(filepath.Join(container.basefs, volPath), 0755); err != nil { return err } container.Volumes[volPath] = id if isRW, exists := c.VolumesRW[volPath]; exists { container.VolumesRW[volPath] = isRW && mountRW } } } } return nil } func getBindMap(container *Container) (map[string]BindMap, error) { var ( // Create the requested bind mounts binds = make(map[string]BindMap) // Define illegal container destinations illegalDsts = []string{"/", "."} ) for _, bind := range container.hostConfig.Binds { // FIXME: factorize bind parsing in parseBind var ( src, dst, mode string arr = strings.Split(bind, ":") ) if len(arr) == 2 { src = arr[0] dst = arr[1] mode = "rw" } else if len(arr) == 3 { src = arr[0] dst = arr[1] mode = arr[2] } else { return nil, fmt.Errorf("Invalid bind specification: %s", bind) } // Bail if trying to mount to an illegal destination for _, illegal := range illegalDsts { if dst == illegal { return nil, fmt.Errorf("Illegal bind destination: %s", dst) } } bindMap := BindMap{ SrcPath: src, DstPath: dst, Mode: mode, } binds[filepath.Clean(dst)] = bindMap } return binds, nil } func createVolumes(container *Container) error { binds, err := getBindMap(container) if err != nil { return err } volumesDriver := container.runtime.volumes.driver // Create the requested volumes if they don't exist for volPath := range container.Config.Volumes { volPath = filepath.Clean(volPath) volIsDir := true // Skip existing volumes if _, exists := container.Volumes[volPath]; exists { continue } var srcPath string var isBindMount bool srcRW := false // If an external bind is defined for this volume, use that as a source if bindMap, exists := binds[volPath]; exists { isBindMount = true srcPath = bindMap.SrcPath if strings.ToLower(bindMap.Mode) == "rw" { srcRW = true } if stat, err := os.Stat(bindMap.SrcPath); err != nil { return err } else { volIsDir = stat.IsDir() } // Otherwise create an directory in $ROOT/volumes/ and use that } else { // Do not pass a container as the parameter for the volume creation. // The graph driver using the container's information ( Image ) to // create the parent. 
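// With Create(nil, nil, "", "", nil) the volumes graph registers an empty image:
// no layer data, no originating container, and no comment, author or config.
// The rootfs the driver allocates for that image (fetched via volumesDriver.Get
// below) becomes the backing directory for this volume.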
c, err := container.runtime.volumes.Create(nil, nil, "", "", nil) if err != nil { return err } srcPath, err = volumesDriver.Get(c.ID) if err != nil { return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err) } srcRW = true // RW by default } if p, err := filepath.EvalSymlinks(srcPath); err != nil { return err } else { srcPath = p } container.Volumes[volPath] = srcPath container.VolumesRW[volPath] = srcRW // Create the mountpoint volPath = filepath.Join(container.basefs, volPath) rootVolPath, err := utils.FollowSymlinkInScope(volPath, container.basefs) if err != nil { return err } if _, err := os.Stat(rootVolPath); err != nil { if os.IsNotExist(err) { if volIsDir { if err := os.MkdirAll(rootVolPath, 0755); err != nil { return err } } else { if err := os.MkdirAll(filepath.Dir(rootVolPath), 0755); err != nil { return err } if f, err := os.OpenFile(rootVolPath, os.O_CREATE, 0755); err != nil { return err } else { f.Close() } } } } // Do not copy or change permissions if we are mounting from the host if srcRW && !isBindMount { volList, err := ioutil.ReadDir(rootVolPath) if err != nil { return err } if len(volList) > 0 { srcList, err := ioutil.ReadDir(srcPath) if err != nil { return err } if len(srcList) == 0 { // If the source volume is empty copy files from the root into the volume if err := archive.CopyWithTar(rootVolPath, srcPath); err != nil { return err } var stat syscall.Stat_t if err := syscall.Stat(rootVolPath, &stat); err != nil { return err } var srcStat syscall.Stat_t if err := syscall.Stat(srcPath, &srcStat); err != nil { return err } // Change the source volume's ownership if it differs from the root // files that were just copied if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid { if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil { return err } } } } } } return nil } docker-0.9.1/execdriver/0000755000175000017500000000000012314376205013243 5ustar tagtagdocker-0.9.1/execdriver/termconsole.go0000644000175000017500000000450112314376205016124 0ustar tagtagpackage execdriver import ( "github.com/dotcloud/docker/pkg/term" "github.com/kr/pty" "io" "os" "os/exec" ) func SetTerminal(command *Command, pipes *Pipes) error { var ( term Terminal err error ) if command.Tty { term, err = NewTtyConsole(command, pipes) } else { term, err = NewStdConsole(command, pipes) } if err != nil { return err } command.Terminal = term return nil } type TtyConsole struct { MasterPty *os.File SlavePty *os.File } func NewTtyConsole(command *Command, pipes *Pipes) (*TtyConsole, error) { ptyMaster, ptySlave, err := pty.Open() if err != nil { return nil, err } tty := &TtyConsole{ MasterPty: ptyMaster, SlavePty: ptySlave, } if err := tty.AttachPipes(&command.Cmd, pipes); err != nil { tty.Close() return nil, err } command.Console = tty.SlavePty.Name() return tty, nil } func (t *TtyConsole) Master() *os.File { return t.MasterPty } func (t *TtyConsole) Resize(h, w int) error { return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)}) } func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *Pipes) error { command.Stdout = t.SlavePty command.Stderr = t.SlavePty go func() { if wb, ok := pipes.Stdout.(interface { CloseWriters() error }); ok { defer wb.CloseWriters() } io.Copy(pipes.Stdout, t.MasterPty) }() if pipes.Stdin != nil { command.Stdin = t.SlavePty command.SysProcAttr.Setctty = true go func() { defer pipes.Stdin.Close() io.Copy(t.MasterPty, pipes.Stdin) }() } return nil } func (t *TtyConsole) Close() error { 
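// Close tears down both halves of the pty; the slave's close error is discarded
// and only the master's close status is returned to the caller.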
t.SlavePty.Close() return t.MasterPty.Close() } type StdConsole struct { } func NewStdConsole(command *Command, pipes *Pipes) (*StdConsole, error) { std := &StdConsole{} if err := std.AttachPipes(&command.Cmd, pipes); err != nil { return nil, err } return std, nil } func (s *StdConsole) AttachPipes(command *exec.Cmd, pipes *Pipes) error { command.Stdout = pipes.Stdout command.Stderr = pipes.Stderr if pipes.Stdin != nil { stdin, err := command.StdinPipe() if err != nil { return err } go func() { defer stdin.Close() io.Copy(stdin, pipes.Stdin) }() } return nil } func (s *StdConsole) Resize(h, w int) error { // we do not need to reside a non tty return nil } func (s *StdConsole) Close() error { // nothing to close here return nil } docker-0.9.1/execdriver/MAINTAINERS0000644000175000017500000000016112314376205014736 0ustar tagtagMichael Crosby (@crosbymichael) Guillaume Charmes (@creack) docker-0.9.1/execdriver/native/0000755000175000017500000000000012314376205014531 5ustar tagtagdocker-0.9.1/execdriver/native/driver.go0000644000175000017500000001372012314376205016356 0ustar tagtagpackage native import ( "encoding/json" "fmt" "github.com/dotcloud/docker/execdriver" "github.com/dotcloud/docker/pkg/cgroups" "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/libcontainer/apparmor" "github.com/dotcloud/docker/pkg/libcontainer/nsinit" "github.com/dotcloud/docker/pkg/system" "io/ioutil" "os" "os/exec" "path/filepath" "strconv" "strings" "syscall" ) const ( DriverName = "native" Version = "0.1" ) func init() { execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error { var ( container *libcontainer.Container ns = nsinit.NewNsInit(&nsinit.DefaultCommandFactory{}, &nsinit.DefaultStateWriter{args.Root}) ) f, err := os.Open(filepath.Join(args.Root, "container.json")) if err != nil { return err } if err := json.NewDecoder(f).Decode(&container); err != nil { f.Close() return err } f.Close() cwd, err := os.Getwd() if err != nil { return err } syncPipe, err := nsinit.NewSyncPipeFromFd(0, uintptr(args.Pipe)) if err != nil { return err } if err := ns.Init(container, cwd, args.Console, syncPipe, args.Args); err != nil { return err } return nil }) } type driver struct { root string } func NewDriver(root string) (*driver, error) { if err := os.MkdirAll(root, 0700); err != nil { return nil, err } if err := apparmor.InstallDefaultProfile(); err != nil { return nil, err } return &driver{ root: root, }, nil } func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { if err := d.validateCommand(c); err != nil { return -1, err } var ( term nsinit.Terminal container = createContainer(c) factory = &dockerCommandFactory{c: c, driver: d} stateWriter = &dockerStateWriter{ callback: startCallback, c: c, dsw: &nsinit.DefaultStateWriter{filepath.Join(d.root, c.ID)}, } ns = nsinit.NewNsInit(factory, stateWriter) args = append([]string{c.Entrypoint}, c.Arguments...) 
) if err := d.createContainerRoot(c.ID); err != nil { return -1, err } defer d.removeContainerRoot(c.ID) if c.Tty { term = &dockerTtyTerm{ pipes: pipes, } } else { term = &dockerStdTerm{ pipes: pipes, } } c.Terminal = term if err := d.writeContainerFile(container, c.ID); err != nil { return -1, err } return ns.Exec(container, term, args) } func (d *driver) Kill(p *execdriver.Command, sig int) error { err := syscall.Kill(p.Process.Pid, syscall.Signal(sig)) d.removeContainerRoot(p.ID) return err } func (d *driver) Info(id string) execdriver.Info { return &info{ ID: id, driver: d, } } func (d *driver) Name() string { return fmt.Sprintf("%s-%s", DriverName, Version) } // TODO: this can be improved with our driver // there has to be a better way to do this func (d *driver) GetPidsForContainer(id string) ([]int, error) { pids := []int{} subsystem := "devices" cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem) if err != nil { return pids, err } cgroupDir, err := cgroups.GetThisCgroupDir(subsystem) if err != nil { return pids, err } filename := filepath.Join(cgroupRoot, cgroupDir, id, "tasks") if _, err := os.Stat(filename); os.IsNotExist(err) { filename = filepath.Join(cgroupRoot, cgroupDir, "docker", id, "tasks") } output, err := ioutil.ReadFile(filename) if err != nil { return pids, err } for _, p := range strings.Split(string(output), "\n") { if len(p) == 0 { continue } pid, err := strconv.Atoi(p) if err != nil { return pids, fmt.Errorf("Invalid pid '%s': %s", p, err) } pids = append(pids, pid) } return pids, nil } func (d *driver) writeContainerFile(container *libcontainer.Container, id string) error { data, err := json.Marshal(container) if err != nil { return err } return ioutil.WriteFile(filepath.Join(d.root, id, "container.json"), data, 0655) } func (d *driver) createContainerRoot(id string) error { return os.MkdirAll(filepath.Join(d.root, id), 0655) } func (d *driver) removeContainerRoot(id string) error { return os.RemoveAll(filepath.Join(d.root, id)) } func (d *driver) validateCommand(c *execdriver.Command) error { // we need to check the Config of the command to make sure that we // do not have any of the lxc-conf variables for _, conf := range c.Config { if strings.Contains(conf, "lxc") { return fmt.Errorf("%s is not supported by the native driver", conf) } } return nil } func getEnv(key string, env []string) string { for _, pair := range env { parts := strings.Split(pair, "=") if parts[0] == key { return parts[1] } } return "" } type dockerCommandFactory struct { c *execdriver.Command driver *driver } // createCommand will return an exec.Cmd with the Cloneflags set to the proper namespaces // defined on the container's configuration and use the current binary as the init with the // args provided func (d *dockerCommandFactory) Create(container *libcontainer.Container, console string, syncFile *os.File, args []string) *exec.Cmd { // we need to join the rootfs because nsinit will setup the rootfs and chroot initPath := filepath.Join(d.c.Rootfs, d.c.InitPath) d.c.Path = initPath d.c.Args = append([]string{ initPath, "-driver", DriverName, "-console", console, "-pipe", "3", "-root", filepath.Join(d.driver.root, d.c.ID), "--", }, args...) 
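// The argv assembled above runs dockerinit from inside the container rootfs
// (Rootfs joined with InitPath) as:
//   <initPath> -driver native -console <console> -pipe 3 -root <driver root>/<container id> -- <entrypoint> <arguments...>
// where fd 3 is the sync pipe handed over through ExtraFiles below.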
// set this to nil so that when we set the clone flags anything else is reset d.c.SysProcAttr = nil system.SetCloneFlags(&d.c.Cmd, uintptr(nsinit.GetNamespaceFlags(container.Namespaces))) d.c.ExtraFiles = []*os.File{syncFile} d.c.Env = container.Env d.c.Dir = d.c.Rootfs return &d.c.Cmd } type dockerStateWriter struct { dsw nsinit.StateWriter c *execdriver.Command callback execdriver.StartCallback } func (d *dockerStateWriter) WritePid(pid int) error { d.c.ContainerPid = pid err := d.dsw.WritePid(pid) if d.callback != nil { d.callback(d.c) } return err } func (d *dockerStateWriter) DeletePid() error { return d.dsw.DeletePid() } docker-0.9.1/execdriver/native/term.go0000644000175000017500000000146112314376205016031 0ustar tagtag/* These types are wrappers around the libcontainer Terminal interface so that we can resuse the docker implementations where possible. */ package native import ( "github.com/dotcloud/docker/execdriver" "io" "os" "os/exec" ) type dockerStdTerm struct { execdriver.StdConsole pipes *execdriver.Pipes } func (d *dockerStdTerm) Attach(cmd *exec.Cmd) error { return d.AttachPipes(cmd, d.pipes) } func (d *dockerStdTerm) SetMaster(master *os.File) { // do nothing } type dockerTtyTerm struct { execdriver.TtyConsole pipes *execdriver.Pipes } func (t *dockerTtyTerm) Attach(cmd *exec.Cmd) error { go io.Copy(t.pipes.Stdout, t.MasterPty) if t.pipes.Stdin != nil { go io.Copy(t.MasterPty, t.pipes.Stdin) } return nil } func (t *dockerTtyTerm) SetMaster(master *os.File) { t.MasterPty = master } docker-0.9.1/execdriver/native/info.go0000644000175000017500000000060412314376205016013 0ustar tagtagpackage native import ( "os" "path/filepath" ) type info struct { ID string driver *driver } // IsRunning is determined by looking for the // pid file for a container. 
If the file exists then the // container is currently running func (i *info) IsRunning() bool { if _, err := os.Stat(filepath.Join(i.driver.root, i.ID, "pid")); err == nil { return true } return false } docker-0.9.1/execdriver/native/default_template.go0000644000175000017500000000600512314376205020400 0ustar tagtagpackage native import ( "fmt" "github.com/dotcloud/docker/execdriver" "github.com/dotcloud/docker/pkg/cgroups" "github.com/dotcloud/docker/pkg/libcontainer" "os" ) // createContainer populates and configures the container type with the // data provided by the execdriver.Command func createContainer(c *execdriver.Command) *libcontainer.Container { container := getDefaultTemplate() container.Hostname = getEnv("HOSTNAME", c.Env) container.Tty = c.Tty container.User = c.User container.WorkingDir = c.WorkingDir container.Env = c.Env loopbackNetwork := libcontainer.Network{ Mtu: c.Network.Mtu, Address: fmt.Sprintf("%s/%d", "127.0.0.1", 0), Gateway: "localhost", Type: "loopback", Context: libcontainer.Context{}, } container.Networks = []*libcontainer.Network{ &loopbackNetwork, } if c.Network.Interface != nil { vethNetwork := libcontainer.Network{ Mtu: c.Network.Mtu, Address: fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen), Gateway: c.Network.Interface.Gateway, Type: "veth", Context: libcontainer.Context{ "prefix": "veth", "bridge": c.Network.Interface.Bridge, }, } container.Networks = append(container.Networks, &vethNetwork) } container.Cgroups.Name = c.ID if c.Privileged { container.Capabilities = nil container.Cgroups.DeviceAccess = true container.Context["apparmor_profile"] = "unconfined" } if c.Resources != nil { container.Cgroups.CpuShares = c.Resources.CpuShares container.Cgroups.Memory = c.Resources.Memory container.Cgroups.MemorySwap = c.Resources.MemorySwap } // check to see if we are running in ramdisk to disable pivot root container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != "" return container } // getDefaultTemplate returns the docker default for // the libcontainer configuration file func getDefaultTemplate() *libcontainer.Container { return &libcontainer.Container{ Capabilities: libcontainer.Capabilities{ libcontainer.GetCapability("SETPCAP"), libcontainer.GetCapability("SYS_MODULE"), libcontainer.GetCapability("SYS_RAWIO"), libcontainer.GetCapability("SYS_PACCT"), libcontainer.GetCapability("SYS_ADMIN"), libcontainer.GetCapability("SYS_NICE"), libcontainer.GetCapability("SYS_RESOURCE"), libcontainer.GetCapability("SYS_TIME"), libcontainer.GetCapability("SYS_TTY_CONFIG"), libcontainer.GetCapability("MKNOD"), libcontainer.GetCapability("AUDIT_WRITE"), libcontainer.GetCapability("AUDIT_CONTROL"), libcontainer.GetCapability("MAC_OVERRIDE"), libcontainer.GetCapability("MAC_ADMIN"), libcontainer.GetCapability("NET_ADMIN"), }, Namespaces: libcontainer.Namespaces{ libcontainer.GetNamespace("NEWNS"), libcontainer.GetNamespace("NEWUTS"), libcontainer.GetNamespace("NEWIPC"), libcontainer.GetNamespace("NEWPID"), libcontainer.GetNamespace("NEWNET"), }, Cgroups: &cgroups.Cgroup{ Parent: "docker", DeviceAccess: false, }, Context: libcontainer.Context{ "apparmor_profile": "docker-default", }, } } docker-0.9.1/execdriver/driver.go0000644000175000017500000000730112314376205015066 0ustar tagtagpackage execdriver import ( "errors" "io" "os" "os/exec" ) var ( ErrNotRunning = errors.New("Process could not be started") ErrWaitTimeoutReached = errors.New("Wait timeout reached") ErrDriverAlreadyRegistered = errors.New("A driver already registered this 
docker init function") ErrDriverNotFound = errors.New("The requested docker init has not been found") ) var dockerInitFcts map[string]InitFunc type ( StartCallback func(*Command) InitFunc func(i *InitArgs) error ) func RegisterInitFunc(name string, fct InitFunc) error { if dockerInitFcts == nil { dockerInitFcts = make(map[string]InitFunc) } if _, ok := dockerInitFcts[name]; ok { return ErrDriverAlreadyRegistered } dockerInitFcts[name] = fct return nil } func GetInitFunc(name string) (InitFunc, error) { fct, ok := dockerInitFcts[name] if !ok { return nil, ErrDriverNotFound } return fct, nil } // Args provided to the init function for a driver type InitArgs struct { User string Gateway string Ip string WorkDir string Privileged bool Env []string Args []string Mtu int Driver string Console string Pipe int Root string } // Driver specific information based on // processes registered with the driver type Info interface { IsRunning() bool } // Terminal in an interface for drivers to implement // if they want to support Close and Resize calls from // the core type Terminal interface { io.Closer Resize(height, width int) error } type TtyTerminal interface { Master() *os.File } type Driver interface { Run(c *Command, pipes *Pipes, startCallback StartCallback) (int, error) // Run executes the process and blocks until the process exits and returns the exit code Kill(c *Command, sig int) error Name() string // Driver name Info(id string) Info // "temporary" hack (until we move state from core to plugins) GetPidsForContainer(id string) ([]int, error) // Returns a list of pids for the given container. } // Network settings of the container type Network struct { Interface *NetworkInterface `json:"interface"` // if interface is nil then networking is disabled Mtu int `json:"mtu"` } type NetworkInterface struct { Gateway string `json:"gateway"` IPAddress string `json:"ip"` Bridge string `json:"bridge"` IPPrefixLen int `json:"ip_prefix_len"` } type Resources struct { Memory int64 `json:"memory"` MemorySwap int64 `json:"memory_swap"` CpuShares int64 `json:"cpu_shares"` } // Process wrapps an os/exec.Cmd to add more metadata type Command struct { exec.Cmd `json:"-"` ID string `json:"id"` Privileged bool `json:"privileged"` User string `json:"user"` Rootfs string `json:"rootfs"` // root fs of the container InitPath string `json:"initpath"` // dockerinit Entrypoint string `json:"entrypoint"` Arguments []string `json:"arguments"` WorkingDir string `json:"working_dir"` ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver Tty bool `json:"tty"` Network *Network `json:"network"` Config []string `json:"config"` // generic values that specific drivers can consume Resources *Resources `json:"resources"` Terminal Terminal `json:"-"` // standard or tty terminal Console string `json:"-"` // dev/console path ContainerPid int `json:"container_pid"` // the pid for the process inside a container } // Return the pid of the process // If the process is nil -1 will be returned func (c *Command) Pid() int { return c.ContainerPid } docker-0.9.1/execdriver/pipes.go0000644000175000017500000000057512314376205014721 0ustar tagtagpackage execdriver import ( "io" ) // Pipes is a wrapper around a containers output for // stdin, stdout, stderr type Pipes struct { Stdin io.ReadCloser Stdout, Stderr io.Writer } func NewPipes(stdin io.ReadCloser, stdout, stderr io.Writer, useStdin bool) *Pipes { p := &Pipes{ Stdout: stdout, Stderr: stderr, } if useStdin { p.Stdin = stdin } 
return p } docker-0.9.1/execdriver/lxc/0000755000175000017500000000000012314376205014031 5ustar tagtagdocker-0.9.1/execdriver/lxc/init.go0000644000175000017500000000672512314376205015335 0ustar tagtagpackage lxc import ( "fmt" "github.com/dotcloud/docker/execdriver" "github.com/dotcloud/docker/pkg/netlink" "github.com/dotcloud/docker/pkg/user" "github.com/syndtr/gocapability/capability" "net" "os" "strings" "syscall" ) func setupHostname(args *execdriver.InitArgs) error { hostname := getEnv(args, "HOSTNAME") if hostname == "" { return nil } return setHostname(hostname) } // Setup networking func setupNetworking(args *execdriver.InitArgs) error { if args.Ip != "" { // eth0 iface, err := net.InterfaceByName("eth0") if err != nil { return fmt.Errorf("Unable to set up networking: %v", err) } ip, ipNet, err := net.ParseCIDR(args.Ip) if err != nil { return fmt.Errorf("Unable to set up networking: %v", err) } if err := netlink.NetworkLinkAddIp(iface, ip, ipNet); err != nil { return fmt.Errorf("Unable to set up networking: %v", err) } if err := netlink.NetworkSetMTU(iface, args.Mtu); err != nil { return fmt.Errorf("Unable to set MTU: %v", err) } if err := netlink.NetworkLinkUp(iface); err != nil { return fmt.Errorf("Unable to set up networking: %v", err) } // loopback iface, err = net.InterfaceByName("lo") if err != nil { return fmt.Errorf("Unable to set up networking: %v", err) } if err := netlink.NetworkLinkUp(iface); err != nil { return fmt.Errorf("Unable to set up networking: %v", err) } } if args.Gateway != "" { gw := net.ParseIP(args.Gateway) if gw == nil { return fmt.Errorf("Unable to set up networking, %s is not a valid gateway IP", args.Gateway) } if err := netlink.AddDefaultGw(gw); err != nil { return fmt.Errorf("Unable to set up networking: %v", err) } } return nil } // Setup working directory func setupWorkingDirectory(args *execdriver.InitArgs) error { if args.WorkDir == "" { return nil } if err := syscall.Chdir(args.WorkDir); err != nil { return fmt.Errorf("Unable to change dir to %v: %v", args.WorkDir, err) } return nil } // Takes care of dropping privileges to the desired user func changeUser(args *execdriver.InitArgs) error { uid, gid, suppGids, err := user.GetUserGroupSupplementary( args.User, syscall.Getuid(), syscall.Getgid(), ) if err != nil { return err } if err := syscall.Setgroups(suppGids); err != nil { return fmt.Errorf("Setgroups failed: %v", err) } if err := syscall.Setgid(gid); err != nil { return fmt.Errorf("Setgid failed: %v", err) } if err := syscall.Setuid(uid); err != nil { return fmt.Errorf("Setuid failed: %v", err) } return nil } func setupCapabilities(args *execdriver.InitArgs) error { if args.Privileged { return nil } drop := []capability.Cap{ capability.CAP_SETPCAP, capability.CAP_SYS_MODULE, capability.CAP_SYS_RAWIO, capability.CAP_SYS_PACCT, capability.CAP_SYS_ADMIN, capability.CAP_SYS_NICE, capability.CAP_SYS_RESOURCE, capability.CAP_SYS_TIME, capability.CAP_SYS_TTY_CONFIG, capability.CAP_MKNOD, capability.CAP_AUDIT_WRITE, capability.CAP_AUDIT_CONTROL, capability.CAP_MAC_OVERRIDE, capability.CAP_MAC_ADMIN, capability.CAP_NET_ADMIN, } c, err := capability.NewPid(os.Getpid()) if err != nil { return err } c.Unset(capability.CAPS|capability.BOUNDS, drop...) 
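// Unset only edits the in-memory capability set loaded by capability.NewPid
// above; nothing is actually dropped until Apply below commits the change for
// both the process capabilities (CAPS) and the bounding set (BOUNDS).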
if err := c.Apply(capability.CAPS | capability.BOUNDS); err != nil { return err } return nil } func getEnv(args *execdriver.InitArgs, key string) string { for _, kv := range args.Env { parts := strings.SplitN(kv, "=", 2) if parts[0] == key && len(parts) == 2 { return parts[1] } } return "" } docker-0.9.1/execdriver/lxc/info_test.go0000644000175000017500000000107212314376205016352 0ustar tagtagpackage lxc import ( "testing" ) func TestParseRunningInfo(t *testing.T) { raw := ` state: RUNNING pid: 50` info, err := parseLxcInfo(raw) if err != nil { t.Fatal(err) } if !info.Running { t.Fatal("info should return a running state") } if info.Pid != 50 { t.Fatalf("info should have pid 50 got %d", info.Pid) } } func TestEmptyInfo(t *testing.T) { _, err := parseLxcInfo("") if err == nil { t.Fatal("error should not be nil") } } func TestBadInfo(t *testing.T) { _, err := parseLxcInfo("state") if err != nil { t.Fatal(err) } } docker-0.9.1/execdriver/lxc/driver.go0000644000175000017500000002121112314376205015650 0ustar tagtagpackage lxc import ( "fmt" "github.com/dotcloud/docker/execdriver" "github.com/dotcloud/docker/pkg/cgroups" "github.com/dotcloud/docker/utils" "io/ioutil" "log" "os" "os/exec" "path" "path/filepath" "strconv" "strings" "syscall" "time" ) const DriverName = "lxc" func init() { execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error { if err := setupHostname(args); err != nil { return err } if err := setupNetworking(args); err != nil { return err } if err := setupCapabilities(args); err != nil { return err } if err := setupWorkingDirectory(args); err != nil { return err } if err := changeUser(args); err != nil { return err } path, err := exec.LookPath(args.Args[0]) if err != nil { log.Printf("Unable to locate %v", args.Args[0]) os.Exit(127) } if err := syscall.Exec(path, args.Args, os.Environ()); err != nil { return fmt.Errorf("dockerinit unable to execute %s - %s", path, err) } panic("Unreachable") }) } type driver struct { root string // root path for the driver to use apparmor bool sharedRoot bool } func NewDriver(root string, apparmor bool) (*driver, error) { // setup unconfined symlink if err := linkLxcStart(root); err != nil { return nil, err } return &driver{ apparmor: apparmor, root: root, sharedRoot: rootIsShared(), }, nil } func (d *driver) Name() string { version := d.version() return fmt.Sprintf("%s-%s", DriverName, version) } func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { if err := execdriver.SetTerminal(c, pipes); err != nil { return -1, err } configPath, err := d.generateLXCConfig(c) if err != nil { return -1, err } params := []string{ "lxc-start", "-n", c.ID, "-f", configPath, "--", c.InitPath, "-driver", DriverName, } if c.Network.Interface != nil { params = append(params, "-g", c.Network.Interface.Gateway, "-i", fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen), ) } params = append(params, "-mtu", strconv.Itoa(c.Network.Mtu), ) if c.User != "" { params = append(params, "-u", c.User) } if c.Privileged { if d.apparmor { params[0] = path.Join(d.root, "lxc-start-unconfined") } params = append(params, "-privileged") } if c.WorkingDir != "" { params = append(params, "-w", c.WorkingDir) } params = append(params, "--", c.Entrypoint) params = append(params, c.Arguments...) 
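// At this point params holds the complete lxc-start invocation, roughly
// (illustrative values only):
//	lxc-start -n <id> -f <config path> -- <dockerinit> -driver lxc -mtu 1500 -- <entrypoint> <args...>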
if d.sharedRoot { // lxc-start really needs / to be non-shared, or all kinds of stuff break // when lxc-start unmount things and those unmounts propagate to the main // mount namespace. // What we really want is to clone into a new namespace and then // mount / MS_REC|MS_SLAVE, but since we can't really clone or fork // without exec in go we have to do this horrible shell hack... shellString := "mount --make-rslave /; exec " + utils.ShellQuoteArguments(params) params = []string{ "unshare", "-m", "--", "/bin/sh", "-c", shellString, } } var ( name = params[0] arg = params[1:] ) aname, err := exec.LookPath(name) if err != nil { aname = name } c.Path = aname c.Args = append([]string{name}, arg...) if err := c.Start(); err != nil { return -1, err } var ( waitErr error waitLock = make(chan struct{}) ) go func() { if err := c.Wait(); err != nil { if _, ok := err.(*exec.ExitError); !ok { // Do not propagate the error if it's simply a status code != 0 waitErr = err } } close(waitLock) }() // Poll lxc for RUNNING status pid, err := d.waitForStart(c, waitLock) if err != nil { if c.Process != nil { c.Process.Kill() } return -1, err } c.ContainerPid = pid if startCallback != nil { startCallback(c) } <-waitLock return getExitCode(c), waitErr } /// Return the exit code of the process // if the process has not exited -1 will be returned func getExitCode(c *execdriver.Command) int { if c.ProcessState == nil { return -1 } return c.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() } func (d *driver) Kill(c *execdriver.Command, sig int) error { return KillLxc(c.ID, sig) } func (d *driver) version() string { var ( version string output []byte err error ) if _, errPath := exec.LookPath("lxc-version"); errPath == nil { output, err = exec.Command("lxc-version").CombinedOutput() } else { output, err = exec.Command("lxc-start", "--version").CombinedOutput() } if err == nil { version = strings.TrimSpace(string(output)) if parts := strings.SplitN(version, ":", 2); len(parts) == 2 { version = strings.TrimSpace(parts[1]) } } return version } func KillLxc(id string, sig int) error { var ( err error output []byte ) _, err = exec.LookPath("lxc-kill") if err == nil { output, err = exec.Command("lxc-kill", "-n", id, strconv.Itoa(sig)).CombinedOutput() } else { output, err = exec.Command("lxc-stop", "-k", "-n", id, strconv.Itoa(sig)).CombinedOutput() } if err != nil { return fmt.Errorf("Err: %s Output: %s", err, output) } return nil } // wait for the process to start and return the pid for the process func (d *driver) waitForStart(c *execdriver.Command, waitLock chan struct{}) (int, error) { var ( err error output []byte ) // We wait for the container to be fully running. // Timeout after 5 seconds. In case of broken pipe, just retry. 
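// The loop below polls `lxc-info -n <id>` (via d.getInfo) roughly every 50ms
// until it reports a RUNNING state, returning the pid, or the 5 second window
// elapses.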
// Note: The container can run and finish correctly before // the end of this loop for now := time.Now(); time.Since(now) < 5*time.Second; { select { case <-waitLock: // If the process dies while waiting for it, just return return -1, nil default: } output, err = d.getInfo(c.ID) if err != nil { output, err = d.getInfo(c.ID) if err != nil { return -1, err } } info, err := parseLxcInfo(string(output)) if err != nil { return -1, err } if info.Running { return info.Pid, nil } time.Sleep(50 * time.Millisecond) } return -1, execdriver.ErrNotRunning } func (d *driver) getInfo(id string) ([]byte, error) { return exec.Command("lxc-info", "-n", id).CombinedOutput() } type info struct { ID string driver *driver } func (i *info) IsRunning() bool { var running bool output, err := i.driver.getInfo(i.ID) if err != nil { utils.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output) return false } if strings.Contains(string(output), "RUNNING") { running = true } return running } func (d *driver) Info(id string) execdriver.Info { return &info{ ID: id, driver: d, } } func (d *driver) GetPidsForContainer(id string) ([]int, error) { pids := []int{} // cpu is chosen because it is the only non optional subsystem in cgroups subsystem := "cpu" cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem) if err != nil { return pids, err } cgroupDir, err := cgroups.GetThisCgroupDir(subsystem) if err != nil { return pids, err } filename := filepath.Join(cgroupRoot, cgroupDir, id, "tasks") if _, err := os.Stat(filename); os.IsNotExist(err) { // With more recent lxc versions use, cgroup will be in lxc/ filename = filepath.Join(cgroupRoot, cgroupDir, "lxc", id, "tasks") } output, err := ioutil.ReadFile(filename) if err != nil { return pids, err } for _, p := range strings.Split(string(output), "\n") { if len(p) == 0 { continue } pid, err := strconv.Atoi(p) if err != nil { return pids, fmt.Errorf("Invalid pid '%s': %s", p, err) } pids = append(pids, pid) } return pids, nil } func linkLxcStart(root string) error { sourcePath, err := exec.LookPath("lxc-start") if err != nil { return err } targetPath := path.Join(root, "lxc-start-unconfined") if _, err := os.Lstat(targetPath); err != nil && !os.IsNotExist(err) { return err } else if err == nil { if err := os.Remove(targetPath); err != nil { return err } } return os.Symlink(sourcePath, targetPath) } // TODO: This can be moved to the mountinfo reader in the mount pkg func rootIsShared() bool { if data, err := ioutil.ReadFile("/proc/self/mountinfo"); err == nil { for _, line := range strings.Split(string(data), "\n") { cols := strings.Split(line, " ") if len(cols) >= 6 && cols[4] == "/" { return strings.HasPrefix(cols[6], "shared") } } } // No idea, probably safe to assume so return true } func (d *driver) generateLXCConfig(c *execdriver.Command) (string, error) { root := path.Join(d.root, "containers", c.ID, "config.lxc") fo, err := os.Create(root) if err != nil { return "", err } defer fo.Close() if err := LxcTemplateCompiled.Execute(fo, struct { *execdriver.Command AppArmor bool }{ Command: c, AppArmor: d.apparmor, }); err != nil { return "", err } return root, nil } docker-0.9.1/execdriver/lxc/lxc_init_linux.go0000644000175000017500000000021712314376205017410 0ustar tagtag// +build amd64 package lxc import ( "syscall" ) func setHostname(hostname string) error { return syscall.Sethostname([]byte(hostname)) } docker-0.9.1/execdriver/lxc/lxc_template.go0000644000175000017500000000725012314376205017045 0ustar tagtagpackage lxc import ( 
"github.com/dotcloud/docker/execdriver" "strings" "text/template" ) const LxcTemplate = ` {{if .Network.Interface}} # network configuration lxc.network.type = veth lxc.network.link = {{.Network.Interface.Bridge}} lxc.network.name = eth0 {{else}} # network is disabled (-n=false) lxc.network.type = empty lxc.network.flags = up {{end}} lxc.network.mtu = {{.Network.Mtu}} # root filesystem {{$ROOTFS := .Rootfs}} lxc.rootfs = {{$ROOTFS}} # use a dedicated pts for the container (and limit the number of pseudo terminal # available) lxc.pts = 1024 # disable the main console lxc.console = none # no controlling tty at all lxc.tty = 1 {{if .Privileged}} lxc.cgroup.devices.allow = a {{else}} # no implicit access to devices lxc.cgroup.devices.deny = a # /dev/null and zero lxc.cgroup.devices.allow = c 1:3 rwm lxc.cgroup.devices.allow = c 1:5 rwm # consoles lxc.cgroup.devices.allow = c 5:1 rwm lxc.cgroup.devices.allow = c 5:0 rwm lxc.cgroup.devices.allow = c 4:0 rwm lxc.cgroup.devices.allow = c 4:1 rwm # /dev/urandom,/dev/random lxc.cgroup.devices.allow = c 1:9 rwm lxc.cgroup.devices.allow = c 1:8 rwm # /dev/pts/ - pts namespaces are "coming soon" lxc.cgroup.devices.allow = c 136:* rwm lxc.cgroup.devices.allow = c 5:2 rwm # tuntap lxc.cgroup.devices.allow = c 10:200 rwm # fuse #lxc.cgroup.devices.allow = c 10:229 rwm # rtc #lxc.cgroup.devices.allow = c 254:0 rwm {{end}} # standard mount point # Use mnt.putold as per https://bugs.launchpad.net/ubuntu/+source/lxc/+bug/986385 lxc.pivotdir = lxc_putold # NOTICE: These mounts must be applied within the namespace # WARNING: procfs is a known attack vector and should probably be disabled # if your userspace allows it. eg. see http://blog.zx2c4.com/749 lxc.mount.entry = proc {{escapeFstabSpaces $ROOTFS}}/proc proc nosuid,nodev,noexec 0 0 # WARNING: sysfs is a known attack vector and should probably be disabled # if your userspace allows it. eg. see http://bit.ly/T9CkqJ lxc.mount.entry = sysfs {{escapeFstabSpaces $ROOTFS}}/sys sysfs nosuid,nodev,noexec 0 0 {{if .Tty}} lxc.mount.entry = {{.Console}} {{escapeFstabSpaces $ROOTFS}}/dev/console none bind,rw 0 0 {{end}} lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts newinstance,ptmxmode=0666,nosuid,noexec 0 0 lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs size=65536k,nosuid,nodev,noexec 0 0 {{if .Privileged}} {{if .AppArmor}} lxc.aa_profile = unconfined {{else}} #lxc.aa_profile = unconfined {{end}} {{end}} # limits {{if .Resources}} {{if .Resources.Memory}} lxc.cgroup.memory.limit_in_bytes = {{.Resources.Memory}} lxc.cgroup.memory.soft_limit_in_bytes = {{.Resources.Memory}} {{with $memSwap := getMemorySwap .Resources}} lxc.cgroup.memory.memsw.limit_in_bytes = {{$memSwap}} {{end}} {{end}} {{if .Resources.CpuShares}} lxc.cgroup.cpu.shares = {{.Resources.CpuShares}} {{end}} {{end}} {{if .Config}} {{range $value := .Config}} {{$value}} {{end}} {{end}} ` var LxcTemplateCompiled *template.Template // Escape spaces in strings according to the fstab documentation, which is the // format for "lxc.mount.entry" lines in lxc.conf. See also "man 5 fstab". func escapeFstabSpaces(field string) string { return strings.Replace(field, " ", "\\040", -1) } func getMemorySwap(v *execdriver.Resources) int64 { // By default, MemorySwap is set to twice the size of RAM. // If you want to omit MemorySwap, set it to `-1'. 
if v.MemorySwap < 0 { return 0 } return v.Memory * 2 } func init() { var err error funcMap := template.FuncMap{ "getMemorySwap": getMemorySwap, "escapeFstabSpaces": escapeFstabSpaces, } LxcTemplateCompiled, err = template.New("lxc").Funcs(funcMap).Parse(LxcTemplate) if err != nil { panic(err) } } docker-0.9.1/execdriver/lxc/lxc_template_unit_test.go0000644000175000017500000000535012314376205021142 0ustar tagtagpackage lxc import ( "bufio" "fmt" "github.com/dotcloud/docker/execdriver" "io/ioutil" "math/rand" "os" "path" "strings" "testing" "time" ) func TestLXCConfig(t *testing.T) { root, err := ioutil.TempDir("", "TestLXCConfig") if err != nil { t.Fatal(err) } defer os.RemoveAll(root) os.MkdirAll(path.Join(root, "containers", "1"), 0777) // Memory is allocated randomly for testing rand.Seed(time.Now().UTC().UnixNano()) var ( memMin = 33554432 memMax = 536870912 mem = memMin + rand.Intn(memMax-memMin) cpuMin = 100 cpuMax = 10000 cpu = cpuMin + rand.Intn(cpuMax-cpuMin) ) driver, err := NewDriver(root, false) if err != nil { t.Fatal(err) } command := &execdriver.Command{ ID: "1", Resources: &execdriver.Resources{ Memory: int64(mem), CpuShares: int64(cpu), }, Network: &execdriver.Network{ Mtu: 1500, Interface: nil, }, } p, err := driver.generateLXCConfig(command) if err != nil { t.Fatal(err) } grepFile(t, p, fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem)) grepFile(t, p, fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2)) } func TestCustomLxcConfig(t *testing.T) { root, err := ioutil.TempDir("", "TestCustomLxcConfig") if err != nil { t.Fatal(err) } defer os.RemoveAll(root) os.MkdirAll(path.Join(root, "containers", "1"), 0777) driver, err := NewDriver(root, false) if err != nil { t.Fatal(err) } command := &execdriver.Command{ ID: "1", Privileged: false, Config: []string{ "lxc.utsname = docker", "lxc.cgroup.cpuset.cpus = 0,1", }, Network: &execdriver.Network{ Mtu: 1500, Interface: nil, }, } p, err := driver.generateLXCConfig(command) if err != nil { t.Fatal(err) } grepFile(t, p, "lxc.utsname = docker") grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1") } func grepFile(t *testing.T, path string, pattern string) { f, err := os.Open(path) if err != nil { t.Fatal(err) } defer f.Close() r := bufio.NewReader(f) var ( line string ) err = nil for err == nil { line, err = r.ReadString('\n') if strings.Contains(line, pattern) == true { return } } t.Fatalf("grepFile: pattern \"%s\" not found in \"%s\"", pattern, path) } func TestEscapeFstabSpaces(t *testing.T) { var testInputs = map[string]string{ " ": "\\040", "": "", "/double space": "/double\\040\\040space", "/some long test string": "/some\\040long\\040test\\040string", "/var/lib/docker": "/var/lib/docker", " leading": "\\040leading", "trailing ": "trailing\\040", } for in, exp := range testInputs { if out := escapeFstabSpaces(in); exp != out { t.Logf("Expected %s got %s", exp, out) t.Fail() } } } docker-0.9.1/execdriver/lxc/lxc_init_unsupported.go0000644000175000017500000000016412314376205020642 0ustar tagtag// +build !linux !amd64 package lxc func setHostname(hostname string) error { panic("Not supported on darwin") } docker-0.9.1/execdriver/lxc/info.go0000644000175000017500000000145212314376205015315 0ustar tagtagpackage lxc import ( "bufio" "errors" "strconv" "strings" ) var ( ErrCannotParse = errors.New("cannot parse raw input") ) type lxcInfo struct { Running bool Pid int } func parseLxcInfo(raw string) (*lxcInfo, error) { if raw == "" { return nil, ErrCannotParse } var ( err error s = bufio.NewScanner(strings.NewReader(raw)) 
info = &lxcInfo{} ) for s.Scan() { text := s.Text() if s.Err() != nil { return nil, s.Err() } parts := strings.Split(text, ":") if len(parts) < 2 { continue } switch strings.ToLower(strings.TrimSpace(parts[0])) { case "state": info.Running = strings.TrimSpace(parts[1]) == "RUNNING" case "pid": info.Pid, err = strconv.Atoi(strings.TrimSpace(parts[1])) if err != nil { return nil, err } } } return info, nil } docker-0.9.1/FIXME0000644000175000017500000000173012314376205011677 0ustar tagtag ## FIXME This file is a loose collection of things to improve in the codebase, for the internal use of the maintainers. They are not big enough to be in the roadmap, not user-facing enough to be github issues, and not important enough to be discussed in the mailing list. They are just like FIXME comments in the source code, except we're not sure where in the source to put them - so we put them here :) * Run linter on codebase * Unify build commands and regular commands * Move source code into src/ subdir for clarity * docker build: on non-existent local path for ADD, don't show full absolute path on the host * use size header for progress bar in pull * Clean up context upload in build!!! * Parallel pull * Upgrade dockerd without stopping containers * Simple command to remove all untagged images (`docker rmi $(docker images | awk '/^/ { print $3 }')`) * Simple command to clean up containers for disk space * Clean up the ProgressReader api, it's a PITA to use docker-0.9.1/container_unit_test.go0000644000175000017500000000552312314376205015517 0ustar tagtagpackage docker import ( "github.com/dotcloud/docker/nat" "testing" ) func TestParseNetworkOptsPrivateOnly(t *testing.T) { ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100::80"}) if err != nil { t.Fatal(err) } if len(ports) != 1 { t.Logf("Expected 1 got %d", len(ports)) t.FailNow() } if len(bindings) != 1 { t.Logf("Expected 1 got %d", len(bindings)) t.FailNow() } for k := range ports { if k.Proto() != "tcp" { t.Logf("Expected tcp got %s", k.Proto()) t.Fail() } if k.Port() != "80" { t.Logf("Expected 80 got %s", k.Port()) t.Fail() } b, exists := bindings[k] if !exists { t.Log("Binding does not exist") t.FailNow() } if len(b) != 1 { t.Logf("Expected 1 got %d", len(b)) t.FailNow() } s := b[0] if s.HostPort != "" { t.Logf("Expected \"\" got %s", s.HostPort) t.Fail() } if s.HostIp != "192.168.1.100" { t.Fail() } } } func TestParseNetworkOptsPublic(t *testing.T) { ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100:8080:80"}) if err != nil { t.Fatal(err) } if len(ports) != 1 { t.Logf("Expected 1 got %d", len(ports)) t.FailNow() } if len(bindings) != 1 { t.Logf("Expected 1 got %d", len(bindings)) t.FailNow() } for k := range ports { if k.Proto() != "tcp" { t.Logf("Expected tcp got %s", k.Proto()) t.Fail() } if k.Port() != "80" { t.Logf("Expected 80 got %s", k.Port()) t.Fail() } b, exists := bindings[k] if !exists { t.Log("Binding does not exist") t.FailNow() } if len(b) != 1 { t.Logf("Expected 1 got %d", len(b)) t.FailNow() } s := b[0] if s.HostPort != "8080" { t.Logf("Expected 8080 got %s", s.HostPort) t.Fail() } if s.HostIp != "192.168.1.100" { t.Fail() } } } func TestParseNetworkOptsUdp(t *testing.T) { ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100::6000/udp"}) if err != nil { t.Fatal(err) } if len(ports) != 1 { t.Logf("Expected 1 got %d", len(ports)) t.FailNow() } if len(bindings) != 1 { t.Logf("Expected 1 got %d", len(bindings)) t.FailNow() } for k := range ports { if k.Proto() != "udp" { t.Logf("Expected udp 
got %s", k.Proto()) t.Fail() } if k.Port() != "6000" { t.Logf("Expected 6000 got %s", k.Port()) t.Fail() } b, exists := bindings[k] if !exists { t.Log("Binding does not exist") t.FailNow() } if len(b) != 1 { t.Logf("Expected 1 got %d", len(b)) t.FailNow() } s := b[0] if s.HostPort != "" { t.Logf("Expected \"\" got %s", s.HostPort) t.Fail() } if s.HostIp != "192.168.1.100" { t.Fail() } } } func TestGetFullName(t *testing.T) { name, err := getFullName("testing") if err != nil { t.Fatal(err) } if name != "/testing" { t.Fatalf("Expected /testing got %s", name) } if _, err := getFullName(""); err == nil { t.Fatal("Error should not be nil") } } docker-0.9.1/registry/0000755000175000017500000000000012314376205012753 5ustar tagtagdocker-0.9.1/registry/registry.go0000644000175000017500000005365712314376205015172 0ustar tagtagpackage registry import ( "bytes" "crypto/sha256" "encoding/json" "errors" "fmt" "github.com/dotcloud/docker/auth" "github.com/dotcloud/docker/utils" "io" "io/ioutil" "net" "net/http" "net/http/cookiejar" "net/url" "regexp" "strconv" "strings" "time" ) var ( ErrAlreadyExists = errors.New("Image already exists") ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") errLoginRequired = errors.New("Authentication is required.") ) func pingRegistryEndpoint(endpoint string) (bool, error) { if endpoint == auth.IndexServerAddress() { // Skip the check, we now this one is valid // (and we never want to fallback to http in case of error) return false, nil } httpDial := func(proto string, addr string) (net.Conn, error) { // Set the connect timeout to 5 seconds conn, err := net.DialTimeout(proto, addr, time.Duration(5)*time.Second) if err != nil { return nil, err } // Set the recv timeout to 10 seconds conn.SetDeadline(time.Now().Add(time.Duration(10) * time.Second)) return conn, nil } httpTransport := &http.Transport{Dial: httpDial} client := &http.Client{Transport: httpTransport} resp, err := client.Get(endpoint + "_ping") if err != nil { return false, err } defer resp.Body.Close() if resp.Header.Get("X-Docker-Registry-Version") == "" { return false, errors.New("This does not look like a Registry server (\"X-Docker-Registry-Version\" header not found in the response)") } standalone := resp.Header.Get("X-Docker-Registry-Standalone") utils.Debugf("Registry standalone header: '%s'", standalone) // If the header is absent, we assume true for compatibility with earlier // versions of the registry if standalone == "" { return true, nil // Accepted values are "true" (case-insensitive) and "1". } else if strings.EqualFold(standalone, "true") || standalone == "1" { return true, nil } // Otherwise, not standalone return false, nil } func validateRepositoryName(repositoryName string) error { var ( namespace string name string ) nameParts := strings.SplitN(repositoryName, "/", 2) if len(nameParts) < 2 { namespace = "library" name = nameParts[0] } else { namespace = nameParts[0] name = nameParts[1] } validNamespace := regexp.MustCompile(`^([a-z0-9_]{4,30})$`) if !validNamespace.MatchString(namespace) { return fmt.Errorf("Invalid namespace name (%s), only [a-z0-9_] are allowed, size between 4 and 30", namespace) } validRepo := regexp.MustCompile(`^([a-z0-9-_.]+)$`) if !validRepo.MatchString(name) { return fmt.Errorf("Invalid repository name (%s), only [a-z0-9-_.] 
are allowed", name) } return nil } // Resolves a repository name to a hostname + name func ResolveRepositoryName(reposName string) (string, string, error) { if strings.Contains(reposName, "://") { // It cannot contain a scheme! return "", "", ErrInvalidRepositoryName } nameParts := strings.SplitN(reposName, "/", 2) if !strings.Contains(nameParts[0], ".") && !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost" { // This is a Docker Index repos (ex: samalba/hipache or ubuntu) err := validateRepositoryName(reposName) return auth.IndexServerAddress(), reposName, err } if len(nameParts) < 2 { // There is a dot in repos name (and no registry address) // Is it a Registry address without repos name? return "", "", ErrInvalidRepositoryName } hostname := nameParts[0] reposName = nameParts[1] if strings.Contains(hostname, "index.docker.io") { return "", "", fmt.Errorf("Invalid repository name, try \"%s\" instead", reposName) } if err := validateRepositoryName(reposName); err != nil { return "", "", err } return hostname, reposName, nil } // this method expands the registry name as used in the prefix of a repo // to a full url. if it already is a url, there will be no change. // The registry is pinged to test if it http or https func ExpandAndVerifyRegistryUrl(hostname string) (string, error) { if strings.HasPrefix(hostname, "http:") || strings.HasPrefix(hostname, "https:") { // if there is no slash after https:// (8 characters) then we have no path in the url if strings.LastIndex(hostname, "/") < 9 { // there is no path given. Expand with default path hostname = hostname + "/v1/" } if _, err := pingRegistryEndpoint(hostname); err != nil { return "", errors.New("Invalid Registry endpoint: " + err.Error()) } return hostname, nil } endpoint := fmt.Sprintf("https://%s/v1/", hostname) if _, err := pingRegistryEndpoint(endpoint); err != nil { utils.Debugf("Registry %s does not work (%s), falling back to http", endpoint, err) endpoint = fmt.Sprintf("http://%s/v1/", hostname) if _, err = pingRegistryEndpoint(endpoint); err != nil { //TODO: triggering highland build can be done there without "failing" return "", errors.New("Invalid Registry endpoint: " + err.Error()) } } return endpoint, nil } func doWithCookies(c *http.Client, req *http.Request) (*http.Response, error) { for _, cookie := range c.Jar.Cookies(req.URL) { req.AddCookie(cookie) } res, err := c.Do(req) if err != nil { return nil, err } if len(res.Cookies()) > 0 { c.Jar.SetCookies(req.URL, res.Cookies()) } return res, err } func setTokenAuth(req *http.Request, token []string) { if req.Header.Get("Authorization") == "" { // Don't override req.Header.Set("Authorization", "Token "+strings.Join(token, ",")) } } // Retrieve the history of a given image from the Registry. 
// Return a list of the parent's json (requested image included) func (r *Registry) GetRemoteHistory(imgID, registry string, token []string) ([]string, error) { req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/ancestry", nil) if err != nil { return nil, err } setTokenAuth(req, token) res, err := doWithCookies(r.client, req) if err != nil { return nil, err } defer res.Body.Close() if res.StatusCode != 200 { if res.StatusCode == 401 { return nil, errLoginRequired } return nil, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) } jsonString, err := ioutil.ReadAll(res.Body) if err != nil { return nil, fmt.Errorf("Error while reading the http response: %s", err) } utils.Debugf("Ancestry: %s", jsonString) history := new([]string) if err := json.Unmarshal(jsonString, history); err != nil { return nil, err } return *history, nil } // Check if an image exists in the Registry // TODO: This method should return the errors instead of masking them and returning false func (r *Registry) LookupRemoteImage(imgID, registry string, token []string) bool { req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil) if err != nil { utils.Errorf("Error in LookupRemoteImage %s", err) return false } setTokenAuth(req, token) res, err := doWithCookies(r.client, req) if err != nil { utils.Errorf("Error in LookupRemoteImage %s", err) return false } res.Body.Close() return res.StatusCode == 200 } // Retrieve an image from the Registry. func (r *Registry) GetRemoteImageJSON(imgID, registry string, token []string) ([]byte, int, error) { // Get the JSON req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil) if err != nil { return nil, -1, fmt.Errorf("Failed to download json: %s", err) } setTokenAuth(req, token) res, err := doWithCookies(r.client, req) if err != nil { return nil, -1, fmt.Errorf("Failed to download json: %s", err) } defer res.Body.Close() if res.StatusCode != 200 { return nil, -1, utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) } imageSize, err := strconv.Atoi(res.Header.Get("X-Docker-Size")) if err != nil { return nil, -1, err } jsonString, err := ioutil.ReadAll(res.Body) if err != nil { return nil, -1, fmt.Errorf("Failed to parse downloaded json: %s (%s)", err, jsonString) } return jsonString, imageSize, nil } func (r *Registry) GetRemoteImageLayer(imgID, registry string, token []string) (io.ReadCloser, error) { req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/layer", nil) if err != nil { return nil, fmt.Errorf("Error while getting from the server: %s\n", err) } setTokenAuth(req, token) res, err := doWithCookies(r.client, req) if err != nil { return nil, err } if res.StatusCode != 200 { res.Body.Close() return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", res.StatusCode, imgID) } return res.Body, nil } func (r *Registry) GetRemoteTags(registries []string, repository string, token []string) (map[string]string, error) { if strings.Count(repository, "/") == 0 { // This will be removed once the Registry supports auto-resolution on // the "library" namespace repository = "library/" + repository } for _, host := range registries { endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository) req, err := r.reqFactory.NewRequest("GET", endpoint, nil) if err != nil { return nil, err } setTokenAuth(req, token) res, err := doWithCookies(r.client, req) if err != nil { return nil, err } 
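// Endpoints are tried in order: any status other than 200 or 404 falls
// through to the next registry, a 404 means the repository is unknown, and a
// 200 response is decoded as a map of tag name -> image ID.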
utils.Debugf("Got status code %d from %s", res.StatusCode, endpoint) defer res.Body.Close() if res.StatusCode != 200 && res.StatusCode != 404 { continue } else if res.StatusCode == 404 { return nil, fmt.Errorf("Repository not found") } result := make(map[string]string) rawJSON, err := ioutil.ReadAll(res.Body) if err != nil { return nil, err } if err := json.Unmarshal(rawJSON, &result); err != nil { return nil, err } return result, nil } return nil, fmt.Errorf("Could not reach any registry endpoint") } func (r *Registry) GetRepositoryData(remote string) (*RepositoryData, error) { indexEp := r.indexEndpoint repositoryTarget := fmt.Sprintf("%srepositories/%s/images", indexEp, remote) utils.Debugf("[registry] Calling GET %s", repositoryTarget) req, err := r.reqFactory.NewRequest("GET", repositoryTarget, nil) if err != nil { return nil, err } if r.authConfig != nil && len(r.authConfig.Username) > 0 { req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password) } req.Header.Set("X-Docker-Token", "true") res, err := r.client.Do(req) if err != nil { return nil, err } defer res.Body.Close() if res.StatusCode == 401 { return nil, errLoginRequired } // TODO: Right now we're ignoring checksums in the response body. // In the future, we need to use them to check image validity. if res.StatusCode != 200 { return nil, utils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) } var tokens []string if res.Header.Get("X-Docker-Token") != "" { tokens = res.Header["X-Docker-Token"] } var endpoints []string var urlScheme = indexEp[:strings.Index(indexEp, ":")] if res.Header.Get("X-Docker-Endpoints") != "" { // The Registry's URL scheme has to match the Index' for _, ep := range res.Header["X-Docker-Endpoints"] { endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", urlScheme, ep)) } } else { return nil, fmt.Errorf("Index response didn't contain any endpoints") } checksumsJSON, err := ioutil.ReadAll(res.Body) if err != nil { return nil, err } remoteChecksums := []*ImgData{} if err := json.Unmarshal(checksumsJSON, &remoteChecksums); err != nil { return nil, err } // Forge a better object from the retrieved data imgsData := make(map[string]*ImgData) for _, elem := range remoteChecksums { imgsData[elem.ID] = elem } return &RepositoryData{ ImgList: imgsData, Endpoints: endpoints, Tokens: tokens, }, nil } func (r *Registry) PushImageChecksumRegistry(imgData *ImgData, registry string, token []string) error { utils.Debugf("[registry] Calling PUT %s", registry+"images/"+imgData.ID+"/checksum") req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgData.ID+"/checksum", nil) if err != nil { return err } setTokenAuth(req, token) req.Header.Set("X-Docker-Checksum", imgData.Checksum) req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload) res, err := doWithCookies(r.client, req) if err != nil { return fmt.Errorf("Failed to upload metadata: %s", err) } defer res.Body.Close() if len(res.Cookies()) > 0 { r.client.Jar.SetCookies(req.URL, res.Cookies()) } if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err) } var jsonBody map[string]string if err := json.Unmarshal(errBody, &jsonBody); err != nil { errBody = []byte(err.Error()) } else if jsonBody["error"] == "Image already exists" { return ErrAlreadyExists } return fmt.Errorf("HTTP code %d while uploading metadata: %s", res.StatusCode, errBody) } return nil } // Push a local 
image to the registry func (r *Registry) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string, token []string) error { utils.Debugf("[registry] Calling PUT %s", registry+"images/"+imgData.ID+"/json") req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgData.ID+"/json", bytes.NewReader(jsonRaw)) if err != nil { return err } req.Header.Add("Content-type", "application/json") setTokenAuth(req, token) res, err := doWithCookies(r.client, req) if err != nil { return fmt.Errorf("Failed to upload metadata: %s", err) } defer res.Body.Close() if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) } var jsonBody map[string]string if err := json.Unmarshal(errBody, &jsonBody); err != nil { errBody = []byte(err.Error()) } else if jsonBody["error"] == "Image already exists" { return ErrAlreadyExists } return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %s", res.StatusCode, errBody), res) } return nil } func (r *Registry) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, token []string, jsonRaw []byte) (checksum string, checksumPayload string, err error) { utils.Debugf("[registry] Calling PUT %s", registry+"images/"+imgID+"/layer") h := sha256.New() checksumLayer := &utils.CheckSum{Reader: layer, Hash: h} tarsumLayer := &utils.TarSum{Reader: checksumLayer} req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgID+"/layer", tarsumLayer) if err != nil { return "", "", err } req.ContentLength = -1 req.TransferEncoding = []string{"chunked"} setTokenAuth(req, token) res, err := doWithCookies(r.client, req) if err != nil { return "", "", fmt.Errorf("Failed to upload layer: %s", err) } if rc, ok := layer.(io.Closer); ok { if err := rc.Close(); err != nil { return "", "", err } } defer res.Body.Close() if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { return "", "", utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) } return "", "", utils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %s", res.StatusCode, errBody), res) } checksumPayload = "sha256:" + checksumLayer.Sum() return tarsumLayer.Sum(jsonRaw), checksumPayload, nil } // push a tag on the registry. 
// Remote has the format '/ func (r *Registry) PushRegistryTag(remote, revision, tag, registry string, token []string) error { // "jsonify" the string revision = "\"" + revision + "\"" path := fmt.Sprintf("repositories/%s/tags/%s", remote, tag) req, err := r.reqFactory.NewRequest("PUT", registry+path, strings.NewReader(revision)) if err != nil { return err } req.Header.Add("Content-type", "application/json") setTokenAuth(req, token) req.ContentLength = int64(len(revision)) res, err := doWithCookies(r.client, req) if err != nil { return err } res.Body.Close() if res.StatusCode != 200 && res.StatusCode != 201 { return utils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote), res) } return nil } func (r *Registry) PushImageJSONIndex(remote string, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) { cleanImgList := []*ImgData{} indexEp := r.indexEndpoint if validate { for _, elem := range imgList { if elem.Checksum != "" { cleanImgList = append(cleanImgList, elem) } } } else { cleanImgList = imgList } imgListJSON, err := json.Marshal(cleanImgList) if err != nil { return nil, err } var suffix string if validate { suffix = "images" } u := fmt.Sprintf("%srepositories/%s/%s", indexEp, remote, suffix) utils.Debugf("[registry] PUT %s", u) utils.Debugf("Image list pushed to index:\n%s", imgListJSON) req, err := r.reqFactory.NewRequest("PUT", u, bytes.NewReader(imgListJSON)) if err != nil { return nil, err } req.Header.Add("Content-type", "application/json") req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password) req.ContentLength = int64(len(imgListJSON)) req.Header.Set("X-Docker-Token", "true") if validate { req.Header["X-Docker-Endpoints"] = regs } res, err := r.client.Do(req) if err != nil { return nil, err } defer res.Body.Close() // Redirect if necessary for res.StatusCode >= 300 && res.StatusCode < 400 { utils.Debugf("Redirected to %s", res.Header.Get("Location")) req, err = r.reqFactory.NewRequest("PUT", res.Header.Get("Location"), bytes.NewReader(imgListJSON)) if err != nil { return nil, err } req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password) req.ContentLength = int64(len(imgListJSON)) req.Header.Set("X-Docker-Token", "true") if validate { req.Header["X-Docker-Endpoints"] = regs } res, err = r.client.Do(req) if err != nil { return nil, err } defer res.Body.Close() } var tokens, endpoints []string var urlScheme = indexEp[:strings.Index(indexEp, ":")] if !validate { if res.StatusCode != 200 && res.StatusCode != 201 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { return nil, err } return nil, utils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %s", res.StatusCode, remote, errBody), res) } if res.Header.Get("X-Docker-Token") != "" { tokens = res.Header["X-Docker-Token"] utils.Debugf("Auth token: %v", tokens) } else { return nil, fmt.Errorf("Index response didn't contain an access token") } if res.Header.Get("X-Docker-Endpoints") != "" { // The Registry's URL scheme has to match the Index' for _, ep := range res.Header["X-Docker-Endpoints"] { endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", urlScheme, ep)) } } else { return nil, fmt.Errorf("Index response didn't contain any endpoints") } } if validate { if res.StatusCode != 204 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { return nil, err } return nil, utils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %s", res.StatusCode, remote, 
errBody), res) } } return &RepositoryData{ Tokens: tokens, Endpoints: endpoints, }, nil } func (r *Registry) SearchRepositories(term string) (*SearchResults, error) { utils.Debugf("Index server: %s", r.indexEndpoint) u := auth.IndexServerAddress() + "search?q=" + url.QueryEscape(term) req, err := r.reqFactory.NewRequest("GET", u, nil) if err != nil { return nil, err } if r.authConfig != nil && len(r.authConfig.Username) > 0 { req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password) } req.Header.Set("X-Docker-Token", "true") res, err := r.client.Do(req) if err != nil { return nil, err } defer res.Body.Close() if res.StatusCode != 200 { return nil, utils.NewHTTPRequestError(fmt.Sprintf("Unexepected status code %d", res.StatusCode), res) } rawData, err := ioutil.ReadAll(res.Body) if err != nil { return nil, err } result := new(SearchResults) err = json.Unmarshal(rawData, result) return result, err } func (r *Registry) GetAuthConfig(withPasswd bool) *auth.AuthConfig { password := "" if withPasswd { password = r.authConfig.Password } return &auth.AuthConfig{ Username: r.authConfig.Username, Password: password, Email: r.authConfig.Email, } } type SearchResult struct { StarCount int `json:"star_count"` IsOfficial bool `json:"is_official"` Name string `json:"name"` IsTrusted bool `json:"is_trusted"` Description string `json:"description"` } type SearchResults struct { Query string `json:"query"` NumResults int `json:"num_results"` Results []SearchResult `json:"results"` } type RepositoryData struct { ImgList map[string]*ImgData Endpoints []string Tokens []string } type ImgData struct { ID string `json:"id"` Checksum string `json:"checksum,omitempty"` ChecksumPayload string `json:"-"` Tag string `json:",omitempty"` } type Registry struct { client *http.Client authConfig *auth.AuthConfig reqFactory *utils.HTTPRequestFactory indexEndpoint string } func NewRegistry(authConfig *auth.AuthConfig, factory *utils.HTTPRequestFactory, indexEndpoint string) (r *Registry, err error) { httpTransport := &http.Transport{ DisableKeepAlives: true, Proxy: http.ProxyFromEnvironment, } r = &Registry{ authConfig: authConfig, client: &http.Client{ Transport: httpTransport, }, indexEndpoint: indexEndpoint, } r.client.Jar, err = cookiejar.New(nil) if err != nil { return nil, err } // If we're working with a standalone private registry over HTTPS, send Basic Auth headers // alongside our requests. if indexEndpoint != auth.IndexServerAddress() && strings.HasPrefix(indexEndpoint, "https://") { standalone, err := pingRegistryEndpoint(indexEndpoint) if err != nil { return nil, err } if standalone { utils.Debugf("Endpoint %s is eligible for private registry auth. 
Enabling decorator.", indexEndpoint) dec := utils.NewHTTPAuthDecorator(authConfig.Username, authConfig.Password) factory.AddDecorator(dec) } } r.reqFactory = factory return r, nil } docker-0.9.1/registry/registry_mock_test.go0000644000175000017500000002632012314376205017225 0ustar tagtagpackage registry import ( "encoding/json" "fmt" "github.com/dotcloud/docker/utils" "github.com/gorilla/mux" "io" "io/ioutil" "net/http" "net/http/httptest" "net/url" "strconv" "strings" "testing" "time" ) var ( testHttpServer *httptest.Server testLayers = map[string]map[string]string{ "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20": { "json": `{"id":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", "comment":"test base image","created":"2013-03-23T12:53:11.10432-07:00", "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, "PortSpecs":null,"Tty":false,"OpenStdin":false,"StdinOnce":false, "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, "VolumesFrom":"","Entrypoint":null},"Size":424242}`, "checksum_simple": "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", "checksum_tarsum": "tarsum+sha256:4409a0685741ca86d38df878ed6f8cbba4c99de5dc73cd71aef04be3bb70be7c", "ancestry": `["77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, "layer": string([]byte{ 0x1f, 0x8b, 0x08, 0x08, 0x0e, 0xb0, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd2, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0xed, 0x38, 0x4e, 0xce, 0x13, 0x44, 0x2b, 0x66, 0x62, 0x24, 0x8e, 0x4f, 0xa0, 0x15, 0x63, 0xb6, 0x20, 0x21, 0xfc, 0x96, 0xbf, 0x78, 0xb0, 0xf5, 0x1d, 0x16, 0x98, 0x8e, 0x88, 0x8a, 0x2a, 0xbe, 0x33, 0xef, 0x49, 0x31, 0xed, 0x79, 0x40, 0x8e, 0x5c, 0x44, 0x85, 0x88, 0x33, 0x12, 0x73, 0x2c, 0x02, 0xa8, 0xf0, 0x05, 0xf7, 0x66, 0xf5, 0xd6, 0x57, 0x69, 0xd7, 0x7a, 0x19, 0xcd, 0xf5, 0xb1, 0x6d, 0x1b, 0x1f, 0xf9, 0xba, 0xe3, 0x93, 0x3f, 0x22, 0x2c, 0xb6, 0x36, 0x0b, 0xf6, 0xb0, 0xa9, 0xfd, 0xe7, 0x94, 0x46, 0xfd, 0xeb, 0xd1, 0x7f, 0x2c, 0xc4, 0xd2, 0xfb, 0x97, 0xfe, 0x02, 0x80, 0xe4, 0xfd, 0x4f, 0x77, 0xae, 0x6d, 0x3d, 0x81, 0x73, 0xce, 0xb9, 0x7f, 0xf3, 0x04, 0x41, 0xc1, 0xab, 0xc6, 0x00, 0x0a, 0x00, 0x00, }), }, "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d": { "json": `{"id":"42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", "parent":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", "comment":"test base image","created":"2013-03-23T12:55:11.10432-07:00", "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, "PortSpecs":null,"Tty":false,"OpenStdin":false,"StdinOnce":false, "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, "VolumesFrom":"","Entrypoint":null},"Size":424242}`, "checksum_simple": "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", "checksum_tarsum": "tarsum+sha256:68fdb56fb364f074eec2c9b3f85ca175329c4dcabc4a6a452b7272aa613a07a2", "ancestry": `["42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, "layer": string([]byte{ 0x1f, 0x8b, 0x08, 0x08, 0xbd, 0xb3, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd1, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 
0x9d, 0x38, 0x8e, 0xcf, 0x53, 0x51, 0xaa, 0x56, 0xea, 0x44, 0x82, 0xc4, 0xf1, 0x09, 0xb4, 0xea, 0x98, 0x2d, 0x48, 0x08, 0xbf, 0xe5, 0x2f, 0x1e, 0xfc, 0xf5, 0xdd, 0x00, 0xdd, 0x11, 0x91, 0x8a, 0xe0, 0x27, 0xd3, 0x9e, 0x14, 0xe2, 0x9e, 0x07, 0xf4, 0xc1, 0x2b, 0x0b, 0xfb, 0xa4, 0x82, 0xe4, 0x3d, 0x93, 0x02, 0x0a, 0x7c, 0xc1, 0x23, 0x97, 0xf1, 0x5e, 0x5f, 0xc9, 0xcb, 0x38, 0xb5, 0xee, 0xea, 0xd9, 0x3c, 0xb7, 0x4b, 0xbe, 0x7b, 0x9c, 0xf9, 0x23, 0xdc, 0x50, 0x6e, 0xb9, 0xb8, 0xf2, 0x2c, 0x5d, 0xf7, 0x4f, 0x31, 0xb6, 0xf6, 0x4f, 0xc7, 0xfe, 0x41, 0x55, 0x63, 0xdd, 0x9f, 0x89, 0x09, 0x90, 0x6c, 0xff, 0xee, 0xae, 0xcb, 0xba, 0x4d, 0x17, 0x30, 0xc6, 0x18, 0xf3, 0x67, 0x5e, 0xc1, 0xed, 0x21, 0x5d, 0x00, 0x0a, 0x00, 0x00, }), }, } testRepositories = map[string]map[string]string{ "foo42/bar": { "latest": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", }, } ) func init() { r := mux.NewRouter() r.HandleFunc("/v1/_ping", handlerGetPing).Methods("GET") r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|ancestry}", handlerGetImage).Methods("GET") r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|checksum}", handlerPutImage).Methods("PUT") r.HandleFunc("/v1/repositories/{repository:.+}/tags", handlerGetDeleteTags).Methods("GET", "DELETE") r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerGetTag).Methods("GET") r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerPutTag).Methods("PUT") r.HandleFunc("/v1/users{null:.*}", handlerUsers).Methods("GET", "POST", "PUT") r.HandleFunc("/v1/repositories/{repository:.+}{action:/images|/}", handlerImages).Methods("GET", "PUT", "DELETE") r.HandleFunc("/v1/repositories/{repository:.+}/auth", handlerAuth).Methods("PUT") r.HandleFunc("/v1/search", handlerSearch).Methods("GET") testHttpServer = httptest.NewServer(handlerAccessLog(r)) } func handlerAccessLog(handler http.Handler) http.Handler { logHandler := func(w http.ResponseWriter, r *http.Request) { utils.Debugf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL) handler.ServeHTTP(w, r) } return http.HandlerFunc(logHandler) } func makeURL(req string) string { return testHttpServer.URL + req } func writeHeaders(w http.ResponseWriter) { h := w.Header() h.Add("Server", "docker-tests/mock") h.Add("Expires", "-1") h.Add("Content-Type", "application/json") h.Add("Pragma", "no-cache") h.Add("Cache-Control", "no-cache") h.Add("X-Docker-Registry-Version", "0.0.0") h.Add("X-Docker-Registry-Config", "mock") } func writeResponse(w http.ResponseWriter, message interface{}, code int) { writeHeaders(w) w.WriteHeader(code) body, err := json.Marshal(message) if err != nil { io.WriteString(w, err.Error()) return } w.Write(body) } func readJSON(r *http.Request, dest interface{}) error { body, err := ioutil.ReadAll(r.Body) if err != nil { return err } return json.Unmarshal(body, dest) } func apiError(w http.ResponseWriter, message string, code int) { body := map[string]string{ "error": message, } writeResponse(w, body, code) } func assertEqual(t *testing.T, a interface{}, b interface{}, message string) { if a == b { return } if len(message) == 0 { message = fmt.Sprintf("%v != %v", a, b) } t.Fatal(message) } func requiresAuth(w http.ResponseWriter, r *http.Request) bool { writeCookie := func() { value := fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano()) cookie := &http.Cookie{Name: "session", Value: value, MaxAge: 3600} http.SetCookie(w, cookie) //FIXME(sam): this should be sent only on Index routes value = fmt.Sprintf("FAKE-TOKEN-%d", time.Now().UnixNano()) 
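// The mock returns a fake token header alongside the session cookie to mimic
// the index/registry auth handshake exercised by the client code.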
w.Header().Add("X-Docker-Token", value) } if len(r.Cookies()) > 0 { writeCookie() return true } if len(r.Header.Get("Authorization")) > 0 { writeCookie() return true } w.Header().Add("WWW-Authenticate", "token") apiError(w, "Wrong auth", 401) return false } func handlerGetPing(w http.ResponseWriter, r *http.Request) { writeResponse(w, true, 200) } func handlerGetImage(w http.ResponseWriter, r *http.Request) { if !requiresAuth(w, r) { return } vars := mux.Vars(r) layer, exists := testLayers[vars["image_id"]] if !exists { http.NotFound(w, r) return } writeHeaders(w) layer_size := len(layer["layer"]) w.Header().Add("X-Docker-Size", strconv.Itoa(layer_size)) io.WriteString(w, layer[vars["action"]]) } func handlerPutImage(w http.ResponseWriter, r *http.Request) { if !requiresAuth(w, r) { return } vars := mux.Vars(r) image_id := vars["image_id"] action := vars["action"] layer, exists := testLayers[image_id] if !exists { if action != "json" { http.NotFound(w, r) return } layer = make(map[string]string) testLayers[image_id] = layer } if checksum := r.Header.Get("X-Docker-Checksum"); checksum != "" { if checksum != layer["checksum_simple"] && checksum != layer["checksum_tarsum"] { apiError(w, "Wrong checksum", 400) return } } body, err := ioutil.ReadAll(r.Body) if err != nil { apiError(w, fmt.Sprintf("Error: %s", err), 500) return } layer[action] = string(body) writeResponse(w, true, 200) } func handlerGetDeleteTags(w http.ResponseWriter, r *http.Request) { if !requiresAuth(w, r) { return } repositoryName := mux.Vars(r)["repository"] tags, exists := testRepositories[repositoryName] if !exists { apiError(w, "Repository not found", 404) } if r.Method == "DELETE" { delete(testRepositories, repositoryName) writeResponse(w, true, 200) return } writeResponse(w, tags, 200) } func handlerGetTag(w http.ResponseWriter, r *http.Request) { if !requiresAuth(w, r) { return } vars := mux.Vars(r) repositoryName := vars["repository"] tagName := vars["tag"] tags, exists := testRepositories[repositoryName] if !exists { apiError(w, "Repository not found", 404) } tag, exists := tags[tagName] if !exists { apiError(w, "Tag not found", 404) } writeResponse(w, tag, 200) } func handlerPutTag(w http.ResponseWriter, r *http.Request) { if !requiresAuth(w, r) { return } vars := mux.Vars(r) repositoryName := vars["repository"] tagName := vars["tag"] tags, exists := testRepositories[repositoryName] if !exists { tags := make(map[string]string) testRepositories[repositoryName] = tags } tagValue := "" readJSON(r, tagValue) tags[tagName] = tagValue writeResponse(w, true, 200) } func handlerUsers(w http.ResponseWriter, r *http.Request) { code := 200 if r.Method == "POST" { code = 201 } else if r.Method == "PUT" { code = 204 } writeResponse(w, "", code) } func handlerImages(w http.ResponseWriter, r *http.Request) { u, _ := url.Parse(testHttpServer.URL) w.Header().Add("X-Docker-Endpoints", u.Host) w.Header().Add("X-Docker-Token", fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano())) if r.Method == "PUT" { if strings.HasSuffix(r.URL.Path, "images") { writeResponse(w, "", 204) return } writeResponse(w, "", 200) return } if r.Method == "DELETE" { writeResponse(w, "", 204) return } images := []map[string]string{} for image_id, layer := range testLayers { image := make(map[string]string) image["id"] = image_id image["checksum"] = layer["checksum_tarsum"] image["Tag"] = "latest" images = append(images, image) } writeResponse(w, images, 200) } func handlerAuth(w http.ResponseWriter, r *http.Request) { writeResponse(w, "OK", 200) } func 
handlerSearch(w http.ResponseWriter, r *http.Request) { writeResponse(w, "{}", 200) } func TestPing(t *testing.T) { res, err := http.Get(makeURL("/v1/_ping")) if err != nil { t.Fatal(err) } assertEqual(t, res.StatusCode, 200, "") assertEqual(t, res.Header.Get("X-Docker-Registry-Config"), "mock", "This is not a Mocked Registry") } /* Uncomment this to test Mocked Registry locally with curl * WARNING: Don't push on the repos uncommented, it'll block the tests * func TestWait(t *testing.T) { log.Println("Test HTTP server ready and waiting:", testHttpServer.URL) c := make(chan int) <-c } //*/ docker-0.9.1/registry/MAINTAINERS0000644000175000017500000000020512314376205014445 0ustar tagtagSam Alba (@samalba) Joffrey Fuhrer (@shin-) Ken Cochrane (@kencochrane) docker-0.9.1/registry/registry_test.go0000644000175000017500000001355412314376205016221 0ustar tagtagpackage registry import ( "github.com/dotcloud/docker/auth" "github.com/dotcloud/docker/utils" "strings" "testing" ) var ( IMAGE_ID = "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d" TOKEN = []string{"fake-token"} REPO = "foo42/bar" ) func spawnTestRegistry(t *testing.T) *Registry { authConfig := &auth.AuthConfig{} r, err := NewRegistry(authConfig, utils.NewHTTPRequestFactory(), makeURL("/v1/")) if err != nil { t.Fatal(err) } return r } func TestPingRegistryEndpoint(t *testing.T) { standalone, err := pingRegistryEndpoint(makeURL("/v1/")) if err != nil { t.Fatal(err) } assertEqual(t, standalone, true, "Expected standalone to be true (default)") } func TestGetRemoteHistory(t *testing.T) { r := spawnTestRegistry(t) hist, err := r.GetRemoteHistory(IMAGE_ID, makeURL("/v1/"), TOKEN) if err != nil { t.Fatal(err) } assertEqual(t, len(hist), 2, "Expected 2 images in history") assertEqual(t, hist[0], IMAGE_ID, "Expected "+IMAGE_ID+"as first ancestry") assertEqual(t, hist[1], "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", "Unexpected second ancestry") } func TestLookupRemoteImage(t *testing.T) { r := spawnTestRegistry(t) found := r.LookupRemoteImage(IMAGE_ID, makeURL("/v1/"), TOKEN) assertEqual(t, found, true, "Expected remote lookup to succeed") found = r.LookupRemoteImage("abcdef", makeURL("/v1/"), TOKEN) assertEqual(t, found, false, "Expected remote lookup to fail") } func TestGetRemoteImageJSON(t *testing.T) { r := spawnTestRegistry(t) json, size, err := r.GetRemoteImageJSON(IMAGE_ID, makeURL("/v1/"), TOKEN) if err != nil { t.Fatal(err) } assertEqual(t, size, 154, "Expected size 154") if len(json) <= 0 { t.Fatal("Expected non-empty json") } _, _, err = r.GetRemoteImageJSON("abcdef", makeURL("/v1/"), TOKEN) if err == nil { t.Fatal("Expected image not found error") } } func TestGetRemoteImageLayer(t *testing.T) { r := spawnTestRegistry(t) data, err := r.GetRemoteImageLayer(IMAGE_ID, makeURL("/v1/"), TOKEN) if err != nil { t.Fatal(err) } if data == nil { t.Fatal("Expected non-nil data result") } _, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), TOKEN) if err == nil { t.Fatal("Expected image not found error") } } func TestGetRemoteTags(t *testing.T) { r := spawnTestRegistry(t) tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, REPO, TOKEN) if err != nil { t.Fatal(err) } assertEqual(t, len(tags), 1, "Expected one tag") assertEqual(t, tags["latest"], IMAGE_ID, "Expected tag latest to map to "+IMAGE_ID) _, err = r.GetRemoteTags([]string{makeURL("/v1/")}, "foo42/baz", TOKEN) if err == nil { t.Fatal("Expected error when fetching tags for bogus repo") } } func TestGetRepositoryData(t *testing.T) { r := 
spawnTestRegistry(t) data, err := r.GetRepositoryData("foo42/bar") if err != nil { t.Fatal(err) } assertEqual(t, len(data.ImgList), 2, "Expected 2 images in ImgList") assertEqual(t, len(data.Endpoints), 1, "Expected one endpoint in Endpoints") } func TestPushImageJSONRegistry(t *testing.T) { r := spawnTestRegistry(t) imgData := &ImgData{ ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", } err := r.PushImageJSONRegistry(imgData, []byte{0x42, 0xdf, 0x0}, makeURL("/v1/"), TOKEN) if err != nil { t.Fatal(err) } } func TestPushImageLayerRegistry(t *testing.T) { r := spawnTestRegistry(t) layer := strings.NewReader("") _, _, err := r.PushImageLayerRegistry(IMAGE_ID, layer, makeURL("/v1/"), TOKEN, []byte{}) if err != nil { t.Fatal(err) } } func TestResolveRepositoryName(t *testing.T) { _, _, err := ResolveRepositoryName("https://github.com/dotcloud/docker") assertEqual(t, err, ErrInvalidRepositoryName, "Expected error invalid repo name") ep, repo, err := ResolveRepositoryName("fooo/bar") if err != nil { t.Fatal(err) } assertEqual(t, ep, auth.IndexServerAddress(), "Expected endpoint to be index server address") assertEqual(t, repo, "fooo/bar", "Expected resolved repo to be foo/bar") u := makeURL("")[7:] ep, repo, err = ResolveRepositoryName(u + "/private/moonbase") if err != nil { t.Fatal(err) } assertEqual(t, ep, u, "Expected endpoint to be "+u) assertEqual(t, repo, "private/moonbase", "Expected endpoint to be private/moonbase") } func TestPushRegistryTag(t *testing.T) { r := spawnTestRegistry(t) err := r.PushRegistryTag("foo42/bar", IMAGE_ID, "stable", makeURL("/v1/"), TOKEN) if err != nil { t.Fatal(err) } } func TestPushImageJSONIndex(t *testing.T) { r := spawnTestRegistry(t) imgData := []*ImgData{ { ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", }, { ID: "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", Checksum: "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", }, } repoData, err := r.PushImageJSONIndex("foo42/bar", imgData, false, nil) if err != nil { t.Fatal(err) } if repoData == nil { t.Fatal("Expected RepositoryData object") } repoData, err = r.PushImageJSONIndex("foo42/bar", imgData, true, []string{r.indexEndpoint}) if err != nil { t.Fatal(err) } if repoData == nil { t.Fatal("Expected RepositoryData object") } } func TestSearchRepositories(t *testing.T) { r := spawnTestRegistry(t) results, err := r.SearchRepositories("supercalifragilisticepsialidocious") if err != nil { t.Fatal(err) } if results == nil { t.Fatal("Expected non-nil SearchResults object") } assertEqual(t, results.NumResults, 0, "Expected 0 search results") } func TestValidRepositoryName(t *testing.T) { if err := validateRepositoryName("docker/docker"); err != nil { t.Fatal(err) } if err := validateRepositoryName("docker/Docker"); err == nil { t.Log("Repository name should be invalid") t.Fail() } } docker-0.9.1/container.go0000644000175000017500000007252512314376205013427 0ustar tagtagpackage docker import ( "encoding/json" "errors" "fmt" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/execdriver" "github.com/dotcloud/docker/graphdriver" "github.com/dotcloud/docker/links" "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "io" "io/ioutil" "log" "os" "path" 
"strings" "sync" "syscall" "time" ) const defaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" var ( ErrNotATTY = errors.New("The PTY is not a file") ErrNoTTY = errors.New("No PTY found") ErrContainerStart = errors.New("The container failed to start. Unknown error") ErrContainerStartTimeout = errors.New("The container failed to start due to timed out.") ) type Container struct { sync.Mutex root string // Path to the "home" of the container, including metadata. basefs string // Path to the graphdriver mountpoint ID string Created time.Time Path string Args []string Config *runconfig.Config State State Image string NetworkSettings *NetworkSettings ResolvConfPath string HostnamePath string HostsPath string Name string Driver string ExecDriver string command *execdriver.Command stdout *utils.WriteBroadcaster stderr *utils.WriteBroadcaster stdin io.ReadCloser stdinPipe io.WriteCloser runtime *Runtime waitLock chan struct{} Volumes map[string]string // Store rw/ro in a separate structure to preserve reverse-compatibility on-disk. // Easier than migrating older container configs :) VolumesRW map[string]bool hostConfig *runconfig.HostConfig activeLinks map[string]*links.Link } // FIXME: move deprecated port stuff to nat to clean up the core. type PortMapping map[string]string // Deprecated type NetworkSettings struct { IPAddress string IPPrefixLen int Gateway string Bridge string PortMapping map[string]PortMapping // Deprecated Ports nat.PortMap } func (settings *NetworkSettings) PortMappingAPI() *engine.Table { var outs = engine.NewTable("", 0) for port, bindings := range settings.Ports { p, _ := nat.ParsePort(port.Port()) if len(bindings) == 0 { out := &engine.Env{} out.SetInt("PublicPort", p) out.Set("Type", port.Proto()) outs.Add(out) continue } for _, binding := range bindings { out := &engine.Env{} h, _ := nat.ParsePort(binding.HostPort) out.SetInt("PrivatePort", p) out.SetInt("PublicPort", h) out.Set("Type", port.Proto()) out.Set("IP", binding.HostIp) outs.Add(out) } } return outs } // Inject the io.Reader at the given path. Note: do not close the reader func (container *Container) Inject(file io.Reader, pth string) error { if err := container.Mount(); err != nil { return fmt.Errorf("inject: error mounting container %s: %s", container.ID, err) } defer container.Unmount() // Return error if path exists destPath := path.Join(container.basefs, pth) if _, err := os.Stat(destPath); err == nil { // Since err is nil, the path could be stat'd and it exists return fmt.Errorf("%s exists", pth) } else if !os.IsNotExist(err) { // Expect err might be that the file doesn't exist, so // if it's some other error, return that. 
return err } // Make sure the directory exists if err := os.MkdirAll(path.Join(container.basefs, path.Dir(pth)), 0755); err != nil { return err } dest, err := os.Create(destPath) if err != nil { return err } defer dest.Close() if _, err := io.Copy(dest, file); err != nil { return err } return nil } func (container *Container) When() time.Time { return container.Created } func (container *Container) FromDisk() error { data, err := ioutil.ReadFile(container.jsonPath()) if err != nil { return err } // Load container settings // udp broke compat of docker.PortMapping, but it's not used when loading a container, we can skip it if err := json.Unmarshal(data, container); err != nil && !strings.Contains(err.Error(), "docker.PortMapping") { return err } return container.readHostConfig() } func (container *Container) ToDisk() (err error) { data, err := json.Marshal(container) if err != nil { return } err = ioutil.WriteFile(container.jsonPath(), data, 0666) if err != nil { return } return container.writeHostConfig() } func (container *Container) readHostConfig() error { container.hostConfig = &runconfig.HostConfig{} // If the hostconfig file does not exist, do not read it. // (We still have to initialize container.hostConfig, // but that's OK, since we just did that above.) _, err := os.Stat(container.hostConfigPath()) if os.IsNotExist(err) { return nil } data, err := ioutil.ReadFile(container.hostConfigPath()) if err != nil { return err } return json.Unmarshal(data, container.hostConfig) } func (container *Container) writeHostConfig() (err error) { data, err := json.Marshal(container.hostConfig) if err != nil { return } return ioutil.WriteFile(container.hostConfigPath(), data, 0666) } func (container *Container) generateEnvConfig(env []string) error { data, err := json.Marshal(env) if err != nil { return err } p, err := container.EnvConfigPath() if err != nil { return err } ioutil.WriteFile(p, data, 0600) return nil } func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error { var cStdout, cStderr io.ReadCloser var nJobs int errors := make(chan error, 3) if stdin != nil && container.Config.OpenStdin { nJobs += 1 if cStdin, err := container.StdinPipe(); err != nil { errors <- err } else { go func() { utils.Debugf("attach: stdin: begin") defer utils.Debugf("attach: stdin: end") // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr if container.Config.StdinOnce && !container.Config.Tty { defer cStdin.Close() } else { defer func() { if cStdout != nil { cStdout.Close() } if cStderr != nil { cStderr.Close() } }() } if container.Config.Tty { _, err = utils.CopyEscapable(cStdin, stdin) } else { _, err = io.Copy(cStdin, stdin) } if err == io.ErrClosedPipe { err = nil } if err != nil { utils.Errorf("attach: stdin: %s", err) } errors <- err }() } } if stdout != nil { nJobs += 1 if p, err := container.StdoutPipe(); err != nil { errors <- err } else { cStdout = p go func() { utils.Debugf("attach: stdout: begin") defer utils.Debugf("attach: stdout: end") // If we are in StdinOnce mode, then close stdin if container.Config.StdinOnce && stdin != nil { defer stdin.Close() } if stdinCloser != nil { defer stdinCloser.Close() } _, err := io.Copy(stdout, cStdout) if err == io.ErrClosedPipe { err = nil } if err != nil { utils.Errorf("attach: stdout: %s", err) } errors <- err }() } } else { go func() { if stdinCloser != nil { defer stdinCloser.Close() } if cStdout, err := container.StdoutPipe(); err != nil { 
utils.Errorf("attach: stdout pipe: %s", err) } else { io.Copy(&utils.NopWriter{}, cStdout) } }() } if stderr != nil { nJobs += 1 if p, err := container.StderrPipe(); err != nil { errors <- err } else { cStderr = p go func() { utils.Debugf("attach: stderr: begin") defer utils.Debugf("attach: stderr: end") // If we are in StdinOnce mode, then close stdin if container.Config.StdinOnce && stdin != nil { defer stdin.Close() } if stdinCloser != nil { defer stdinCloser.Close() } _, err := io.Copy(stderr, cStderr) if err == io.ErrClosedPipe { err = nil } if err != nil { utils.Errorf("attach: stderr: %s", err) } errors <- err }() } } else { go func() { if stdinCloser != nil { defer stdinCloser.Close() } if cStderr, err := container.StderrPipe(); err != nil { utils.Errorf("attach: stdout pipe: %s", err) } else { io.Copy(&utils.NopWriter{}, cStderr) } }() } return utils.Go(func() error { defer func() { if cStdout != nil { cStdout.Close() } if cStderr != nil { cStderr.Close() } }() // FIXME: how to clean up the stdin goroutine without the unwanted side effect // of closing the passed stdin? Add an intermediary io.Pipe? for i := 0; i < nJobs; i += 1 { utils.Debugf("attach: waiting for job %d/%d", i+1, nJobs) if err := <-errors; err != nil { utils.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err) return err } utils.Debugf("attach: job %d completed successfully", i+1) } utils.Debugf("attach: all jobs completed successfully") return nil }) } func populateCommand(c *Container) { var ( en *execdriver.Network driverConfig []string ) en = &execdriver.Network{ Mtu: c.runtime.config.Mtu, Interface: nil, } if !c.Config.NetworkDisabled { network := c.NetworkSettings en.Interface = &execdriver.NetworkInterface{ Gateway: network.Gateway, Bridge: network.Bridge, IPAddress: network.IPAddress, IPPrefixLen: network.IPPrefixLen, } } if lxcConf := c.hostConfig.LxcConf; lxcConf != nil { for _, pair := range lxcConf { driverConfig = append(driverConfig, fmt.Sprintf("%s = %s", pair.Key, pair.Value)) } } resources := &execdriver.Resources{ Memory: c.Config.Memory, MemorySwap: c.Config.MemorySwap, CpuShares: c.Config.CpuShares, } c.command = &execdriver.Command{ ID: c.ID, Privileged: c.hostConfig.Privileged, Rootfs: c.RootfsPath(), InitPath: "/.dockerinit", Entrypoint: c.Path, Arguments: c.Args, WorkingDir: c.Config.WorkingDir, Network: en, Tty: c.Config.Tty, User: c.Config.User, Config: driverConfig, Resources: resources, } c.command.SysProcAttr = &syscall.SysProcAttr{Setsid: true} } func (container *Container) Start() (err error) { container.Lock() defer container.Unlock() if container.State.IsRunning() { return fmt.Errorf("The container %s is already running.", container.ID) } defer func() { if err != nil { container.cleanup() } }() if err := container.Mount(); err != nil { return err } if container.runtime.config.DisableNetwork { container.Config.NetworkDisabled = true container.buildHostnameAndHostsFiles("127.0.1.1") } else { if err := container.allocateNetwork(); err != nil { return err } container.buildHostnameAndHostsFiles(container.NetworkSettings.IPAddress) } // Make sure the config is compatible with the current kernel if container.Config.Memory > 0 && !container.runtime.sysInfo.MemoryLimit { log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n") container.Config.Memory = 0 } if container.Config.Memory > 0 && !container.runtime.sysInfo.SwapLimit { log.Printf("WARNING: Your kernel does not support swap limit capabilities. 
Limitation discarded.\n") container.Config.MemorySwap = -1 } if container.runtime.sysInfo.IPv4ForwardingDisabled { log.Printf("WARNING: IPv4 forwarding is disabled. Networking will not work") } if err := prepareVolumesForContainer(container); err != nil { return err } // Setup environment env := []string{ "HOME=/", "PATH=" + defaultPathEnv, "HOSTNAME=" + container.Config.Hostname, } if container.Config.Tty { env = append(env, "TERM=xterm") } // Init any links between the parent and children runtime := container.runtime children, err := runtime.Children(container.Name) if err != nil { return err } if len(children) > 0 { container.activeLinks = make(map[string]*links.Link, len(children)) // If we encounter an error make sure that we rollback any network // config and ip table changes rollback := func() { for _, link := range container.activeLinks { link.Disable() } container.activeLinks = nil } for linkAlias, child := range children { if !child.State.IsRunning() { return fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias) } link, err := links.NewLink( container.NetworkSettings.IPAddress, child.NetworkSettings.IPAddress, linkAlias, child.Config.Env, child.Config.ExposedPorts, runtime.eng) if err != nil { rollback() return err } container.activeLinks[link.Alias()] = link if err := link.Enable(); err != nil { rollback() return err } for _, envVar := range link.ToEnv() { env = append(env, envVar) } } } // because the env on the container can override certain default values // we need to replace the 'env' keys where they match and append anything // else. env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env) if err := container.generateEnvConfig(env); err != nil { return err } if container.Config.WorkingDir != "" { container.Config.WorkingDir = path.Clean(container.Config.WorkingDir) if err := os.MkdirAll(path.Join(container.basefs, container.Config.WorkingDir), 0755); err != nil { return nil } } envPath, err := container.EnvConfigPath() if err != nil { return err } if err := mountVolumesForContainer(container, envPath); err != nil { return err } populateCommand(container) container.command.Env = env // Setup logging of stdout and stderr to disk if err := container.runtime.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil { return err } if err := container.runtime.LogToDisk(container.stderr, container.logPath("json"), "stderr"); err != nil { return err } container.waitLock = make(chan struct{}) callbackLock := make(chan struct{}) callback := func(command *execdriver.Command) { container.State.SetRunning(command.Pid()) if command.Tty { // The callback is called after the process Start() // so we are in the parent process. In TTY mode, stdin/out/err is the PtySlace // which we close here. 
if c, ok := command.Stdout.(io.Closer); ok { c.Close() } } if err := container.ToDisk(); err != nil { utils.Debugf("%s", err) } close(callbackLock) } // We use a callback here instead of a goroutine and an chan for // syncronization purposes cErr := utils.Go(func() error { return container.monitor(callback) }) // Start should not return until the process is actually running select { case <-callbackLock: case err := <-cErr: return err } return nil } func (container *Container) Run() error { if err := container.Start(); err != nil { return err } container.Wait() return nil } func (container *Container) Output() (output []byte, err error) { pipe, err := container.StdoutPipe() if err != nil { return nil, err } defer pipe.Close() if err := container.Start(); err != nil { return nil, err } output, err = ioutil.ReadAll(pipe) container.Wait() return output, err } // Container.StdinPipe returns a WriteCloser which can be used to feed data // to the standard input of the container's active process. // Container.StdoutPipe and Container.StderrPipe each return a ReadCloser // which can be used to retrieve the standard output (and error) generated // by the container's active process. The output (and error) are actually // copied and delivered to all StdoutPipe and StderrPipe consumers, using // a kind of "broadcaster". func (container *Container) StdinPipe() (io.WriteCloser, error) { return container.stdinPipe, nil } func (container *Container) StdoutPipe() (io.ReadCloser, error) { reader, writer := io.Pipe() container.stdout.AddWriter(writer, "") return utils.NewBufReader(reader), nil } func (container *Container) StderrPipe() (io.ReadCloser, error) { reader, writer := io.Pipe() container.stderr.AddWriter(writer, "") return utils.NewBufReader(reader), nil } func (container *Container) buildHostnameAndHostsFiles(IP string) { container.HostnamePath = path.Join(container.root, "hostname") ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) hostsContent := []byte(` 127.0.0.1 localhost ::1 localhost ip6-localhost ip6-loopback fe00::0 ip6-localnet ff00::0 ip6-mcastprefix ff02::1 ip6-allnodes ff02::2 ip6-allrouters `) container.HostsPath = path.Join(container.root, "hosts") if container.Config.Domainname != "" { hostsContent = append([]byte(fmt.Sprintf("%s\t%s.%s %s\n", IP, container.Config.Hostname, container.Config.Domainname, container.Config.Hostname)), hostsContent...) } else if !container.Config.NetworkDisabled { hostsContent = append([]byte(fmt.Sprintf("%s\t%s\n", IP, container.Config.Hostname)), hostsContent...) 
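/* Illustrative sketch only (not part of this file): because of the "broadcaster" described
 * above, every call to StdoutPipe/StderrPipe gets its own copy of the stream, so several
 * consumers can read the same container output independently. Output() above is the
 * canonical single consumer; an additional, concurrent consumer would look roughly like:
 *
 *	stdout, err := container.StdoutPipe()
 *	if err != nil {
 *		return err
 *	}
 *	defer stdout.Close()
 *	go io.Copy(os.Stderr, stdout) // mirror the container's stdout elsewhere
 *	if err := container.Start(); err != nil {
 *		return err
 *	}
 *	container.Wait()
 //*/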
} ioutil.WriteFile(container.HostsPath, hostsContent, 0644) } func (container *Container) allocateNetwork() error { if container.Config.NetworkDisabled { return nil } var ( env *engine.Env err error eng = container.runtime.eng ) if container.State.IsGhost() { if container.runtime.config.DisableNetwork { env = &engine.Env{} } else { currentIP := container.NetworkSettings.IPAddress job := eng.Job("allocate_interface", container.ID) if currentIP != "" { job.Setenv("RequestIP", currentIP) } env, err = job.Stdout.AddEnv() if err != nil { return err } if err := job.Run(); err != nil { return err } } } else { job := eng.Job("allocate_interface", container.ID) env, err = job.Stdout.AddEnv() if err != nil { return err } if err := job.Run(); err != nil { return err } } if container.Config.PortSpecs != nil { utils.Debugf("Migrating port mappings for container: %s", strings.Join(container.Config.PortSpecs, ", ")) if err := migratePortMappings(container.Config, container.hostConfig); err != nil { return err } container.Config.PortSpecs = nil if err := container.writeHostConfig(); err != nil { return err } } var ( portSpecs = make(nat.PortSet) bindings = make(nat.PortMap) ) if !container.State.IsGhost() { if container.Config.ExposedPorts != nil { portSpecs = container.Config.ExposedPorts } if container.hostConfig.PortBindings != nil { bindings = container.hostConfig.PortBindings } } else { if container.NetworkSettings.Ports != nil { for port, binding := range container.NetworkSettings.Ports { portSpecs[port] = struct{}{} bindings[port] = binding } } } container.NetworkSettings.PortMapping = nil for port := range portSpecs { binding := bindings[port] if container.hostConfig.PublishAllPorts && len(binding) == 0 { binding = append(binding, nat.PortBinding{}) } for i := 0; i < len(binding); i++ { b := binding[i] portJob := eng.Job("allocate_port", container.ID) portJob.Setenv("HostIP", b.HostIp) portJob.Setenv("HostPort", b.HostPort) portJob.Setenv("Proto", port.Proto()) portJob.Setenv("ContainerPort", port.Port()) portEnv, err := portJob.Stdout.AddEnv() if err != nil { return err } if err := portJob.Run(); err != nil { eng.Job("release_interface", container.ID).Run() return err } b.HostIp = portEnv.Get("HostIP") b.HostPort = portEnv.Get("HostPort") binding[i] = b } bindings[port] = binding } container.writeHostConfig() container.NetworkSettings.Ports = bindings container.NetworkSettings.Bridge = env.Get("Bridge") container.NetworkSettings.IPAddress = env.Get("IP") container.NetworkSettings.IPPrefixLen = env.GetInt("IPPrefixLen") container.NetworkSettings.Gateway = env.Get("Gateway") return nil } func (container *Container) releaseNetwork() { if container.Config.NetworkDisabled { return } eng := container.runtime.eng eng.Job("release_interface", container.ID).Run() container.NetworkSettings = &NetworkSettings{} } func (container *Container) monitor(callback execdriver.StartCallback) error { var ( err error exitCode int ) pipes := execdriver.NewPipes(container.stdin, container.stdout, container.stderr, container.Config.OpenStdin) exitCode, err = container.runtime.Run(container, pipes, callback) if err != nil { utils.Errorf("Error running container: %s", err) } if container.runtime != nil && container.runtime.srv != nil && container.runtime.srv.IsRunning() { container.State.SetStopped(exitCode) // FIXME: there is a race condition here which causes this to fail during the unit tests. // If another goroutine was waiting for Wait() to return before removing the container's root // from the filesystem... 
At this point it may already have done so. // This is because State.setStopped() has already been called, and has caused Wait() // to return. // FIXME: why are we serializing running state to disk in the first place? //log.Printf("%s: Failed to dump configuration to the disk: %s", container.ID, err) if err := container.ToDisk(); err != nil { utils.Errorf("Error dumping container state to disk: %s\n", err) } } // Cleanup container.cleanup() // Re-create a brand new stdin pipe once the container exited if container.Config.OpenStdin { container.stdin, container.stdinPipe = io.Pipe() } if container.runtime != nil && container.runtime.srv != nil { container.runtime.srv.LogEvent("die", container.ID, container.runtime.repositories.ImageName(container.Image)) } close(container.waitLock) return err } func (container *Container) cleanup() { container.releaseNetwork() // Disable all active links if container.activeLinks != nil { for _, link := range container.activeLinks { link.Disable() } } if container.Config.OpenStdin { if err := container.stdin.Close(); err != nil { utils.Errorf("%s: Error close stdin: %s", container.ID, err) } } if err := container.stdout.CloseWriters(); err != nil { utils.Errorf("%s: Error close stdout: %s", container.ID, err) } if err := container.stderr.CloseWriters(); err != nil { utils.Errorf("%s: Error close stderr: %s", container.ID, err) } if container.command != nil && container.command.Terminal != nil { if err := container.command.Terminal.Close(); err != nil { utils.Errorf("%s: Error closing terminal: %s", container.ID, err) } } unmountVolumesForContainer(container) if err := container.Unmount(); err != nil { log.Printf("%v: Failed to umount filesystem: %v", container.ID, err) } } func (container *Container) kill(sig int) error { container.Lock() defer container.Unlock() if !container.State.IsRunning() { return nil } return container.runtime.Kill(container, sig) } func (container *Container) Kill() error { if !container.State.IsRunning() { return nil } // 1. Send SIGKILL if err := container.kill(9); err != nil { return err } // 2. Wait for the process to die, in last resort, try to kill the process directly if err := container.WaitTimeout(10 * time.Second); err != nil { if container.command == nil { return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", utils.TruncateID(container.ID)) } log.Printf("Container %s failed to exit within 10 seconds of lxc-kill %s - trying direct SIGKILL", "SIGKILL", utils.TruncateID(container.ID)) if err := container.runtime.Kill(container, 9); err != nil { return err } } container.Wait() return nil } func (container *Container) Stop(seconds int) error { if !container.State.IsRunning() { return nil } // 1. Send a SIGTERM if err := container.kill(15); err != nil { utils.Debugf("Error sending kill SIGTERM: %s", err) log.Print("Failed to send SIGTERM to the process, force killing") if err := container.kill(9); err != nil { return err } } // 2. Wait for the process to exit on its own if err := container.WaitTimeout(time.Duration(seconds) * time.Second); err != nil { log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds) // 3. 
If it doesn't, then send SIGKILL if err := container.Kill(); err != nil { return err } } return nil } func (container *Container) Restart(seconds int) error { // Avoid unnecessarily unmounting and then directly mounting // the container when the container stops and then starts // again if err := container.Mount(); err == nil { defer container.Unmount() } if err := container.Stop(seconds); err != nil { return err } return container.Start() } // Wait blocks until the container stops running, then returns its exit code. func (container *Container) Wait() int { <-container.waitLock return container.State.GetExitCode() } func (container *Container) Resize(h, w int) error { return container.command.Terminal.Resize(h, w) } func (container *Container) ExportRw() (archive.Archive, error) { if err := container.Mount(); err != nil { return nil, err } if container.runtime == nil { return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID) } archive, err := container.runtime.Diff(container) if err != nil { container.Unmount() return nil, err } return utils.NewReadCloserWrapper(archive, func() error { err := archive.Close() container.Unmount() return err }), nil } func (container *Container) Export() (archive.Archive, error) { if err := container.Mount(); err != nil { return nil, err } archive, err := archive.Tar(container.basefs, archive.Uncompressed) if err != nil { container.Unmount() return nil, err } return utils.NewReadCloserWrapper(archive, func() error { err := archive.Close() container.Unmount() return err }), nil } func (container *Container) WaitTimeout(timeout time.Duration) error { done := make(chan bool) go func() { container.Wait() done <- true }() select { case <-time.After(timeout): return fmt.Errorf("Timed Out") case <-done: return nil } } func (container *Container) Mount() error { return container.runtime.Mount(container) } func (container *Container) Changes() ([]archive.Change, error) { return container.runtime.Changes(container) } func (container *Container) GetImage() (*Image, error) { if container.runtime == nil { return nil, fmt.Errorf("Can't get image of unregistered container") } return container.runtime.graph.Get(container.Image) } func (container *Container) Unmount() error { return container.runtime.Unmount(container) } func (container *Container) logPath(name string) string { return path.Join(container.root, fmt.Sprintf("%s-%s.log", container.ID, name)) } func (container *Container) ReadLog(name string) (io.Reader, error) { return os.Open(container.logPath(name)) } func (container *Container) hostConfigPath() string { return path.Join(container.root, "hostconfig.json") } func (container *Container) jsonPath() string { return path.Join(container.root, "config.json") } func (container *Container) EnvConfigPath() (string, error) { p := path.Join(container.root, "config.env") if _, err := os.Stat(p); err != nil { if os.IsNotExist(err) { f, err := os.Create(p) if err != nil { return "", err } f.Close() } else { return "", err } } return p, nil } // This method must be exported to be used from the lxc template // This directory is only usable when the container is running func (container *Container) RootfsPath() string { return path.Join(container.root, "root") } // This is the stand-alone version of the root fs, without any additional mounts. 
// This directory is usable whenever the container is mounted (and not unmounted) func (container *Container) BasefsPath() string { return container.basefs } func validateID(id string) error { if id == "" { return fmt.Errorf("Invalid empty id") } return nil } // GetSize, return real size, virtual size func (container *Container) GetSize() (int64, int64) { var ( sizeRw, sizeRootfs int64 err error driver = container.runtime.driver ) if err := container.Mount(); err != nil { utils.Errorf("Warning: failed to compute size of container rootfs %s: %s", container.ID, err) return sizeRw, sizeRootfs } defer container.Unmount() if differ, ok := container.runtime.driver.(graphdriver.Differ); ok { sizeRw, err = differ.DiffSize(container.ID) if err != nil { utils.Errorf("Warning: driver %s couldn't return diff size of container %s: %s", driver, container.ID, err) // FIXME: GetSize should return an error. Not changing it now in case // there is a side-effect. sizeRw = -1 } } else { changes, _ := container.Changes() if changes != nil { sizeRw = archive.ChangesSize(container.basefs, changes) } else { sizeRw = -1 } } if _, err = os.Stat(container.basefs); err != nil { if sizeRootfs, err = utils.TreeSize(container.basefs); err != nil { sizeRootfs = -1 } } return sizeRw, sizeRootfs } func (container *Container) Copy(resource string) (io.ReadCloser, error) { if err := container.Mount(); err != nil { return nil, err } var filter []string basePath := path.Join(container.basefs, resource) stat, err := os.Stat(basePath) if err != nil { container.Unmount() return nil, err } if !stat.IsDir() { d, f := path.Split(basePath) basePath = d filter = []string{f} } else { filter = []string{path.Base(basePath)} basePath = path.Dir(basePath) } archive, err := archive.TarFilter(basePath, &archive.TarOptions{ Compression: archive.Uncompressed, Includes: filter, }) if err != nil { return nil, err } return utils.NewReadCloserWrapper(archive, func() error { err := archive.Close() container.Unmount() return err }), nil } // Returns true if the container exposes a certain port func (container *Container) Exposes(p nat.Port) bool { _, exists := container.Config.ExposedPorts[p] return exists } func (container *Container) GetPtyMaster() (*os.File, error) { ttyConsole, ok := container.command.Terminal.(execdriver.TtyTerminal) if !ok { return nil, ErrNoTTY } return ttyConsole.Master(), nil } docker-0.9.1/nat/0000755000175000017500000000000012314376205011665 5ustar tagtagdocker-0.9.1/nat/sort_test.go0000644000175000017500000000131412314376205014241 0ustar tagtagpackage nat import ( "fmt" "testing" ) func TestSortUniquePorts(t *testing.T) { ports := []Port{ Port("6379/tcp"), Port("22/tcp"), } Sort(ports, func(ip, jp Port) bool { return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") }) first := ports[0] if fmt.Sprint(first) != "22/tcp" { t.Log(fmt.Sprint(first)) t.Fail() } } func TestSortSamePortWithDifferentProto(t *testing.T) { ports := []Port{ Port("8888/tcp"), Port("8888/udp"), Port("6379/tcp"), Port("6379/udp"), } Sort(ports, func(ip, jp Port) bool { return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") }) first := ports[0] if fmt.Sprint(first) != "6379/tcp" { t.Fail() } } docker-0.9.1/nat/nat.go0000644000175000017500000000565412314376205013010 0ustar tagtagpackage nat // nat is a convenience package for docker's manipulation of strings describing // network ports. 
import ( "fmt" "github.com/dotcloud/docker/utils" "strconv" "strings" ) const ( PortSpecTemplate = "ip:hostPort:containerPort" PortSpecTemplateFormat = "ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort" ) type PortBinding struct { HostIp string HostPort string } type PortMap map[Port][]PortBinding type PortSet map[Port]struct{} // 80/tcp type Port string func NewPort(proto, port string) Port { return Port(fmt.Sprintf("%s/%s", port, proto)) } func ParsePort(rawPort string) (int, error) { port, err := strconv.ParseUint(rawPort, 10, 16) if err != nil { return 0, err } return int(port), nil } func (p Port) Proto() string { parts := strings.Split(string(p), "/") if len(parts) == 1 { return "tcp" } return parts[1] } func (p Port) Port() string { return strings.Split(string(p), "/")[0] } func (p Port) Int() int { i, err := ParsePort(p.Port()) if err != nil { panic(err) } return i } // Splits a port in the format of port/proto func SplitProtoPort(rawPort string) (string, string) { parts := strings.Split(rawPort, "/") l := len(parts) if l == 0 { return "", "" } if l == 1 { return "tcp", rawPort } return parts[0], parts[1] } // We will receive port specs in the format of ip:public:private/proto and these need to be // parsed in the internal types func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { var ( exposedPorts = make(map[Port]struct{}, len(ports)) bindings = make(map[Port][]PortBinding) ) for _, rawPort := range ports { proto := "tcp" if i := strings.LastIndex(rawPort, "/"); i != -1 { proto = rawPort[i+1:] rawPort = rawPort[:i] } if !strings.Contains(rawPort, ":") { rawPort = fmt.Sprintf("::%s", rawPort) } else if len(strings.Split(rawPort, ":")) == 2 { rawPort = fmt.Sprintf(":%s", rawPort) } parts, err := utils.PartParser(PortSpecTemplate, rawPort) if err != nil { return nil, nil, err } var ( containerPort = parts["containerPort"] rawIp = parts["ip"] hostPort = parts["hostPort"] ) if containerPort == "" { return nil, nil, fmt.Errorf("No port specified: %s", rawPort) } if _, err := strconv.ParseUint(containerPort, 10, 16); err != nil { return nil, nil, fmt.Errorf("Invalid containerPort: %s", containerPort) } if _, err := strconv.ParseUint(hostPort, 10, 16); hostPort != "" && err != nil { return nil, nil, fmt.Errorf("Invalid hostPort: %s", hostPort) } port := NewPort(proto, containerPort) if _, exists := exposedPorts[port]; !exists { exposedPorts[port] = struct{}{} } binding := PortBinding{ HostIp: rawIp, HostPort: hostPort, } bslice, exists := bindings[port] if !exists { bslice = []PortBinding{} } bindings[port] = append(bslice, binding) } return exposedPorts, bindings, nil } docker-0.9.1/nat/sort.go0000644000175000017500000000071312314376205013204 0ustar tagtagpackage nat import "sort" type portSorter struct { ports []Port by func(i, j Port) bool } func (s *portSorter) Len() int { return len(s.ports) } func (s *portSorter) Swap(i, j int) { s.ports[i], s.ports[j] = s.ports[j], s.ports[i] } func (s *portSorter) Less(i, j int) bool { ip := s.ports[i] jp := s.ports[j] return s.by(ip, jp) } func Sort(ports []Port, predicate func(i, j Port) bool) { s := &portSorter{ports, predicate} sort.Sort(s) } docker-0.9.1/LICENSE0000644000175000017500000002613612314376205012120 0ustar tagtag Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. 
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
docker-0.9.1/runtime.go0000644000175000017500000006402612314376205013125 0ustar tagtagpackage docker import ( "container/list" "fmt" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/execdriver" "github.com/dotcloud/docker/execdriver/lxc" "github.com/dotcloud/docker/execdriver/native" "github.com/dotcloud/docker/graphdriver" "github.com/dotcloud/docker/graphdriver/aufs" _ "github.com/dotcloud/docker/graphdriver/btrfs" _ "github.com/dotcloud/docker/graphdriver/devmapper" _ "github.com/dotcloud/docker/graphdriver/vfs" _ "github.com/dotcloud/docker/networkdriver/lxc" "github.com/dotcloud/docker/networkdriver/portallocator" "github.com/dotcloud/docker/pkg/graphdb" "github.com/dotcloud/docker/pkg/sysinfo" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "io" "io/ioutil" "os" "path" "regexp" "sort" "strings" "sync" "time" ) // Set the max depth to the aufs default that most // kernels are compiled with // For more information see: http://sourceforge.net/p/aufs/aufs3-standalone/ci/aufs3.12/tree/config.mk const MaxImageDepth = 127 var ( defaultDns = []string{"8.8.8.8", "8.8.4.4"} validContainerNameChars = `[a-zA-Z0-9_.-]` validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`) ) type Runtime struct { repository string sysInitPath string containers *list.List graph *Graph repositories *TagStore idIndex *utils.TruncIndex sysInfo *sysinfo.SysInfo volumes *Graph srv *Server eng *engine.Engine config *DaemonConfig containerGraph *graphdb.Database driver graphdriver.Driver execDriver execdriver.Driver } // List returns an array of all containers registered in the runtime. func (runtime *Runtime) List() []*Container { containers := new(History) for e := runtime.containers.Front(); e != nil; e = e.Next() { containers.Add(e.Value.(*Container)) } return *containers } func (runtime *Runtime) getContainerElement(id string) *list.Element { for e := runtime.containers.Front(); e != nil; e = e.Next() { container := e.Value.(*Container) if container.ID == id { return e } } return nil } // Get looks for a container by the specified ID or name, and returns it. // If the container is not found, or if an error occurs, nil is returned. func (runtime *Runtime) Get(name string) *Container { if c, _ := runtime.GetByName(name); c != nil { return c } id, err := runtime.idIndex.Get(name) if err != nil { return nil } e := runtime.getContainerElement(id) if e == nil { return nil } return e.Value.(*Container) } // Exists returns a true if a container of the specified ID or name exists, // false otherwise. func (runtime *Runtime) Exists(id string) bool { return runtime.Get(id) != nil } func (runtime *Runtime) containerRoot(id string) string { return path.Join(runtime.repository, id) } // Load reads the contents of a container from disk // This is typically done at startup. 
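/* Illustrative note (layout inferred from the path helpers in container.go; the
 * /var/lib/docker prefix is the usual default and is an assumption here): each entry under
 * runtime.repository is a per-container directory whose contents load()/FromDisk() read back:
 *
 *	/var/lib/docker/containers/<id>/config.json      // Container struct (ToDisk/FromDisk)
 *	/var/lib/docker/containers/<id>/hostconfig.json  // runconfig.HostConfig
 *	/var/lib/docker/containers/<id>/config.env       // env serialized by Start()
 *	/var/lib/docker/containers/<id>/hostname, hosts, resolv.conf, <id>-json.log, ...
 //*/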
func (runtime *Runtime) load(id string) (*Container, error) { container := &Container{root: runtime.containerRoot(id)} if err := container.FromDisk(); err != nil { return nil, err } if container.ID != id { return container, fmt.Errorf("Container %s is stored at %s", container.ID, id) } if container.State.IsRunning() { container.State.SetGhost(true) } return container, nil } // Register makes a container object usable by the runtime as func (runtime *Runtime) Register(container *Container) error { if container.runtime != nil || runtime.Exists(container.ID) { return fmt.Errorf("Container is already loaded") } if err := validateID(container.ID); err != nil { return err } if err := runtime.ensureName(container); err != nil { return err } container.runtime = runtime // Attach to stdout and stderr container.stderr = utils.NewWriteBroadcaster() container.stdout = utils.NewWriteBroadcaster() // Attach to stdin if container.Config.OpenStdin { container.stdin, container.stdinPipe = io.Pipe() } else { container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin } // done runtime.containers.PushBack(container) runtime.idIndex.Add(container.ID) // FIXME: if the container is supposed to be running but is not, auto restart it? // if so, then we need to restart monitor and init a new lock // If the container is supposed to be running, make sure of it if container.State.IsRunning() { if container.State.IsGhost() { utils.Debugf("killing ghost %s", container.ID) existingPid := container.State.Pid container.State.SetGhost(false) container.State.SetStopped(0) if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") { lxc.KillLxc(container.ID, 9) } else { command := &execdriver.Command{ ID: container.ID, } command.Process = &os.Process{Pid: existingPid} runtime.execDriver.Kill(command, 9) } // ensure that the filesystem is also unmounted unmountVolumesForContainer(container) if err := container.Unmount(); err != nil { utils.Debugf("ghost unmount error %s", err) } } info := runtime.execDriver.Info(container.ID) if !info.IsRunning() { utils.Debugf("Container %s was supposed to be running but is not.", container.ID) if runtime.config.AutoRestart { utils.Debugf("Restarting") unmountVolumesForContainer(container) if err := container.Unmount(); err != nil { utils.Debugf("restart unmount error %s", err) } container.State.SetGhost(false) container.State.SetStopped(0) if err := container.Start(); err != nil { return err } } else { utils.Debugf("Marking as stopped") container.State.SetStopped(-127) if err := container.ToDisk(); err != nil { return err } } } } else { // When the container is not running, we still initialize the waitLock // chan and close it. Receiving on nil chan blocks whereas receiving on a // closed chan does not. In this case we do not want to block. 
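/* Illustrative aside on the Go channel semantics relied on here:
 *
 *	var nilCh chan struct{}          // zero value; <-nilCh would block forever
 *	closedCh := make(chan struct{})
 *	close(closedCh)
 *	<-closedCh                       // returns immediately with the zero value
 *
 * Wait() receives from waitLock, so for a container that is not running the channel is
 * created and closed right away, letting Wait() return at once instead of hanging.
 //*/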
container.waitLock = make(chan struct{}) close(container.waitLock) } return nil } func (runtime *Runtime) ensureName(container *Container) error { if container.Name == "" { name, err := generateRandomName(runtime) if err != nil { name = utils.TruncateID(container.ID) } container.Name = name if err := container.ToDisk(); err != nil { utils.Debugf("Error saving container name %s", err) } if !runtime.containerGraph.Exists(name) { if _, err := runtime.containerGraph.Set(name, container.ID); err != nil { utils.Debugf("Setting default id - %s", err) } } } return nil } func (runtime *Runtime) LogToDisk(src *utils.WriteBroadcaster, dst, stream string) error { log, err := os.OpenFile(dst, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600) if err != nil { return err } src.AddWriter(log, stream) return nil } // Destroy unregisters a container from the runtime and cleanly removes its contents from the filesystem. func (runtime *Runtime) Destroy(container *Container) error { if container == nil { return fmt.Errorf("The given container is ") } element := runtime.getContainerElement(container.ID) if element == nil { return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.ID) } if err := container.Stop(3); err != nil { return err } if err := runtime.driver.Remove(container.ID); err != nil { return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", runtime.driver, container.ID, err) } initID := fmt.Sprintf("%s-init", container.ID) if err := runtime.driver.Remove(initID); err != nil { return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", runtime.driver, initID, err) } if _, err := runtime.containerGraph.Purge(container.ID); err != nil { utils.Debugf("Unable to remove container from link graph: %s", err) } // Deregister the container before removing its directory, to avoid race conditions runtime.idIndex.Delete(container.ID) runtime.containers.Remove(element) if err := os.RemoveAll(container.root); err != nil { return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err) } return nil } func (runtime *Runtime) restore() error { if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" { fmt.Printf("Loading containers: ") } dir, err := ioutil.ReadDir(runtime.repository) if err != nil { return err } containers := make(map[string]*Container) currentDriver := runtime.driver.String() for _, v := range dir { id := v.Name() container, err := runtime.load(id) if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" { fmt.Print(".") } if err != nil { utils.Errorf("Failed to load container %v: %v", id, err) continue } // Ignore the container if it does not support the current driver being used by the graph if container.Driver == "" && currentDriver == "aufs" || container.Driver == currentDriver { utils.Debugf("Loaded container %v", container.ID) containers[container.ID] = container } else { utils.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID) } } register := func(container *Container) { if err := runtime.Register(container); err != nil { utils.Debugf("Failed to register container %s: %s", container.ID, err) } } if entities := runtime.containerGraph.List("/", -1); entities != nil { for _, p := range entities.Paths() { if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" { fmt.Print(".") } e := entities[p] if container, ok := containers[e.ID()]; ok { register(container) delete(containers, e.ID()) } } } // Any containers that are left over do not exist in the graph for _, container := range containers { 
// Try to set the default name for a container if it exists prior to links container.Name, err = generateRandomName(runtime) if err != nil { container.Name = utils.TruncateID(container.ID) } if _, err := runtime.containerGraph.Set(container.Name, container.ID); err != nil { utils.Debugf("Setting default id - %s", err) } register(container) } if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" { fmt.Printf(": done.\n") } return nil } // Create creates a new container from the given configuration with a given name. func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Container, []string, error) { // Lookup image img, err := runtime.repositories.LookupImage(config.Image) if err != nil { return nil, nil, err } // We add 2 layers to the depth because the container's rw and // init layer add to the restriction depth, err := img.Depth() if err != nil { return nil, nil, err } if depth+2 >= MaxImageDepth { return nil, nil, fmt.Errorf("Cannot create container with more than %d parents", MaxImageDepth) } checkDeprecatedExpose := func(config *runconfig.Config) bool { if config != nil { if config.PortSpecs != nil { for _, p := range config.PortSpecs { if strings.Contains(p, ":") { return true } } } } return false } warnings := []string{} if checkDeprecatedExpose(img.Config) || checkDeprecatedExpose(config) { warnings = append(warnings, "The mapping to public ports on your host via Dockerfile EXPOSE (host:port:port) has been deprecated. Use -p to publish the ports.") } if img.Config != nil { if err := runconfig.Merge(config, img.Config); err != nil { return nil, nil, err } } if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 { return nil, nil, fmt.Errorf("No command specified") } // Generate id id := GenerateID() if name == "" { name, err = generateRandomName(runtime) if err != nil { name = utils.TruncateID(id) } } else { if !validContainerNamePattern.MatchString(name) { return nil, nil, fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars) } } if name[0] != '/' { name = "/" + name } // Set the entity in the graph using the default name specified if _, err := runtime.containerGraph.Set(name, id); err != nil { if !graphdb.IsNonUniqueNameError(err) { return nil, nil, err } conflictingContainer, err := runtime.GetByName(name) if err != nil { if strings.Contains(err.Error(), "Could not find entity") { return nil, nil, err } // Remove name and continue starting the container if err := runtime.containerGraph.Delete(name); err != nil { return nil, nil, err } } else { nameAsKnownByUser := strings.TrimPrefix(name, "/") return nil, nil, fmt.Errorf( "Conflict, The name %s is already assigned to %s. You have to delete (or rename) that container to be able to assign %s to a container again.", nameAsKnownByUser, utils.TruncateID(conflictingContainer.ID), nameAsKnownByUser) } } // Generate default hostname // FIXME: the lxc template no longer needs to set a default hostname if config.Hostname == "" { config.Hostname = id[:12] } var args []string var entrypoint string if len(config.Entrypoint) != 0 { entrypoint = config.Entrypoint[0] args = append(config.Entrypoint[1:], config.Cmd...)
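// For example (hypothetical values, added for clarity): Entrypoint
// ["/bin/echo", "-n"] combined with Cmd ["hello"] yields entrypoint
// "/bin/echo" and args ["-n", "hello"].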
} else { entrypoint = config.Cmd[0] args = config.Cmd[1:] } container := &Container{ // FIXME: we should generate the ID here instead of receiving it as an argument ID: id, Created: time.Now().UTC(), Path: entrypoint, Args: args, //FIXME: de-duplicate from config Config: config, hostConfig: &runconfig.HostConfig{}, Image: img.ID, // Always use the resolved image id NetworkSettings: &NetworkSettings{}, Name: name, Driver: runtime.driver.String(), ExecDriver: runtime.execDriver.Name(), } container.root = runtime.containerRoot(container.ID) // Step 1: create the container directory. // This doubles as a barrier to avoid race conditions. if err := os.Mkdir(container.root, 0700); err != nil { return nil, nil, err } initID := fmt.Sprintf("%s-init", container.ID) if err := runtime.driver.Create(initID, img.ID); err != nil { return nil, nil, err } initPath, err := runtime.driver.Get(initID) if err != nil { return nil, nil, err } defer runtime.driver.Put(initID) if err := setupInitLayer(initPath); err != nil { return nil, nil, err } if err := runtime.driver.Create(container.ID, initID); err != nil { return nil, nil, err } resolvConf, err := utils.GetResolvConf() if err != nil { return nil, nil, err } if len(config.Dns) == 0 && len(runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) { //"WARNING: Docker detected local DNS server on resolv.conf. Using default external servers: %v", defaultDns runtime.config.Dns = defaultDns } // If custom dns exists, then create a resolv.conf for the container if len(config.Dns) > 0 || len(runtime.config.Dns) > 0 { var dns []string if len(config.Dns) > 0 { dns = config.Dns } else { dns = runtime.config.Dns } container.ResolvConfPath = path.Join(container.root, "resolv.conf") f, err := os.Create(container.ResolvConfPath) if err != nil { return nil, nil, err } defer f.Close() for _, dns := range dns { if _, err := f.Write([]byte("nameserver " + dns + "\n")); err != nil { return nil, nil, err } } } else { container.ResolvConfPath = "/etc/resolv.conf" } // Step 2: save the container json if err := container.ToDisk(); err != nil { return nil, nil, err } // Step 3: register the container if err := runtime.Register(container); err != nil { return nil, nil, err } return container, warnings, nil } // Commit creates a new filesystem image from the current state of a container. // The image can optionally be tagged into a repository func (runtime *Runtime) Commit(container *Container, repository, tag, comment, author string, config *runconfig.Config) (*Image, error) { // FIXME: freeze the container before copying it to avoid data corruption? // FIXME: this shouldn't be in commands. 
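// Added note (not in the original source): the new image is built from the
// container's read-write layer only. ExportRw produces a tar of the changes
// made on top of the base image, and graph.Create turns that diff into a new
// image layered on the container's image.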
if err := container.Mount(); err != nil { return nil, err } defer container.Unmount() rwTar, err := container.ExportRw() if err != nil { return nil, err } defer rwTar.Close() // Create a new image from the container's base layers + a new layer from container changes img, err := runtime.graph.Create(rwTar, container, comment, author, config) if err != nil { return nil, err } // Register the image if needed if repository != "" { if err := runtime.repositories.Set(repository, tag, img.ID, true); err != nil { return img, err } } return img, nil } func getFullName(name string) (string, error) { if name == "" { return "", fmt.Errorf("Container name cannot be empty") } if name[0] != '/' { name = "/" + name } return name, nil } func (runtime *Runtime) GetByName(name string) (*Container, error) { fullName, err := getFullName(name) if err != nil { return nil, err } entity := runtime.containerGraph.Get(fullName) if entity == nil { return nil, fmt.Errorf("Could not find entity for %s", name) } e := runtime.getContainerElement(entity.ID()) if e == nil { return nil, fmt.Errorf("Could not find container for entity id %s", entity.ID()) } return e.Value.(*Container), nil } func (runtime *Runtime) Children(name string) (map[string]*Container, error) { name, err := getFullName(name) if err != nil { return nil, err } children := make(map[string]*Container) err = runtime.containerGraph.Walk(name, func(p string, e *graphdb.Entity) error { c := runtime.Get(e.ID()) if c == nil { return fmt.Errorf("Could not get container for name %s and id %s", e.ID(), p) } children[p] = c return nil }, 0) if err != nil { return nil, err } return children, nil } func (runtime *Runtime) RegisterLink(parent, child *Container, alias string) error { fullName := path.Join(parent.Name, alias) if !runtime.containerGraph.Exists(fullName) { _, err := runtime.containerGraph.Set(fullName, child.ID) return err } return nil } // FIXME: harmonize with NewGraph() func NewRuntime(config *DaemonConfig, eng *engine.Engine) (*Runtime, error) { runtime, err := NewRuntimeFromDirectory(config, eng) if err != nil { return nil, err } return runtime, nil } func NewRuntimeFromDirectory(config *DaemonConfig, eng *engine.Engine) (*Runtime, error) { // Set the default driver graphdriver.DefaultDriver = config.GraphDriver // Load storage driver driver, err := graphdriver.New(config.Root) if err != nil { return nil, err } utils.Debugf("Using graph driver %s", driver) runtimeRepo := path.Join(config.Root, "containers") if err := os.MkdirAll(runtimeRepo, 0700); err != nil && !os.IsExist(err) { return nil, err } if ad, ok := driver.(*aufs.Driver); ok { utils.Debugf("Migrating existing containers") if err := ad.Migrate(config.Root, setupInitLayer); err != nil { return nil, err } } utils.Debugf("Creating images graph") g, err := NewGraph(path.Join(config.Root, "graph"), driver) if err != nil { return nil, err } // We don't want to use a complex driver like aufs or devmapper // for volumes, just a plain filesystem volumesDriver, err := graphdriver.GetDriver("vfs", config.Root) if err != nil { return nil, err } utils.Debugf("Creating volumes graph") volumes, err := NewGraph(path.Join(config.Root, "volumes"), volumesDriver) if err != nil { return nil, err } utils.Debugf("Creating repository list") repositories, err := NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), g) if err != nil { return nil, fmt.Errorf("Couldn't create Tag store: %s", err) } if !config.DisableNetwork { job := eng.Job("init_networkdriver") job.SetenvBool("EnableIptables", 
config.EnableIptables) job.SetenvBool("InterContainerCommunication", config.InterContainerCommunication) job.SetenvBool("EnableIpForward", config.EnableIpForward) job.Setenv("BridgeIface", config.BridgeIface) job.Setenv("BridgeIP", config.BridgeIP) job.Setenv("DefaultBindingIP", config.DefaultIp.String()) if err := job.Run(); err != nil { return nil, err } } graphdbPath := path.Join(config.Root, "linkgraph.db") graph, err := graphdb.NewSqliteConn(graphdbPath) if err != nil { return nil, err } localCopy := path.Join(config.Root, "init", fmt.Sprintf("dockerinit-%s", dockerversion.VERSION)) sysInitPath := utils.DockerInitPath(localCopy) if sysInitPath == "" { return nil, fmt.Errorf("Could not locate dockerinit: This usually means docker was built incorrectly. See http://docs.docker.io/en/latest/contributing/devenvironment for official build instructions.") } if sysInitPath != localCopy { // When we find a suitable dockerinit binary (even if it's our local binary), we copy it into config.Root at localCopy for future use (so that the original can go away without that being a problem, for example during a package upgrade). if err := os.Mkdir(path.Dir(localCopy), 0700); err != nil && !os.IsExist(err) { return nil, err } if _, err := utils.CopyFile(sysInitPath, localCopy); err != nil { return nil, err } if err := os.Chmod(localCopy, 0700); err != nil { return nil, err } sysInitPath = localCopy } var ( ed execdriver.Driver sysInfo = sysinfo.New(false) ) switch config.ExecDriver { case "lxc": // we want to give the lxc driver the full docker root because it needs // to access and write config and template files in /var/lib/docker/containers/* // to be backwards compatible ed, err = lxc.NewDriver(config.Root, sysInfo.AppArmor) case "native": ed, err = native.NewDriver(path.Join(config.Root, "execdriver", "native")) default: return nil, fmt.Errorf("unknown exec driver %s", config.ExecDriver) } if err != nil { return nil, err } runtime := &Runtime{ repository: runtimeRepo, containers: list.New(), graph: g, repositories: repositories, idIndex: utils.NewTruncIndex(), sysInfo: sysInfo, volumes: volumes, config: config, containerGraph: graph, driver: driver, sysInitPath: sysInitPath, execDriver: ed, eng: eng, } if err := runtime.restore(); err != nil { return nil, err } return runtime, nil } func (runtime *Runtime) Close() error { errorsStrings := []string{} if err := portallocator.ReleaseAll(); err != nil { utils.Errorf("portallocator.ReleaseAll(): %s", err) errorsStrings = append(errorsStrings, err.Error()) } if err := runtime.driver.Cleanup(); err != nil { utils.Errorf("runtime.driver.Cleanup(): %s", err.Error()) errorsStrings = append(errorsStrings, err.Error()) } if err := runtime.containerGraph.Close(); err != nil { utils.Errorf("runtime.containerGraph.Close(): %s", err.Error()) errorsStrings = append(errorsStrings, err.Error()) } if len(errorsStrings) > 0 { return fmt.Errorf("%s", strings.Join(errorsStrings, ", ")) } return nil } func (runtime *Runtime) Mount(container *Container) error { dir, err := runtime.driver.Get(container.ID) if err != nil { return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, runtime.driver, err) } if container.basefs == "" { container.basefs = dir } else if container.basefs != dir { return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", runtime.driver, container.ID, container.basefs, dir) } return nil } func (runtime *Runtime) Unmount(container *Container) error { runtime.driver.Put(container.ID)
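// Added note (not in the original source): Put releases the reference taken
// by the driver.Get call in Mount above; some graph drivers reference-count
// these mounts, so each Mount should eventually be balanced by an Unmount.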
return nil } func (runtime *Runtime) Changes(container *Container) ([]archive.Change, error) { if differ, ok := runtime.driver.(graphdriver.Differ); ok { return differ.Changes(container.ID) } cDir, err := runtime.driver.Get(container.ID) if err != nil { return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err) } defer runtime.driver.Put(container.ID) initDir, err := runtime.driver.Get(container.ID + "-init") if err != nil { return nil, fmt.Errorf("Error getting container init rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err) } defer runtime.driver.Put(container.ID + "-init") return archive.ChangesDirs(cDir, initDir) } func (runtime *Runtime) Diff(container *Container) (archive.Archive, error) { if differ, ok := runtime.driver.(graphdriver.Differ); ok { return differ.Diff(container.ID) } changes, err := runtime.Changes(container) if err != nil { return nil, err } cDir, err := runtime.driver.Get(container.ID) if err != nil { return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err) } archive, err := archive.ExportChanges(cDir, changes) if err != nil { return nil, err } return utils.NewReadCloserWrapper(archive, func() error { err := archive.Close() runtime.driver.Put(container.ID) return err }), nil } func (runtime *Runtime) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { return runtime.execDriver.Run(c.command, pipes, startCallback) } func (runtime *Runtime) Kill(c *Container, sig int) error { return runtime.execDriver.Kill(c.command, sig) } // Nuke kills all containers then removes all content // from the content root, including images, volumes and // container filesystems. // Again: this will remove your entire docker runtime! func (runtime *Runtime) Nuke() error { var wg sync.WaitGroup for _, container := range runtime.List() { wg.Add(1) go func(c *Container) { c.Kill() wg.Done() }(container) } wg.Wait() runtime.Close() return os.RemoveAll(runtime.config.Root) } // FIXME: this is a convenience function for integration tests // which need direct access to runtime.graph. // Once the tests switch to using engine and jobs, this method // can go away. func (runtime *Runtime) Graph() *Graph { return runtime.graph } // History is a convenience type for storing a list of containers, // ordered by creation date. type History []*Container func (history *History) Len() int { return len(*history) } func (history *History) Less(i, j int) bool { containers := *history return containers[j].When().Before(containers[i].When()) } func (history *History) Swap(i, j int) { containers := *history tmp := containers[i] containers[i] = containers[j] containers[j] = tmp } func (history *History) Add(container *Container) { *history = append(*history, container) sort.Sort(history) } docker-0.9.1/CHANGELOG.md0000644000175000017500000013200312314376205012713 0ustar tagtag# Changelog ## 0.9.1 (2014-03-24) #### Builder - Fix printing multiple messages on a single line. Fixes broken output during builds. #### Documentation - Fix external link on security of containers. #### Contrib - Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly. - Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile. #### Hack - Generate md5 and sha256 hashes when building, and upload them via hack/release.sh. 
#### Remote API - Fix content-type detection in `docker cp`. #### Runtime - Use BSD raw mode on Darwin. Fixes nano, tmux and others. - Only unshare the mount namespace for execin. - Retry to retrieve the layer metadata up to 5 times for `docker pull`. - Merge existing config when committing. - Fix panic in monitor. - Disable daemon startup timeout. - Fix issue #4681: add loopback interface when networking is disabled. - Add failing test case for issue #4681. - Send SIGTERM to child, instead of SIGKILL. - Show the driver and the kernel version in `docker info` even when not in debug mode. - Always symlink /dev/ptmx for libcontainer. This fixes console related problems. - Fix issue caused by the absence of /etc/apparmor.d. - Don't leave empty cidFile behind when failing to create the container. - Improve deprecation message. - Fix attach exit on darwin. - devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time, unlock while sleeping). - devicemapper: succeed immediately when removing non-existing devices. - devicemapper: increase timeout in waitClose to 10 seconds. - Remove goroutine leak on error. - Update parseLxcInfo to comply with new lxc1.0 format. ## 0.9.0 (2014-03-10) #### Builder - Avoid extra mount/unmount during build. This fixes mount/unmount related errors during build. - Add error to docker build --rm. This adds missing error handling. - Forbid chained onbuild, `onbuild from` and `onbuild maintainer` triggers. - Make `--rm` the default for `docker build`. #### Documentation - Download the docker client binary for Mac over https. - Update the titles of the install instructions & descriptions. * Add instructions for upgrading boot2docker. * Add port forwarding example in OS X install docs. - Attempt to disentangle repository and registry. - Update docs to explain more about `docker ps`. - Update sshd example to use a Dockerfile. - Rework some examples, including the Python examples. - Update docs to include instructions for a container's lifecycle. - Update docs documentation to discuss the docs branch. - Don't skip cert check for an example & use HTTPS. - Bring back the memory and swap accounting section which was lost when the kernel page was removed. - Explain DNS warnings and how to fix them on systems running and using a local nameserver. #### Contrib - Add Tanglu support for mkimage-debootstrap. - Add SteamOS support for mkimage-debootstrap. #### Hack - Get package coverage when running integration tests. - Remove the Vagrantfile. This is being replaced with boot2docker. - Fix tests on systems where aufs isn't available. - Update packaging instructions and remove the dependency on lxc. #### Remote API * Move code specific to the API to the api package. - Fix header content type for the API. Makes all endpoints use proper content type. - Fix registry auth & remove ping calls from CmdPush and CmdPull. - Add newlines to the JSON stream functions. #### Runtime * Do not ping the registry from the CLI. All requests to registres flow through the daemon. - Check for nil information return in the lxc driver. This fixes panics with older lxc versions. - Devicemapper: cleanups and fix for unmount. Fixes two problems which were causing unmount to fail intermittently. - Devicemapper: remove directory when removing device. Directories don't get left behind when removing the device. * Devicemapper: enable skip_block_zeroing. Improves performance by not zeroing blocks. - Devicemapper: fix shutdown warnings. 
Fixes shutdown warnings concerning pool device removal. - Ensure docker cp stream is closed properly. Fixes problems with files not being copied by `docker cp`. - Stop making `tcp://` default to `127.0.0.1:4243` and remove the default port for tcp. - Fix `--run` in `docker commit`. This makes `docker commit --run` work again. - Fix custom bridge related options. This makes custom bridges work again. + Mount-bind the PTY as container console. This allows tmux/screen to run. + Add the pure Go libcontainer library to make it possible to run containers using only features of the Linux kernel. + Add native exec driver which uses libcontainer and make it the default exec driver. - Add support for handling extended attributes in archives. * Set the container MTU to be the same as the host MTU. + Add simple sha256 checksums for layers to speed up `docker push`. * Improve kernel version parsing. * Allow flag grouping (`docker run -it`). - Remove chroot exec driver. - Fix divide by zero to fix panic. - Rewrite `docker rmi`. - Fix docker info with lxc 1.0.0. - Fix fedora tty with apparmor. * Don't always append env vars, replace defaults with vars from config. * Fix a goroutine leak. * Switch to Go 1.2.1. - Fix unique constraint error checks. * Handle symlinks for Docker's data directory and for TMPDIR. - Add deprecation warnings for flags (-flag is deprecated in favor of --flag) - Add apparmor profile for the native execution driver. * Move system specific code from archive to pkg/system. - Fix duplicate signal for `docker run -i -t` (issue #3336). - Return correct process pid for lxc. - Add a -G option to specify the group which unix sockets belong to. + Add `-f` flag to `docker rm` to force removal of running containers. + Kill ghost containers and restart all ghost containers when the docker daemon restarts. + Add `DOCKER_RAMDISK` environment variable to make Docker work when the root is on a ramdisk. ## 0.8.1 (2014-02-18) #### Builder - Avoid extra mount/unmount during build. This removes an unneeded mount/unmount operation which was causing problems with devicemapper - Fix regression with ADD of tar files. This stops Docker from decompressing tarballs added via ADD from the local file system - Add error to `docker build --rm`. This adds a missing error check to ensure failures to remove containers are detected and reported #### Documentation * Update issue filing instructions * Warn against the use of symlinks for Docker's storage folder * Replace the Firefox example with an IceWeasel example * Rewrite the PostgresSQL example using a Dockerfile and add more details to it * Improve the OS X documentation #### Remote API - Fix broken images API for version less than 1.7 - Use the right encoding for all API endpoints which return JSON - Move remote api client to api/ - Queue calls to the API using generic socket wait #### Runtime - Fix the use of custom settings for bridges and custom bridges - Refactor the devicemapper code to avoid many mount/unmount race conditions and failures - Remove two panics which could make Docker crash in some situations - Don't ping registry from the CLI client - Enable skip_block_zeroing for devicemapper. This stops devicemapper from always zeroing entire blocks - Fix --run in `docker commit`. This makes docker commit store `--run` in the image configuration - Remove directory when removing devicemapper device. This cleans up leftover mount directories - Drop NET_ADMIN capability for non-privileged containers. 
Unprivileged containers can't change their network configuration - Ensure `docker cp` stream is closed properly - Avoid extra mount/unmount during container registration. This removes an unneeded mount/unmount operation which was causing problems with devicemapper - Stop allowing tcp:// as a default tcp bin address which binds to 127.0.0.1:4243 and remove the default port + Mount-bind the PTY as container console. This allows tmux and screen to run in a container - Clean up archive closing. This fixes and improves archive handling - Fix engine tests on systems where temp directories are symlinked - Add test methods for save and load - Avoid temporarily unmounting the container when restarting it. This fixes a race for devicemapper during restart - Support submodules when building from a GitHub repository - Quote volume path to allow spaces - Fix remote tar ADD behavior. This fixes a regression which was causing Docker to extract tarballs ## 0.8.0 (2014-02-04) #### Notable features since 0.7.0 * Images and containers can be removed much faster * Building an image from source with docker build is now much faster * The Docker daemon starts and stops much faster * The memory footprint of many common operations has been reduced, by streaming files instead of buffering them in memory, fixing memory leaks, and fixing various suboptimal memory allocations * Several race conditions were fixed, making Docker more stable under very high concurrency load. This makes Docker more stable and less likely to crash and reduces the memory footprint of many common operations * All packaging operations are now built on the Go language’s standard tar implementation, which is bundled with Docker itself. This makes packaging more portable across host distributions, and solves several issues caused by quirks and incompatibilities between different distributions of tar * Docker can now create, remove and modify larger numbers of containers and images graciously thanks to more aggressive releasing of system resources. For example the storage driver API now allows Docker to do reference counting on mounts created by the drivers With the ongoing changes to the networking and execution subsystems of docker testing these areas have been a focus of the refactoring. By moving these subsystems into separate packages we can test, analyze, and monitor coverage and quality of these packages * Many components have been separated into smaller sub-packages, each with a dedicated test suite. As a result the code is better-tested, more readable and easier to change * The ADD instruction now supports caching, which avoids unnecessarily re-uploading the same source content again and again when it hasn’t changed * The new ONBUILD instruction adds to your image a “trigger” instruction to be executed at a later time, when the image is used as the base for another build * Docker now ships with an experimental storage driver which uses the BTRFS filesystem for copy-on-write * Docker is officially supported on Mac OSX * The Docker daemon supports systemd socket activation ## 0.7.6 (2014-01-14) #### Builder * Do not follow symlink outside of build context #### Runtime - Remount bind mounts when ro is specified * Use https for fetching docker version #### Other * Inline the test.docker.io fingerprint * Add ca-certificates to packaging documentation ## 0.7.5 (2014-01-09) #### Builder * Disable compression for build. 
More space usage but a much faster upload - Fix ADD caching for certain paths - Do not compress archive from git build #### Documentation - Fix error in GROUP add example * Make sure the GPG fingerprint is inline in the documentation * Give more specific advice on setting up signing of commits for DCO #### Runtime - Fix misspelled container names - Do not add hostname when networking is disabled * Return most recent image from the cache by date - Return all errors from docker wait * Add Content-Type Header "application/json" to GET /version and /info responses #### Other * Update DCO to version 1.1 + Update Makefile to use "docker:GIT_BRANCH" as the generated image name * Update Travis to check for new 1.1 DCO version ## 0.7.4 (2014-01-07) #### Builder - Fix ADD caching issue with . prefixed path - Fix docker build on devicemapper by reverting sparse file tar option - Fix issue with file caching and prevent wrong cache hit * Use same error handling while unmarshalling CMD and ENTRYPOINT #### Documentation * Simplify and streamline Amazon Quickstart * Install instructions use unprefixed fedora image * Update instructions for mtu flag for Docker on GCE + Add Ubuntu Saucy to installation - Fix for wrong version warning on master instead of latest #### Runtime - Only get the image's rootfs when we need to calculate the image size - Correctly handle unmapping UDP ports * Make CopyFileWithTar use a pipe instead of a buffer to save memory on docker build - Fix login message to say pull instead of push - Fix "docker load" help by removing "SOURCE" prompt and mentioning STDIN * Make blank -H option default to the same as no -H was sent * Extract cgroups utilities to own submodule #### Other + Add Travis CI configuration to validate DCO and gofmt requirements + Add Developer Certificate of Origin Text * Upgrade VBox Guest Additions * Check standalone header when pinging a registry server ## 0.7.3 (2014-01-02) #### Builder + Update ADD to use the image cache, based on a hash of the added content * Add error message for empty Dockerfile #### Documentation - Fix outdated link to the "Introduction" on www.docker.io + Update the docs to get wider when the screen does - Add information about needing to install LXC when using raw binaries * Update Fedora documentation to disentangle the docker and docker.io conflict * Add a note about using the new `-mtu` flag in several GCE zones + Add FrugalWare installation instructions + Add a more complete example of `docker run` - Fix API documentation for creating and starting Privileged containers - Add missing "name" parameter documentation on "/containers/create" * Add a mention of `lxc-checkconfig` as a way to check for some of the necessary kernel configuration - Update the 1.8 API documentation with some additions that were added to the docs for 1.7 #### Hack - Add missing libdevmapper dependency to the packagers documentation * Update minimum Go requirement to a hard line at Go 1.2+ * Many minor improvements to the Vagrantfile + Add ability to customize dockerinit search locations when compiling (to be used very sparingly only by packagers of platforms who require a nonstandard location) + Add coverprofile generation reporting - Add `-a` to our Go build flags, removing the need for recompiling the stdlib manually * Update Dockerfile to be more canonical and have less spurious warnings during build - Fix some miscellaneous `docker pull` progress bar display issues * Migrate more miscellaneous packages under the "pkg" folder * Update TextMate highlighting to 
automatically be enabled for files named "Dockerfile" * Reorganize syntax highlighting files under a common "contrib/syntax" directory * Update install.sh script (https://get.docker.io/) to not fail if busybox fails to download or run at the end of the Ubuntu/Debian installation * Add support for container names in bash completion #### Packaging + Add an official Docker client binary for Darwin (Mac OS X) * Remove empty "Vendor" string and added "License" on deb package + Add a stubbed version of "/etc/default/docker" in the deb package #### Runtime * Update layer application to extract tars in place, avoiding file churn while handling whiteouts - Fix permissiveness of mtime comparisons in tar handling (since GNU tar and Go tar do not yet support sub-second mtime precision) * Reimplement `docker top` in pure Go to work more consistently, and even inside Docker-in-Docker (thus removing the shell injection vulnerability present in some versions of `lxc-ps`) + Update `-H unix://` to work similarly to `-H tcp://` by inserting the default values for missing portions - Fix more edge cases regarding dockerinit and deleted or replaced docker or dockerinit files * Update container name validation to include '.' - Fix use of a symlink or non-absolute path as the argument to `-g` to work as expected * Update to handle external mounts outside of LXC, fixing many small mounting quirks and making future execution backends and other features simpler * Update to use proper box-drawing characters everywhere in `docker images -tree` * Move MTU setting from LXC configuration to directly use netlink * Add `-S` option to external tar invocation for more efficient spare file handling + Add arch/os info to User-Agent string, especially for registry requests + Add `-mtu` option to Docker daemon for configuring MTU - Fix `docker build` to exit with a non-zero exit code on error + Add `DOCKER_HOST` environment variable to configure the client `-H` flag without specifying it manually for every invocation ## 0.7.2 (2013-12-16) #### Runtime + Validate container names on creation with standard regex * Increase maximum image depth to 127 from 42 * Continue to move api endpoints to the job api + Add -bip flag to allow specification of dynamic bridge IP via CIDR - Allow bridge creation when ipv6 is not enabled on certain systems * Set hostname and IP address from within dockerinit * Drop capabilities from within dockerinit - Fix volumes on host when symlink is present the image - Prevent deletion of image if ANY container is depending on it even if the container is not running * Update docker push to use new progress display * Use os.Lstat to allow mounting unix sockets when inspecting volumes - Adjust handling of inactive user login - Add missing defines in devicemapper for older kernels - Allow untag operations with no container validation - Add auth config to docker build #### Documentation * Add more information about Docker logging + Add RHEL documentation * Add a direct example for changing the CMD that is run in a container * Update Arch installation documentation + Add section on Trusted Builds + Add Network documentation page #### Other + Add new cover bundle for providing code coverage reporting * Separate integration tests in bundles * Make Tianon the hack maintainer * Update mkimage-debootstrap with more tweaks for keeping images small * Use https to get the install script * Remove vendored dotcloud/tar now that Go 1.2 has been released ## 0.7.1 (2013-12-05) #### Documentation + Add @SvenDowideit as 
documentation maintainer + Add links example + Add documentation regarding ambassador pattern + Add Google Cloud Platform docs + Add dockerfile best practices * Update doc for RHEL * Update doc for registry * Update Postgres examples * Update doc for Ubuntu install * Improve remote api doc #### Runtime + Add hostconfig to docker inspect + Implement `docker log -f` to stream logs + Add env variable to disable kernel version warning + Add -format to `docker inspect` + Support bind-mount for files - Fix bridge creation on RHEL - Fix image size calculation - Make sure iptables are called even if the bridge already exists - Fix issue with stderr only attach - Remove init layer when destroying a container - Fix same port binding on different interfaces - `docker build` now returns the correct exit code - Fix `docker port` to display correct port - `docker build` now check that the dockerfile exists client side - `docker attach` now returns the correct exit code - Remove the name entry when the container does not exist #### Registry * Improve progress bars, add ETA for downloads * Simultaneous pulls now waits for the first to finish instead of failing - Tag only the top-layer image when pushing to registry - Fix issue with offline image transfer - Fix issue preventing using ':' in password for registry #### Other + Add pprof handler for debug + Create a Makefile * Use stdlib tar that now includes fix * Improve make.sh test script * Handle SIGQUIT on the daemon * Disable verbose during tests * Upgrade to go1.2 for official build * Improve unit tests * The test suite now runs all tests even if one fails * Refactor C in Go (Devmapper) - Fix OSX compilation ## 0.7.0 (2013-11-25) #### Notable features since 0.6.0 * Storage drivers: choose from aufs, device-mapper, or vfs. * Standard Linux support: docker now runs on unmodified Linux kernels and all major distributions. * Links: compose complex software stacks by connecting containers to each other. * Container naming: organize your containers by giving them memorable names. * Advanced port redirects: specify port redirects per interface, or keep sensitive ports private. * Offline transfer: push and pull images to the filesystem without losing information. * Quality: numerous bugfixes and small usability improvements. Significant increase in test coverage. ## 0.6.7 (2013-11-21) #### Runtime * Improve stability, fixes some race conditons * Skip the volumes mounted when deleting the volumes of container. 
* Fix layer size computation: handle hard links correctly * Use the work Path for docker cp CONTAINER:PATH * Fix tmp dir never cleanup * Speedup docker ps * More informative error message on name collisions * Fix nameserver regex * Always return long id's * Fix container restart race condition * Keep published ports on docker stop;docker start * Fix container networking on Fedora * Correctly express "any address" to iptables * Fix network setup when reconnecting to ghost container * Prevent deletion if image is used by a running container * Lock around read operations in graph #### RemoteAPI * Return full ID on docker rmi #### Client + Add -tree option to images + Offline image transfer * Exit with status 2 on usage error and display usage on stderr * Do not forward SIGCHLD to container * Use string timestamp for docker events -since #### Other * Update to go 1.2rc5 + Add /etc/default/docker support to upstart ## 0.6.6 (2013-11-06) #### Runtime * Ensure container name on register * Fix regression in /etc/hosts + Add lock around write operations in graph * Check if port is valid * Fix restart runtime error with ghost container networking + Add some more colors and animals to increase the pool of generated names * Fix issues in docker inspect + Escape apparmor confinement + Set environment variables using a file. * Prevent docker insert to erase something + Prevent DNS server conflicts in CreateBridgeIface + Validate bind mounts on the server side + Use parent image config in docker build * Fix regression in /etc/hosts #### Client + Add -P flag to publish all exposed ports + Add -notrunc and -q flags to docker history * Fix docker commit, tag and import usage + Add stars, trusted builds and library flags in docker search * Fix docker logs with tty #### RemoteAPI * Make /events API send headers immediately * Do not split last column docker top + Add size to history #### Other + Contrib: Desktop integration. Firefox usecase. 
+ Dockerfile: bump to go1.2rc3 ## 0.6.5 (2013-10-29) #### Runtime + Containers can now be named + Containers can now be linked together for service discovery + 'run -a', 'start -a' and 'attach' can forward signals to the container for better integration with process supervisors + Automatically start crashed containers after a reboot + Expose IP, port, and proto as separate environment vars for container links * Allow ports to be published to specific ips * Prohibit inter-container communication by default - Ignore ErrClosedPipe for stdin in Container.Attach - Remove unused field kernelVersion * Fix issue when mounting subdirectories of /mnt in container - Fix untag during removal of images * Check return value of syscall.Chdir when changing working directory inside dockerinit #### Client - Only pass stdin to hijack when needed to avoid closed pipe errors * Use less reflection in command-line method invocation - Monitor the tty size after starting the container, not prior - Remove useless os.Exit() calls after log.Fatal #### Hack + Add initial init scripts library and a safer Ubuntu packaging script that works for Debian * Add -p option to invoke debootstrap with http_proxy - Update install.sh with $sh_c to get sudo/su for modprobe * Update all the mkimage scripts to use --numeric-owner as a tar argument * Update hack/release.sh process to automatically invoke hack/make.sh and bail on build and test issues #### Other * Documentation: Fix the flags for nc in example * Testing: Remove warnings and prevent mount issues - Testing: Change logic for tty resize to avoid warning in tests - Builder: Fix race condition in docker build with verbose output - Registry: Fix content-type for PushImageJSONIndex method * Contrib: Improve helper tools to generate debian and Arch linux server images ## 0.6.4 (2013-10-16) #### Runtime - Add cleanup of container when Start() fails * Add better comments to utils/stdcopy.go * Add utils.Errorf for error logging + Add -rm to docker run for removing a container on exit - Remove error messages which are not actually errors - Fix `docker rm` with volumes - Fix some error cases where a HTTP body might not be closed - Fix panic with wrong dockercfg file - Fix the attach behavior with -i * Record termination time in state. 
- Use empty string so TempDir uses the OS's temp dir automatically - Make sure to close the network allocators + Autorestart containers by default * Bump vendor kr/pty to commit 3b1f6487b `(syscall.O_NOCTTY)` * lxc: Allow set_file_cap capability in container - Move run -rm to the cli only * Split stdout stderr * Always create a new session for the container #### Testing - Add aggregated docker-ci email report - Add cleanup to remove leftover containers * Add nightly release to docker-ci * Add more tests around auth.ResolveAuthConfig - Remove a few errors in tests - Catch errClosing error when TCP and UDP proxies are terminated * Only run certain tests with TESTFLAGS='-run TestName' make.sh * Prevent docker-ci to test closing PRs * Replace panic by log.Fatal in tests - Increase TestRunDetach timeout #### Documentation * Add initial draft of the Docker infrastructure doc * Add devenvironment link to CONTRIBUTING.md * Add `apt-get install curl` to Ubuntu docs * Add explanation for export restrictions * Add .dockercfg doc * Remove Gentoo install notes about #1422 workaround * Fix help text for -v option * Fix Ping endpoint documentation - Fix parameter names in docs for ADD command - Fix ironic typo in changelog * Various command fixes in postgres example * Document how to edit and release docs - Minor updates to `postgresql_service.rst` * Clarify LGTM process to contributors - Corrected error in the package name * Document what `vagrant up` is actually doing + improve doc search results * Cleanup whitespace in API 1.5 docs * use angle brackets in MAINTAINER example email * Update archlinux.rst + Changes to a new style for the docs. Includes version switcher. * Formatting, add information about multiline json * Improve registry and index REST API documentation - Replace deprecated upgrading reference to docker-latest.tgz, which hasn't been updated since 0.5.3 * Update Gentoo installation documentation now that we're in the portage tree proper * Cleanup and reorganize docs and tooling for contributors and maintainers - Minor spelling correction of protocoll -> protocol #### Contrib * Add vim syntax highlighting for Dockerfiles from @honza * Add mkimage-arch.sh * Reorganize contributed completion scripts to add zsh completion #### Hack * Add vagrant user to the docker group * Add proper bash completion for "docker push" * Add xz utils as a runtime dep * Add cleanup/refactor portion of #2010 for hack and Dockerfile updates + Add contrib/mkimage-centos.sh back (from #1621), and associated documentation link * Add several of the small make.sh fixes from #1920, and make the output more consistent and contributor-friendly + Add @tianon to hack/MAINTAINERS * Improve network performance for VirtualBox * Revamp install.sh to be usable by more people, and to use official install methods whenever possible (apt repo, portage tree, etc.) - Fix contrib/mkimage-debian.sh apt caching prevention + Add Dockerfile.tmLanguage to contrib * Configured FPM to make /etc/init/docker.conf a config file * Enable SSH Agent forwarding in Vagrant VM * Several small tweaks/fixes for contrib/mkimage-debian.sh #### Other - Builder: Abort build if mergeConfig returns an error and fix duplicate error message - Packaging: Remove deprecated packaging directory - Registry: Use correct auth config when logging in. 
- Registry: Fix the error message so it is the same as the regex ## 0.6.3 (2013-09-23) #### Packaging * Add 'docker' group on install for ubuntu package * Update tar vendor dependency * Download apt key over HTTPS #### Runtime - Only copy and change permissions on non-bindmount volumes * Allow multiple volumes-from - Fix HTTP imports from STDIN #### Documentation * Update section on extracting the docker binary after build * Update development environment docs for new build process * Remove 'base' image from documentation #### Other - Client: Fix detach issue - Registry: Update regular expression to match index ## 0.6.2 (2013-09-17) #### Runtime + Add domainname support + Implement image filtering with path.Match * Remove unnecesasry warnings * Remove os/user dependency * Only mount the hostname file when the config exists * Handle signals within the `docker login` command - UID and GID are now also applied to volumes - `docker start` set error code upon error - `docker run` set the same error code as the process started #### Builder + Add -rm option in order to remove intermediate containers * Allow multiline for the RUN instruction #### Registry * Implement login with private registry - Fix push issues #### Other + Hack: Vendor all dependencies * Remote API: Bump to v1.5 * Packaging: Break down hack/make.sh into small scripts, one per 'bundle': test, binary, ubuntu etc. * Documentation: General improvments ## 0.6.1 (2013-08-23) #### Registry * Pass "meta" headers in API calls to the registry #### Packaging - Use correct upstart script with new build tool - Use libffi-dev, don`t build it from sources - Remove duplicate mercurial install command ## 0.6.0 (2013-08-22) #### Runtime + Add lxc-conf flag to allow custom lxc options + Add an option to set the working directory * Add Image name to LogEvent tests + Add -privileged flag and relevant tests, docs, and examples * Add websocket support to /container//attach/ws * Add warning when net.ipv4.ip_forwarding = 0 * Add hostname to environment * Add last stable version in `docker version` - Fix race conditions in parallel pull - Fix Graph ByParent() to generate list of child images per parent image. - Fix typo: fmt.Sprint -> fmt.Sprintf - Fix small \n error un docker build * Fix to "Inject dockerinit at /.dockerinit" * Fix #910. print user name to docker info output * Use Go 1.1.2 for dockerbuilder * Use ranged for loop on channels - Use utils.ParseRepositoryTag instead of strings.Split(name, ":") in server.ImageDelete - Improve CMD, ENTRYPOINT, and attach docs. - Improve connect message with socket error - Load authConfig only when needed and fix useless WARNING - Show tag used when image is missing * Apply volumes-from before creating volumes - Make docker run handle SIGINT/SIGTERM - Prevent crash when .dockercfg not readable - Install script should be fetched over https, not http. * API, issue 1471: Use groups for socket permissions - Correctly detect IPv4 forwarding * Mount /dev/shm as a tmpfs - Switch from http to https for get.docker.io * Let userland proxy handle container-bound traffic * Update the Docker CLI to specify a value for the "Host" header. 
- Change network range to avoid conflict with EC2 DNS - Reduce connect and read timeout when pinging the registry * Parallel pull - Handle ip route showing mask-less IP addresses * Allow ENTRYPOINT without CMD - Always consider localhost as a domain name when parsing the FQN repos name * Refactor checksum #### Documentation * Add MongoDB image example * Add instructions for creating and using the docker group * Add sudo to examples and installation to documentation * Add ufw doc * Add a reference to ps -a * Add information about Docker`s high level tools over LXC. * Fix typo in docs for docker run -dns * Fix a typo in the ubuntu installation guide * Fix to docs regarding adding docker groups * Update default -H docs * Update readme with dependencies for building * Update amazon.rst to explain that Vagrant is not necessary for running Docker on ec2 * PostgreSQL service example in documentation * Suggest installing linux-headers by default. * Change the twitter handle * Clarify Amazon EC2 installation * 'Base' image is deprecated and should no longer be referenced in the docs. * Move note about officially supported kernel - Solved the logo being squished in Safari #### Builder + Add USER instruction do Dockerfile + Add workdir support for the Buildfile * Add no cache for docker build - Fix docker build and docker events output - Only count known instructions as build steps - Make sure ENV instruction within build perform a commit each time - Forbid certain paths within docker build ADD - Repository name (and optionally a tag) in build usage - Make sure ADD will create everything in 0755 #### Remote API * Sort Images by most recent creation date. * Reworking opaque requests in registry module * Add image name in /events * Use mime pkg to parse Content-Type * 650 http utils and user agent field #### Hack + Bash Completion: Limit commands to containers of a relevant state * Add docker dependencies coverage testing into docker-ci #### Packaging + Docker-brew 0.5.2 support and memory footprint reduction * Add new docker dependencies into docker-ci - Revert "docker.upstart: avoid spawning a `sh` process" + Docker-brew and Docker standard library + Release docker with docker * Fix the upstart script generated by get.docker.io * Enabled the docs to generate manpages. * Revert Bind daemon to 0.0.0.0 in Vagrant. 
#### Register * Improve auth push * Registry unit tests + mock registry #### Tests * Improve TestKillDifferentUser to prevent timeout on buildbot - Fix typo in TestBindMounts (runContainer called without image) * Improve TestGetContainersTop so it does not rely on sleep * Relax the lo interface test to allow iface index != 1 * Add registry functional test to docker-ci * Add some tests in server and utils #### Other * Contrib: bash completion script * Client: Add docker cp command and copy api endpoint to copy container files/folders to the host * Don`t read from stdout when only attached to stdin ## 0.5.3 (2013-08-13) #### Runtime * Use docker group for socket permissions - Spawn shell within upstart script - Handle ip route showing mask-less IP addresses - Add hostname to environment #### Builder - Make sure ENV instruction within build perform a commit each time ## 0.5.2 (2013-08-08) * Builder: Forbid certain paths within docker build ADD - Runtime: Change network range to avoid conflict with EC2 DNS * API: Change daemon to listen on unix socket by default ## 0.5.1 (2013-07-30) #### Runtime + Add `ps` args to `docker top` + Add support for container ID files (pidfile like) + Add container=lxc in default env + Support networkless containers with `docker run -n` and `docker -d -b=none` * Stdout/stderr logs are now stored in the same file as JSON * Allocate a /16 IP range by default, with fallback to /24. Try 12 ranges instead of 3. * Change .dockercfg format to json and support multiple auth remote - Do not override volumes from config - Fix issue with EXPOSE override #### API + Docker client now sets useragent (RFC 2616) + Add /events endpoint #### Builder + ADD command now understands URLs + CmdAdd and CmdEnv now respect Dockerfile-set ENV variables - Create directories with 755 instead of 700 within ADD instruction #### Hack * Simplify unit tests with helpers * Improve docker.upstart event * Add coverage testing into docker-ci ## 0.5.0 (2013-07-17) #### Runtime + List all processes running inside a container with 'docker top' + Host directories can be mounted as volumes with 'docker run -v' + Containers can expose public UDP ports (eg, '-p 123/udp') + Optionally specify an exact public port (eg. '-p 80:4500') * 'docker login' supports additional options - Dont save a container`s hostname when committing an image. #### Registry + New image naming scheme inspired by Go packaging convention allows arbitrary combinations of registries - Fix issues when uploading images to a private registry #### Builder + ENTRYPOINT instruction sets a default binary entry point to a container + VOLUME instruction marks a part of the container as persistent data * 'docker build' displays the full output of a build by default ## 0.4.8 (2013-07-01) + Builder: New build operation ENTRYPOINT adds an executable entry point to the container. - Runtime: Fix a bug which caused 'docker run -d' to no longer print the container ID. 
- Tests: Fix issues in the test suite ## 0.4.7 (2013-06-28) #### Remote API * The progress bar updates faster when downloading and uploading large files - Fix a bug in the optional unix socket transport #### Runtime * Improve detection of kernel version + Host directories can be mounted as volumes with 'docker run -b' - fix an issue when only attaching to stdin * Use 'tar --numeric-owner' to avoid uid mismatch across multiple hosts #### Hack * Improve test suite and dev environment * Remove dependency on unit tests on 'os/user' #### Other * Registry: easier push/pull to a custom registry + Documentation: add terminology section ## 0.4.6 (2013-06-22) - Runtime: fix a bug which caused creation of empty images (and volumes) to crash. ## 0.4.5 (2013-06-21) + Builder: 'docker build git://URL' fetches and builds a remote git repository * Runtime: 'docker ps -s' optionally prints container size * Tests: improved and simplified - Runtime: fix a regression introduced in 0.4.3 which caused the logs command to fail. - Builder: fix a regression when using ADD with single regular file. ## 0.4.4 (2013-06-19) - Builder: fix a regression introduced in 0.4.3 which caused builds to fail on new clients. ## 0.4.3 (2013-06-19) #### Builder + ADD of a local file will detect tar archives and unpack them * ADD improvements: use tar for copy + automatically unpack local archives * ADD uses tar/untar for copies instead of calling 'cp -ar' * Fix the behavior of ADD to be (mostly) reverse-compatible, predictable and well-documented. - Fix a bug which caused builds to fail if ADD was the first command * Nicer output for 'docker build' #### Runtime * Remove bsdtar dependency * Add unix socket and multiple -H support * Prevent rm of running containers * Use go1.1 cookiejar - Fix issue detaching from running TTY container - Forbid parralel push/pull for a single image/repo. Fixes #311 - Fix race condition within Run command when attaching. #### Client * HumanReadable ProgressBar sizes in pull * Fix docker version`s git commit output #### API * Send all tags on History API call * Add tag lookup to history command. 
Fixes #882 #### Documentation - Fix missing command in irc bouncer example ## 0.4.2 (2013-06-17) - Packaging: Bumped version to work around an Ubuntu bug ## 0.4.1 (2013-06-17) #### Remote Api + Add flag to enable cross domain requests + Add images and containers sizes in docker ps and docker images #### Runtime + Configure dns configuration host-wide with 'docker -d -dns' + Detect faulty DNS configuration and replace it with a public default + Allow docker run : + You can now specify public port (ex: -p 80:4500) * Improve image removal to garbage-collect unreferenced parents #### Client * Allow multiple params in inspect * Print the container id before the hijack in `docker run` #### Registry * Add regexp check on repo`s name * Move auth to the client - Remove login check on pull #### Other * Vagrantfile: Add the rest api port to vagrantfile`s port_forward * Upgrade to Go 1.1 - Builder: don`t ignore last line in Dockerfile when it doesn`t end with \n ## 0.4.0 (2013-06-03) #### Builder + Introducing Builder + 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile #### Remote API + Introducing Remote API + control Docker programmatically using a simple HTTP/json API #### Runtime * Various reliability and usability improvements ## 0.3.4 (2013-05-30) #### Builder + 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile + 'docker build -t FOO' applies the tag FOO to the newly built container. #### Runtime + Interactive TTYs correctly handle window resize * Fix how configuration is merged between layers #### Remote API + Split stdout and stderr on 'docker run' + Optionally listen on a different IP and port (use at your own risk) #### Documentation * Improve install instructions. ## 0.3.3 (2013-05-23) - Registry: Fix push regression - Various bugfixes ## 0.3.2 (2013-05-09) #### Registry * Improve the checksum process * Use the size to have a good progress bar while pushing * Use the actual archive if it exists in order to speed up the push - Fix error 400 on push #### Runtime * Store the actual archive on commit ## 0.3.1 (2013-05-08) #### Builder + Implement the autorun capability within docker builder + Add caching to docker builder + Add support for docker builder with native API as top level command + Implement ENV within docker builder - Check the command existance prior create and add Unit tests for the case * use any whitespaces instead of tabs #### Runtime + Add go version to debug infos * Kernel version - don`t show the dash if flavor is empty #### Registry + Add docker search top level command in order to search a repository - Fix pull for official images with specific tag - Fix issue when login in with a different user and trying to push * Improve checksum - async calculation #### Images + Output graph of images to dot (graphviz) - Fix ByParent function #### Documentation + New introduction and high-level overview + Add the documentation for docker builder - CSS fix for docker documentation to make REST API docs look better. - Fix CouchDB example page header mistake - Fix README formatting * Update www.docker.io website. 
#### Other + Website: new high-level overview - Makefile: Swap "go get" for "go get -d", especially to compile on go1.1rc * Packaging: packaging ubuntu; issue #510: Use golang-stable PPA package to build docker ## 0.3.0 (2013-05-06) #### Runtime - Fix the command existence check - strings.Split may return an empty string on no match - Fix an index out of range crash if cgroup memory is not #### Documentation * Various improvements + New example: sharing data between 2 couchdb databases #### Other * Vagrant: Use only one deb line in /etc/apt + Registry: Implement the new registry ## 0.2.2 (2013-05-03) + Support for data volumes ('docker run -v=PATH') + Share data volumes between containers ('docker run -volumes-from') + Improve documentation * Upgrade to Go 1.0.3 * Various upgrades to the dev environment for contributors ## 0.2.1 (2013-05-01) + 'docker commit -run' bundles a layer with default runtime options: command, ports etc. * Improve install process on Vagrant + New Dockerfile operation: "maintainer" + New Dockerfile operation: "expose" + New Dockerfile operation: "cmd" + Contrib script to build a Debian base layer + 'docker -d -r': restart crashed containers at daemon startup * Runtime: improve test coverage ## 0.2.0 (2013-04-23) - Runtime: ghost containers can be killed and waited for * Documentation: update install instructions - Packaging: fix Vagrantfile - Development: automate releasing binaries and ubuntu packages + Add a changelog - Various bugfixes ## 0.1.8 (2013-04-22) - Dynamically detect cgroup capabilities - Issue stability warning on kernels <3.8 - 'docker push' buffers on disk instead of memory - Fix 'docker diff' for removed files - Fix 'docker stop' for ghost containers - Fix handling of pidfile - Various bugfixes and stability improvements ## 0.1.7 (2013-04-18) - Container ports are available on localhost - 'docker ps' shows allocated TCP ports - Contributors can run 'make hack' to start a continuous integration VM - Streamline ubuntu packaging & uploading - Various bugfixes and stability improvements ## 0.1.6 (2013-04-17) - Record the author of an image with 'docker commit -author' ## 0.1.5 (2013-04-17) - Disable standalone mode - Use a custom DNS resolver with 'docker -d -dns' - Detect ghost containers - Improve diagnosis of missing system capabilities - Allow disabling memory limits at compile time - Add debian packaging - Documentation: installing on Arch Linux - Documentation: running Redis on docker - Fix lxc 0.9 compatibility - Automatically load aufs module - Various bugfixes and stability improvements ## 0.1.4 (2013-04-09) - Full support for TTY emulation - Detach from a TTY session with the escape sequence `C-p C-q` - Various bugfixes and stability improvements - Minor UI improvements - Automatically create our own bridge interface 'docker0' ## 0.1.3 (2013-04-04) - Choose TCP frontend port with '-p :PORT' - Layer format is versioned - Major reliability improvements to the process manager - Various bugfixes and stability improvements ## 0.1.2 (2013-04-03) - Set container hostname with 'docker run -h' - Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]' - Various bugfixes and stability improvements - UI polish - Progress bar on push/pull - Use XZ compression by default - Make IP allocator lazy ## 0.1.1 (2013-03-31) - Display shorthand IDs for convenience - Stabilize process management - Layers can include a commit message - Simplified 'docker attach' - Fix support for re-attaching - Various bugfixes and stability improvements - Auto-download at
run - Auto-login on push - Beefed up documentation ## 0.1.0 (2013-03-23) Initial public release - Implement registry in order to push/pull images - TCP port allocation - Fix termcaps on Linux - Add documentation - Add Vagrant support with Vagrantfile - Add unit tests - Add repository/tags to ease image management - Improve the layer implementation docker-0.9.1/CONTRIBUTING.md0000644000175000017500000001766712314376205013355 0ustar tagtag# Contributing to Docker Want to hack on Docker? Awesome! Here are instructions to get you started. They are probably not perfect, please let us know if anything feels wrong or incomplete. ## Reporting Issues When reporting [issues](https://github.com/dotcloud/docker/issues) on GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc), the output of `uname -a` and the output of `docker version` along with the output of `docker info`. Please include the steps required to reproduce the problem if possible and applicable. This information will help us review and fix your issue faster. ## Build Environment For instructions on setting up your development environment, please see our dedicated [dev environment setup docs](http://docs.docker.io/en/latest/contributing/devenvironment/). ## Contribution guidelines ### Pull requests are always welcome We are always thrilled to receive pull requests, and do our best to process them as fast as possible. Not sure if that typo is worth a pull request? Do it! We will appreciate it. If your pull request is not accepted on the first try, don't be discouraged! If there's a problem with the implementation, hopefully you received feedback on what to improve. We're trying very hard to keep Docker lean and focused. We don't want it to do everything for everybody. This means that we might decide against incorporating a new feature. However, there might be a way to implement that feature *on top of* docker. ### Discuss your design on the mailing list We recommend discussing your plans [on the mailing list](https://groups.google.com/forum/?fromgroups#!forum/docker-dev) before starting to code - especially for more ambitious contributions. This gives other contributors a chance to point you in the right direction, give feedback on your design, and maybe point out if someone else is working on the same thing. ### Create issues... Any significant improvement should be documented as [a GitHub issue](https://github.com/dotcloud/docker/issues) before anybody starts working on it. ### ...but check for existing issues first! Please take a moment to check that an issue doesn't already exist documenting your bug report or improvement proposal. If it does, it never hurts to add a quick "+1" or "I have this problem too". This will help prioritize the most common problems and requests. ### Conventions Fork the repo and make changes on your fork in a feature branch: - If it's a bugfix branch, name it XXX-something where XXX is the number of the issue - If it's a feature branch, create an enhancement issue to announce your intentions, and name it XXX-something where XXX is the number of the issue. Submit unit tests for your changes. Go has a great test framework built in; use it! Take a look at existing tests for inspiration. Run the full test suite on your branch before submitting a pull request. Update the documentation when creating or modifying features. Test your documentation changes for clarity, concision, and correctness, as well as a clean documentation build. 
See ``docs/README.md`` for more information on building the docs and how docs get released. Write clean code. Universally formatted code promotes ease of writing, reading, and maintenance. Always run `go fmt` before committing your changes. Most editors have plugins that do this automatically, and there's also a git pre-commit hook: ``` curl -o .git/hooks/pre-commit https://raw.github.com/edsrzf/gofmt-git-hook/master/fmt-check && chmod +x .git/hooks/pre-commit ``` Pull requests descriptions should be as clear as possible and include a reference to all the issues that they address. Pull requests must not contain commits from other users or branches. Code review comments may be added to your pull request. Discuss, then make the suggested modifications and push additional commits to your feature branch. Be sure to post a comment after pushing. The new commits will show up in the pull request automatically, but the reviewers will not be notified unless you comment. Before the pull request is merged, make sure that you squash your commits into logical units of work using `git rebase -i` and `git push -f`. After every commit the test suite should be passing. Include documentation changes in the same commit so that a revert would remove all traces of the feature or fix. Commits that fix or close an issue should include a reference like `Closes #XXX` or `Fixes #XXX`, which will automatically close the issue when merged. Add your name to the AUTHORS file, but make sure the list is sorted and your name and email address match your git configuration. The AUTHORS file is regenerated occasionally from the git commit history, so a mismatch may result in your changes being overwritten. ### Merge approval Docker maintainers use LGTM (looks good to me) in comments on the code review to indicate acceptance. A change requires LGTMs from an absolute majority of the maintainers of each component affected. For example, if a change affects docs/ and registry/, it needs an absolute majority from the maintainers of docs/ AND, separately, an absolute majority of the maintainers of registry. For more details see [MAINTAINERS.md](hack/MAINTAINERS.md) ### Sign your work The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch. The rules are pretty simple: if you can certify the below: ``` Docker Developer Certificate of Origin 1.1 By making a contribution to the Docker Project ("Project"), I represent and warrant that: a. The contribution was created in whole or in part by me and I have the right to submit the contribution on my own behalf or on behalf of a third party who has authorized me to submit this contribution to the Project; or b. The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right and authorization to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license) that I have identified in the contribution; or c. The contribution was provided directly to me by some other person who represented and warranted (a) or (b) and I have not modified it. d. 
I understand and agree that this Project and the contribution are publicly known and that a record of the contribution (including all personal information I submit with it, including my sign-off record) is maintained indefinitely and may be redistributed consistent with this Project or the open source license(s) involved. ``` then you just add a line to every git commit message: Docker-DCO-1.1-Signed-off-by: Joe Smith (github: github_handle) using your real name (sorry, no pseudonyms or anonymous contributions.) One way to automate this, is customise your get ``commit.template`` by adding a ``prepare-commit-msg`` hook to your docker checkout: ``` curl -o .git/hooks/prepare-commit-msg https://raw.github.com/dotcloud/docker/master/contrib/prepare-commit-msg.hook && chmod +x .git/hooks/prepare-commit-msg ``` * Note: the above script expects to find your GitHub user name in ``git config --get github.user`` #### Small patch exception There are several exceptions to the signing requirement. Currently these are: * Your patch fixes spelling or grammar errors. * Your patch is a single line change to documentation. If you have any questions, please refer to the FAQ in the [docs](http://docs.docker.io) ### How can I become a maintainer? * Step 1: learn the component inside out * Step 2: make yourself useful by contributing code, bugfixes, support etc. * Step 3: volunteer on the irc channel (#docker@freenode) * Step 4: propose yourself at a scheduled docker meeting in #docker-dev Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available. You don't have to be a maintainer to make a difference on the project! docker-0.9.1/image.go0000644000175000017500000001667612314376205012534 0ustar tagtagpackage docker import ( "crypto/rand" "encoding/hex" "encoding/json" "fmt" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/graphdriver" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "io" "io/ioutil" "os" "path" "strconv" "strings" "time" ) type Image struct { ID string `json:"id"` Parent string `json:"parent,omitempty"` Comment string `json:"comment,omitempty"` Created time.Time `json:"created"` Container string `json:"container,omitempty"` ContainerConfig runconfig.Config `json:"container_config,omitempty"` DockerVersion string `json:"docker_version,omitempty"` Author string `json:"author,omitempty"` Config *runconfig.Config `json:"config,omitempty"` Architecture string `json:"architecture,omitempty"` OS string `json:"os,omitempty"` graph *Graph Size int64 } func LoadImage(root string) (*Image, error) { // Load the json data jsonData, err := ioutil.ReadFile(jsonPath(root)) if err != nil { return nil, err } img := &Image{} if err := json.Unmarshal(jsonData, img); err != nil { return nil, err } if err := ValidateID(img.ID); err != nil { return nil, err } if buf, err := ioutil.ReadFile(path.Join(root, "layersize")); err != nil { if !os.IsNotExist(err) { return nil, err } // If the layersize file does not exist then set the size to a negative number // because a layer size of 0 (zero) is valid img.Size = -1 } else { size, err := strconv.Atoi(string(buf)) if err != nil { return nil, err } img.Size = int64(size) } return img, nil } func StoreImage(img *Image, jsonData []byte, layerData archive.ArchiveReader, root, layer string) error { // Store the layer var ( size int64 err error driver = img.graph.driver ) if err := os.MkdirAll(layer, 0755); err != nil { return err } // If layerData is not nil, unpack it into the new layer 
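// Drivers that implement graphdriver.Differ apply the diff natively and report
// its size through DiffSize; for all other drivers the tarball is unpacked with
// archive.ApplyLayer and the size is computed by walking the tree (when the
// image has no parent) or by diffing the new layer against its parent.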
if layerData != nil { if differ, ok := driver.(graphdriver.Differ); ok { if err := differ.ApplyDiff(img.ID, layerData); err != nil { return err } if size, err = differ.DiffSize(img.ID); err != nil { return err } } else { start := time.Now().UTC() utils.Debugf("Start untar layer") if err := archive.ApplyLayer(layer, layerData); err != nil { return err } utils.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) if img.Parent == "" { if size, err = utils.TreeSize(layer); err != nil { return err } } else { parent, err := driver.Get(img.Parent) if err != nil { return err } defer driver.Put(img.Parent) changes, err := archive.ChangesDirs(layer, parent) if err != nil { return err } size = archive.ChangesSize(layer, changes) } } } img.Size = size if err := img.SaveSize(root); err != nil { return err } // If raw json is provided, then use it if jsonData != nil { if err := ioutil.WriteFile(jsonPath(root), jsonData, 0600); err != nil { return err } } else { if jsonData, err = json.Marshal(img); err != nil { return err } if err := ioutil.WriteFile(jsonPath(root), jsonData, 0600); err != nil { return err } } return nil } // SaveSize stores the current `size` value of `img` in the directory `root`. func (img *Image) SaveSize(root string) error { if err := ioutil.WriteFile(path.Join(root, "layersize"), []byte(strconv.Itoa(int(img.Size))), 0600); err != nil { return fmt.Errorf("Error storing image size in %s/layersize: %s", root, err) } return nil } func jsonPath(root string) string { return path.Join(root, "json") } // TarLayer returns a tar archive of the image's filesystem layer. func (img *Image) TarLayer() (arch archive.Archive, err error) { if img.graph == nil { return nil, fmt.Errorf("Can't load storage driver for unregistered image %s", img.ID) } driver := img.graph.driver if differ, ok := driver.(graphdriver.Differ); ok { return differ.Diff(img.ID) } imgFs, err := driver.Get(img.ID) if err != nil { return nil, err } defer func() { if err != nil { driver.Put(img.ID) } }() if img.Parent == "" { archive, err := archive.Tar(imgFs, archive.Uncompressed) if err != nil { return nil, err } return utils.NewReadCloserWrapper(archive, func() error { err := archive.Close() driver.Put(img.ID) return err }), nil } parentFs, err := driver.Get(img.Parent) if err != nil { return nil, err } defer driver.Put(img.Parent) changes, err := archive.ChangesDirs(imgFs, parentFs) if err != nil { return nil, err } archive, err := archive.ExportChanges(imgFs, changes) if err != nil { return nil, err } return utils.NewReadCloserWrapper(archive, func() error { err := archive.Close() driver.Put(img.ID) return err }), nil } func ValidateID(id string) error { if id == "" { return fmt.Errorf("Image id can't be empty") } if strings.Contains(id, ":") { return fmt.Errorf("Invalid character in image id: ':'") } return nil } func GenerateID() string { for { id := make([]byte, 32) if _, err := io.ReadFull(rand.Reader, id); err != nil { panic(err) // This shouldn't happen } value := hex.EncodeToString(id) // if we try to parse the truncated for as an int and we don't have // an error then the value is all numberic and causes issues when // used as a hostname. ref #3869 if _, err := strconv.Atoi(utils.TruncateID(value)); err == nil { continue } return value } } // Image includes convenience proxy functions to its graph // These functions will return an error if the image is not registered // (ie. 
if image.graph == nil) func (img *Image) History() ([]*Image, error) { var parents []*Image if err := img.WalkHistory( func(img *Image) error { parents = append(parents, img) return nil }, ); err != nil { return nil, err } return parents, nil } func (img *Image) WalkHistory(handler func(*Image) error) (err error) { currentImg := img for currentImg != nil { if handler != nil { if err := handler(currentImg); err != nil { return err } } currentImg, err = currentImg.GetParent() if err != nil { return fmt.Errorf("Error while getting parent image: %v", err) } } return nil } func (img *Image) GetParent() (*Image, error) { if img.Parent == "" { return nil, nil } if img.graph == nil { return nil, fmt.Errorf("Can't lookup parent of unregistered image") } return img.graph.Get(img.Parent) } func (img *Image) root() (string, error) { if img.graph == nil { return "", fmt.Errorf("Can't lookup root of unregistered image") } return img.graph.imageRoot(img.ID), nil } func (img *Image) getParentsSize(size int64) int64 { parentImage, err := img.GetParent() if err != nil || parentImage == nil { return size } size += parentImage.Size return parentImage.getParentsSize(size) } // Depth returns the number of parents for a // current image func (img *Image) Depth() (int, error) { var ( count = 0 parent = img err error ) for parent != nil { count++ parent, err = parent.GetParent() if err != nil { return -1, err } } return count, nil } // Build an Image object from raw json data func NewImgJSON(src []byte) (*Image, error) { ret := &Image{} utils.Debugf("Json string: {%s}", src) // FIXME: Is there a cleaner way to "purify" the input json? if err := json.Unmarshal(src, ret); err != nil { return nil, err } return ret, nil } docker-0.9.1/.gitignore0000644000175000017500000000061012314376205013070 0ustar tagtag# Docker project generated files to ignore # if you want to ignore files created by your editor/tools, # please consider a global .gitignore https://help.github.com/articles/ignoring-files .vagrant* bin docker/docker .*.swp a.out *.orig build_src .flymake* .idea .DS_Store docs/_build docs/_static docs/_templates .gopath/ .dotcloud *.test bundles/ .hg/ .git/ vendor/pkg/ pyenv Vagrantfile docker-0.9.1/server_unit_test.go0000644000175000017500000000474712314376205015052 0ustar tagtagpackage docker import ( "github.com/dotcloud/docker/utils" "testing" "time" ) func TestPools(t *testing.T) { srv := &Server{ pullingPool: make(map[string]chan struct{}), pushingPool: make(map[string]chan struct{}), } if _, err := srv.poolAdd("pull", "test1"); err != nil { t.Fatal(err) } if _, err := srv.poolAdd("pull", "test2"); err != nil { t.Fatal(err) } if _, err := srv.poolAdd("push", "test1"); err == nil || err.Error() != "pull test1 is already in progress" { t.Fatalf("Expected `pull test1 is already in progress`") } if _, err := srv.poolAdd("pull", "test1"); err == nil || err.Error() != "pull test1 is already in progress" { t.Fatalf("Expected `pull test1 is already in progress`") } if _, err := srv.poolAdd("wait", "test3"); err == nil || err.Error() != "Unknown pool type" { t.Fatalf("Expected `Unknown pool type`") } if err := srv.poolRemove("pull", "test2"); err != nil { t.Fatal(err) } if err := srv.poolRemove("pull", "test2"); err != nil { t.Fatal(err) } if err := srv.poolRemove("pull", "test1"); err != nil { t.Fatal(err) } if err := srv.poolRemove("push", "test1"); err != nil { t.Fatal(err) } if err := srv.poolRemove("wait", "test3"); err == nil || err.Error() != "Unknown pool type" { t.Fatalf("Expected `Unknown pool type`") } } 
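TestPools above pins down the rule that at most one pull or push can be in flight per image name, and that both pools are checked before a new operation is admitted. The sketch below is illustrative only — the names and types are invented for the example rather than taken from server.go — but it shows how that bookkeeping can be kept in mutex-guarded maps of channels:

```
// Illustrative sketch, not part of docker-0.9.1.
package main

import (
	"fmt"
	"sync"
)

type pools struct {
	sync.Mutex
	pulling map[string]chan struct{}
	pushing map[string]chan struct{}
}

// add registers an in-flight pull or push for name, refusing duplicates in
// either pool so the same image is never pulled and pushed concurrently.
func (p *pools) add(kind, name string) (chan struct{}, error) {
	p.Lock()
	defer p.Unlock()
	if _, ok := p.pulling[name]; ok {
		return nil, fmt.Errorf("pull %s is already in progress", name)
	}
	if _, ok := p.pushing[name]; ok {
		return nil, fmt.Errorf("push %s is already in progress", name)
	}
	c := make(chan struct{})
	switch kind {
	case "pull":
		p.pulling[name] = c
	case "push":
		p.pushing[name] = c
	default:
		return nil, fmt.Errorf("Unknown pool type")
	}
	return c, nil
}

// remove drops the entry and closes its channel so any waiters unblock.
func (p *pools) remove(kind, name string) error {
	p.Lock()
	defer p.Unlock()
	var m map[string]chan struct{}
	switch kind {
	case "pull":
		m = p.pulling
	case "push":
		m = p.pushing
	default:
		return fmt.Errorf("Unknown pool type")
	}
	if c, ok := m[name]; ok {
		close(c)
		delete(m, name)
	}
	return nil
}

func main() {
	p := &pools{pulling: map[string]chan struct{}{}, pushing: map[string]chan struct{}{}}
	if _, err := p.add("pull", "busybox"); err != nil {
		fmt.Println(err)
	}
	if _, err := p.add("push", "busybox"); err != nil {
		fmt.Println(err) // pull busybox is already in progress
	}
	p.remove("pull", "busybox")
}
```

Closing the channel on removal gives a duplicate caller something to block on until the first operation finishes, which is one plausible reason the real poolAdd hands back a channel at all.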
func TestLogEvent(t *testing.T) { srv := &Server{ events: make([]utils.JSONMessage, 0, 64), listeners: make(map[string]chan utils.JSONMessage), } srv.LogEvent("fakeaction", "fakeid", "fakeimage") listener := make(chan utils.JSONMessage) srv.Lock() srv.listeners["test"] = listener srv.Unlock() srv.LogEvent("fakeaction2", "fakeid", "fakeimage") numEvents := len(srv.GetEvents()) if numEvents != 2 { t.Fatalf("Expected 2 events, found %d", numEvents) } go func() { time.Sleep(200 * time.Millisecond) srv.LogEvent("fakeaction3", "fakeid", "fakeimage") time.Sleep(200 * time.Millisecond) srv.LogEvent("fakeaction4", "fakeid", "fakeimage") }() setTimeout(t, "Listening for events timed out", 2*time.Second, func() { for i := 2; i < 4; i++ { event := <-listener if event != srv.GetEvents()[i] { t.Fatalf("Event received it different than expected") } } }) } // FIXME: this is duplicated from integration/commands_test.go func setTimeout(t *testing.T, msg string, d time.Duration, f func()) { c := make(chan bool) // Make sure we are not too long go func() { time.Sleep(d) c <- true }() go func() { f() c <- false }() if <-c && msg != "" { t.Fatal(msg) } } docker-0.9.1/builtins/0000755000175000017500000000000012314376205012734 5ustar tagtagdocker-0.9.1/builtins/builtins.go0000644000175000017500000000205712314376205015120 0ustar tagtagpackage builtins import ( "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker" "github.com/dotcloud/docker/api" "github.com/dotcloud/docker/networkdriver/lxc" ) func Register(eng *engine.Engine) { daemon(eng) remote(eng) } // remote: a RESTful api for cross-docker communication func remote(eng *engine.Engine) { eng.Register("serveapi", api.ServeApi) } // daemon: a default execution and storage backend for Docker on Linux, // with the following underlying components: // // * Pluggable storage drivers including aufs, vfs, lvm and btrfs. // * Pluggable execution drivers including lxc and chroot. // // In practice `daemon` still includes most core Docker components, including: // // * The reference registry client implementation // * Image management // * The build facility // * Logging // // These components should be broken off into plugins of their own. 
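// Once registered, a handler is invoked by name through the engine; for
// example (illustrative only, mirroring how the API server drives jobs):
//
//	if err := eng.Job("version").Run(); err != nil {
//		// handle error
//	}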
// func daemon(eng *engine.Engine) { eng.Register("initserver", docker.InitServer) eng.Register("init_networkdriver", lxc.InitDriver) eng.Register("version", docker.GetVersion) } docker-0.9.1/api/0000755000175000017500000000000012314376205011654 5ustar tagtagdocker-0.9.1/api/common.go0000644000175000017500000000222312314376205013472 0ustar tagtagpackage api import ( "fmt" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/utils" "mime" "strings" ) const ( APIVERSION = "1.10" DEFAULTHTTPHOST = "127.0.0.1" DEFAULTUNIXSOCKET = "/var/run/docker.sock" ) func ValidateHost(val string) (string, error) { host, err := utils.ParseHost(DEFAULTHTTPHOST, DEFAULTUNIXSOCKET, val) if err != nil { return val, err } return host, nil } //TODO remove, used on < 1.5 in getContainersJSON func displayablePorts(ports *engine.Table) string { result := []string{} for _, port := range ports.Data { if port.Get("IP") == "" { result = append(result, fmt.Sprintf("%d/%s", port.GetInt("PublicPort"), port.Get("Type"))) } else { result = append(result, fmt.Sprintf("%s:%d->%d/%s", port.Get("IP"), port.GetInt("PublicPort"), port.GetInt("PrivatePort"), port.Get("Type"))) } } return strings.Join(result, ", ") } func MatchesContentType(contentType, expectedType string) bool { mimetype, _, err := mime.ParseMediaType(contentType) if err != nil { utils.Errorf("Error parsing media type: %s error: %s", contentType, err.Error()) } return err == nil && mimetype == expectedType } docker-0.9.1/api/server.go0000644000175000017500000010670612314376205013523 0ustar tagtagpackage api import ( "bufio" "bytes" "code.google.com/p/go.net/websocket" "encoding/base64" "encoding/json" "expvar" "fmt" "github.com/dotcloud/docker/auth" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/pkg/listenbuffer" "github.com/dotcloud/docker/pkg/systemd" "github.com/dotcloud/docker/pkg/user" "github.com/dotcloud/docker/pkg/version" "github.com/dotcloud/docker/utils" "github.com/gorilla/mux" "io" "io/ioutil" "log" "net" "net/http" "net/http/pprof" "os" "strconv" "strings" "syscall" ) var ( activationLock chan struct{} ) type HttpApiFunc func(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error func hijackServer(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { conn, _, err := w.(http.Hijacker).Hijack() if err != nil { return nil, nil, err } // Flush the options to make sure the client sets the raw mode conn.Write([]byte{}) return conn, conn, nil } //If we don't do this, POST method without Content-type (even with empty body) will fail func parseForm(r *http.Request) error { if r == nil { return nil } if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { return err } return nil } func parseMultipartForm(r *http.Request) error { if err := r.ParseMultipartForm(4096); err != nil && !strings.HasPrefix(err.Error(), "mime:") { return err } return nil } func httpError(w http.ResponseWriter, err error) { statusCode := http.StatusInternalServerError // FIXME: this is brittle and should not be necessary. // If we need to differentiate between different possible error types, we should // create appropriate error types with clearly defined meaning. 
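// For example, an error whose message contains "No such container" is
// reported as 404 Not Found, and one containing "Conflict" as 409 Conflict.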
if strings.Contains(err.Error(), "No such") { statusCode = http.StatusNotFound } else if strings.Contains(err.Error(), "Bad parameter") { statusCode = http.StatusBadRequest } else if strings.Contains(err.Error(), "Conflict") { statusCode = http.StatusConflict } else if strings.Contains(err.Error(), "Impossible") { statusCode = http.StatusNotAcceptable } else if strings.Contains(err.Error(), "Wrong login/password") { statusCode = http.StatusUnauthorized } else if strings.Contains(err.Error(), "hasn't been activated") { statusCode = http.StatusForbidden } if err != nil { utils.Errorf("HTTP Error: statusCode=%d %s", statusCode, err.Error()) http.Error(w, err.Error(), statusCode) } } func writeJSON(w http.ResponseWriter, code int, v engine.Env) error { w.Header().Set("Content-Type", "application/json") w.WriteHeader(code) return v.Encode(w) } func streamJSON(job *engine.Job, w http.ResponseWriter, flush bool) { w.Header().Set("Content-Type", "application/json") if flush { job.Stdout.Add(utils.NewWriteFlusher(w)) } else { job.Stdout.Add(w) } } func getBoolParam(value string) (bool, error) { if value == "" { return false, nil } ret, err := strconv.ParseBool(value) if err != nil { return false, fmt.Errorf("Bad parameter") } return ret, nil } func postAuth(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { var ( authConfig, err = ioutil.ReadAll(r.Body) job = eng.Job("auth") status string ) if err != nil { return err } job.Setenv("authConfig", string(authConfig)) job.Stdout.AddString(&status) if err = job.Run(); err != nil { return err } if status != "" { var env engine.Env env.Set("Status", status) return writeJSON(w, http.StatusOK, env) } w.WriteHeader(http.StatusNoContent) return nil } func getVersion(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.Header().Set("Content-Type", "application/json") eng.ServeHTTP(w, r) return nil } func postContainersKill(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } if err := parseForm(r); err != nil { return err } job := eng.Job("kill", vars["name"]) if sig := r.Form.Get("signal"); sig != "" { job.Args = append(job.Args, sig) } if err := job.Run(); err != nil { return err } w.WriteHeader(http.StatusNoContent) return nil } func getContainersExport(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } job := eng.Job("export", vars["name"]) job.Stdout.Add(w) if err := job.Run(); err != nil { return err } return nil } func getImagesJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } var ( err error outs *engine.Table job = eng.Job("images") ) job.Setenv("filter", r.Form.Get("filter")) job.Setenv("all", r.Form.Get("all")) if version.GreaterThanOrEqualTo("1.7") { streamJSON(job, w, false) } else if outs, err = job.Stdout.AddListTable(); err != nil { return err } if err := job.Run(); err != nil { return err } if version.LessThan("1.7") && outs != nil { // Convert to legacy format outsLegacy := engine.NewTable("Created", 0) for _, out := range outs.Data { for _, repoTag := range out.GetList("RepoTags") { parts := strings.Split(repoTag, ":") outLegacy := &engine.Env{} 
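// Clients older than API 1.7 expect one flat entry per repo:tag pair, so each
// RepoTags value is split back into separate Repository and Tag fields here.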
outLegacy.Set("Repository", parts[0]) outLegacy.Set("Tag", parts[1]) outLegacy.Set("Id", out.Get("Id")) outLegacy.SetInt64("Created", out.GetInt64("Created")) outLegacy.SetInt64("Size", out.GetInt64("Size")) outLegacy.SetInt64("VirtualSize", out.GetInt64("VirtualSize")) outsLegacy.Add(outLegacy) } } w.Header().Set("Content-Type", "application/json") if _, err := outsLegacy.WriteListTo(w); err != nil { return err } } return nil } func getImagesViz(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if version.GreaterThan("1.6") { w.WriteHeader(http.StatusNotFound) return fmt.Errorf("This is now implemented in the client.") } eng.ServeHTTP(w, r) return nil } func getInfo(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.Header().Set("Content-Type", "application/json") eng.ServeHTTP(w, r) return nil } func getEvents(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } var job = eng.Job("events", r.RemoteAddr) streamJSON(job, w, true) job.Setenv("since", r.Form.Get("since")) return job.Run() } func getImagesHistory(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } var job = eng.Job("history", vars["name"]) streamJSON(job, w, false) if err := job.Run(); err != nil { return err } return nil } func getContainersChanges(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } var job = eng.Job("changes", vars["name"]) streamJSON(job, w, false) return job.Run() } func getContainersTop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if version.LessThan("1.4") { return fmt.Errorf("top was improved a lot since 1.3, Please upgrade your docker client.") } if vars == nil { return fmt.Errorf("Missing parameter") } if err := parseForm(r); err != nil { return err } job := eng.Job("top", vars["name"], r.Form.Get("ps_args")) streamJSON(job, w, false) return job.Run() } func getContainersJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } var ( err error outs *engine.Table job = eng.Job("containers") ) job.Setenv("all", r.Form.Get("all")) job.Setenv("size", r.Form.Get("size")) job.Setenv("since", r.Form.Get("since")) job.Setenv("before", r.Form.Get("before")) job.Setenv("limit", r.Form.Get("limit")) if version.GreaterThanOrEqualTo("1.5") { streamJSON(job, w, false) } else if outs, err = job.Stdout.AddTable(); err != nil { return err } if err = job.Run(); err != nil { return err } if version.LessThan("1.5") { // Convert to legacy format for _, out := range outs.Data { ports := engine.NewTable("", 0) ports.ReadListFrom([]byte(out.Get("Ports"))) out.Set("Ports", displayablePorts(ports)) } w.Header().Set("Content-Type", "application/json") if _, err = outs.WriteListTo(w); err != nil { return err } } return nil } func postImagesTag(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing 
parameter") } job := eng.Job("tag", vars["name"], r.Form.Get("repo"), r.Form.Get("tag")) job.Setenv("force", r.Form.Get("force")) if err := job.Run(); err != nil { return err } w.WriteHeader(http.StatusCreated) return nil } func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } var ( config engine.Env env engine.Env job = eng.Job("commit", r.Form.Get("container")) ) if err := config.Decode(r.Body); err != nil { utils.Errorf("%s", err) } job.Setenv("repo", r.Form.Get("repo")) job.Setenv("tag", r.Form.Get("tag")) job.Setenv("author", r.Form.Get("author")) job.Setenv("comment", r.Form.Get("comment")) job.SetenvSubEnv("config", &config) var id string job.Stdout.AddString(&id) if err := job.Run(); err != nil { return err } env.Set("Id", id) return writeJSON(w, http.StatusCreated, env) } // Creates an image from Pull or from Import func postImagesCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } var ( image = r.Form.Get("fromImage") tag = r.Form.Get("tag") job *engine.Job ) authEncoded := r.Header.Get("X-Registry-Auth") authConfig := &auth.AuthConfig{} if authEncoded != "" { authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { // for a pull it is not an error if no auth was given // to increase compatibility with the existing api it is defaulting to be empty authConfig = &auth.AuthConfig{} } } if image != "" { //pull metaHeaders := map[string][]string{} for k, v := range r.Header { if strings.HasPrefix(k, "X-Meta-") { metaHeaders[k] = v } } job = eng.Job("pull", r.Form.Get("fromImage"), tag) job.SetenvBool("parallel", version.GreaterThan("1.3")) job.SetenvJson("metaHeaders", metaHeaders) job.SetenvJson("authConfig", authConfig) } else { //import job = eng.Job("import", r.Form.Get("fromSrc"), r.Form.Get("repo"), tag) job.Stdin.Add(r.Body) } if version.GreaterThan("1.0") { job.SetenvBool("json", true) streamJSON(job, w, true) } else { job.Stdout.Add(utils.NewWriteFlusher(w)) } if err := job.Run(); err != nil { if !job.Stdout.Used() { return err } sf := utils.NewStreamFormatter(version.GreaterThan("1.0")) w.Write(sf.FormatError(err)) } return nil } func getImagesSearch(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } var ( authEncoded = r.Header.Get("X-Registry-Auth") authConfig = &auth.AuthConfig{} metaHeaders = map[string][]string{} ) if authEncoded != "" { authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { // for a search it is not an error if no auth was given // to increase compatibility with the existing api it is defaulting to be empty authConfig = &auth.AuthConfig{} } } for k, v := range r.Header { if strings.HasPrefix(k, "X-Meta-") { metaHeaders[k] = v } } var job = eng.Job("search", r.Form.Get("term")) job.SetenvJson("metaHeaders", metaHeaders) job.SetenvJson("authConfig", authConfig) streamJSON(job, w, false) return job.Run() } func postImagesInsert(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { 
return fmt.Errorf("Missing parameter") } job := eng.Job("insert", vars["name"], r.Form.Get("url"), r.Form.Get("path")) if version.GreaterThan("1.0") { job.SetenvBool("json", true) streamJSON(job, w, false) } else { job.Stdout.Add(w) } if err := job.Run(); err != nil { if !job.Stdout.Used() { return err } sf := utils.NewStreamFormatter(version.GreaterThan("1.0")) w.Write(sf.FormatError(err)) } return nil } func postImagesPush(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } metaHeaders := map[string][]string{} for k, v := range r.Header { if strings.HasPrefix(k, "X-Meta-") { metaHeaders[k] = v } } if err := parseForm(r); err != nil { return err } authConfig := &auth.AuthConfig{} authEncoded := r.Header.Get("X-Registry-Auth") if authEncoded != "" { // the new format is to handle the authConfig as a header authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { // to increase compatibility to existing api it is defaulting to be empty authConfig = &auth.AuthConfig{} } } else { // the old format is supported for compatibility if there was no authConfig header if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { return err } } job := eng.Job("push", vars["name"]) job.SetenvJson("metaHeaders", metaHeaders) job.SetenvJson("authConfig", authConfig) if version.GreaterThan("1.0") { job.SetenvBool("json", true) streamJSON(job, w, true) } else { job.Stdout.Add(utils.NewWriteFlusher(w)) } if err := job.Run(); err != nil { if !job.Stdout.Used() { return err } sf := utils.NewStreamFormatter(version.GreaterThan("1.0")) w.Write(sf.FormatError(err)) } return nil } func getImagesGet(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } if version.GreaterThan("1.0") { w.Header().Set("Content-Type", "application/x-tar") } job := eng.Job("image_export", vars["name"]) job.Stdout.Add(w) return job.Run() } func postImagesLoad(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { job := eng.Job("load") job.Stdin.Add(r.Body) return job.Run() } func postContainersCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return nil } var ( out engine.Env job = eng.Job("create", r.Form.Get("name")) outWarnings []string outId string warnings = bytes.NewBuffer(nil) ) if err := job.DecodeEnv(r.Body); err != nil { return err } // Read container ID from the first line of stdout job.Stdout.AddString(&outId) // Read warnings from stderr job.Stderr.Add(warnings) if err := job.Run(); err != nil { return err } // Parse warnings from stderr scanner := bufio.NewScanner(warnings) for scanner.Scan() { outWarnings = append(outWarnings, scanner.Text()) } out.Set("Id", outId) out.SetList("Warnings", outWarnings) return writeJSON(w, http.StatusCreated, out) } func postContainersRestart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } job := eng.Job("restart", vars["name"]) job.Setenv("t", r.Form.Get("t")) if err := job.Run(); err != nil { return err } 
w.WriteHeader(http.StatusNoContent) return nil } func deleteContainers(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } job := eng.Job("container_delete", vars["name"]) job.Setenv("removeVolume", r.Form.Get("v")) job.Setenv("removeLink", r.Form.Get("link")) job.Setenv("forceRemove", r.Form.Get("force")) if err := job.Run(); err != nil { return err } w.WriteHeader(http.StatusNoContent) return nil } func deleteImages(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } var job = eng.Job("image_delete", vars["name"]) streamJSON(job, w, false) job.Setenv("force", r.Form.Get("force")) return job.Run() } func postContainersStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } name := vars["name"] job := eng.Job("start", name) // allow a nil body for backwards compatibility if r.Body != nil { if MatchesContentType(r.Header.Get("Content-Type"), "application/json") { if err := job.DecodeEnv(r.Body); err != nil { return err } } } if err := job.Run(); err != nil { return err } w.WriteHeader(http.StatusNoContent) return nil } func postContainersStop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } job := eng.Job("stop", vars["name"]) job.Setenv("t", r.Form.Get("t")) if err := job.Run(); err != nil { return err } w.WriteHeader(http.StatusNoContent) return nil } func postContainersWait(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } var ( env engine.Env status string job = eng.Job("wait", vars["name"]) ) job.Stdout.AddString(&status) if err := job.Run(); err != nil { return err } // Parse a 16-bit encoded integer to map typical unix exit status. 
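// (The parsed value is discarded: the call only validates that the wait job
// produced a numeric status before it is echoed back as StatusCode below.)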
_, err := strconv.ParseInt(status, 10, 16) if err != nil { return err } env.Set("StatusCode", status) return writeJSON(w, http.StatusOK, env) } func postContainersResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } if err := eng.Job("resize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil { return err } return nil } func postContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } var ( job = eng.Job("inspect", vars["name"], "container") c, err = job.Stdout.AddEnv() ) if err != nil { return err } if err = job.Run(); err != nil { return err } inStream, outStream, err := hijackServer(w) if err != nil { return err } defer func() { if tcpc, ok := inStream.(*net.TCPConn); ok { tcpc.CloseWrite() } else { inStream.Close() } }() defer func() { if tcpc, ok := outStream.(*net.TCPConn); ok { tcpc.CloseWrite() } else if closer, ok := outStream.(io.Closer); ok { closer.Close() } }() var errStream io.Writer fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") { errStream = utils.NewStdWriter(outStream, utils.Stderr) outStream = utils.NewStdWriter(outStream, utils.Stdout) } else { errStream = outStream } job = eng.Job("attach", vars["name"]) job.Setenv("logs", r.Form.Get("logs")) job.Setenv("stream", r.Form.Get("stream")) job.Setenv("stdin", r.Form.Get("stdin")) job.Setenv("stdout", r.Form.Get("stdout")) job.Setenv("stderr", r.Form.Get("stderr")) job.Stdin.Add(inStream) job.Stdout.Add(outStream) job.Stderr.Set(errStream) if err := job.Run(); err != nil { fmt.Fprintf(outStream, "Error: %s\n", err) } return nil } func wsContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } if err := eng.Job("inspect", vars["name"], "container").Run(); err != nil { return err } h := websocket.Handler(func(ws *websocket.Conn) { defer ws.Close() job := eng.Job("attach", vars["name"]) job.Setenv("logs", r.Form.Get("logs")) job.Setenv("stream", r.Form.Get("stream")) job.Setenv("stdin", r.Form.Get("stdin")) job.Setenv("stdout", r.Form.Get("stdout")) job.Setenv("stderr", r.Form.Get("stderr")) job.Stdin.Add(ws) job.Stdout.Add(ws) job.Stderr.Set(ws) if err := job.Run(); err != nil { utils.Errorf("Error: %s", err) } }) h.ServeHTTP(w, r) return nil } func getContainersByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } var job = eng.Job("inspect", vars["name"], "container") streamJSON(job, w, false) job.SetenvBool("conflict", true) //conflict=true to detect conflict between containers and images in the job return job.Run() } func getImagesByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } var job = eng.Job("inspect", vars["name"], "image") streamJSON(job, w, 
false) job.SetenvBool("conflict", true) //conflict=true to detect conflict between containers and images in the job return job.Run() } func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if version.LessThan("1.3") { return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.") } var ( authEncoded = r.Header.Get("X-Registry-Auth") authConfig = &auth.AuthConfig{} configFileEncoded = r.Header.Get("X-Registry-Config") configFile = &auth.ConfigFile{} job = eng.Job("build") ) // This block can be removed when API versions prior to 1.9 are deprecated. // Both headers will be parsed and sent along to the daemon, but if a non-empty // ConfigFile is present, any value provided as an AuthConfig directly will // be overridden. See BuildFile::CmdFrom for details. if version.LessThan("1.9") && authEncoded != "" { authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { // for a pull it is not an error if no auth was given // to increase compatibility with the existing api it is defaulting to be empty authConfig = &auth.AuthConfig{} } } if configFileEncoded != "" { configFileJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(configFileEncoded)) if err := json.NewDecoder(configFileJson).Decode(configFile); err != nil { // for a pull it is not an error if no auth was given // to increase compatibility with the existing api it is defaulting to be empty configFile = &auth.ConfigFile{} } } if version.GreaterThanOrEqualTo("1.8") { job.SetenvBool("json", true) streamJSON(job, w, true) } else { job.Stdout.Add(utils.NewWriteFlusher(w)) } job.Stdin.Add(r.Body) job.Setenv("remote", r.FormValue("remote")) job.Setenv("t", r.FormValue("t")) job.Setenv("q", r.FormValue("q")) job.Setenv("nocache", r.FormValue("nocache")) job.Setenv("rm", r.FormValue("rm")) job.SetenvJson("authConfig", authConfig) job.SetenvJson("configFile", configFile) if err := job.Run(); err != nil { if !job.Stdout.Used() { return err } sf := utils.NewStreamFormatter(version.GreaterThanOrEqualTo("1.8")) w.Write(sf.FormatError(err)) } return nil } func postContainersCopy(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } var copyData engine.Env if contentType := r.Header.Get("Content-Type"); MatchesContentType(contentType, "application/json") { if err := copyData.Decode(r.Body); err != nil { return err } } else { return fmt.Errorf("Content-Type not supported: %s", contentType) } if copyData.Get("Resource") == "" { return fmt.Errorf("Path cannot be empty") } if copyData.Get("Resource")[0] == '/' { copyData.Set("Resource", copyData.Get("Resource")[1:]) } job := eng.Job("container_copy", vars["name"], copyData.Get("Resource")) job.Stdout.Add(w) if err := job.Run(); err != nil { utils.Errorf("%s", err.Error()) if strings.Contains(err.Error(), "No such container") { w.WriteHeader(http.StatusNotFound) } } return nil } func optionsHandler(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.WriteHeader(http.StatusOK) return nil } func writeCorsHeaders(w http.ResponseWriter, r *http.Request) { w.Header().Add("Access-Control-Allow-Origin", "*") w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept") 
w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS") } func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, enableCors bool, dockerVersion version.Version) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { // log the request utils.Debugf("Calling %s %s", localMethod, localRoute) if logging { log.Println(r.Method, r.RequestURI) } if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") { userAgent := strings.Split(r.Header.Get("User-Agent"), "/") if len(userAgent) == 2 && !dockerVersion.Equal(userAgent[1]) { utils.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion) } } version := version.Version(mux.Vars(r)["version"]) if version == "" { version = APIVERSION } if enableCors { writeCorsHeaders(w, r) } if version.GreaterThan(APIVERSION) { http.Error(w, fmt.Errorf("client and server don't have same version (client : %s, server: %s)", version, APIVERSION).Error(), http.StatusNotFound) return } if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil { utils.Errorf("Error: %s", err) httpError(w, err) } } } // Replicated from expvar.go as not public. func expvarHandler(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json; charset=utf-8") fmt.Fprintf(w, "{\n") first := true expvar.Do(func(kv expvar.KeyValue) { if !first { fmt.Fprintf(w, ",\n") } first = false fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) }) fmt.Fprintf(w, "\n}\n") } func AttachProfiler(router *mux.Router) { router.HandleFunc("/debug/vars", expvarHandler) router.HandleFunc("/debug/pprof/", pprof.Index) router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) router.HandleFunc("/debug/pprof/profile", pprof.Profile) router.HandleFunc("/debug/pprof/symbol", pprof.Symbol) router.HandleFunc("/debug/pprof/heap", pprof.Handler("heap").ServeHTTP) router.HandleFunc("/debug/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) router.HandleFunc("/debug/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) } func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion string) (*mux.Router, error) { r := mux.NewRouter() if os.Getenv("DEBUG") != "" { AttachProfiler(r) } m := map[string]map[string]HttpApiFunc{ "GET": { "/events": getEvents, "/info": getInfo, "/version": getVersion, "/images/json": getImagesJSON, "/images/viz": getImagesViz, "/images/search": getImagesSearch, "/images/{name:.*}/get": getImagesGet, "/images/{name:.*}/history": getImagesHistory, "/images/{name:.*}/json": getImagesByName, "/containers/ps": getContainersJSON, "/containers/json": getContainersJSON, "/containers/{name:.*}/export": getContainersExport, "/containers/{name:.*}/changes": getContainersChanges, "/containers/{name:.*}/json": getContainersByName, "/containers/{name:.*}/top": getContainersTop, "/containers/{name:.*}/attach/ws": wsContainersAttach, }, "POST": { "/auth": postAuth, "/commit": postCommit, "/build": postBuild, "/images/create": postImagesCreate, "/images/{name:.*}/insert": postImagesInsert, "/images/load": postImagesLoad, "/images/{name:.*}/push": postImagesPush, "/images/{name:.*}/tag": postImagesTag, "/containers/create": postContainersCreate, "/containers/{name:.*}/kill": postContainersKill, "/containers/{name:.*}/restart": postContainersRestart, "/containers/{name:.*}/start": postContainersStart, "/containers/{name:.*}/stop": postContainersStop, "/containers/{name:.*}/wait": 
postContainersWait, "/containers/{name:.*}/resize": postContainersResize, "/containers/{name:.*}/attach": postContainersAttach, "/containers/{name:.*}/copy": postContainersCopy, }, "DELETE": { "/containers/{name:.*}": deleteContainers, "/images/{name:.*}": deleteImages, }, "OPTIONS": { "": optionsHandler, }, } for method, routes := range m { for route, fct := range routes { utils.Debugf("Registering %s, %s", method, route) // NOTE: scope issue, make sure the variables are local and won't be changed localRoute := route localFct := fct localMethod := method // build the handler function f := makeHttpHandler(eng, logging, localMethod, localRoute, localFct, enableCors, version.Version(dockerVersion)) // add the new route if localRoute == "" { r.Methods(localMethod).HandlerFunc(f) } else { r.Path("/v{version:[0-9.]+}" + localRoute).Methods(localMethod).HandlerFunc(f) r.Path(localRoute).Methods(localMethod).HandlerFunc(f) } } } return r, nil } // ServeRequest processes a single http request to the docker remote api. // FIXME: refactor this to be part of Server and not require re-creating a new // router each time. This requires first moving ListenAndServe into Server. func ServeRequest(eng *engine.Engine, apiversion version.Version, w http.ResponseWriter, req *http.Request) error { router, err := createRouter(eng, false, true, "") if err != nil { return err } // Insert APIVERSION into the request as a convenience req.URL.Path = fmt.Sprintf("/v%s%s", apiversion, req.URL.Path) router.ServeHTTP(w, req) return nil } // ServeFD creates an http.Server and sets it up to serve given a socket activated // argument. func ServeFd(addr string, handle http.Handler) error { ls, e := systemd.ListenFD(addr) if e != nil { return e } chErrors := make(chan error, len(ls)) // We don't want to start serving on these sockets until the // "initserver" job has completed. Otherwise required handlers // won't be ready. <-activationLock // Since ListenFD will return one or more sockets we have // to create a go func to spawn off multiple serves for i := range ls { listener := ls[i] go func() { httpSrv := http.Server{Handler: handle} chErrors <- httpSrv.Serve(listener) }() } for i := 0; i < len(ls); i += 1 { err := <-chErrors if err != nil { return err } } return nil } func lookupGidByName(nameOrGid string) (int, error) { groups, err := user.ParseGroupFilter(func(g *user.Group) bool { return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid }) if err != nil { return -1, err } if groups != nil && len(groups) > 0 { return groups[0].Gid, nil } return -1, fmt.Errorf("Group %s not found", nameOrGid) } func changeGroup(addr string, nameOrGid string) error { gid, err := lookupGidByName(nameOrGid) if err != nil { return err } utils.Debugf("%s group found. gid: %d", nameOrGid, gid) return os.Chown(addr, 0, gid) } // ListenAndServe sets up the required http.Server and gets it listening for // each addr passed in and does protocol specific checking. 
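// An illustrative call, roughly what ServeApi ends up doing for a unix
// endpoint (the argument values here are examples, not defaults taken
// from this file):
//
//	err := ListenAndServe("unix", "/var/run/docker.sock", eng, true, false, "0.9.1", "docker")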
func ListenAndServe(proto, addr string, eng *engine.Engine, logging, enableCors bool, dockerVersion, socketGroup string) error { r, err := createRouter(eng, logging, enableCors, dockerVersion) if err != nil { return err } if proto == "fd" { return ServeFd(addr, r) } if proto == "unix" { if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) { return err } } l, err := listenbuffer.NewListenBuffer(proto, addr, activationLock) if err != nil { return err } // Basic error and sanity checking switch proto { case "tcp": if !strings.HasPrefix(addr, "127.0.0.1") { log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") } case "unix": if err := os.Chmod(addr, 0660); err != nil { return err } if socketGroup != "" { if err := changeGroup(addr, socketGroup); err != nil { if socketGroup == "docker" { // if the user hasn't explicitly specified the group ownership, don't fail on errors. utils.Debugf("Warning: could not chgrp %s to docker: %s", addr, err.Error()) } else { return err } } } default: return fmt.Errorf("Invalid protocol format.") } httpSrv := http.Server{Addr: addr, Handler: r} return httpSrv.Serve(l) } // ServeApi loops through all of the protocols sent in to docker and spawns // off a go routine to setup a serving http.Server for each. func ServeApi(job *engine.Job) engine.Status { var ( protoAddrs = job.Args chErrors = make(chan error, len(protoAddrs)) ) activationLock = make(chan struct{}) if err := job.Eng.Register("acceptconnections", AcceptConnections); err != nil { return job.Error(err) } for _, protoAddr := range protoAddrs { protoAddrParts := strings.SplitN(protoAddr, "://", 2) go func() { log.Printf("Listening for HTTP on %s (%s)\n", protoAddrParts[0], protoAddrParts[1]) chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"), job.Getenv("SocketGroup")) }() } for i := 0; i < len(protoAddrs); i += 1 { err := <-chErrors if err != nil { return job.Error(err) } } return engine.StatusOK } func AcceptConnections(job *engine.Job) engine.Status { // Tell the init daemon we are accepting requests go systemd.SdNotify("READY=1") // close the lock so the listeners start accepting connections close(activationLock) return engine.StatusOK } docker-0.9.1/api/api_unit_test.go0000644000175000017500000000335012314376205015053 0ustar tagtagpackage api import ( "fmt" "net/http" "net/http/httptest" "testing" ) func TestJsonContentType(t *testing.T) { if !MatchesContentType("application/json", "application/json") { t.Fail() } if !MatchesContentType("application/json; charset=utf-8", "application/json") { t.Fail() } if MatchesContentType("dockerapplication/json", "application/json") { t.Fail() } } func TestGetBoolParam(t *testing.T) { if ret, err := getBoolParam("true"); err != nil || !ret { t.Fatalf("true -> true, nil | got %t %s", ret, err) } if ret, err := getBoolParam("True"); err != nil || !ret { t.Fatalf("True -> true, nil | got %t %s", ret, err) } if ret, err := getBoolParam("1"); err != nil || !ret { t.Fatalf("1 -> true, nil | got %t %s", ret, err) } if ret, err := getBoolParam(""); err != nil || ret { t.Fatalf("\"\" -> false, nil | got %t %s", ret, err) } if ret, err := getBoolParam("false"); err != nil || ret { t.Fatalf("false -> false, nil | got %t %s", ret, err) } if ret, err := getBoolParam("0"); err != nil || ret { t.Fatalf("0 -> false, nil | got %t %s", ret, err) } if ret, err := getBoolParam("faux"); err == nil || ret { 
t.Fatalf("faux -> false, err | got %t %s", ret, err) } } func TesthttpError(t *testing.T) { r := httptest.NewRecorder() httpError(r, fmt.Errorf("No such method")) if r.Code != http.StatusNotFound { t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code) } httpError(r, fmt.Errorf("This accound hasn't been activated")) if r.Code != http.StatusForbidden { t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code) } httpError(r, fmt.Errorf("Some error")) if r.Code != http.StatusInternalServerError { t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code) } } docker-0.9.1/api/MAINTAINERS0000644000175000017500000000005112314376205013345 0ustar tagtagVictor Vieux (@vieux) docker-0.9.1/api/container.go0000644000175000017500000000044312314376205014166 0ustar tagtagpackage api import ( "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/runconfig" ) type Container struct { Config runconfig.Config HostConfig runconfig.HostConfig State struct { Running bool ExitCode int } NetworkSettings struct { Ports nat.PortMap } } docker-0.9.1/api/client.go0000644000175000017500000020502612314376205013466 0ustar tagtagpackage api import ( "bufio" "bytes" "encoding/base64" "encoding/json" "errors" "fmt" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/auth" "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/nat" flag "github.com/dotcloud/docker/pkg/mflag" "github.com/dotcloud/docker/pkg/signal" "github.com/dotcloud/docker/pkg/term" "github.com/dotcloud/docker/registry" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "io" "io/ioutil" "net" "net/http" "net/http/httputil" "net/url" "os" gosignal "os/signal" "path" "reflect" "regexp" goruntime "runtime" "strconv" "strings" "syscall" "text/tabwriter" "text/template" "time" ) var funcMap = template.FuncMap{ "json": func(v interface{}) string { a, _ := json.Marshal(v) return string(a) }, } var ( ErrConnectionRefused = errors.New("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") ) func (cli *DockerCli) getMethod(name string) (func(...string) error, bool) { methodName := "Cmd" + strings.ToUpper(name[:1]) + strings.ToLower(name[1:]) method := reflect.ValueOf(cli).MethodByName(methodName) if !method.IsValid() { return nil, false } return method.Interface().(func(...string) error), true } func ParseCommands(proto, addr string, args ...string) error { cli := NewDockerCli(os.Stdin, os.Stdout, os.Stderr, proto, addr) if len(args) > 0 { method, exists := cli.getMethod(args[0]) if !exists { fmt.Println("Error: Command not found:", args[0]) return cli.CmdHelp(args[1:]...) } return method(args[1:]...) } return cli.CmdHelp(args...) 
} func (cli *DockerCli) CmdHelp(args ...string) error { if len(args) > 0 { method, exists := cli.getMethod(args[0]) if !exists { fmt.Fprintf(cli.err, "Error: Command not found: %s\n", args[0]) } else { method("--help") return nil } } help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", DEFAULTUNIXSOCKET) for _, command := range [][]string{ {"attach", "Attach to a running container"}, {"build", "Build a container from a Dockerfile"}, {"commit", "Create a new image from a container's changes"}, {"cp", "Copy files/folders from the containers filesystem to the host path"}, {"diff", "Inspect changes on a container's filesystem"}, {"events", "Get real time events from the server"}, {"export", "Stream the contents of a container as a tar archive"}, {"history", "Show the history of an image"}, {"images", "List images"}, {"import", "Create a new filesystem image from the contents of a tarball"}, {"info", "Display system-wide information"}, {"insert", "Insert a file in an image"}, {"inspect", "Return low-level information on a container"}, {"kill", "Kill a running container"}, {"load", "Load an image from a tar archive"}, {"login", "Register or Login to the docker registry server"}, {"logs", "Fetch the logs of a container"}, {"port", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT"}, {"ps", "List containers"}, {"pull", "Pull an image or a repository from the docker registry server"}, {"push", "Push an image or a repository to the docker registry server"}, {"restart", "Restart a running container"}, {"rm", "Remove one or more containers"}, {"rmi", "Remove one or more images"}, {"run", "Run a command in a new container"}, {"save", "Save an image to a tar archive"}, {"search", "Search for an image in the docker index"}, {"start", "Start a stopped container"}, {"stop", "Stop a running container"}, {"tag", "Tag an image into a repository"}, {"top", "Lookup the running processes of a container"}, {"version", "Show the docker version information"}, {"wait", "Block until a container stops, then print its exit code"}, } { help += fmt.Sprintf(" %-10.10s%s\n", command[0], command[1]) } fmt.Fprintf(cli.err, "%s\n", help) return nil } func (cli *DockerCli) CmdInsert(args ...string) error { cmd := cli.Subcmd("insert", "IMAGE URL PATH", "Insert a file from URL in the IMAGE at PATH") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() != 3 { cmd.Usage() return nil } v := url.Values{} v.Set("url", cmd.Arg(1)) v.Set("path", cmd.Arg(2)) return cli.stream("POST", "/images/"+cmd.Arg(0)+"/insert?"+v.Encode(), nil, cli.out, nil) } func (cli *DockerCli) CmdBuild(args ...string) error { cmd := cli.Subcmd("build", "[OPTIONS] PATH | URL | -", "Build a new container image from the source code at PATH") tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) to be applied to the resulting image in case of success") suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers") noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image") rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() != 1 { cmd.Usage() return nil } var ( context archive.Archive isRemote bool err error ) if cmd.Arg(0) == "-" 
{ // As a special case, 'docker build -' will build from an empty context with the // contents of stdin as a Dockerfile dockerfile, err := ioutil.ReadAll(cli.in) if err != nil { return err } context, err = archive.Generate("Dockerfile", string(dockerfile)) } else if utils.IsURL(cmd.Arg(0)) || utils.IsGIT(cmd.Arg(0)) { isRemote = true } else { if _, err := os.Stat(cmd.Arg(0)); err != nil { return err } filename := path.Join(cmd.Arg(0), "Dockerfile") if _, err = os.Stat(filename); os.IsNotExist(err) { return fmt.Errorf("no Dockerfile found in %s", cmd.Arg(0)) } context, err = archive.Tar(cmd.Arg(0), archive.Uncompressed) } var body io.Reader // Setup an upload progress bar // FIXME: ProgressReader shouldn't be this annoying to use if context != nil { sf := utils.NewStreamFormatter(false) body = utils.ProgressReader(context, 0, cli.err, sf, true, "", "Uploading context") } // Upload the build context v := &url.Values{} v.Set("t", *tag) if *suppressOutput { v.Set("q", "1") } if isRemote { v.Set("remote", cmd.Arg(0)) } if *noCache { v.Set("nocache", "1") } if *rm { v.Set("rm", "1") } cli.LoadConfigFile() headers := http.Header(make(map[string][]string)) buf, err := json.Marshal(cli.configFile) if err != nil { return err } headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) if context != nil { headers.Set("Content-Type", "application/tar") } err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), body, cli.out, headers) if jerr, ok := err.(*utils.JSONError); ok { // If no error code is set, default to 1 if jerr.Code == 0 { jerr.Code = 1 } return &utils.StatusError{Status: jerr.Message, StatusCode: jerr.Code} } return err } // 'docker login': login / register a user to registry service. func (cli *DockerCli) CmdLogin(args ...string) error { cmd := cli.Subcmd("login", "[OPTIONS] [SERVER]", "Register or Login to a docker registry server, if no server is specified \""+auth.IndexServerAddress()+"\" is the default.") var username, password, email string cmd.StringVar(&username, []string{"u", "-username"}, "", "Username") cmd.StringVar(&password, []string{"p", "-password"}, "", "Password") cmd.StringVar(&email, []string{"e", "-email"}, "", "Email") err := cmd.Parse(args) if err != nil { return nil } serverAddress := auth.IndexServerAddress() if len(cmd.Args()) > 0 { serverAddress = cmd.Arg(0) } promptDefault := func(prompt string, configDefault string) { if configDefault == "" { fmt.Fprintf(cli.out, "%s: ", prompt) } else { fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault) } } readInput := func(in io.Reader, out io.Writer) string { reader := bufio.NewReader(in) line, _, err := reader.ReadLine() if err != nil { fmt.Fprintln(out, err.Error()) os.Exit(1) } return string(line) } cli.LoadConfigFile() authconfig, ok := cli.configFile.Configs[serverAddress] if !ok { authconfig = auth.AuthConfig{} } if username == "" { promptDefault("Username", authconfig.Username) username = readInput(cli.in, cli.out) if username == "" { username = authconfig.Username } } if username != authconfig.Username { if password == "" { oldState, _ := term.SaveState(cli.terminalFd) fmt.Fprintf(cli.out, "Password: ") term.DisableEcho(cli.terminalFd, oldState) password = readInput(cli.in, cli.out) fmt.Fprint(cli.out, "\n") term.RestoreTerminal(cli.terminalFd, oldState) if password == "" { return fmt.Errorf("Error : Password Required") } } if email == "" { promptDefault("Email", authconfig.Email) email = readInput(cli.in, cli.out) if email == "" { email = authconfig.Email } } } else { password = 
authconfig.Password email = authconfig.Email } authconfig.Username = username authconfig.Password = password authconfig.Email = email authconfig.ServerAddress = serverAddress cli.configFile.Configs[serverAddress] = authconfig stream, statusCode, err := cli.call("POST", "/auth", cli.configFile.Configs[serverAddress], false) if statusCode == 401 { delete(cli.configFile.Configs, serverAddress) auth.SaveConfig(cli.configFile) return err } if err != nil { return err } var out2 engine.Env err = out2.Decode(stream) if err != nil { cli.configFile, _ = auth.LoadConfig(os.Getenv("HOME")) return err } auth.SaveConfig(cli.configFile) if out2.Get("Status") != "" { fmt.Fprintf(cli.out, "%s\n", out2.Get("Status")) } return nil } // 'docker wait': block until a container stops func (cli *DockerCli) CmdWait(args ...string) error { cmd := cli.Subcmd("wait", "CONTAINER [CONTAINER...]", "Block until a container stops, then print its exit code.") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() < 1 { cmd.Usage() return nil } var encounteredError error for _, name := range cmd.Args() { status, err := waitForExit(cli, name) if err != nil { fmt.Fprintf(cli.err, "%s\n", err) encounteredError = fmt.Errorf("Error: failed to wait one or more containers") } else { fmt.Fprintf(cli.out, "%d\n", status) } } return encounteredError } // 'docker version': show version information func (cli *DockerCli) CmdVersion(args ...string) error { cmd := cli.Subcmd("version", "", "Show the docker version information.") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() > 0 { cmd.Usage() return nil } if dockerversion.VERSION != "" { fmt.Fprintf(cli.out, "Client version: %s\n", dockerversion.VERSION) } fmt.Fprintf(cli.out, "Go version (client): %s\n", goruntime.Version()) if dockerversion.GITCOMMIT != "" { fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT) } body, _, err := readBody(cli.call("GET", "/version", nil, false)) if err != nil { return err } out := engine.NewOutput() remoteVersion, err := out.AddEnv() if err != nil { utils.Errorf("Error reading remote version: %s\n", err) return err } if _, err := out.Write(body); err != nil { utils.Errorf("Error reading remote version: %s\n", err) return err } out.Close() fmt.Fprintf(cli.out, "Server version: %s\n", remoteVersion.Get("Version")) fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit")) fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion")) release := utils.GetReleaseVersion() if release != "" { fmt.Fprintf(cli.out, "Last stable version: %s", release) if (dockerversion.VERSION != "" || remoteVersion.Exists("Version")) && (strings.Trim(dockerversion.VERSION, "-dev") != release || strings.Trim(remoteVersion.Get("Version"), "-dev") != release) { fmt.Fprintf(cli.out, ", please update docker") } fmt.Fprintf(cli.out, "\n") } return nil } // 'docker info': display system-wide information. 
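// CmdInfo fetches GET /info from the daemon, decodes the engine.Env
// payload and prints container/image counts, the storage and execution
// drivers, the kernel version, extra debug details when debugging is
// enabled, the registry username when logged in, and warnings for
// missing memory/swap limit support or disabled IPv4 forwarding.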
func (cli *DockerCli) CmdInfo(args ...string) error { cmd := cli.Subcmd("info", "", "Display system-wide information") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() > 0 { cmd.Usage() return nil } body, _, err := readBody(cli.call("GET", "/info", nil, false)) if err != nil { return err } out := engine.NewOutput() remoteInfo, err := out.AddEnv() if err != nil { return err } if _, err := out.Write(body); err != nil { utils.Errorf("Error reading remote info: %s\n", err) return err } out.Close() fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers")) fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images")) fmt.Fprintf(cli.out, "Storage Driver: %s\n", remoteInfo.Get("Driver")) var driverStatus [][2]string if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil { return err } for _, pair := range driverStatus { fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1]) } fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver")) fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion")) if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" { fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug")) fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "") fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd")) fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines")) fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener")) if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" { fmt.Fprintf(cli.out, "Init SHA1: %s\n", initSha1) } if initPath := remoteInfo.Get("InitPath"); initPath != "" { fmt.Fprintf(cli.out, "Init Path: %s\n", initPath) } } if len(remoteInfo.GetList("IndexServerAddress")) != 0 { cli.LoadConfigFile() u := cli.configFile.Configs[remoteInfo.Get("IndexServerAddress")].Username if len(u) > 0 { fmt.Fprintf(cli.out, "Username: %v\n", u) fmt.Fprintf(cli.out, "Registry: %v\n", remoteInfo.GetList("IndexServerAddress")) } } if !remoteInfo.GetBool("MemoryLimit") { fmt.Fprintf(cli.err, "WARNING: No memory limit support\n") } if !remoteInfo.GetBool("SwapLimit") { fmt.Fprintf(cli.err, "WARNING: No swap limit support\n") } if !remoteInfo.GetBool("IPv4Forwarding") { fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n") } return nil } func (cli *DockerCli) CmdStop(args ...string) error { cmd := cli.Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container (Send SIGTERM, and then SIGKILL after grace period)") nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop before killing it.") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() < 1 { cmd.Usage() return nil } v := url.Values{} v.Set("t", strconv.Itoa(*nSeconds)) var encounteredError error for _, name := range cmd.Args() { _, _, err := readBody(cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil, false)) if err != nil { fmt.Fprintf(cli.err, "%s\n", err) encounteredError = fmt.Errorf("Error: failed to stop one or more containers") } else { fmt.Fprintf(cli.out, "%s\n", name) } } return encounteredError } func (cli *DockerCli) CmdRestart(args ...string) error { cmd := cli.Subcmd("restart", "[OPTIONS] CONTAINER [CONTAINER...]", "Restart a running container") nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. 
Default=10") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() < 1 { cmd.Usage() return nil } v := url.Values{} v.Set("t", strconv.Itoa(*nSeconds)) var encounteredError error for _, name := range cmd.Args() { _, _, err := readBody(cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil, false)) if err != nil { fmt.Fprintf(cli.err, "%s\n", err) encounteredError = fmt.Errorf("Error: failed to restart one or more containers") } else { fmt.Fprintf(cli.out, "%s\n", name) } } return encounteredError } func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { sigc := make(chan os.Signal, 1) signal.CatchAll(sigc) go func() { for s := range sigc { if s == syscall.SIGCHLD { continue } var sig string for sigStr, sigN := range signal.SignalMap { if sigN == s { sig = sigStr break } } if sig == "" { utils.Errorf("Unsupported signal: %d. Discarding.", s) } if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, false)); err != nil { utils.Debugf("Error sending signal: %s", err) } } }() return sigc } func (cli *DockerCli) CmdStart(args ...string) error { cmd := cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container") attach := cmd.Bool([]string{"a", "-attach"}, false, "Attach container's stdout/stderr and forward all signals to the process") openStdin := cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's stdin") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() < 1 { cmd.Usage() return nil } var cErr chan error var tty bool if *attach || *openStdin { if cmd.NArg() > 1 { return fmt.Errorf("You cannot start and attach multiple containers at once.") } body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)) if err != nil { return err } container := &Container{} err = json.Unmarshal(body, container) if err != nil { return err } tty = container.Config.Tty if !container.Config.Tty { sigc := cli.forwardAllSignals(cmd.Arg(0)) defer signal.StopCatch(sigc) } var in io.ReadCloser v := url.Values{} v.Set("stream", "1") if *openStdin && container.Config.OpenStdin { v.Set("stdin", "1") in = cli.in } v.Set("stdout", "1") v.Set("stderr", "1") cErr = utils.Go(func() error { return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil) }) } var encounteredError error for _, name := range cmd.Args() { _, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, false)) if err != nil { if !*attach || !*openStdin { fmt.Fprintf(cli.err, "%s\n", err) encounteredError = fmt.Errorf("Error: failed to start one or more containers") } } else { if !*attach || !*openStdin { fmt.Fprintf(cli.out, "%s\n", name) } } } if encounteredError != nil { if *openStdin || *attach { cli.in.Close() <-cErr } return encounteredError } if *openStdin || *attach { if tty && cli.isTerminal { if err := cli.monitorTtySize(cmd.Arg(0)); err != nil { utils.Errorf("Error monitoring TTY size: %s\n", err) } } return <-cErr } return nil } func (cli *DockerCli) CmdInspect(args ...string) error { cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container/image") tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template.") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() < 1 { cmd.Usage() return nil } var tmpl *template.Template if *tmplStr != "" { var err error if tmpl, err = 
template.New("").Funcs(funcMap).Parse(*tmplStr); err != nil { fmt.Fprintf(cli.err, "Template parsing error: %v\n", err) return &utils.StatusError{StatusCode: 64, Status: "Template parsing error: " + err.Error()} } } indented := new(bytes.Buffer) indented.WriteByte('[') status := 0 for _, name := range cmd.Args() { obj, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) if err != nil { obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, false)) if err != nil { if strings.Contains(err.Error(), "No such") { fmt.Fprintf(cli.err, "Error: No such image or container: %s\n", name) } else { fmt.Fprintf(cli.err, "%s", err) } status = 1 continue } } if tmpl == nil { if err = json.Indent(indented, obj, "", " "); err != nil { fmt.Fprintf(cli.err, "%s\n", err) status = 1 continue } } else { // Has template, will render var value interface{} if err := json.Unmarshal(obj, &value); err != nil { fmt.Fprintf(cli.err, "%s\n", err) status = 1 continue } if err := tmpl.Execute(cli.out, value); err != nil { return err } cli.out.Write([]byte{'\n'}) } indented.WriteString(",") } if indented.Len() > 1 { // Remove trailing ',' indented.Truncate(indented.Len() - 1) } indented.WriteByte(']') if tmpl == nil { if _, err := io.Copy(cli.out, indented); err != nil { return err } } if status != 0 { return &utils.StatusError{StatusCode: status} } return nil } func (cli *DockerCli) CmdTop(args ...string) error { cmd := cli.Subcmd("top", "CONTAINER [ps OPTIONS]", "Lookup the running processes of a container") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() == 0 { cmd.Usage() return nil } val := url.Values{} if cmd.NArg() > 1 { val.Set("ps_args", strings.Join(cmd.Args()[1:], " ")) } stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil, false) if err != nil { return err } var procs engine.Env if err := procs.Decode(stream); err != nil { return err } w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) fmt.Fprintln(w, strings.Join(procs.GetList("Titles"), "\t")) processes := [][]string{} if err := procs.GetJson("Processes", &processes); err != nil { return err } for _, proc := range processes { fmt.Fprintln(w, strings.Join(proc, "\t")) } w.Flush() return nil } func (cli *DockerCli) CmdPort(args ...string) error { cmd := cli.Subcmd("port", "CONTAINER PRIVATE_PORT", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() != 2 { cmd.Usage() return nil } port := cmd.Arg(1) proto := "tcp" parts := strings.SplitN(port, "/", 2) if len(parts) == 2 && len(parts[1]) != 0 { port = parts[0] proto = parts[1] } body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)) if err != nil { return err } var out Container err = json.Unmarshal(body, &out) if err != nil { return err } if frontends, exists := out.NetworkSettings.Ports[nat.Port(port+"/"+proto)]; exists && frontends != nil { for _, frontend := range frontends { fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort) } } else { return fmt.Errorf("Error: No public port '%s' published for %s", cmd.Arg(1), cmd.Arg(0)) } return nil } // 'docker rmi IMAGE' removes all images with the name IMAGE func (cli *DockerCli) CmdRmi(args ...string) error { var ( cmd = cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images") force = cmd.Bool([]string{"f", "-force"}, false, "Force") ) if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() < 1 { cmd.Usage() return nil } v := 
url.Values{} if *force { v.Set("force", "1") } var encounteredError error for _, name := range cmd.Args() { body, _, err := readBody(cli.call("DELETE", "/images/"+name+"?"+v.Encode(), nil, false)) if err != nil { fmt.Fprintf(cli.err, "%s\n", err) encounteredError = fmt.Errorf("Error: failed to remove one or more images") } else { outs := engine.NewTable("Created", 0) if _, err := outs.ReadListFrom(body); err != nil { fmt.Fprintf(cli.err, "%s\n", err) encounteredError = fmt.Errorf("Error: failed to remove one or more images") continue } for _, out := range outs.Data { if out.Get("Deleted") != "" { fmt.Fprintf(cli.out, "Deleted: %s\n", out.Get("Deleted")) } else { fmt.Fprintf(cli.out, "Untagged: %s\n", out.Get("Untagged")) } } } } return encounteredError } func (cli *DockerCli) CmdHistory(args ...string) error { cmd := cli.Subcmd("history", "[OPTIONS] IMAGE", "Show the history of an image") quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() != 1 { cmd.Usage() return nil } body, _, err := readBody(cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, false)) if err != nil { return err } outs := engine.NewTable("Created", 0) if _, err := outs.ReadListFrom(body); err != nil { return err } w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) if !*quiet { fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE") } for _, out := range outs.Data { outID := out.Get("Id") if !*quiet { if *noTrunc { fmt.Fprintf(w, "%s\t", outID) } else { fmt.Fprintf(w, "%s\t", utils.TruncateID(outID)) } fmt.Fprintf(w, "%s ago\t", utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0)))) if *noTrunc { fmt.Fprintf(w, "%s\t", out.Get("CreatedBy")) } else { fmt.Fprintf(w, "%s\t", utils.Trunc(out.Get("CreatedBy"), 45)) } fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("Size"))) } else { if *noTrunc { fmt.Fprintln(w, outID) } else { fmt.Fprintln(w, utils.TruncateID(outID)) } } } w.Flush() return nil } func (cli *DockerCli) CmdRm(args ...string) error { cmd := cli.Subcmd("rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove one or more containers") v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated to the container") link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link and not the underlying container") force := cmd.Bool([]string{"f", "-force"}, false, "Force removal of running container") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() < 1 { cmd.Usage() return nil } val := url.Values{} if *v { val.Set("v", "1") } if *link { val.Set("link", "1") } if *force { val.Set("force", "1") } var encounteredError error for _, name := range cmd.Args() { _, _, err := readBody(cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil, false)) if err != nil { fmt.Fprintf(cli.err, "%s\n", err) encounteredError = fmt.Errorf("Error: failed to remove one or more containers") } else { fmt.Fprintf(cli.out, "%s\n", name) } } return encounteredError } // 'docker kill NAME' kills a running container func (cli *DockerCli) CmdKill(args ...string) error { cmd := cli.Subcmd("kill", "[OPTIONS] CONTAINER [CONTAINER...]", "Kill a running container (send SIGKILL, or specified signal)") signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() < 1 { cmd.Usage() return nil } var 
encounteredError error for _, name := range cmd.Args() { if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", name, *signal), nil, false)); err != nil { fmt.Fprintf(cli.err, "%s\n", err) encounteredError = fmt.Errorf("Error: failed to kill one or more containers") } else { fmt.Fprintf(cli.out, "%s\n", name) } } return encounteredError } func (cli *DockerCli) CmdImport(args ...string) error { cmd := cli.Subcmd("import", "URL|- [REPOSITORY[:TAG]]", "Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() < 1 { cmd.Usage() return nil } var src, repository, tag string if cmd.NArg() == 3 { fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' as been deprecated. Please use URL|- [REPOSITORY[:TAG]]\n") src, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2) } else { src = cmd.Arg(0) repository, tag = utils.ParseRepositoryTag(cmd.Arg(1)) } v := url.Values{} v.Set("repo", repository) v.Set("tag", tag) v.Set("fromSrc", src) var in io.Reader if src == "-" { in = cli.in } return cli.stream("POST", "/images/create?"+v.Encode(), in, cli.out, nil) } func (cli *DockerCli) CmdPush(args ...string) error { cmd := cli.Subcmd("push", "NAME", "Push an image or a repository to the registry") if err := cmd.Parse(args); err != nil { return nil } name := cmd.Arg(0) if name == "" { cmd.Usage() return nil } cli.LoadConfigFile() // Resolve the Repository name from fqn to hostname + name hostname, _, err := registry.ResolveRepositoryName(name) if err != nil { return err } // Resolve the Auth config relevant for this server authConfig := cli.configFile.ResolveAuthConfig(hostname) // If we're not using a custom registry, we know the restrictions // applied to repository names and can warn the user in advance. // Custom repositories can have different rules, and we must also // allow pushing by image ID. if len(strings.SplitN(name, "/", 2)) == 1 { username := cli.configFile.Configs[auth.IndexServerAddress()].Username if username == "" { username = "" } return fmt.Errorf("You cannot push a \"root\" repository. 
Please rename your repository in / (ex: %s/%s)", username, name) } v := url.Values{} push := func(authConfig auth.AuthConfig) error { buf, err := json.Marshal(authConfig) if err != nil { return err } registryAuthHeader := []string{ base64.URLEncoding.EncodeToString(buf), } return cli.stream("POST", "/images/"+name+"/push?"+v.Encode(), nil, cli.out, map[string][]string{ "X-Registry-Auth": registryAuthHeader, }) } if err := push(authConfig); err != nil { if strings.Contains(err.Error(), "Status 401") { fmt.Fprintln(cli.out, "\nPlease login prior to push:") if err := cli.CmdLogin(hostname); err != nil { return err } authConfig := cli.configFile.ResolveAuthConfig(hostname) return push(authConfig) } return err } return nil } func (cli *DockerCli) CmdPull(args ...string) error { cmd := cli.Subcmd("pull", "NAME", "Pull an image or a repository from the registry") tag := cmd.String([]string{"t", "-tag"}, "", "Download tagged image in repository") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() != 1 { cmd.Usage() return nil } remote, parsedTag := utils.ParseRepositoryTag(cmd.Arg(0)) if *tag == "" { *tag = parsedTag } // Resolve the Repository name from fqn to hostname + name hostname, _, err := registry.ResolveRepositoryName(remote) if err != nil { return err } cli.LoadConfigFile() // Resolve the Auth config relevant for this server authConfig := cli.configFile.ResolveAuthConfig(hostname) v := url.Values{} v.Set("fromImage", remote) v.Set("tag", *tag) pull := func(authConfig auth.AuthConfig) error { buf, err := json.Marshal(authConfig) if err != nil { return err } registryAuthHeader := []string{ base64.URLEncoding.EncodeToString(buf), } return cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.out, map[string][]string{ "X-Registry-Auth": registryAuthHeader, }) } if err := pull(authConfig); err != nil { if strings.Contains(err.Error(), "Status 401") { fmt.Fprintln(cli.out, "\nPlease login prior to pull:") if err := cli.CmdLogin(hostname); err != nil { return err } authConfig := cli.configFile.ResolveAuthConfig(hostname) return pull(authConfig) } return err } return nil } func (cli *DockerCli) CmdImages(args ...string) error { cmd := cli.Subcmd("images", "[OPTIONS] [NAME]", "List images") quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (by default filter out the intermediate images used to build)") noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") flViz := cmd.Bool([]string{"v", "#viz", "-viz"}, false, "Output graph in graphviz format") flTree := cmd.Bool([]string{"t", "#tree", "-tree"}, false, "Output graph in tree format") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() > 1 { cmd.Usage() return nil } filter := cmd.Arg(0) if *flViz || *flTree { body, _, err := readBody(cli.call("GET", "/images/json?all=1", nil, false)) if err != nil { return err } outs := engine.NewTable("Created", 0) if _, err := outs.ReadListFrom(body); err != nil { return err } var ( printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string) startImage *engine.Env roots = engine.NewTable("Created", outs.Len()) byParent = make(map[string]*engine.Table) ) for _, image := range outs.Data { if image.Get("ParentId") == "" { roots.Add(image) } else { if children, exists := byParent[image.Get("ParentId")]; exists { children.Add(image) } else { byParent[image.Get("ParentId")] = engine.NewTable("Created", 1) byParent[image.Get("ParentId")].Add(image) 
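// byParent indexes every image under its parent ID; together with the
// root images collected above it drives the recursive WalkTree rendering
// used for both the --tree and --viz outputs.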
} } if filter != "" { if filter == image.Get("Id") || filter == utils.TruncateID(image.Get("Id")) { startImage = image } for _, repotag := range image.GetList("RepoTags") { if repotag == filter { startImage = image } } } } if *flViz { fmt.Fprintf(cli.out, "digraph docker {\n") printNode = (*DockerCli).printVizNode } else { printNode = (*DockerCli).printTreeNode } if startImage != nil { root := engine.NewTable("Created", 1) root.Add(startImage) cli.WalkTree(*noTrunc, root, byParent, "", printNode) } else if filter == "" { cli.WalkTree(*noTrunc, roots, byParent, "", printNode) } if *flViz { fmt.Fprintf(cli.out, " base [style=invisible]\n}\n") } } else { v := url.Values{} if cmd.NArg() == 1 { v.Set("filter", filter) } if *all { v.Set("all", "1") } body, _, err := readBody(cli.call("GET", "/images/json?"+v.Encode(), nil, false)) if err != nil { return err } outs := engine.NewTable("Created", 0) if _, err := outs.ReadListFrom(body); err != nil { return err } w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) if !*quiet { fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE") } for _, out := range outs.Data { for _, repotag := range out.GetList("RepoTags") { repo, tag := utils.ParseRepositoryTag(repotag) outID := out.Get("Id") if !*noTrunc { outID = utils.TruncateID(outID) } if !*quiet { fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, outID, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), utils.HumanSize(out.GetInt64("VirtualSize"))) } else { fmt.Fprintln(w, outID) } } } if !*quiet { w.Flush() } } return nil } func (cli *DockerCli) WalkTree(noTrunc bool, images *engine.Table, byParent map[string]*engine.Table, prefix string, printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string)) { length := images.Len() if length > 1 { for index, image := range images.Data { if index+1 == length { printNode(cli, noTrunc, image, prefix+"└─") if subimages, exists := byParent[image.Get("Id")]; exists { cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode) } } else { printNode(cli, noTrunc, image, prefix+"\u251C─") if subimages, exists := byParent[image.Get("Id")]; exists { cli.WalkTree(noTrunc, subimages, byParent, prefix+"\u2502 ", printNode) } } } } else { for _, image := range images.Data { printNode(cli, noTrunc, image, prefix+"└─") if subimages, exists := byParent[image.Get("Id")]; exists { cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode) } } } } func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix string) { var ( imageID string parentID string ) if noTrunc { imageID = image.Get("Id") parentID = image.Get("ParentId") } else { imageID = utils.TruncateID(image.Get("Id")) parentID = utils.TruncateID(image.Get("ParentId")) } if parentID == "" { fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID) } else { fmt.Fprintf(cli.out, " \"%s\" -> \"%s\"\n", parentID, imageID) } if image.GetList("RepoTags")[0] != ":" { fmt.Fprintf(cli.out, " \"%s\" [label=\"%s\\n%s\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n", imageID, imageID, strings.Join(image.GetList("RepoTags"), "\\n")) } } func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix string) { var imageID string if noTrunc { imageID = image.Get("Id") } else { imageID = utils.TruncateID(image.Get("Id")) } fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, utils.HumanSize(image.GetInt64("VirtualSize"))) if image.GetList("RepoTags")[0] != ":" { fmt.Fprintf(cli.out, " Tags: %s\n", 
strings.Join(image.GetList("RepoTags"), ", ")) } else { fmt.Fprint(cli.out, "\n") } } func (cli *DockerCli) CmdPs(args ...string) error { cmd := cli.Subcmd("ps", "[OPTIONS]", "List containers") quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") size := cmd.Bool([]string{"s", "-size"}, false, "Display sizes") all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers. Only running containers are shown by default.") noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") nLatest := cmd.Bool([]string{"l", "-latest"}, false, "Show only the latest created container, include non-running ones.") since := cmd.String([]string{"#sinceId", "-since-id"}, "", "Show only containers created since Id, include non-running ones.") before := cmd.String([]string{"#beforeId", "-before-id"}, "", "Show only container created before Id, include non-running ones.") last := cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running ones.") if err := cmd.Parse(args); err != nil { return nil } v := url.Values{} if *last == -1 && *nLatest { *last = 1 } if *all { v.Set("all", "1") } if *last != -1 { v.Set("limit", strconv.Itoa(*last)) } if *since != "" { v.Set("since", *since) } if *before != "" { v.Set("before", *before) } if *size { v.Set("size", "1") } body, _, err := readBody(cli.call("GET", "/containers/json?"+v.Encode(), nil, false)) if err != nil { return err } outs := engine.NewTable("Created", 0) if _, err := outs.ReadListFrom(body); err != nil { return err } w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) if !*quiet { fmt.Fprint(w, "CONTAINER ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tPORTS\tNAMES") if *size { fmt.Fprintln(w, "\tSIZE") } else { fmt.Fprint(w, "\n") } } for _, out := range outs.Data { var ( outID = out.Get("Id") outNames = out.GetList("Names") ) if !*noTrunc { outID = utils.TruncateID(outID) } // Remove the leading / from the names for i := 0; i < len(outNames); i++ { outNames[i] = outNames[i][1:] } if !*quiet { var ( outCommand = out.Get("Command") ports = engine.NewTable("", 0) ) if !*noTrunc { outCommand = utils.Trunc(outCommand, 20) } ports.ReadListFrom([]byte(out.Get("Ports"))) fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, out.Get("Image"), outCommand, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), out.Get("Status"), displayablePorts(ports), strings.Join(outNames, ",")) if *size { if out.GetInt("SizeRootFs") > 0 { fmt.Fprintf(w, "%s (virtual %s)\n", utils.HumanSize(out.GetInt64("SizeRw")), utils.HumanSize(out.GetInt64("SizeRootFs"))) } else { fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("SizeRw"))) } } else { fmt.Fprint(w, "\n") } } else { fmt.Fprintln(w, outID) } } if !*quiet { w.Flush() } return nil } func (cli *DockerCli) CmdCommit(args ...string) error { cmd := cli.Subcmd("commit", "[OPTIONS] CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes") flComment := cmd.String([]string{"m", "-message"}, "", "Commit message") flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (eg. \"John Hannibal Smith \"") flConfig := cmd.String([]string{"#run", "-run"}, "", "Config automatically applied when the image is run. "+`(ex: -run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')`) if err := cmd.Parse(args); err != nil { return nil } var name, repository, tag string if cmd.NArg() == 3 { fmt.Fprintf(cli.err, "[DEPRECATED] The format 'CONTAINER [REPOSITORY [TAG]]' as been deprecated. 
Please use CONTAINER [REPOSITORY[:TAG]]\n") name, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2) } else { name = cmd.Arg(0) repository, tag = utils.ParseRepositoryTag(cmd.Arg(1)) } if name == "" { cmd.Usage() return nil } v := url.Values{} v.Set("container", name) v.Set("repo", repository) v.Set("tag", tag) v.Set("comment", *flComment) v.Set("author", *flAuthor) var ( config *runconfig.Config env engine.Env ) if *flConfig != "" { config = &runconfig.Config{} if err := json.Unmarshal([]byte(*flConfig), config); err != nil { return err } } stream, _, err := cli.call("POST", "/commit?"+v.Encode(), config, false) if err != nil { return err } if err := env.Decode(stream); err != nil { return err } fmt.Fprintf(cli.out, "%s\n", env.Get("Id")) return nil } func (cli *DockerCli) CmdEvents(args ...string) error { cmd := cli.Subcmd("events", "[OPTIONS]", "Get real time events from the server") since := cmd.String([]string{"#since", "-since"}, "", "Show previously created events and then stream.") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() != 0 { cmd.Usage() return nil } v := url.Values{} if *since != "" { loc := time.FixedZone(time.Now().Zone()) format := "2006-01-02 15:04:05 -0700 MST" if len(*since) < len(format) { format = format[:len(*since)] } if t, err := time.ParseInLocation(format, *since, loc); err == nil { v.Set("since", strconv.FormatInt(t.Unix(), 10)) } else { v.Set("since", *since) } } if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil { return err } return nil } func (cli *DockerCli) CmdExport(args ...string) error { cmd := cli.Subcmd("export", "CONTAINER", "Export the contents of a filesystem as a tar archive to STDOUT") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() != 1 { cmd.Usage() return nil } if err := cli.stream("GET", "/containers/"+cmd.Arg(0)+"/export", nil, cli.out, nil); err != nil { return err } return nil } func (cli *DockerCli) CmdDiff(args ...string) error { cmd := cli.Subcmd("diff", "CONTAINER", "Inspect changes on a container's filesystem") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() != 1 { cmd.Usage() return nil } body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil, false)) if err != nil { return err } outs := engine.NewTable("", 0) if _, err := outs.ReadListFrom(body); err != nil { return err } for _, change := range outs.Data { var kind string switch change.GetInt("Kind") { case archive.ChangeModify: kind = "C" case archive.ChangeAdd: kind = "A" case archive.ChangeDelete: kind = "D" } fmt.Fprintf(cli.out, "%s %s\n", kind, change.Get("Path")) } return nil } func (cli *DockerCli) CmdLogs(args ...string) error { cmd := cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container") follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() != 1 { cmd.Usage() return nil } name := cmd.Arg(0) body, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) if err != nil { return err } container := &Container{} err = json.Unmarshal(body, container) if err != nil { return err } v := url.Values{} v.Set("logs", "1") v.Set("stdout", "1") v.Set("stderr", "1") if *follow && container.State.Running { v.Set("stream", "1") } if err := cli.hijack("POST", "/containers/"+name+"/attach?"+v.Encode(), container.Config.Tty, nil, cli.out, cli.err, nil); err != nil { return err } return nil } func (cli *DockerCli) CmdAttach(args ...string) error { cmd := 
cli.Subcmd("attach", "[OPTIONS] CONTAINER", "Attach to a running container") noStdin := cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach stdin") proxy := cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() != 1 { cmd.Usage() return nil } name := cmd.Arg(0) body, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) if err != nil { return err } container := &Container{} err = json.Unmarshal(body, container) if err != nil { return err } if !container.State.Running { return fmt.Errorf("You cannot attach to a stopped container, start it first") } if container.Config.Tty && cli.isTerminal { if err := cli.monitorTtySize(cmd.Arg(0)); err != nil { utils.Debugf("Error monitoring TTY size: %s", err) } } var in io.ReadCloser v := url.Values{} v.Set("stream", "1") if !*noStdin && container.Config.OpenStdin { v.Set("stdin", "1") in = cli.in } v.Set("stdout", "1") v.Set("stderr", "1") if *proxy && !container.Config.Tty { sigc := cli.forwardAllSignals(cmd.Arg(0)) defer signal.StopCatch(sigc) } if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil); err != nil { return err } _, status, err := getExitCode(cli, cmd.Arg(0)) if err != nil { return err } if status != 0 { return &utils.StatusError{StatusCode: status} } return nil } func (cli *DockerCli) CmdSearch(args ...string) error { cmd := cli.Subcmd("search", "TERM", "Search the docker index for images") noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") trusted := cmd.Bool([]string{"t", "#trusted", "-trusted"}, false, "Only show trusted builds") stars := cmd.Int([]string{"s", "#stars", "-stars"}, 0, "Only displays with at least xxx stars") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() != 1 { cmd.Usage() return nil } v := url.Values{} v.Set("term", cmd.Arg(0)) body, _, err := readBody(cli.call("GET", "/images/search?"+v.Encode(), nil, true)) if err != nil { return err } outs := engine.NewTable("star_count", 0) if _, err := outs.ReadListFrom(body); err != nil { return err } w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0) fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tTRUSTED\n") for _, out := range outs.Data { if (*trusted && !out.GetBool("is_trusted")) || (*stars > out.GetInt("star_count")) { continue } desc := strings.Replace(out.Get("description"), "\n", " ", -1) desc = strings.Replace(desc, "\r", " ", -1) if !*noTrunc && len(desc) > 45 { desc = utils.Trunc(desc, 42) + "..." } fmt.Fprintf(w, "%s\t%s\t%d\t", out.Get("name"), desc, out.GetInt("star_count")) if out.GetBool("is_official") { fmt.Fprint(w, "[OK]") } fmt.Fprint(w, "\t") if out.GetBool("is_trusted") { fmt.Fprint(w, "[OK]") } fmt.Fprint(w, "\n") } w.Flush() return nil } // Ports type - Used to parse multiple -p flags type ports []int func (cli *DockerCli) CmdTag(args ...string) error { cmd := cli.Subcmd("tag", "[OPTIONS] IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG]", "Tag an image into a repository") force := cmd.Bool([]string{"f", "#force", "-force"}, false, "Force") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() != 2 && cmd.NArg() != 3 { cmd.Usage() return nil } var repository, tag string if cmd.NArg() == 3 { fmt.Fprintf(cli.err, "[DEPRECATED] The format 'IMAGE [REPOSITORY [TAG]]' as been deprecated. 
Please use IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG]]\n") repository, tag = cmd.Arg(1), cmd.Arg(2) } else { repository, tag = utils.ParseRepositoryTag(cmd.Arg(1)) } v := url.Values{} v.Set("repo", repository) v.Set("tag", tag) if *force { v.Set("force", "1") } if _, _, err := readBody(cli.call("POST", "/images/"+cmd.Arg(0)+"/tag?"+v.Encode(), nil, false)); err != nil { return err } return nil } func (cli *DockerCli) CmdRun(args ...string) error { // FIXME: just use runconfig.Parse already config, hostConfig, cmd, err := runconfig.ParseSubcommand(cli.Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container"), args, nil) if err != nil { return err } if config.Image == "" { cmd.Usage() return nil } // Retrieve relevant client-side config var ( flName = cmd.Lookup("name") flRm = cmd.Lookup("rm") flSigProxy = cmd.Lookup("sig-proxy") autoRemove, _ = strconv.ParseBool(flRm.Value.String()) sigProxy, _ = strconv.ParseBool(flSigProxy.Value.String()) ) // Disable sigProxy in case on TTY if config.Tty { sigProxy = false } var containerIDFile io.WriteCloser if len(hostConfig.ContainerIDFile) > 0 { if _, err := os.Stat(hostConfig.ContainerIDFile); err == nil { return fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", hostConfig.ContainerIDFile) } if containerIDFile, err = os.Create(hostConfig.ContainerIDFile); err != nil { return fmt.Errorf("Failed to create the container ID file: %s", err) } defer func() { containerIDFile.Close() var ( cidFileInfo os.FileInfo err error ) if cidFileInfo, err = os.Stat(hostConfig.ContainerIDFile); err != nil { return } if cidFileInfo.Size() == 0 { if err := os.Remove(hostConfig.ContainerIDFile); err != nil { fmt.Printf("failed to remove CID file '%s': %s \n", hostConfig.ContainerIDFile, err) } } }() } containerValues := url.Values{} if name := flName.Value.String(); name != "" { containerValues.Set("name", name) } //create the container stream, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false) //if image not found try to pull it if statusCode == 404 { fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", config.Image) v := url.Values{} repos, tag := utils.ParseRepositoryTag(config.Image) v.Set("fromImage", repos) v.Set("tag", tag) // Resolve the Repository name from fqn to hostname + name hostname, _, err := registry.ResolveRepositoryName(repos) if err != nil { return err } // Load the auth config file, to be able to pull the image cli.LoadConfigFile() // Resolve the Auth config relevant for this server authConfig := cli.configFile.ResolveAuthConfig(hostname) buf, err := json.Marshal(authConfig) if err != nil { return err } registryAuthHeader := []string{ base64.URLEncoding.EncodeToString(buf), } if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.err, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil { return err } if stream, _, err = cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false); err != nil { return err } } else if err != nil { return err } var runResult engine.Env if err := runResult.Decode(stream); err != nil { return err } for _, warning := range runResult.GetList("Warnings") { fmt.Fprintf(cli.err, "WARNING: %s\n", warning) } if len(hostConfig.ContainerIDFile) > 0 { if _, err = containerIDFile.Write([]byte(runResult.Get("Id"))); err != nil { return fmt.Errorf("Failed to write the container ID to the file: %s", err) } } if sigProxy { sigc := 
cli.forwardAllSignals(runResult.Get("Id")) defer signal.StopCatch(sigc) } var ( waitDisplayId chan struct{} errCh chan error ) if !config.AttachStdout && !config.AttachStderr { // Make this asynchrone in order to let the client write to stdin before having to read the ID waitDisplayId = make(chan struct{}) go func() { defer close(waitDisplayId) fmt.Fprintf(cli.out, "%s\n", runResult.Get("Id")) }() } // We need to instanciate the chan because the select needs it. It can // be closed but can't be uninitialized. hijacked := make(chan io.Closer) // Block the return until the chan gets closed defer func() { utils.Debugf("End of CmdRun(), Waiting for hijack to finish.") if _, ok := <-hijacked; ok { utils.Errorf("Hijack did not finish (chan still open)") } }() if config.AttachStdin || config.AttachStdout || config.AttachStderr { var ( out, stderr io.Writer in io.ReadCloser v = url.Values{} ) v.Set("stream", "1") if config.AttachStdin { v.Set("stdin", "1") in = cli.in } if config.AttachStdout { v.Set("stdout", "1") out = cli.out } if config.AttachStderr { v.Set("stderr", "1") if config.Tty { stderr = cli.out } else { stderr = cli.err } } errCh = utils.Go(func() error { return cli.hijack("POST", "/containers/"+runResult.Get("Id")+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked) }) } else { close(hijacked) } // Acknowledge the hijack before starting select { case closer := <-hijacked: // Make sure that hijack gets closed when returning. (result // in closing hijack chan and freeing server's goroutines. if closer != nil { defer closer.Close() } case err := <-errCh: if err != nil { utils.Debugf("Error hijack: %s", err) return err } } //start the container if _, _, err = readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/start", hostConfig, false)); err != nil { return err } if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminal { if err := cli.monitorTtySize(runResult.Get("Id")); err != nil { utils.Errorf("Error monitoring TTY size: %s\n", err) } } if errCh != nil { if err := <-errCh; err != nil { utils.Debugf("Error hijack: %s", err) return err } } // Detached mode: wait for the id to be displayed and return. if !config.AttachStdout && !config.AttachStderr { // Detached mode <-waitDisplayId return nil } var status int // Attached mode if autoRemove { // Autoremove: wait for the container to finish, retrieve // the exit code and remove the container if _, _, err := readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/wait", nil, false)); err != nil { return err } if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil { return err } if _, _, err := readBody(cli.call("DELETE", "/containers/"+runResult.Get("Id")+"?v=1", nil, false)); err != nil { return err } } else { if !config.Tty { // In non-tty mode, we can't dettach, so we know we need to wait. if status, err = waitForExit(cli, runResult.Get("Id")); err != nil { return err } } else { // In TTY mode, there is a race. If the process dies too slowly, the state can be update after the getExitCode call // and result in a wrong exit code. 
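// getExitCode below relies on an inspect of the container, so it reports
// whatever state the daemon has recorded at that instant.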
// No Autoremove: Simply retrieve the exit code if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil { return err } } } if status != 0 { return &utils.StatusError{StatusCode: status} } return nil } func (cli *DockerCli) CmdCp(args ...string) error { cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTPATH", "Copy files/folders from the PATH to the HOSTPATH") if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() != 2 { cmd.Usage() return nil } var copyData engine.Env info := strings.Split(cmd.Arg(0), ":") if len(info) != 2 { return fmt.Errorf("Error: Path not specified") } copyData.Set("Resource", info[1]) copyData.Set("HostPath", cmd.Arg(1)) stream, statusCode, err := cli.call("POST", "/containers/"+info[0]+"/copy", copyData, false) if stream != nil { defer stream.Close() } if statusCode == 404 { return fmt.Errorf("No such container: %v", info[0]) } if err != nil { return err } if statusCode == 200 { if err := archive.Untar(stream, copyData.Get("HostPath"), nil); err != nil { return err } } return nil } func (cli *DockerCli) CmdSave(args ...string) error { cmd := cli.Subcmd("save", "IMAGE", "Save an image to a tar archive (streamed to stdout)") if err := cmd.Parse(args); err != nil { return err } if cmd.NArg() != 1 { cmd.Usage() return nil } image := cmd.Arg(0) if err := cli.stream("GET", "/images/"+image+"/get", nil, cli.out, nil); err != nil { return err } return nil } func (cli *DockerCli) CmdLoad(args ...string) error { cmd := cli.Subcmd("load", "", "Load an image from a tar archive on STDIN") if err := cmd.Parse(args); err != nil { return err } if cmd.NArg() != 0 { cmd.Usage() return nil } if err := cli.stream("POST", "/images/load", cli.in, cli.out, nil); err != nil { return err } return nil } func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) { params := bytes.NewBuffer(nil) if data != nil { if env, ok := data.(engine.Env); ok { if err := env.Encode(params); err != nil { return nil, -1, err } } else { buf, err := json.Marshal(data) if err != nil { return nil, -1, err } if _, err := params.Write(buf); err != nil { return nil, -1, err } } } // fixme: refactor client to support redirect re := regexp.MustCompile("/+") path = re.ReplaceAllString(path, "/") req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", APIVERSION, path), params) if err != nil { return nil, -1, err } if passAuthInfo { cli.LoadConfigFile() // Resolve the Auth config relevant for this server authConfig := cli.configFile.ResolveAuthConfig(auth.IndexServerAddress()) getHeaders := func(authConfig auth.AuthConfig) (map[string][]string, error) { buf, err := json.Marshal(authConfig) if err != nil { return nil, err } registryAuthHeader := []string{ base64.URLEncoding.EncodeToString(buf), } return map[string][]string{"X-Registry-Auth": registryAuthHeader}, nil } if headers, err := getHeaders(authConfig); err == nil && headers != nil { for k, v := range headers { req.Header[k] = v } } } req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) req.Host = cli.addr if data != nil { req.Header.Set("Content-Type", "application/json") } else if method == "POST" { req.Header.Set("Content-Type", "plain/text") } dial, err := net.Dial(cli.proto, cli.addr) if err != nil { if strings.Contains(err.Error(), "connection refused") { return nil, -1, ErrConnectionRefused } return nil, -1, err } clientconn := httputil.NewClientConn(dial, nil) resp, err := clientconn.Do(req) if err != nil { clientconn.Close() if strings.Contains(err.Error(), 
"connection refused") { return nil, -1, ErrConnectionRefused } return nil, -1, err } if resp.StatusCode < 200 || resp.StatusCode >= 400 { body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, -1, err } if len(body) == 0 { return nil, resp.StatusCode, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(resp.StatusCode), req.URL) } return nil, resp.StatusCode, fmt.Errorf("Error: %s", bytes.TrimSpace(body)) } wrapper := utils.NewReadCloserWrapper(resp.Body, func() error { if resp != nil && resp.Body != nil { resp.Body.Close() } return clientconn.Close() }) return wrapper, resp.StatusCode, nil } func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error { if (method == "POST" || method == "PUT") && in == nil { in = bytes.NewReader([]byte{}) } // fixme: refactor client to support redirect re := regexp.MustCompile("/+") path = re.ReplaceAllString(path, "/") req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", APIVERSION, path), in) if err != nil { return err } req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) req.Host = cli.addr if method == "POST" { req.Header.Set("Content-Type", "plain/text") } if headers != nil { for k, v := range headers { req.Header[k] = v } } dial, err := net.Dial(cli.proto, cli.addr) if err != nil { if strings.Contains(err.Error(), "connection refused") { return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") } return err } clientconn := httputil.NewClientConn(dial, nil) resp, err := clientconn.Do(req) defer clientconn.Close() if err != nil { if strings.Contains(err.Error(), "connection refused") { return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") } return err } defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode >= 400 { body, err := ioutil.ReadAll(resp.Body) if err != nil { return err } if len(body) == 0 { return fmt.Errorf("Error :%s", http.StatusText(resp.StatusCode)) } return fmt.Errorf("Error: %s", bytes.TrimSpace(body)) } if MatchesContentType(resp.Header.Get("Content-Type"), "application/json") { return utils.DisplayJSONMessagesStream(resp.Body, out, cli.terminalFd, cli.isTerminal) } if _, err := io.Copy(out, resp.Body); err != nil { return err } return nil } func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer) error { defer func() { if started != nil { close(started) } }() // fixme: refactor client to support redirect re := regexp.MustCompile("/+") path = re.ReplaceAllString(path, "/") req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", APIVERSION, path), nil) if err != nil { return err } req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) req.Header.Set("Content-Type", "plain/text") req.Host = cli.addr dial, err := net.Dial(cli.proto, cli.addr) if err != nil { if strings.Contains(err.Error(), "connection refused") { return fmt.Errorf("Cannot connect to the Docker daemon. 
Is 'docker -d' running on this host?") } return err } clientconn := httputil.NewClientConn(dial, nil) defer clientconn.Close() // Server hijacks the connection, error 'connection closed' expected clientconn.Do(req) rwc, br := clientconn.Hijack() defer rwc.Close() if started != nil { started <- rwc } var receiveStdout chan error var oldState *term.State if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" { oldState, err = term.SetRawTerminal(cli.terminalFd) if err != nil { return err } defer term.RestoreTerminal(cli.terminalFd, oldState) } if stdout != nil || stderr != nil { receiveStdout = utils.Go(func() (err error) { defer func() { if in != nil { if setRawTerminal && cli.isTerminal { term.RestoreTerminal(cli.terminalFd, oldState) } // For some reason this Close call blocks on darwin.. // As the client exists right after, simply discard the close // until we find a better solution. if goruntime.GOOS != "darwin" { in.Close() } } }() // When TTY is ON, use regular copy if setRawTerminal { _, err = io.Copy(stdout, br) } else { _, err = utils.StdCopy(stdout, stderr, br) } utils.Debugf("[hijack] End of stdout") return err }) } sendStdin := utils.Go(func() error { if in != nil { io.Copy(rwc, in) utils.Debugf("[hijack] End of stdin") } if tcpc, ok := rwc.(*net.TCPConn); ok { if err := tcpc.CloseWrite(); err != nil { utils.Errorf("Couldn't send EOF: %s\n", err) } } else if unixc, ok := rwc.(*net.UnixConn); ok { if err := unixc.CloseWrite(); err != nil { utils.Errorf("Couldn't send EOF: %s\n", err) } } // Discard errors due to pipe interruption return nil }) if stdout != nil || stderr != nil { if err := <-receiveStdout; err != nil { utils.Errorf("Error receiveStdout: %s", err) return err } } if !cli.isTerminal { if err := <-sendStdin; err != nil { utils.Errorf("Error sendStdin: %s", err) return err } } return nil } func (cli *DockerCli) getTtySize() (int, int) { if !cli.isTerminal { return 0, 0 } ws, err := term.GetWinsize(cli.terminalFd) if err != nil { utils.Errorf("Error getting size: %s", err) if ws == nil { return 0, 0 } } return int(ws.Height), int(ws.Width) } func (cli *DockerCli) resizeTty(id string) { height, width := cli.getTtySize() if height == 0 && width == 0 { return } v := url.Values{} v.Set("h", strconv.Itoa(height)) v.Set("w", strconv.Itoa(width)) if _, _, err := readBody(cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil, false)); err != nil { utils.Errorf("Error resize: %s", err) } } func (cli *DockerCli) monitorTtySize(id string) error { cli.resizeTty(id) sigchan := make(chan os.Signal, 1) gosignal.Notify(sigchan, syscall.SIGWINCH) go func() { for _ = range sigchan { cli.resizeTty(id) } }() return nil } func (cli *DockerCli) Subcmd(name, signature, description string) *flag.FlagSet { flags := flag.NewFlagSet(name, flag.ContinueOnError) flags.Usage = func() { fmt.Fprintf(cli.err, "\nUsage: docker %s %s\n\n%s\n\n", name, signature, description) flags.PrintDefaults() os.Exit(2) } return flags } func (cli *DockerCli) LoadConfigFile() (err error) { cli.configFile, err = auth.LoadConfig(os.Getenv("HOME")) if err != nil { fmt.Fprintf(cli.err, "WARNING: %s\n", err) } return err } func waitForExit(cli *DockerCli, containerId string) (int, error) { stream, _, err := cli.call("POST", "/containers/"+containerId+"/wait", nil, false) if err != nil { return -1, err } var out engine.Env if err := out.Decode(stream); err != nil { return -1, err } return out.GetInt("StatusCode"), nil } // getExitCode perform an inspect on the container. 
It returns // the running state and the exit code. func getExitCode(cli *DockerCli, containerId string) (bool, int, error) { body, _, err := readBody(cli.call("GET", "/containers/"+containerId+"/json", nil, false)) if err != nil { // If we can't connect, then the daemon probably died. if err != ErrConnectionRefused { return false, -1, err } return false, -1, nil } c := &Container{} if err := json.Unmarshal(body, c); err != nil { return false, -1, err } return c.State.Running, c.State.ExitCode, nil } func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) { if stream != nil { defer stream.Close() } if err != nil { return nil, statusCode, err } body, err := ioutil.ReadAll(stream) if err != nil { return nil, -1, err } return body, statusCode, nil } func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *DockerCli { var ( isTerminal = false terminalFd uintptr ) if in != nil { if file, ok := in.(*os.File); ok { terminalFd = file.Fd() isTerminal = term.IsTerminal(terminalFd) } } if err == nil { err = out } return &DockerCli{ proto: proto, addr: addr, in: in, out: out, err: err, isTerminal: isTerminal, terminalFd: terminalFd, } } type DockerCli struct { proto string addr string configFile *auth.ConfigFile in io.ReadCloser out io.Writer err io.Writer isTerminal bool terminalFd uintptr } docker-0.9.1/integration/0000755000175000017500000000000012314376205013426 5ustar tagtagdocker-0.9.1/integration/sorter_test.go0000644000175000017500000000233512314376205016335 0ustar tagtagpackage docker import ( "github.com/dotcloud/docker/engine" "testing" "time" ) func TestServerListOrderedImagesByCreationDate(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() if err := generateImage("", eng); err != nil { t.Fatal(err) } images := getImages(eng, t, true, "") if images.Data[0].GetInt("Created") < images.Data[1].GetInt("Created") { t.Error("Expected images to be ordered by most recent creation date.") } } func TestServerListOrderedImagesByCreationDateAndTag(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() err := generateImage("bar", eng) if err != nil { t.Fatal(err) } time.Sleep(time.Second) err = generateImage("zed", eng) if err != nil { t.Fatal(err) } images := getImages(eng, t, true, "") if repoTags := images.Data[0].GetList("RepoTags"); repoTags[0] != "repo:zed" && repoTags[0] != "repo:bar" { t.Errorf("Expected Images to be ordered by most recent creation date.") } } func generateImage(name string, eng *engine.Engine) error { archive, err := fakeTar() if err != nil { return err } job := eng.Job("import", "-", "repo", name) job.Stdin.Add(archive) job.SetenvBool("json", true) return job.Run() } docker-0.9.1/integration/commands_test.go0000644000175000017500000007313412314376205016625 0ustar tagtagpackage docker import ( "bufio" "fmt" "github.com/dotcloud/docker" "github.com/dotcloud/docker/api" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/pkg/term" "github.com/dotcloud/docker/utils" "io" "io/ioutil" "os" "path" "regexp" "strconv" "strings" "syscall" "testing" "time" ) func closeWrap(args ...io.Closer) error { e := false ret := fmt.Errorf("Error closing elements") for _, c := range args { if err := c.Close(); err != nil { e = true ret = fmt.Errorf("%s\n%s", ret, err) } } if e { return ret } return nil } func setRaw(t *testing.T, c *docker.Container) *term.State { pty, err := c.GetPtyMaster() if err != nil { t.Fatal(err) } state, err := term.MakeRaw(pty.Fd()) if err != nil { 
t.Fatal(err) } return state } func unsetRaw(t *testing.T, c *docker.Container, state *term.State) { pty, err := c.GetPtyMaster() if err != nil { t.Fatal(err) } term.RestoreTerminal(pty.Fd(), state) } func waitContainerStart(t *testing.T, timeout time.Duration) *docker.Container { var container *docker.Container setTimeout(t, "Waiting for the container to be started timed out", timeout, func() { for { l := globalRuntime.List() if len(l) == 1 && l[0].State.IsRunning() { container = l[0] break } time.Sleep(10 * time.Millisecond) } }) if container == nil { t.Fatal("An error occured while waiting for the container to start") } return container } func setTimeout(t *testing.T, msg string, d time.Duration, f func()) { c := make(chan bool) // Make sure we are not too long go func() { time.Sleep(d) c <- true }() go func() { f() c <- false }() if <-c && msg != "" { t.Fatal(msg) } } func expectPipe(expected string, r io.Reader) error { o, err := bufio.NewReader(r).ReadString('\n') if err != nil { return err } if strings.Trim(o, " \r\n") != expected { return fmt.Errorf("Unexpected output. Expected [%s], received [%s]", expected, o) } return nil } func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error { for i := 0; i < count; i++ { if _, err := w.Write([]byte(input)); err != nil { return err } if err := expectPipe(output, r); err != nil { return err } } return nil } // TestRunHostname checks that 'docker run -h' correctly sets a custom hostname func TestRunHostname(t *testing.T) { stdout, stdoutPipe := io.Pipe() cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c := make(chan struct{}) go func() { defer close(c) if err := cli.CmdRun("-h", "foobar", unitTestImageID, "hostname"); err != nil { t.Fatal(err) } }() setTimeout(t, "Reading command output time out", 2*time.Second, func() { cmdOutput, err := bufio.NewReader(stdout).ReadString('\n') if err != nil { t.Fatal(err) } if cmdOutput != "foobar\n" { t.Fatalf("'hostname' should display '%s', not '%s'", "foobar\n", cmdOutput) } }) container := globalRuntime.List()[0] setTimeout(t, "CmdRun timed out", 10*time.Second, func() { <-c go func() { cli.CmdWait(container.ID) }() if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil { t.Fatal(err) } }) // Cleanup pipes if err := closeWrap(stdout, stdoutPipe); err != nil { t.Fatal(err) } } // TestRunWorkdir checks that 'docker run -w' correctly sets a custom working directory func TestRunWorkdir(t *testing.T) { stdout, stdoutPipe := io.Pipe() cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c := make(chan struct{}) go func() { defer close(c) if err := cli.CmdRun("-w", "/foo/bar", unitTestImageID, "pwd"); err != nil { t.Fatal(err) } }() setTimeout(t, "Reading command output time out", 2*time.Second, func() { cmdOutput, err := bufio.NewReader(stdout).ReadString('\n') if err != nil { t.Fatal(err) } if cmdOutput != "/foo/bar\n" { t.Fatalf("'pwd' should display '%s', not '%s'", "/foo/bar\n", cmdOutput) } }) container := globalRuntime.List()[0] setTimeout(t, "CmdRun timed out", 10*time.Second, func() { <-c go func() { cli.CmdWait(container.ID) }() if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil { t.Fatal(err) } }) // Cleanup pipes if err := closeWrap(stdout, stdoutPipe); err != nil { t.Fatal(err) } } // TestRunWorkdirExists checks that 'docker run -w' correctly sets a custom working directory, even if it exists func 
TestRunWorkdirExists(t *testing.T) { stdout, stdoutPipe := io.Pipe() cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c := make(chan struct{}) go func() { defer close(c) if err := cli.CmdRun("-w", "/proc", unitTestImageID, "pwd"); err != nil { t.Fatal(err) } }() setTimeout(t, "Reading command output time out", 2*time.Second, func() { cmdOutput, err := bufio.NewReader(stdout).ReadString('\n') if err != nil { t.Fatal(err) } if cmdOutput != "/proc\n" { t.Fatalf("'pwd' should display '%s', not '%s'", "/proc\n", cmdOutput) } }) container := globalRuntime.List()[0] setTimeout(t, "CmdRun timed out", 5*time.Second, func() { <-c go func() { cli.CmdWait(container.ID) }() if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil { t.Fatal(err) } }) // Cleanup pipes if err := closeWrap(stdout, stdoutPipe); err != nil { t.Fatal(err) } } func TestRunExit(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c1 := make(chan struct{}) go func() { cli.CmdRun("-i", unitTestImageID, "/bin/cat") close(c1) }() setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() { if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil { t.Fatal(err) } }) container := globalRuntime.List()[0] // Closing /bin/cat stdin, expect it to exit if err := stdin.Close(); err != nil { t.Fatal(err) } // as the process exited, CmdRun must finish and unblock. Wait for it setTimeout(t, "Waiting for CmdRun timed out", 10*time.Second, func() { <-c1 go func() { cli.CmdWait(container.ID) }() if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil { t.Fatal(err) } }) // Make sure that the client has been disconnected setTimeout(t, "The client should have been disconnected once the remote process exited.", 2*time.Second, func() { // Expecting pipe i/o error, just check that read does not block stdin.Read([]byte{}) }) // Cleanup pipes if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil { t.Fatal(err) } } // Expected behaviour: the process dies when the client disconnects func TestRunDisconnect(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c1 := make(chan struct{}) go func() { // We're simulating a disconnect so the return value doesn't matter. What matters is the // fact that CmdRun returns. cli.CmdRun("-i", unitTestImageID, "/bin/cat") close(c1) }() setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() { if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil { t.Fatal(err) } }) // Close pipes (simulate disconnect) if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil { t.Fatal(err) } // as the pipes are close, we expect the process to die, // therefore CmdRun to unblock. Wait for CmdRun setTimeout(t, "Waiting for CmdRun timed out", 2*time.Second, func() { <-c1 }) // Client disconnect after run -i should cause stdin to be closed, which should // cause /bin/cat to exit. 
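// (Contrast with TestRunDisconnectTty below: when a TTY is allocated, the container is expected to keep running after the client disconnects.)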
setTimeout(t, "Waiting for /bin/cat to exit timed out", 2*time.Second, func() { container := globalRuntime.List()[0] container.Wait() if container.State.IsRunning() { t.Fatalf("/bin/cat is still running after closing stdin") } }) } // Expected behaviour: the process stays alive when the client disconnects // but the client detaches. func TestRunDisconnectTty(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c1 := make(chan struct{}) go func() { defer close(c1) // We're simulating a disconnect so the return value doesn't matter. What matters is the // fact that CmdRun returns. if err := cli.CmdRun("-i", "-t", unitTestImageID, "/bin/cat"); err != nil { utils.Debugf("Error CmdRun: %s", err) } }() container := waitContainerStart(t, 10*time.Second) state := setRaw(t, container) defer unsetRaw(t, container, state) // Client disconnect after run -i should keep stdin open in TTY mode setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() { if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil { t.Fatal(err) } }) // Close pipes (simulate disconnect) if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil { t.Fatal(err) } // wait for CmdRun to return setTimeout(t, "Waiting for CmdRun timed out", 5*time.Second, func() { <-c1 }) // In tty mode, we expect the process to stay alive even after client's stdin closes. // Give the monitor some time to do its thing container.WaitTimeout(500 * time.Millisecond) if !container.State.IsRunning() { t.Fatalf("/bin/cat should still be running after closing stdin (tty mode)") } } // TestRunAttachStdin checks attaching to stdin without stdout and stderr. // 'docker run -i -a stdin' should send the client's stdin to the command, // then detach from it and print the container id.
func TestRunAttachStdin(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) ch := make(chan struct{}) go func() { defer close(ch) cli.CmdRun("-i", "-a", "stdin", unitTestImageID, "sh", "-c", "echo hello && cat && sleep 5") }() // Send input to the command, close stdin setTimeout(t, "Write timed out", 10*time.Second, func() { if _, err := stdinPipe.Write([]byte("hi there\n")); err != nil { t.Fatal(err) } if err := stdinPipe.Close(); err != nil { t.Fatal(err) } }) container := globalRuntime.List()[0] // Check output setTimeout(t, "Reading command output time out", 10*time.Second, func() { cmdOutput, err := bufio.NewReader(stdout).ReadString('\n') if err != nil { t.Fatal(err) } if cmdOutput != container.ID+"\n" { t.Fatalf("Wrong output: should be '%s', not '%s'\n", container.ID+"\n", cmdOutput) } }) // wait for CmdRun to return setTimeout(t, "Waiting for CmdRun timed out", 5*time.Second, func() { <-ch }) setTimeout(t, "Waiting for command to exit timed out", 10*time.Second, func() { container.Wait() }) // Check logs if cmdLogs, err := container.ReadLog("json"); err != nil { t.Fatal(err) } else { if output, err := ioutil.ReadAll(cmdLogs); err != nil { t.Fatal(err) } else { expectedLogs := []string{"{\"log\":\"hello\\n\",\"stream\":\"stdout\"", "{\"log\":\"hi there\\n\",\"stream\":\"stdout\""} for _, expectedLog := range expectedLogs { if !strings.Contains(string(output), expectedLog) { t.Fatalf("Unexpected logs: should contains '%s', it is not '%s'\n", expectedLog, output) } } } } } // TestRunDetach checks attaching and detaching with the escape sequence. func TestRunDetach(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) ch := make(chan struct{}) go func() { defer close(ch) cli.CmdRun("-i", "-t", unitTestImageID, "cat") }() container := waitContainerStart(t, 10*time.Second) state := setRaw(t, container) defer unsetRaw(t, container, state) setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() { if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil { t.Fatal(err) } }) setTimeout(t, "Escape sequence timeout", 5*time.Second, func() { stdinPipe.Write([]byte{16}) time.Sleep(100 * time.Millisecond) stdinPipe.Write([]byte{17}) }) // wait for CmdRun to return setTimeout(t, "Waiting for CmdRun timed out", 15*time.Second, func() { <-ch }) closeWrap(stdin, stdinPipe, stdout, stdoutPipe) time.Sleep(500 * time.Millisecond) if !container.State.IsRunning() { t.Fatal("The detached container should be still running") } setTimeout(t, "Waiting for container to die timed out", 20*time.Second, func() { container.Kill() }) } // TestAttachDetach checks that attach in tty mode can be detached using the long container ID func TestAttachDetach(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) ch := make(chan struct{}) go func() { defer close(ch) if err := cli.CmdRun("-i", "-t", "-d", unitTestImageID, "cat"); err != nil { t.Fatal(err) } }() container := waitContainerStart(t, 10*time.Second) setTimeout(t, "Reading container's id timed out", 10*time.Second, func() { buf := make([]byte, 1024) n, err := stdout.Read(buf) if err != nil { 
t.Fatal(err) } if strings.Trim(string(buf[:n]), " \r\n") != container.ID { t.Fatalf("Wrong ID received. Expect %s, received %s", container.ID, buf[:n]) } }) setTimeout(t, "Starting container timed out", 10*time.Second, func() { <-ch }) state := setRaw(t, container) defer unsetRaw(t, container, state) stdin, stdinPipe = io.Pipe() stdout, stdoutPipe = io.Pipe() cli = api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) ch = make(chan struct{}) go func() { defer close(ch) if err := cli.CmdAttach(container.ID); err != nil { if err != io.ErrClosedPipe { t.Fatal(err) } } }() setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() { if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil { if err != io.ErrClosedPipe { t.Fatal(err) } } }) setTimeout(t, "Escape sequence timeout", 5*time.Second, func() { stdinPipe.Write([]byte{16}) time.Sleep(100 * time.Millisecond) stdinPipe.Write([]byte{17}) }) // wait for CmdRun to return setTimeout(t, "Waiting for CmdAttach timed out", 15*time.Second, func() { <-ch }) closeWrap(stdin, stdinPipe, stdout, stdoutPipe) time.Sleep(500 * time.Millisecond) if !container.State.IsRunning() { t.Fatal("The detached container should be still running") } setTimeout(t, "Waiting for container to die timedout", 5*time.Second, func() { container.Kill() }) } // TestAttachDetachTruncatedID checks that attach in tty mode can be detached func TestAttachDetachTruncatedID(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) // Discard the CmdRun output go stdout.Read(make([]byte, 1024)) setTimeout(t, "Starting container timed out", 2*time.Second, func() { if err := cli.CmdRun("-i", "-t", "-d", unitTestImageID, "cat"); err != nil { t.Fatal(err) } }) container := waitContainerStart(t, 10*time.Second) state := setRaw(t, container) defer unsetRaw(t, container, state) stdin, stdinPipe = io.Pipe() stdout, stdoutPipe = io.Pipe() cli = api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) ch := make(chan struct{}) go func() { defer close(ch) if err := cli.CmdAttach(utils.TruncateID(container.ID)); err != nil { if err != io.ErrClosedPipe { t.Fatal(err) } } }() setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() { if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil { if err != io.ErrClosedPipe { t.Fatal(err) } } }) setTimeout(t, "Escape sequence timeout", 5*time.Second, func() { stdinPipe.Write([]byte{16}) time.Sleep(100 * time.Millisecond) stdinPipe.Write([]byte{17}) }) // wait for CmdRun to return setTimeout(t, "Waiting for CmdAttach timed out", 15*time.Second, func() { <-ch }) closeWrap(stdin, stdinPipe, stdout, stdoutPipe) time.Sleep(500 * time.Millisecond) if !container.State.IsRunning() { t.Fatal("The detached container should be still running") } setTimeout(t, "Waiting for container to die timedout", 5*time.Second, func() { container.Kill() }) } // Expected behaviour, the process stays alive when the client disconnects func TestAttachDisconnect(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) go func() { // Start a process in daemon mode if err := cli.CmdRun("-d", "-i", unitTestImageID, "/bin/cat"); err != nil { utils.Debugf("Error CmdRun: 
%s", err) } }() setTimeout(t, "Waiting for CmdRun timed out", 10*time.Second, func() { if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil { t.Fatal(err) } }) setTimeout(t, "Waiting for the container to be started timed out", 10*time.Second, func() { for { l := globalRuntime.List() if len(l) == 1 && l[0].State.IsRunning() { break } time.Sleep(10 * time.Millisecond) } }) container := globalRuntime.List()[0] // Attach to it c1 := make(chan struct{}) go func() { // We're simulating a disconnect so the return value doesn't matter. What matters is the // fact that CmdAttach returns. cli.CmdAttach(container.ID) close(c1) }() setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() { if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil { t.Fatal(err) } }) // Close pipes (client disconnects) if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil { t.Fatal(err) } // Wait for attach to finish: the client disconnected, therefore Attach finished its job setTimeout(t, "Waiting for CmdAttach timed out", 2*time.Second, func() { <-c1 }) // We closed stdin, expect /bin/cat to still be running // Wait a little bit to make sure container.monitor() did its thing err := container.WaitTimeout(500 * time.Millisecond) if err == nil || !container.State.IsRunning() { t.Fatalf("/bin/cat is not running after closing stdin") } // Try to avoid the timeout in destroy. Best effort, don't check error cStdin, _ := container.StdinPipe() cStdin.Close() container.Wait() } // Expected behaviour: container gets deleted automatically after exit func TestRunAutoRemove(t *testing.T) { t.Skip("Fixme. Skipping test for now, race condition") stdout, stdoutPipe := io.Pipe() cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c := make(chan struct{}) go func() { defer close(c) if err := cli.CmdRun("-rm", unitTestImageID, "hostname"); err != nil { t.Fatal(err) } }() var temporaryContainerID string setTimeout(t, "Reading command output time out", 2*time.Second, func() { cmdOutput, err := bufio.NewReader(stdout).ReadString('\n') if err != nil { t.Fatal(err) } temporaryContainerID = cmdOutput if err := closeWrap(stdout, stdoutPipe); err != nil { t.Fatal(err) } }) setTimeout(t, "CmdRun timed out", 10*time.Second, func() { <-c }) time.Sleep(500 * time.Millisecond) if len(globalRuntime.List()) > 0 { t.Fatalf("failed to remove container automatically: container %s still exists", temporaryContainerID) } } func TestCmdLogs(t *testing.T) { t.Skip("Test not implemented") cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) if err := cli.CmdRun(unitTestImageID, "sh", "-c", "ls -l"); err != nil { t.Fatal(err) } if err := cli.CmdRun("-t", unitTestImageID, "sh", "-c", "ls -l"); err != nil { t.Fatal(err) } if err := cli.CmdLogs(globalRuntime.List()[0].ID); err != nil { t.Fatal(err) } } // Expected behaviour: error out when attempting to bind mount non-existing source paths func TestRunErrorBindNonExistingSource(t *testing.T) { cli := api.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c := make(chan struct{}) go func() { defer close(c) // This check is made at runtime, can't be "unit tested" if err := cli.CmdRun("-v", "/i/dont/exist:/tmp", unitTestImageID, "echo 'should fail'"); err == nil { t.Fatal("should have failed to run when using /i/dont/exist as a source for the bind mount") } }()
setTimeout(t, "CmdRun timed out", 5*time.Second, func() { <-c }) } func TestImagesViz(t *testing.T) { stdout, stdoutPipe := io.Pipe() cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) image := buildTestImages(t, globalEngine) c := make(chan struct{}) go func() { defer close(c) if err := cli.CmdImages("--viz"); err != nil { t.Fatal(err) } stdoutPipe.Close() }() setTimeout(t, "Reading command output time out", 2*time.Second, func() { cmdOutputBytes, err := ioutil.ReadAll(bufio.NewReader(stdout)) if err != nil { t.Fatal(err) } cmdOutput := string(cmdOutputBytes) regexpStrings := []string{ "digraph docker {", fmt.Sprintf("base -> \"%s\" \\[style=invis]", unitTestImageIDShort), fmt.Sprintf("label=\"%s\\\\n%s:latest\"", unitTestImageIDShort, unitTestImageName), fmt.Sprintf("label=\"%s\\\\n%s:%s\"", utils.TruncateID(image.ID), "test", "latest"), "base \\[style=invisible]", } compiledRegexps := []*regexp.Regexp{} for _, regexpString := range regexpStrings { regexp, err := regexp.Compile(regexpString) if err != nil { fmt.Println("Error in regex string: ", err) return } compiledRegexps = append(compiledRegexps, regexp) } for _, regexp := range compiledRegexps { if !regexp.MatchString(cmdOutput) { t.Fatalf("images --viz content '%s' did not match regexp '%s'", cmdOutput, regexp) } } }) } func TestImagesTree(t *testing.T) { stdout, stdoutPipe := io.Pipe() cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) image := buildTestImages(t, globalEngine) c := make(chan struct{}) go func() { defer close(c) if err := cli.CmdImages("--tree"); err != nil { t.Fatal(err) } stdoutPipe.Close() }() setTimeout(t, "Reading command output time out", 2*time.Second, func() { cmdOutputBytes, err := ioutil.ReadAll(bufio.NewReader(stdout)) if err != nil { t.Fatal(err) } cmdOutput := string(cmdOutputBytes) regexpStrings := []string{ fmt.Sprintf("└─%s Virtual Size: \\d+.\\d+ MB Tags: %s:latest", unitTestImageIDShort, unitTestImageName), "(?m) └─[0-9a-f]+.*", "(?m) └─[0-9a-f]+.*", "(?m) └─[0-9a-f]+.*", fmt.Sprintf("(?m)^ └─%s Virtual Size: \\d+.\\d+ MB Tags: test:latest", utils.TruncateID(image.ID)), } compiledRegexps := []*regexp.Regexp{} for _, regexpString := range regexpStrings { regexp, err := regexp.Compile(regexpString) if err != nil { fmt.Println("Error in regex string: ", err) return } compiledRegexps = append(compiledRegexps, regexp) } for _, regexp := range compiledRegexps { if !regexp.MatchString(cmdOutput) { t.Fatalf("images --tree content '%s' did not match regexp '%s'", cmdOutput, regexp) } } }) } func buildTestImages(t *testing.T, eng *engine.Engine) *docker.Image { var testBuilder = testContextTemplate{ ` from {IMAGE} run sh -c 'echo root:testpass > /tmp/passwd' run mkdir -p /var/run/sshd run [ "$(cat /tmp/passwd)" = "root:testpass" ] run [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ] `, nil, nil, } image, err := buildImage(testBuilder, t, eng, true) if err != nil { t.Fatal(err) } if err := eng.Job("tag", image.ID, "test").Run(); err != nil { t.Fatal(err) } return image } // #2098 - Docker cidFiles only contain short version of the containerId //sudo docker run --cidfile /tmp/docker_test.cid ubuntu echo "test" // TestRunCidFile tests that run --cidfile returns the longid func TestRunCidFileCheckIDLength(t *testing.T) { stdout, stdoutPipe := io.Pipe() tmpDir, err := ioutil.TempDir("", "TestRunCidFile") if err != nil { t.Fatal(err) } tmpCidFile := path.Join(tmpDir, "cid") cli 
:= api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c := make(chan struct{}) go func() { defer close(c) if err := cli.CmdRun("--cidfile", tmpCidFile, unitTestImageID, "ls"); err != nil { t.Fatal(err) } }() defer os.RemoveAll(tmpDir) setTimeout(t, "Reading command output time out", 2*time.Second, func() { cmdOutput, err := bufio.NewReader(stdout).ReadString('\n') if err != nil { t.Fatal(err) } if len(cmdOutput) < 1 { t.Fatalf("'ls' should return something , not '%s'", cmdOutput) } //read the tmpCidFile buffer, err := ioutil.ReadFile(tmpCidFile) if err != nil { t.Fatal(err) } id := string(buffer) if len(id) != len("2bf44ea18873287bd9ace8a4cb536a7cbe134bed67e805fdf2f58a57f69b320c") { t.Fatalf("--cidfile should be a long id, not '%s'", id) } //test that its a valid cid? (though the container is gone..) //remove the file and dir. }) setTimeout(t, "CmdRun timed out", 5*time.Second, func() { <-c }) } // Ensure that CIDFile gets deleted if it's empty // Perform this test by making `docker run` fail func TestRunCidFileCleanupIfEmpty(t *testing.T) { tmpDir, err := ioutil.TempDir("", "TestRunCidFile") if err != nil { t.Fatal(err) } tmpCidFile := path.Join(tmpDir, "cid") cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) c := make(chan struct{}) go func() { defer close(c) if err := cli.CmdRun("--cidfile", tmpCidFile, unitTestImageID); err == nil { t.Fatal("running without a command should haveve failed") } if _, err := os.Stat(tmpCidFile); err == nil { t.Fatalf("empty CIDFile '%s' should've been deleted", tmpCidFile) } }() defer os.RemoveAll(tmpDir) setTimeout(t, "CmdRun timed out", 5*time.Second, func() { <-c }) } func TestContainerOrphaning(t *testing.T) { // setup a temporary directory tmpDir, err := ioutil.TempDir("", "project") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) // setup a CLI and server cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) defer cleanup(globalEngine, t) srv := mkServerFromEngine(globalEngine, t) // closure to build something buildSomething := func(template string, image string) string { dockerfile := path.Join(tmpDir, "Dockerfile") replacer := strings.NewReplacer("{IMAGE}", unitTestImageID) contents := replacer.Replace(template) ioutil.WriteFile(dockerfile, []byte(contents), 0x777) if err := cli.CmdBuild("-t", image, tmpDir); err != nil { t.Fatal(err) } img, err := srv.ImageInspect(image) if err != nil { t.Fatal(err) } return img.ID } // build an image imageName := "orphan-test" template1 := ` from {IMAGE} cmd ["/bin/echo", "holla"] ` img1 := buildSomething(template1, imageName) // create a container using the fist image if err := cli.CmdRun(imageName); err != nil { t.Fatal(err) } // build a new image that splits lineage template2 := ` from {IMAGE} cmd ["/bin/echo", "holla"] expose 22 ` buildSomething(template2, imageName) // remove the second image by name resp := engine.NewTable("", 0) if err := srv.DeleteImage(imageName, resp, true, false); err == nil { t.Fatal("Expected error, got none") } // see if we deleted the first image (and orphaned the container) for _, i := range resp.Data { if img1 == i.Get("Deleted") { t.Fatal("Orphaned image with container") } } } func TestCmdKill(t *testing.T) { var ( stdin, stdinPipe = io.Pipe() stdout, stdoutPipe = io.Pipe() cli = api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) cli2 = api.NewDockerCli(nil, 
ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) ) defer cleanup(globalEngine, t) ch := make(chan struct{}) go func() { defer close(ch) cli.CmdRun("-i", "-t", unitTestImageID, "sh", "-c", "trap 'echo SIGUSR1' USR1; trap 'echo SIGUSR2' USR2; echo Ready; while true; do read; done") }() container := waitContainerStart(t, 10*time.Second) setTimeout(t, "Read Ready timed out", 3*time.Second, func() { if err := expectPipe("Ready", stdout); err != nil { t.Fatal(err) } }) setTimeout(t, "SIGUSR1 timed out", 2*time.Second, func() { for i := 0; i < 10; i++ { if err := cli2.CmdKill("-s", strconv.Itoa(int(syscall.SIGUSR1)), container.ID); err != nil { t.Fatal(err) } if err := expectPipe("SIGUSR1", stdout); err != nil { t.Fatal(err) } } }) setTimeout(t, "SIGUSR2 timed out", 2*time.Second, func() { for i := 0; i < 10; i++ { if err := cli2.CmdKill("--signal=USR2", container.ID); err != nil { t.Fatal(err) } if err := expectPipe("SIGUSR2", stdout); err != nil { t.Fatal(err) } } }) stdout.Close() time.Sleep(500 * time.Millisecond) if !container.State.IsRunning() { t.Fatal("The container should be still running") } setTimeout(t, "Waiting for container timedout", 5*time.Second, func() { if err := cli2.CmdKill(container.ID); err != nil { t.Fatal(err) } <-ch if err := cli2.CmdWait(container.ID); err != nil { t.Fatal(err) } }) closeWrap(stdin, stdinPipe, stdout, stdoutPipe) } docker-0.9.1/integration/graph_test.go0000644000175000017500000001656412314376205016131 0ustar tagtagpackage docker import ( "errors" "github.com/dotcloud/docker" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/graphdriver" "github.com/dotcloud/docker/utils" "io" "io/ioutil" "os" "path" "testing" "time" ) func TestMount(t *testing.T) { graph, driver := tempGraph(t) defer os.RemoveAll(graph.Root) defer driver.Cleanup() archive, err := fakeTar() if err != nil { t.Fatal(err) } image, err := graph.Create(archive, nil, "Testing", "", nil) if err != nil { t.Fatal(err) } tmp, err := ioutil.TempDir("", "docker-test-graph-mount-") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) rootfs := path.Join(tmp, "rootfs") if err := os.MkdirAll(rootfs, 0700); err != nil { t.Fatal(err) } rw := path.Join(tmp, "rw") if err := os.MkdirAll(rw, 0700); err != nil { t.Fatal(err) } if _, err := driver.Get(image.ID); err != nil { t.Fatal(err) } } func TestInit(t *testing.T) { graph, _ := tempGraph(t) defer nukeGraph(graph) // Root should exist if _, err := os.Stat(graph.Root); err != nil { t.Fatal(err) } // Map() should be empty if l, err := graph.Map(); err != nil { t.Fatal(err) } else if len(l) != 0 { t.Fatalf("len(Map()) should return %d, not %d", 0, len(l)) } } // Test that Register can be interrupted cleanly without side effects func TestInterruptedRegister(t *testing.T) { graph, _ := tempGraph(t) defer nukeGraph(graph) badArchive, w := io.Pipe() // Use a pipe reader as a fake archive which never yields data image := &docker.Image{ ID: docker.GenerateID(), Comment: "testing", Created: time.Now(), } w.CloseWithError(errors.New("But I'm not a tarball!")) // (Nobody's perfect, darling) graph.Register(nil, badArchive, image) if _, err := graph.Get(image.ID); err == nil { t.Fatal("Image should not exist after Register is interrupted") } // Registering the same image again should succeed if the first register was interrupted goodArchive, err := fakeTar() if err != nil { t.Fatal(err) } if err := graph.Register(nil, goodArchive, image); err != nil { t.Fatal(err) } } // FIXME: Do more extensive 
tests (ex: create multiple, delete, recreate; // create multiple, check the amount of images and paths, etc..) func TestGraphCreate(t *testing.T) { graph, _ := tempGraph(t) defer nukeGraph(graph) archive, err := fakeTar() if err != nil { t.Fatal(err) } image, err := graph.Create(archive, nil, "Testing", "", nil) if err != nil { t.Fatal(err) } if err := docker.ValidateID(image.ID); err != nil { t.Fatal(err) } if image.Comment != "Testing" { t.Fatalf("Wrong comment: should be '%s', not '%s'", "Testing", image.Comment) } if image.DockerVersion != dockerversion.VERSION { t.Fatalf("Wrong docker_version: should be '%s', not '%s'", dockerversion.VERSION, image.DockerVersion) } images, err := graph.Map() if err != nil { t.Fatal(err) } else if l := len(images); l != 1 { t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l) } if images[image.ID] == nil { t.Fatalf("Could not find image with id %s", image.ID) } } func TestRegister(t *testing.T) { graph, _ := tempGraph(t) defer nukeGraph(graph) archive, err := fakeTar() if err != nil { t.Fatal(err) } image := &docker.Image{ ID: docker.GenerateID(), Comment: "testing", Created: time.Now(), } err = graph.Register(nil, archive, image) if err != nil { t.Fatal(err) } if images, err := graph.Map(); err != nil { t.Fatal(err) } else if l := len(images); l != 1 { t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l) } if resultImg, err := graph.Get(image.ID); err != nil { t.Fatal(err) } else { if resultImg.ID != image.ID { t.Fatalf("Wrong image ID. Should be '%s', not '%s'", image.ID, resultImg.ID) } if resultImg.Comment != image.Comment { t.Fatalf("Wrong image comment. Should be '%s', not '%s'", image.Comment, resultImg.Comment) } } } // Test that an image can be deleted by its shorthand prefix func TestDeletePrefix(t *testing.T) { graph, _ := tempGraph(t) defer nukeGraph(graph) img := createTestImage(graph, t) if err := graph.Delete(utils.TruncateID(img.ID)); err != nil { t.Fatal(err) } assertNImages(graph, t, 0) } func createTestImage(graph *docker.Graph, t *testing.T) *docker.Image { archive, err := fakeTar() if err != nil { t.Fatal(err) } img, err := graph.Create(archive, nil, "Test image", "", nil) if err != nil { t.Fatal(err) } return img } func TestDelete(t *testing.T) { graph, _ := tempGraph(t) defer nukeGraph(graph) archive, err := fakeTar() if err != nil { t.Fatal(err) } assertNImages(graph, t, 0) img, err := graph.Create(archive, nil, "Bla bla", "", nil) if err != nil { t.Fatal(err) } assertNImages(graph, t, 1) if err := graph.Delete(img.ID); err != nil { t.Fatal(err) } assertNImages(graph, t, 0) archive, err = fakeTar() if err != nil { t.Fatal(err) } // Test 2 create (same name) / 1 delete img1, err := graph.Create(archive, nil, "Testing", "", nil) if err != nil { t.Fatal(err) } archive, err = fakeTar() if err != nil { t.Fatal(err) } if _, err = graph.Create(archive, nil, "Testing", "", nil); err != nil { t.Fatal(err) } assertNImages(graph, t, 2) if err := graph.Delete(img1.ID); err != nil { t.Fatal(err) } assertNImages(graph, t, 1) // Test delete wrong name if err := graph.Delete("Not_foo"); err == nil { t.Fatalf("Deleting wrong ID should return an error") } assertNImages(graph, t, 1) archive, err = fakeTar() if err != nil { t.Fatal(err) } // Test delete twice (pull -> rm -> pull -> rm) if err := graph.Register(nil, archive, img1); err != nil { t.Fatal(err) } if err := graph.Delete(img1.ID); err != nil { t.Fatal(err) } assertNImages(graph, t, 1) } func TestByParent(t *testing.T) { archive1, _ := fakeTar() archive2, _ := fakeTar() 
archive3, _ := fakeTar() graph, _ := tempGraph(t) defer nukeGraph(graph) parentImage := &docker.Image{ ID: docker.GenerateID(), Comment: "parent", Created: time.Now(), Parent: "", } childImage1 := &docker.Image{ ID: docker.GenerateID(), Comment: "child1", Created: time.Now(), Parent: parentImage.ID, } childImage2 := &docker.Image{ ID: docker.GenerateID(), Comment: "child2", Created: time.Now(), Parent: parentImage.ID, } _ = graph.Register(nil, archive1, parentImage) _ = graph.Register(nil, archive2, childImage1) _ = graph.Register(nil, archive3, childImage2) byParent, err := graph.ByParent() if err != nil { t.Fatal(err) } numChildren := len(byParent[parentImage.ID]) if numChildren != 2 { t.Fatalf("Expected 2 children, found %d", numChildren) } } /* * HELPER FUNCTIONS */ func assertNImages(graph *docker.Graph, t *testing.T, n int) { if images, err := graph.Map(); err != nil { t.Fatal(err) } else if actualN := len(images); actualN != n { t.Fatalf("Expected %d images, found %d", n, actualN) } } func tempGraph(t *testing.T) (*docker.Graph, graphdriver.Driver) { tmp, err := ioutil.TempDir("", "docker-graph-") if err != nil { t.Fatal(err) } driver, err := graphdriver.New(tmp) if err != nil { t.Fatal(err) } graph, err := docker.NewGraph(tmp, driver) if err != nil { t.Fatal(err) } return graph, driver } func nukeGraph(graph *docker.Graph) { graph.Driver().Cleanup() os.RemoveAll(graph.Root) } func testArchive(t *testing.T) archive.Archive { archive, err := fakeTar() if err != nil { t.Fatal(err) } return archive } docker-0.9.1/integration/auth_test.go0000644000175000017500000000407412314376205015762 0ustar tagtagpackage docker import ( "crypto/rand" "encoding/hex" "fmt" "github.com/dotcloud/docker/auth" "os" "strings" "testing" ) // FIXME: these tests have an external dependency on a staging index hosted // on the docker.io infrastructure. That dependency should be removed. // - Unit tests should have no side-effect dependencies. // - Integration tests should have side-effects limited to the host environment being tested. func TestLogin(t *testing.T) { os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com") defer os.Setenv("DOCKER_INDEX_URL", "") authConfig := &auth.AuthConfig{ Username: "unittester", Password: "surlautrerivejetattendrai", Email: "noise+unittester@docker.com", ServerAddress: "https://indexstaging-docker.dotcloud.com/v1/", } status, err := auth.Login(authConfig, nil) if err != nil { t.Fatal(err) } if status != "Login Succeeded" { t.Fatalf("Expected status \"Login Succeeded\", found \"%s\" instead", status) } } func TestCreateAccount(t *testing.T) { tokenBuffer := make([]byte, 16) _, err := rand.Read(tokenBuffer) if err != nil { t.Fatal(err) } token := hex.EncodeToString(tokenBuffer)[:12] username := "ut" + token authConfig := &auth.AuthConfig{ Username: username, Password: "test42", Email: fmt.Sprintf("docker-ut+%s@example.com", token), ServerAddress: "https://indexstaging-docker.dotcloud.com/v1/", } status, err := auth.Login(authConfig, nil) if err != nil { t.Fatal(err) } expectedStatus := fmt.Sprintf( "Account created. 
Please see the documentation of the registry %s for instructions how to activate it.", authConfig.ServerAddress, ) if status != expectedStatus { t.Fatalf("Expected status: \"%s\", found \"%s\" instead.", expectedStatus, status) } status, err = auth.Login(authConfig, nil) if err == nil { t.Fatalf("Expected error but found nil instead") } expectedError := "Login: Account is not Active" if !strings.Contains(err.Error(), expectedError) { t.Fatalf("Expected message \"%s\" but found \"%s\" instead", expectedError, err) } } docker-0.9.1/integration/server_test.go0000644000175000017500000004756312314376205016341 0ustar tagtagpackage docker import ( "github.com/dotcloud/docker" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/runconfig" "strings" "testing" "time" ) func TestImageTagImageDelete(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() srv := mkServerFromEngine(eng, t) initialImages := getAllImages(eng, t) if err := eng.Job("tag", unitTestImageName, "utest", "tag1").Run(); err != nil { t.Fatal(err) } if err := eng.Job("tag", unitTestImageName, "utest/docker", "tag2").Run(); err != nil { t.Fatal(err) } if err := eng.Job("tag", unitTestImageName, "utest:5000/docker", "tag3").Run(); err != nil { t.Fatal(err) } images := getAllImages(eng, t) nExpected := len(initialImages.Data[0].GetList("RepoTags")) + 3 nActual := len(images.Data[0].GetList("RepoTags")) if nExpected != nActual { t.Errorf("Expected %d images, %d found", nExpected, nActual) } if err := srv.DeleteImage("utest/docker:tag2", engine.NewTable("", 0), true, false); err != nil { t.Fatal(err) } images = getAllImages(eng, t) nExpected = len(initialImages.Data[0].GetList("RepoTags")) + 2 nActual = len(images.Data[0].GetList("RepoTags")) if nExpected != nActual { t.Errorf("Expected %d images, %d found", nExpected, nActual) } if err := srv.DeleteImage("utest:5000/docker:tag3", engine.NewTable("", 0), true, false); err != nil { t.Fatal(err) } images = getAllImages(eng, t) nExpected = len(initialImages.Data[0].GetList("RepoTags")) + 1 nActual = len(images.Data[0].GetList("RepoTags")) if err := srv.DeleteImage("utest:tag1", engine.NewTable("", 0), true, false); err != nil { t.Fatal(err) } images = getAllImages(eng, t) if images.Len() != initialImages.Len() { t.Errorf("Expected %d image, %d found", initialImages.Len(), images.Len()) } } func TestCreateRm(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } id := createTestContainer(eng, config, t) job := eng.Job("containers") job.SetenvBool("all", true) outs, err := job.Stdout.AddListTable() if err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } if len(outs.Data) != 1 { t.Errorf("Expected 1 container, %v found", len(outs.Data)) } job = eng.Job("container_delete", id) job.SetenvBool("removeVolume", true) if err := job.Run(); err != nil { t.Fatal(err) } job = eng.Job("containers") job.SetenvBool("all", true) outs, err = job.Stdout.AddListTable() if err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } if len(outs.Data) != 0 { t.Errorf("Expected 0 container, %v found", len(outs.Data)) } } func TestCreateNumberHostname(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() config, _, _, err := runconfig.Parse([]string{"-h", "web.0", unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } createTestContainer(eng, config, t) } func 
TestCreateNumberUsername(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() config, _, _, err := runconfig.Parse([]string{"-u", "1002", unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } createTestContainer(eng, config, t) } func TestCreateRmVolumes(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() config, hostConfig, _, err := runconfig.Parse([]string{"-v", "/srv", unitTestImageID, "echo", "test"}, nil) if err != nil { t.Fatal(err) } id := createTestContainer(eng, config, t) job := eng.Job("containers") job.SetenvBool("all", true) outs, err := job.Stdout.AddListTable() if err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } if len(outs.Data) != 1 { t.Errorf("Expected 1 container, %v found", len(outs.Data)) } job = eng.Job("start", id) if err := job.ImportEnv(hostConfig); err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } job = eng.Job("stop", id) job.SetenvInt("t", 1) if err := job.Run(); err != nil { t.Fatal(err) } job = eng.Job("container_delete", id) job.SetenvBool("removeVolume", true) if err := job.Run(); err != nil { t.Fatal(err) } job = eng.Job("containers") job.SetenvBool("all", true) outs, err = job.Stdout.AddListTable() if err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } if len(outs.Data) != 0 { t.Errorf("Expected 0 container, %v found", len(outs.Data)) } } func TestCreateRmRunning(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() config, hostConfig, _, err := runconfig.Parse([]string{"-name", "foo", unitTestImageID, "sleep 300"}, nil) if err != nil { t.Fatal(err) } id := createTestContainer(eng, config, t) job := eng.Job("containers") job.SetenvBool("all", true) outs, err := job.Stdout.AddListTable() if err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } if len(outs.Data) != 1 { t.Errorf("Expected 1 container, %v found", len(outs.Data)) } job = eng.Job("start", id) if err := job.ImportEnv(hostConfig); err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } // Test cannot remove running container job = eng.Job("container_delete", id) job.SetenvBool("forceRemove", false) if err := job.Run(); err == nil { t.Fatal("Expected container delete to fail") } // Test can force removal of running container job = eng.Job("container_delete", id) job.SetenvBool("forceRemove", true) if err := job.Run(); err != nil { t.Fatal(err) } job = eng.Job("containers") job.SetenvBool("all", true) outs, err = job.Stdout.AddListTable() if err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } if len(outs.Data) != 0 { t.Errorf("Expected 0 container, %v found", len(outs.Data)) } } func TestCommit(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() config, _, _, err := runconfig.Parse([]string{unitTestImageID, "/bin/cat"}, nil) if err != nil { t.Fatal(err) } id := createTestContainer(eng, config, t) job := eng.Job("commit", id) job.Setenv("repo", "testrepo") job.Setenv("tag", "testtag") job.SetenvJson("config", config) if err := job.Run(); err != nil { t.Fatal(err) } } func TestMergeConfigOnCommit(t *testing.T) { eng := NewTestEngine(t) runtime := mkRuntimeFromEngine(eng, t) defer runtime.Nuke() container1, _, _ := mkContainer(runtime, []string{"-e", "FOO=bar", unitTestImageID, "echo test > /tmp/foo"}, t) defer runtime.Destroy(container1) config, _, _, err := runconfig.Parse([]string{container1.ID, "cat /tmp/foo"}, nil) if err != 
nil { t.Error(err) } job := eng.Job("commit", container1.ID) job.Setenv("repo", "testrepo") job.Setenv("tag", "testtag") job.SetenvJson("config", config) var newId string job.Stdout.AddString(&newId) if err := job.Run(); err != nil { t.Error(err) } container2, _, _ := mkContainer(runtime, []string{newId}, t) defer runtime.Destroy(container2) job = eng.Job("inspect", container1.Name, "container") baseContainer, _ := job.Stdout.AddEnv() if err := job.Run(); err != nil { t.Error(err) } job = eng.Job("inspect", container2.Name, "container") commitContainer, _ := job.Stdout.AddEnv() if err := job.Run(); err != nil { t.Error(err) } baseConfig := baseContainer.GetSubEnv("Config") commitConfig := commitContainer.GetSubEnv("Config") if commitConfig.Get("Env") != baseConfig.Get("Env") { t.Fatalf("Env config in committed container should be %v, was %v", baseConfig.Get("Env"), commitConfig.Get("Env")) } if baseConfig.Get("Cmd") != "[\"echo test \\u003e /tmp/foo\"]" { t.Fatalf("Cmd in base container should be [\"echo test \\u003e /tmp/foo\"], was %s", baseConfig.Get("Cmd")) } if commitConfig.Get("Cmd") != "[\"cat /tmp/foo\"]" { t.Fatalf("Cmd in committed container should be [\"cat /tmp/foo\"], was %s", commitConfig.Get("Cmd")) } } func TestRestartKillWait(t *testing.T) { eng := NewTestEngine(t) srv := mkServerFromEngine(eng, t) runtime := mkRuntimeFromEngine(eng, t) defer runtime.Nuke() config, hostConfig, _, err := runconfig.Parse([]string{"-i", unitTestImageID, "/bin/cat"}, nil) if err != nil { t.Fatal(err) } id := createTestContainer(eng, config, t) job := eng.Job("containers") job.SetenvBool("all", true) outs, err := job.Stdout.AddListTable() if err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } if len(outs.Data) != 1 { t.Errorf("Expected 1 container, %v found", len(outs.Data)) } job = eng.Job("start", id) if err := job.ImportEnv(hostConfig); err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } job = eng.Job("kill", id) if err := job.Run(); err != nil { t.Fatal(err) } eng = newTestEngine(t, false, eng.Root()) srv = mkServerFromEngine(eng, t) job = srv.Eng.Job("containers") job.SetenvBool("all", true) outs, err = job.Stdout.AddListTable() if err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } if len(outs.Data) != 1 { t.Errorf("Expected 1 container, %v found", len(outs.Data)) } setTimeout(t, "Waiting on stopped container timedout", 5*time.Second, func() { job = srv.Eng.Job("wait", outs.Data[0].Get("Id")) var statusStr string job.Stdout.AddString(&statusStr) if err := job.Run(); err != nil { t.Fatal(err) } }) } func TestCreateStartRestartStopStartKillRm(t *testing.T) { eng := NewTestEngine(t) srv := mkServerFromEngine(eng, t) defer mkRuntimeFromEngine(eng, t).Nuke() config, hostConfig, _, err := runconfig.Parse([]string{"-i", unitTestImageID, "/bin/cat"}, nil) if err != nil { t.Fatal(err) } id := createTestContainer(eng, config, t) job := srv.Eng.Job("containers") job.SetenvBool("all", true) outs, err := job.Stdout.AddListTable() if err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } if len(outs.Data) != 1 { t.Errorf("Expected 1 container, %v found", len(outs.Data)) } job = eng.Job("start", id) if err := job.ImportEnv(hostConfig); err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } job = eng.Job("restart", id) job.SetenvInt("t", 15) if err := job.Run(); err != nil { t.Fatal(err) } job = eng.Job("stop", id) job.SetenvInt("t", 15) if err := job.Run(); err != nil { t.Fatal(err) } 
job = eng.Job("start", id) if err := job.ImportEnv(hostConfig); err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } if err := eng.Job("kill", id).Run(); err != nil { t.Fatal(err) } // FIXME: this failed once with a race condition ("Unable to remove filesystem for xxx: directory not empty") job = eng.Job("container_delete", id) job.SetenvBool("removeVolume", true) if err := job.Run(); err != nil { t.Fatal(err) } job = srv.Eng.Job("containers") job.SetenvBool("all", true) outs, err = job.Stdout.AddListTable() if err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } if len(outs.Data) != 0 { t.Errorf("Expected 0 container, %v found", len(outs.Data)) } } func TestRunWithTooLowMemoryLimit(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() // Try to create a container with a memory limit of 1 byte less than the minimum allowed limit. job := eng.Job("create") job.Setenv("Image", unitTestImageID) job.Setenv("Memory", "524287") job.Setenv("CpuShares", "1000") job.SetenvList("Cmd", []string{"/bin/cat"}) var id string job.Stdout.AddString(&id) if err := job.Run(); err == nil { t.Errorf("Memory limit is smaller than the allowed limit. Container creation should've failed!") } } func TestRmi(t *testing.T) { eng := NewTestEngine(t) srv := mkServerFromEngine(eng, t) defer mkRuntimeFromEngine(eng, t).Nuke() initialImages := getAllImages(eng, t) config, hostConfig, _, err := runconfig.Parse([]string{unitTestImageID, "echo", "test"}, nil) if err != nil { t.Fatal(err) } containerID := createTestContainer(eng, config, t) //To remove job := eng.Job("start", containerID) if err := job.ImportEnv(hostConfig); err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } if err := eng.Job("wait", containerID).Run(); err != nil { t.Fatal(err) } job = eng.Job("commit", containerID) job.Setenv("repo", "test") var imageID string job.Stdout.AddString(&imageID) if err := job.Run(); err != nil { t.Fatal(err) } if err := eng.Job("tag", imageID, "test", "0.1").Run(); err != nil { t.Fatal(err) } containerID = createTestContainer(eng, config, t) //To remove job = eng.Job("start", containerID) if err := job.ImportEnv(hostConfig); err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } if err := eng.Job("wait", containerID).Run(); err != nil { t.Fatal(err) } job = eng.Job("commit", containerID) job.Setenv("repo", "test") if err := job.Run(); err != nil { t.Fatal(err) } images := getAllImages(eng, t) if images.Len()-initialImages.Len() != 2 { t.Fatalf("Expected 2 new images, found %d.", images.Len()-initialImages.Len()) } if err = srv.DeleteImage(imageID, engine.NewTable("", 0), true, false); err != nil { t.Fatal(err) } images = getAllImages(eng, t) if images.Len()-initialImages.Len() != 1 { t.Fatalf("Expected 1 new image, found %d.", images.Len()-initialImages.Len()) } for _, image := range images.Data { if strings.Contains(unitTestImageID, image.Get("Id")) { continue } if image.GetList("RepoTags")[0] == ":" { t.Fatalf("Expected tagged image, got untagged one.") } } } func TestImagesFilter(t *testing.T) { eng := NewTestEngine(t) defer nuke(mkRuntimeFromEngine(eng, t)) if err := eng.Job("tag", unitTestImageName, "utest", "tag1").Run(); err != nil { t.Fatal(err) } if err := eng.Job("tag", unitTestImageName, "utest/docker", "tag2").Run(); err != nil { t.Fatal(err) } if err := eng.Job("tag", unitTestImageName, "utest:5000/docker", "tag3").Run(); err != nil { t.Fatal(err) } images := getImages(eng, t, false, "utest*/*") 
if len(images.Data[0].GetList("RepoTags")) != 2 { t.Fatal("incorrect number of matches returned") } images = getImages(eng, t, false, "utest") if len(images.Data[0].GetList("RepoTags")) != 1 { t.Fatal("incorrect number of matches returned") } images = getImages(eng, t, false, "utest*") if len(images.Data[0].GetList("RepoTags")) != 1 { t.Fatal("incorrect number of matches returned") } images = getImages(eng, t, false, "*5000*/*") if len(images.Data[0].GetList("RepoTags")) != 1 { t.Fatal("incorrect number of matches returned") } } func TestImageInsert(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() srv := mkServerFromEngine(eng, t) // bad image name fails if err := srv.Eng.Job("insert", "foo", "https://www.docker.io/static/img/docker-top-logo.png", "/foo").Run(); err == nil { t.Fatal("expected an error and got none") } // bad url fails if err := srv.Eng.Job("insert", unitTestImageID, "http://bad_host_name_that_will_totally_fail.com/", "/foo").Run(); err == nil { t.Fatal("expected an error and got none") } // success returns nil if err := srv.Eng.Job("insert", unitTestImageID, "https://www.docker.io/static/img/docker-top-logo.png", "/foo").Run(); err != nil { t.Fatalf("expected no error, but got %v", err) } } func TestListContainers(t *testing.T) { eng := NewTestEngine(t) srv := mkServerFromEngine(eng, t) defer mkRuntimeFromEngine(eng, t).Nuke() config := runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/sh", "-c", "cat"}, OpenStdin: true, } firstID := createTestContainer(eng, &config, t) secondID := createTestContainer(eng, &config, t) thirdID := createTestContainer(eng, &config, t) fourthID := createTestContainer(eng, &config, t) defer func() { containerKill(eng, firstID, t) containerKill(eng, secondID, t) containerKill(eng, fourthID, t) containerWait(eng, firstID, t) containerWait(eng, secondID, t) containerWait(eng, fourthID, t) }() startContainer(eng, firstID, t) startContainer(eng, secondID, t) startContainer(eng, fourthID, t) // all if !assertContainerList(srv, true, -1, "", "", []string{fourthID, thirdID, secondID, firstID}) { t.Error("Container list is not in the correct order") } // running if !assertContainerList(srv, false, -1, "", "", []string{fourthID, secondID, firstID}) { t.Error("Container list is not in the correct order") } // from here 'all' flag is ignored // limit expected := []string{fourthID, thirdID} if !assertContainerList(srv, true, 2, "", "", expected) || !assertContainerList(srv, false, 2, "", "", expected) { t.Error("Container list is not in the correct order") } // since expected = []string{fourthID, thirdID, secondID} if !assertContainerList(srv, true, -1, firstID, "", expected) || !assertContainerList(srv, false, -1, firstID, "", expected) { t.Error("Container list is not in the correct order") } // before expected = []string{secondID, firstID} if !assertContainerList(srv, true, -1, "", thirdID, expected) || !assertContainerList(srv, false, -1, "", thirdID, expected) { t.Error("Container list is not in the correct order") } // since & before expected = []string{thirdID, secondID} if !assertContainerList(srv, true, -1, firstID, fourthID, expected) || !assertContainerList(srv, false, -1, firstID, fourthID, expected) { t.Error("Container list is not in the correct order") } // since & limit expected = []string{fourthID, thirdID} if !assertContainerList(srv, true, 2, firstID, "", expected) || !assertContainerList(srv, false, 2, firstID, "", expected) { t.Error("Container list is not in the correct order") } // before & 
limit expected = []string{thirdID} if !assertContainerList(srv, true, 1, "", fourthID, expected) || !assertContainerList(srv, false, 1, "", fourthID, expected) { t.Error("Container list is not in the correct order") } // since & before & limit expected = []string{thirdID} if !assertContainerList(srv, true, 1, firstID, fourthID, expected) || !assertContainerList(srv, false, 1, firstID, fourthID, expected) { t.Error("Container list is not in the correct order") } } func assertContainerList(srv *docker.Server, all bool, limit int, since, before string, expected []string) bool { job := srv.Eng.Job("containers") job.SetenvBool("all", all) job.SetenvInt("limit", limit) job.Setenv("since", since) job.Setenv("before", before) outs, err := job.Stdout.AddListTable() if err != nil { return false } if err := job.Run(); err != nil { return false } if len(outs.Data) != len(expected) { return false } for i := 0; i < len(outs.Data); i++ { if outs.Data[i].Get("Id") != expected[i] { return false } } return true } // Regression test for being able to untag an image with an existing // container func TestDeleteTagWithExistingContainers(t *testing.T) { eng := NewTestEngine(t) defer nuke(mkRuntimeFromEngine(eng, t)) srv := mkServerFromEngine(eng, t) // Tag the image if err := eng.Job("tag", unitTestImageID, "utest", "tag1").Run(); err != nil { t.Fatal(err) } // Create a container from the image config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } id := createNamedTestContainer(eng, config, t, "testingtags") if id == "" { t.Fatal("No id returned") } job := srv.Eng.Job("containers") job.SetenvBool("all", true) outs, err := job.Stdout.AddListTable() if err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } if len(outs.Data) != 1 { t.Fatalf("Expected 1 container got %d", len(outs.Data)) } // Try to remove the tag imgs := engine.NewTable("", 0) if err := srv.DeleteImage("utest:tag1", imgs, true, false); err != nil { t.Fatal(err) } if len(imgs.Data) != 1 { t.Fatalf("Should only have deleted one untag %d", len(imgs.Data)) } if untag := imgs.Data[0].Get("Untagged"); untag != "utest:tag1" { t.Fatalf("Expected %s got %s", unitTestImageID, untag) } } docker-0.9.1/integration/iptables_test.go0000644000175000017500000000100012314376205016606 0ustar tagtagpackage docker import ( "github.com/dotcloud/docker/pkg/iptables" "os" "testing" ) // FIXME: this test should be a unit test. // For example by mocking os/exec to make sure iptables is not actually called. func TestIptables(t *testing.T) { if _, err := iptables.Raw("-L"); err != nil { t.Fatal(err) } path := os.Getenv("PATH") os.Setenv("PATH", "") defer os.Setenv("PATH", path) if _, err := iptables.Raw("-L"); err == nil { t.Fatal("Not finding iptables in the PATH should cause an error") } } docker-0.9.1/integration/utils_test.go0000644000175000017500000002316012314376205016156 0ustar tagtagpackage docker import ( "bytes" "fmt" "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" "io/ioutil" "net/http" "net/http/httptest" "os" "path" "strings" "testing" "time" "github.com/dotcloud/docker" "github.com/dotcloud/docker/builtins" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" ) // This file contains utility functions for docker's unit test suite. // It has to be named XXX_test.go, apparently, in other to access private functions // from other XXX_test.go functions. 
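// Illustrative sketch (not part of the original suite): a minimal example of
// how the helpers defined below are typically combined by the integration
// tests in this package. The function name is hypothetical; every identifier
// it calls (NewTestEngine, mkRuntimeFromEngine, createTestContainer,
// startContainer, containerWait, unitTestImageID) is defined elsewhere in
// this package.
func exampleCreateStartWait(t *testing.T) {
	// Build an engine backed by a throwaway store and tear it down afterwards.
	eng := NewTestEngine(t)
	defer mkRuntimeFromEngine(eng, t).Nuke()

	// Parse CLI-style run arguments into a container config.
	config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo", "test"}, nil)
	if err != nil {
		t.Fatal(err)
	}

	// Create the container, start it, and wait for it to exit.
	id := createTestContainer(eng, config, t)
	startContainer(eng, id, t)
	containerWait(eng, id, t)
}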
// Create a temporary runtime suitable for unit testing. // Call t.Fatal() at the first error. func mkRuntime(f utils.Fataler) *docker.Runtime { eng := newTestEngine(f, false, "") return mkRuntimeFromEngine(eng, f) // FIXME: // [...] // Mtu: docker.GetDefaultNetworkMtu(), // [...] } func createNamedTestContainer(eng *engine.Engine, config *runconfig.Config, f utils.Fataler, name string) (shortId string) { job := eng.Job("create", name) if err := job.ImportEnv(config); err != nil { f.Fatal(err) } job.Stdout.AddString(&shortId) if err := job.Run(); err != nil { f.Fatal(err) } return } func createTestContainer(eng *engine.Engine, config *runconfig.Config, f utils.Fataler) (shortId string) { return createNamedTestContainer(eng, config, f, "") } func startContainer(eng *engine.Engine, id string, t utils.Fataler) { job := eng.Job("start", id) if err := job.Run(); err != nil { t.Fatal(err) } } func containerRun(eng *engine.Engine, id string, t utils.Fataler) { startContainer(eng, id, t) containerWait(eng, id, t) } func containerFileExists(eng *engine.Engine, id, dir string, t utils.Fataler) bool { c := getContainer(eng, id, t) if err := c.Mount(); err != nil { t.Fatal(err) } defer c.Unmount() if _, err := os.Stat(path.Join(c.BasefsPath(), dir)); err != nil { if os.IsNotExist(err) { return false } t.Fatal(err) } return true } func containerAttach(eng *engine.Engine, id string, t utils.Fataler) (io.WriteCloser, io.ReadCloser) { c := getContainer(eng, id, t) i, err := c.StdinPipe() if err != nil { t.Fatal(err) } o, err := c.StdoutPipe() if err != nil { t.Fatal(err) } return i, o } func containerWait(eng *engine.Engine, id string, t utils.Fataler) int { return getContainer(eng, id, t).Wait() } func containerWaitTimeout(eng *engine.Engine, id string, t utils.Fataler) error { return getContainer(eng, id, t).WaitTimeout(500 * time.Millisecond) } func containerKill(eng *engine.Engine, id string, t utils.Fataler) { if err := eng.Job("kill", id).Run(); err != nil { t.Fatal(err) } } func containerRunning(eng *engine.Engine, id string, t utils.Fataler) bool { return getContainer(eng, id, t).State.IsRunning() } func containerAssertExists(eng *engine.Engine, id string, t utils.Fataler) { getContainer(eng, id, t) } func containerAssertNotExists(eng *engine.Engine, id string, t utils.Fataler) { runtime := mkRuntimeFromEngine(eng, t) if c := runtime.Get(id); c != nil { t.Fatal(fmt.Errorf("Container %s should not exist", id)) } } // assertHttpNotError expect the given response to not have an error. // Otherwise the it causes the test to fail. func assertHttpNotError(r *httptest.ResponseRecorder, t utils.Fataler) { // Non-error http status are [200, 400) if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest { t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code)) } } // assertHttpError expect the given response to have an error. // Otherwise the it causes the test to fail. 
func assertHttpError(r *httptest.ResponseRecorder, t utils.Fataler) { // Non-error http status are [200, 400) if !(r.Code < http.StatusOK || r.Code >= http.StatusBadRequest) { t.Fatal(fmt.Errorf("Unexpected http success code: %v", r.Code)) } } func getContainer(eng *engine.Engine, id string, t utils.Fataler) *docker.Container { runtime := mkRuntimeFromEngine(eng, t) c := runtime.Get(id) if c == nil { t.Fatal(fmt.Errorf("No such container: %s", id)) } return c } func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Server { iSrv := eng.Hack_GetGlobalVar("httpapi.server") if iSrv == nil { panic("Legacy server field not set in engine") } srv, ok := iSrv.(*docker.Server) if !ok { panic("Legacy server field in engine does not cast to *docker.Server") } return srv } func mkRuntimeFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Runtime { iRuntime := eng.Hack_GetGlobalVar("httpapi.runtime") if iRuntime == nil { panic("Legacy runtime field not set in engine") } runtime, ok := iRuntime.(*docker.Runtime) if !ok { panic("Legacy runtime field in engine does not cast to *docker.Runtime") } return runtime } func newTestEngine(t utils.Fataler, autorestart bool, root string) *engine.Engine { if root == "" { if dir, err := newTestDirectory(unitTestStoreBase); err != nil { t.Fatal(err) } else { root = dir } } eng, err := engine.New(root) if err != nil { t.Fatal(err) } // Load default plugins builtins.Register(eng) // (This is manually copied and modified from main() until we have a more generic plugin system) job := eng.Job("initserver") job.Setenv("Root", root) job.SetenvBool("AutoRestart", autorestart) job.Setenv("ExecDriver", "native") // TestGetEnabledCors and TestOptionsRoute require EnableCors=true job.SetenvBool("EnableCors", true) if err := job.Run(); err != nil { t.Fatal(err) } return eng } func NewTestEngine(t utils.Fataler) *engine.Engine { return newTestEngine(t, false, "") } func newTestDirectory(templateDir string) (dir string, err error) { return utils.TestDirectory(templateDir) } func getCallerName(depth int) string { return utils.GetCallerName(depth) } // Write `content` to the file at path `dst`, creating it if necessary, // as well as any missing directories. // The file is truncated if it already exists. // Call t.Fatal() at the first error. func writeFile(dst, content string, t *testing.T) { // Create subdirectories if necessary if err := os.MkdirAll(path.Dir(dst), 0700); err != nil && !os.IsExist(err) { t.Fatal(err) } f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700) if err != nil { t.Fatal(err) } // Write content (truncate if it exists) if _, err := io.Copy(f, strings.NewReader(content)); err != nil { t.Fatal(err) } } // Return the contents of file at path `src`. // Call t.Fatal() at the first error (including if the file doesn't exist) func readFile(src string, t *testing.T) (content string) { f, err := os.Open(src) if err != nil { t.Fatal(err) } data, err := ioutil.ReadAll(f) if err != nil { t.Fatal(err) } return string(data) } // Create a test container from the given runtime `r` and run arguments `args`. // If the image name is "_", (eg. []string{"-i", "-t", "_", "bash"}, it is // dynamically replaced by the current test image. // The caller is responsible for destroying the container. // Call t.Fatal() at the first error. 
func mkContainer(r *docker.Runtime, args []string, t *testing.T) (*docker.Container, *runconfig.HostConfig, error) { config, hc, _, err := runconfig.Parse(args, nil) defer func() { if err != nil && t != nil { t.Fatal(err) } }() if err != nil { return nil, nil, err } if config.Image == "_" { config.Image = GetTestImage(r).ID } c, _, err := r.Create(config, "") if err != nil { return nil, nil, err } // NOTE: hostConfig is ignored. // If `args` specify privileged mode, custom lxc conf, external mount binds, // port redirects etc. they will be ignored. // This is because the correct way to set these things is to pass environment // to the `start` job. // FIXME: this helper function should be deprecated in favor of calling // `create` and `start` jobs directly. return c, hc, nil } // Create a test container, start it, wait for it to complete, destroy it, // and return its standard output as a string. // The image name (eg. the XXX in []string{"-i", "-t", "XXX", "bash"}, is dynamically replaced by the current test image. // If t is not nil, call t.Fatal() at the first error. Otherwise return errors normally. func runContainer(eng *engine.Engine, r *docker.Runtime, args []string, t *testing.T) (output string, err error) { defer func() { if err != nil && t != nil { t.Fatal(err) } }() container, hc, err := mkContainer(r, args, t) if err != nil { return "", err } defer r.Destroy(container) stdout, err := container.StdoutPipe() if err != nil { return "", err } defer stdout.Close() job := eng.Job("start", container.ID) if err := job.ImportEnv(hc); err != nil { return "", err } if err := job.Run(); err != nil { return "", err } container.Wait() data, err := ioutil.ReadAll(stdout) if err != nil { return "", err } output = string(data) return } // FIXME: this is duplicated from graph_test.go in the docker package. 
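// fakeTar builds a small in-memory tar archive containing three files that all
// share the same "Hello world!\n" payload; tests use it wherever an image or
// layer tarball is expected.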
func fakeTar() (io.ReadCloser, error) { content := []byte("Hello world!\n") buf := new(bytes.Buffer) tw := tar.NewWriter(buf) for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} { hdr := new(tar.Header) hdr.Size = int64(len(content)) hdr.Name = name if err := tw.WriteHeader(hdr); err != nil { return nil, err } tw.Write([]byte(content)) } tw.Close() return ioutil.NopCloser(buf), nil } func getAllImages(eng *engine.Engine, t *testing.T) *engine.Table { return getImages(eng, t, true, "") } func getImages(eng *engine.Engine, t *testing.T, all bool, filter string) *engine.Table { job := eng.Job("images") job.SetenvBool("all", all) job.Setenv("filter", filter) images, err := job.Stdout.AddListTable() if err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } return images } docker-0.9.1/integration/api_test.go0000644000175000017500000010353012314376205015567 0ustar tagtagpackage docker import ( "bufio" "bytes" "encoding/json" "fmt" "github.com/dotcloud/docker" "github.com/dotcloud/docker/api" "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" "io/ioutil" "net" "net/http" "net/http/httptest" "strings" "testing" "time" ) func TestGetVersion(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() var err error r := httptest.NewRecorder() req, err := http.NewRequest("GET", "/version", nil) if err != nil { t.Fatal(err) } // FIXME getting the version should require an actual running Server if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) out := engine.NewOutput() v, err := out.AddEnv() if err != nil { t.Fatal(err) } if _, err := io.Copy(out, r.Body); err != nil { t.Fatal(err) } out.Close() expected := dockerversion.VERSION if result := v.Get("Version"); result != expected { t.Errorf("Expected version %s, %s found", expected, result) } expected = "application/json" if result := r.HeaderMap.Get("Content-Type"); result != expected { t.Errorf("Expected Content-Type %s, %s found", expected, result) } } func TestGetInfo(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() job := eng.Job("images") initialImages, err := job.Stdout.AddListTable() if err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } req, err := http.NewRequest("GET", "/info", nil) if err != nil { t.Fatal(err) } r := httptest.NewRecorder() if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) out := engine.NewOutput() i, err := out.AddEnv() if err != nil { t.Fatal(err) } if _, err := io.Copy(out, r.Body); err != nil { t.Fatal(err) } out.Close() if images := i.GetInt("Images"); images != initialImages.Len() { t.Errorf("Expected images: %d, %d found", initialImages.Len(), images) } expected := "application/json" if result := r.HeaderMap.Get("Content-Type"); result != expected { t.Errorf("Expected Content-Type %s, %s found", expected, result) } } func TestGetEvents(t *testing.T) { eng := NewTestEngine(t) srv := mkServerFromEngine(eng, t) // FIXME: we might not need runtime, why not simply nuke // the engine? 
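// Two fake events are logged through the server below, then read back from the
// /events?since=1 endpoint and decoded one by one for comparison.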
runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) var events []*utils.JSONMessage for _, parts := range [][3]string{ {"fakeaction", "fakeid", "fakeimage"}, {"fakeaction2", "fakeid", "fakeimage"}, } { action, id, from := parts[0], parts[1], parts[2] ev := srv.LogEvent(action, id, from) events = append(events, ev) } req, err := http.NewRequest("GET", "/events?since=1", nil) if err != nil { t.Fatal(err) } r := httptest.NewRecorder() setTimeout(t, "", 500*time.Millisecond, func() { if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) }) dec := json.NewDecoder(r.Body) for i := 0; i < 2; i++ { var jm utils.JSONMessage if err := dec.Decode(&jm); err == io.EOF { break } else if err != nil { t.Fatal(err) } if jm != *events[i] { t.Fatalf("Event received it different than expected") } } } func TestGetImagesJSON(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() job := eng.Job("images") initialImages, err := job.Stdout.AddListTable() if err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } req, err := http.NewRequest("GET", "/images/json?all=0", nil) if err != nil { t.Fatal(err) } r := httptest.NewRecorder() if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) images := engine.NewTable("Created", 0) if _, err := images.ReadListFrom(r.Body.Bytes()); err != nil { t.Fatal(err) } if images.Len() != initialImages.Len() { t.Errorf("Expected %d image, %d found", initialImages.Len(), images.Len()) } found := false for _, img := range images.Data { if strings.Contains(img.GetList("RepoTags")[0], unitTestImageName) { found = true break } } if !found { t.Errorf("Expected image %s, %+v found", unitTestImageName, images) } r2 := httptest.NewRecorder() // all=1 initialImages = getAllImages(eng, t) req2, err := http.NewRequest("GET", "/images/json?all=true", nil) if err != nil { t.Fatal(err) } if err := api.ServeRequest(eng, api.APIVERSION, r2, req2); err != nil { t.Fatal(err) } assertHttpNotError(r2, t) images2 := engine.NewTable("Id", 0) if _, err := images2.ReadListFrom(r2.Body.Bytes()); err != nil { t.Fatal(err) } if images2.Len() != initialImages.Len() { t.Errorf("Expected %d image, %d found", initialImages.Len(), images2.Len()) } found = false for _, img := range images2.Data { if img.Get("Id") == unitTestImageID { found = true break } } if !found { t.Errorf("Retrieved image Id differs, expected %s, received %+v", unitTestImageID, images2) } r3 := httptest.NewRecorder() // filter=a req3, err := http.NewRequest("GET", "/images/json?filter=aaaaaaaaaa", nil) if err != nil { t.Fatal(err) } if err := api.ServeRequest(eng, api.APIVERSION, r3, req3); err != nil { t.Fatal(err) } assertHttpNotError(r3, t) images3 := engine.NewTable("Id", 0) if _, err := images3.ReadListFrom(r3.Body.Bytes()); err != nil { t.Fatal(err) } if images3.Len() != 0 { t.Errorf("Expected 0 image, %d found", images3.Len()) } } func TestGetImagesHistory(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() r := httptest.NewRecorder() req, err := http.NewRequest("GET", fmt.Sprintf("/images/%s/history", unitTestImageName), nil) if err != nil { t.Fatal(err) } if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) outs := engine.NewTable("Created", 0) if _, err := outs.ReadListFrom(r.Body.Bytes()); err != nil { t.Fatal(err) } if len(outs.Data) != 1 { t.Errorf("Expected 1 line, %d found", 
len(outs.Data)) } } func TestGetImagesByName(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() req, err := http.NewRequest("GET", "/images/"+unitTestImageName+"/json", nil) if err != nil { t.Fatal(err) } r := httptest.NewRecorder() if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) img := &docker.Image{} if err := json.Unmarshal(r.Body.Bytes(), img); err != nil { t.Fatal(err) } if img.ID != unitTestImageID { t.Errorf("Error inspecting image") } } func TestGetContainersJSON(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() job := eng.Job("containers") job.SetenvBool("all", true) outs, err := job.Stdout.AddTable() if err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } beginLen := len(outs.Data) containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"echo", "test"}, }, t) if containerID == "" { t.Fatalf("Received empty container ID") } req, err := http.NewRequest("GET", "/containers/json?all=1", nil) if err != nil { t.Fatal(err) } r := httptest.NewRecorder() if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) containers := engine.NewTable("", 0) if _, err := containers.ReadListFrom(r.Body.Bytes()); err != nil { t.Fatal(err) } if len(containers.Data) != beginLen+1 { t.Fatalf("Expected %d container, %d found (started with: %d)", beginLen+1, len(containers.Data), beginLen) } if id := containers.Data[0].Get("Id"); id != containerID { t.Fatalf("Container ID mismatch. Expected: %s, received: %s\n", containerID, id) } } func TestGetContainersExport(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() // Create a container and remove a file containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"touch", "/test"}, }, t, ) containerRun(eng, containerID, t) r := httptest.NewRecorder() req, err := http.NewRequest("GET", "/containers/"+containerID+"/export", nil) if err != nil { t.Fatal(err) } if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) if r.Code != http.StatusOK { t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) } found := false for tarReader := tar.NewReader(r.Body); ; { h, err := tarReader.Next() if err != nil { if err == io.EOF { break } t.Fatal(err) } if h.Name == "test" { found = true break } } if !found { t.Fatalf("The created test file has not been found in the exported image") } } func TestSaveImageAndThenLoad(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() // save image r := httptest.NewRecorder() req, err := http.NewRequest("GET", "/images/"+unitTestImageID+"/get", nil) if err != nil { t.Fatal(err) } if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusOK { t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) } tarball := r.Body // delete the image r = httptest.NewRecorder() req, err = http.NewRequest("DELETE", "/images/"+unitTestImageID, nil) if err != nil { t.Fatal(err) } if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusOK { t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) } // make sure there is no image r = httptest.NewRecorder() req, err = http.NewRequest("GET", "/images/"+unitTestImageID+"/get", nil) if err != nil { 
t.Fatal(err) } if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusNotFound { t.Fatalf("%d NotFound expected, received %d\n", http.StatusNotFound, r.Code) } // load the image r = httptest.NewRecorder() req, err = http.NewRequest("POST", "/images/load", tarball) if err != nil { t.Fatal(err) } if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusOK { t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) } // finally make sure the image is there r = httptest.NewRecorder() req, err = http.NewRequest("GET", "/images/"+unitTestImageID+"/get", nil) if err != nil { t.Fatal(err) } if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusOK { t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) } } func TestGetContainersChanges(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() // Create a container and remove a file containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/rm", "/etc/passwd"}, }, t, ) containerRun(eng, containerID, t) r := httptest.NewRecorder() req, err := http.NewRequest("GET", "/containers/"+containerID+"/changes", nil) if err != nil { t.Fatal(err) } if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) outs := engine.NewTable("", 0) if _, err := outs.ReadListFrom(r.Body.Bytes()); err != nil { t.Fatal(err) } // Check the changelog success := false for _, elem := range outs.Data { if elem.Get("Path") == "/etc/passwd" && elem.GetInt("Kind") == 2 { success = true } } if !success { t.Fatalf("/etc/passwd as been removed but is not present in the diff") } } func TestGetContainersTop(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/sh", "-c", "cat"}, OpenStdin: true, }, t, ) defer func() { // Make sure the process dies before destroying runtime containerKill(eng, containerID, t) containerWait(eng, containerID, t) }() startContainer(eng, containerID, t) setTimeout(t, "Waiting for the container to be started timed out", 10*time.Second, func() { for { if containerRunning(eng, containerID, t) { break } time.Sleep(10 * time.Millisecond) } }) if !containerRunning(eng, containerID, t) { t.Fatalf("Container should be running") } // Make sure sh spawn up cat setTimeout(t, "read/write assertion timed out", 2*time.Second, func() { in, out := containerAttach(eng, containerID, t) if err := assertPipe("hello\n", "hello", out, in, 150); err != nil { t.Fatal(err) } }) r := httptest.NewRecorder() req, err := http.NewRequest("GET", "/containers/"+containerID+"/top?ps_args=aux", nil) if err != nil { t.Fatal(err) } if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) var procs engine.Env if err := procs.Decode(r.Body); err != nil { t.Fatal(err) } if len(procs.GetList("Titles")) != 11 { t.Fatalf("Expected 11 titles, found %d.", len(procs.GetList("Titles"))) } if procs.GetList("Titles")[0] != "USER" || procs.GetList("Titles")[10] != "COMMAND" { t.Fatalf("Expected Titles[0] to be USER and Titles[10] to be COMMAND, found %s and %s.", procs.GetList("Titles")[0], procs.GetList("Titles")[10]) } processes := [][]string{} if err := procs.GetJson("Processes", &processes); err != nil { t.Fatal(err) } if 
len(processes) != 2 { t.Fatalf("Expected 2 processes, found %d.", len(processes)) } if processes[0][10] != "/bin/sh -c cat" { t.Fatalf("Expected `/bin/sh -c cat`, found %s.", processes[0][10]) } if processes[1][10] != "/bin/sh -c cat" { t.Fatalf("Expected `/bin/sh -c cat`, found %s.", processes[1][10]) } } func TestGetContainersByName(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() // Create a container and remove a file containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"echo", "test"}, }, t, ) r := httptest.NewRecorder() req, err := http.NewRequest("GET", "/containers/"+containerID+"/json", nil) if err != nil { t.Fatal(err) } if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) outContainer := &docker.Container{} if err := json.Unmarshal(r.Body.Bytes(), outContainer); err != nil { t.Fatal(err) } if outContainer.ID != containerID { t.Fatalf("Wrong containers retrieved. Expected %s, received %s", containerID, outContainer.ID) } } func TestPostCommit(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() srv := mkServerFromEngine(eng, t) // Create a container and remove a file containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"touch", "/test"}, }, t, ) containerRun(eng, containerID, t) req, err := http.NewRequest("POST", "/commit?repo=testrepo&testtag=tag&container="+containerID, bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } r := httptest.NewRecorder() if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) if r.Code != http.StatusCreated { t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code) } var env engine.Env if err := env.Decode(r.Body); err != nil { t.Fatal(err) } if _, err := srv.ImageInspect(env.Get("Id")); err != nil { t.Fatalf("The image has not been committed") } } func TestPostContainersCreate(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() configJSON, err := json.Marshal(&runconfig.Config{ Image: unitTestImageID, Memory: 33554432, Cmd: []string{"touch", "/test"}, }) if err != nil { t.Fatal(err) } req, err := http.NewRequest("POST", "/containers/create", bytes.NewReader(configJSON)) if err != nil { t.Fatal(err) } r := httptest.NewRecorder() if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) if r.Code != http.StatusCreated { t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code) } var apiRun engine.Env if err := apiRun.Decode(r.Body); err != nil { t.Fatal(err) } containerID := apiRun.Get("Id") containerAssertExists(eng, containerID, t) containerRun(eng, containerID, t) if !containerFileExists(eng, containerID, "test", t) { t.Fatal("Test file was not created") } } func TestPostContainersKill(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/cat"}, OpenStdin: true, }, t, ) startContainer(eng, containerID, t) // Give some time to the process to start containerWaitTimeout(eng, containerID, t) if !containerRunning(eng, containerID, t) { t.Errorf("Container should be running") } r := httptest.NewRecorder() req, err := http.NewRequest("POST", "/containers/"+containerID+"/kill", bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } if err := 
api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) if r.Code != http.StatusNoContent { t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code) } if containerRunning(eng, containerID, t) { t.Fatalf("The container hasn't been killed") } } func TestPostContainersRestart(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/top"}, OpenStdin: true, }, t, ) startContainer(eng, containerID, t) // Give some time to the process to start containerWaitTimeout(eng, containerID, t) if !containerRunning(eng, containerID, t) { t.Errorf("Container should be running") } req, err := http.NewRequest("POST", "/containers/"+containerID+"/restart?t=1", bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } r := httptest.NewRecorder() if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) if r.Code != http.StatusNoContent { t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code) } // Give some time to the process to restart containerWaitTimeout(eng, containerID, t) if !containerRunning(eng, containerID, t) { t.Fatalf("Container should be running") } containerKill(eng, containerID, t) } func TestPostContainersStart(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer( eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/cat"}, OpenStdin: true, }, t, ) hostConfigJSON, err := json.Marshal(&runconfig.HostConfig{}) req, err := http.NewRequest("POST", "/containers/"+containerID+"/start", bytes.NewReader(hostConfigJSON)) if err != nil { t.Fatal(err) } req.Header.Set("Content-Type", "application/json") r := httptest.NewRecorder() if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) if r.Code != http.StatusNoContent { t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code) } containerAssertExists(eng, containerID, t) // Give some time to the process to start // FIXME: use Wait once it's available as a job containerWaitTimeout(eng, containerID, t) if !containerRunning(eng, containerID, t) { t.Errorf("Container should be running") } r = httptest.NewRecorder() if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } // Starting an already started container should return an error // FIXME: verify a precise error code. There is a possible bug here // which causes this to return 404 even though the container exists. 
assertHttpError(r, t) containerAssertExists(eng, containerID, t) containerKill(eng, containerID, t) } // Expected behaviour: using / as a bind mount source should throw an error func TestRunErrorBindMountRootSource(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer( eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/cat"}, OpenStdin: true, }, t, ) hostConfigJSON, err := json.Marshal(&runconfig.HostConfig{ Binds: []string{"/:/tmp"}, }) req, err := http.NewRequest("POST", "/containers/"+containerID+"/start", bytes.NewReader(hostConfigJSON)) if err != nil { t.Fatal(err) } req.Header.Set("Content-Type", "application/json") r := httptest.NewRecorder() if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusInternalServerError { containerKill(eng, containerID, t) t.Fatal("should have failed to run when using / as a source for the bind mount") } } func TestPostContainersStop(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/top"}, OpenStdin: true, }, t, ) startContainer(eng, containerID, t) // Give some time to the process to start containerWaitTimeout(eng, containerID, t) if !containerRunning(eng, containerID, t) { t.Errorf("Container should be running") } // Note: as it is a POST request, it requires a body. req, err := http.NewRequest("POST", "/containers/"+containerID+"/stop?t=1", bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } r := httptest.NewRecorder() if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) if r.Code != http.StatusNoContent { t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code) } if containerRunning(eng, containerID, t) { t.Fatalf("The container hasn't been stopped") } } func TestPostContainersWait(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/sleep", "1"}, OpenStdin: true, }, t, ) startContainer(eng, containerID, t) setTimeout(t, "Wait timed out", 3*time.Second, func() { r := httptest.NewRecorder() req, err := http.NewRequest("POST", "/containers/"+containerID+"/wait", bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) var apiWait engine.Env if err := apiWait.Decode(r.Body); err != nil { t.Fatal(err) } if apiWait.GetInt("StatusCode") != 0 { t.Fatalf("Non zero exit code for sleep: %d\n", apiWait.GetInt("StatusCode")) } }) if containerRunning(eng, containerID, t) { t.Fatalf("The container should be stopped after wait") } } func TestPostContainersAttach(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/cat"}, OpenStdin: true, }, t, ) // Start the process startContainer(eng, containerID, t) stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() // Try to avoid the timeout in destroy. 
Best effort, don't check error defer func() { closeWrap(stdin, stdinPipe, stdout, stdoutPipe) containerKill(eng, containerID, t) }() // Attach to it c1 := make(chan struct{}) go func() { defer close(c1) r := &hijackTester{ ResponseRecorder: httptest.NewRecorder(), in: stdin, out: stdoutPipe, } req, err := http.NewRequest("POST", "/containers/"+containerID+"/attach?stream=1&stdin=1&stdout=1&stderr=1", bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r.ResponseRecorder, t) }() // Acknowledge hijack setTimeout(t, "hijack acknowledge timed out", 2*time.Second, func() { stdout.Read([]byte{}) stdout.Read(make([]byte, 4096)) }) setTimeout(t, "read/write assertion timed out", 2*time.Second, func() { if err := assertPipe("hello\n", string([]byte{1, 0, 0, 0, 0, 0, 0, 6})+"hello", stdout, stdinPipe, 150); err != nil { t.Fatal(err) } }) // Close pipes (client disconnects) if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil { t.Fatal(err) } // Wait for attach to finish, the client disconnected, therefore, Attach finished his job setTimeout(t, "Waiting for CmdAttach timed out", 10*time.Second, func() { <-c1 }) // We closed stdin, expect /bin/cat to still be running // Wait a little bit to make sure container.monitor() did his thing containerWaitTimeout(eng, containerID, t) // Try to avoid the timeout in destroy. Best effort, don't check error cStdin, _ := containerAttach(eng, containerID, t) cStdin.Close() containerWait(eng, containerID, t) } func TestPostContainersAttachStderr(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/sh", "-c", "/bin/cat >&2"}, OpenStdin: true, }, t, ) // Start the process startContainer(eng, containerID, t) stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() // Try to avoid the timeout in destroy. Best effort, don't check error defer func() { closeWrap(stdin, stdinPipe, stdout, stdoutPipe) containerKill(eng, containerID, t) }() // Attach to it c1 := make(chan struct{}) go func() { defer close(c1) r := &hijackTester{ ResponseRecorder: httptest.NewRecorder(), in: stdin, out: stdoutPipe, } req, err := http.NewRequest("POST", "/containers/"+containerID+"/attach?stream=1&stdin=1&stdout=1&stderr=1", bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r.ResponseRecorder, t) }() // Acknowledge hijack setTimeout(t, "hijack acknowledge timed out", 2*time.Second, func() { stdout.Read([]byte{}) stdout.Read(make([]byte, 4096)) }) setTimeout(t, "read/write assertion timed out", 2*time.Second, func() { if err := assertPipe("hello\n", string([]byte{2, 0, 0, 0, 0, 0, 0, 6})+"hello", stdout, stdinPipe, 150); err != nil { t.Fatal(err) } }) // Close pipes (client disconnects) if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil { t.Fatal(err) } // Wait for attach to finish, the client disconnected, therefore, Attach finished his job setTimeout(t, "Waiting for CmdAttach timed out", 10*time.Second, func() { <-c1 }) // We closed stdin, expect /bin/cat to still be running // Wait a little bit to make sure container.monitor() did his thing containerWaitTimeout(eng, containerID, t) // Try to avoid the timeout in destroy. 
Best effort, don't check error cStdin, _ := containerAttach(eng, containerID, t) cStdin.Close() containerWait(eng, containerID, t) } // FIXME: Test deleting running container // FIXME: Test deleting container with volume // FIXME: Test deleting volume in use by other container func TestDeleteContainers(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"touch", "/test"}, }, t, ) req, err := http.NewRequest("DELETE", "/containers/"+containerID, nil) if err != nil { t.Fatal(err) } r := httptest.NewRecorder() if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) if r.Code != http.StatusNoContent { t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code) } containerAssertNotExists(eng, containerID, t) } func TestOptionsRoute(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() r := httptest.NewRecorder() req, err := http.NewRequest("OPTIONS", "/", nil) if err != nil { t.Fatal(err) } if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) if r.Code != http.StatusOK { t.Errorf("Expected response for OPTIONS request to be \"200\", %v found.", r.Code) } } func TestGetEnabledCors(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() r := httptest.NewRecorder() req, err := http.NewRequest("GET", "/version", nil) if err != nil { t.Fatal(err) } if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) if r.Code != http.StatusOK { t.Errorf("Expected response for OPTIONS request to be \"200\", %v found.", r.Code) } allowOrigin := r.Header().Get("Access-Control-Allow-Origin") allowHeaders := r.Header().Get("Access-Control-Allow-Headers") allowMethods := r.Header().Get("Access-Control-Allow-Methods") if allowOrigin != "*" { t.Errorf("Expected header Access-Control-Allow-Origin to be \"*\", %s found.", allowOrigin) } if allowHeaders != "Origin, X-Requested-With, Content-Type, Accept" { t.Errorf("Expected header Access-Control-Allow-Headers to be \"Origin, X-Requested-With, Content-Type, Accept\", %s found.", allowHeaders) } if allowMethods != "GET, POST, DELETE, PUT, OPTIONS" { t.Errorf("Expected hearder Access-Control-Allow-Methods to be \"GET, POST, DELETE, PUT, OPTIONS\", %s found.", allowMethods) } } func TestDeleteImages(t *testing.T) { eng := NewTestEngine(t) //we expect errors, so we disable stderr eng.Stderr = ioutil.Discard defer mkRuntimeFromEngine(eng, t).Nuke() initialImages := getImages(eng, t, true, "") if err := eng.Job("tag", unitTestImageName, "test", "test").Run(); err != nil { t.Fatal(err) } images := getImages(eng, t, true, "") if len(images.Data[0].GetList("RepoTags")) != len(initialImages.Data[0].GetList("RepoTags"))+1 { t.Errorf("Expected %d images, %d found", len(initialImages.Data[0].GetList("RepoTags"))+1, len(images.Data[0].GetList("RepoTags"))) } req, err := http.NewRequest("DELETE", "/images/"+unitTestImageID, nil) if err != nil { t.Fatal(err) } r := httptest.NewRecorder() if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusConflict { t.Fatalf("Expected http status 409-conflict, got %v", r.Code) } req2, err := http.NewRequest("DELETE", "/images/test:test", nil) if err != nil { t.Fatal(err) } r2 := httptest.NewRecorder() if err := api.ServeRequest(eng, 
api.APIVERSION, r2, req2); err != nil { t.Fatal(err) } assertHttpNotError(r2, t) if r2.Code != http.StatusOK { t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) } outs := engine.NewTable("Created", 0) if _, err := outs.ReadListFrom(r2.Body.Bytes()); err != nil { t.Fatal(err) } if len(outs.Data) != 1 { t.Fatalf("Expected %d event (untagged), got %d", 1, len(outs.Data)) } images = getImages(eng, t, false, "") if images.Len() != initialImages.Len() { t.Errorf("Expected %d image, %d found", initialImages.Len(), images.Len()) } } func TestPostContainersCopy(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() // Create a container and remove a file containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"touch", "/test.txt"}, }, t, ) containerRun(eng, containerID, t) r := httptest.NewRecorder() var copyData engine.Env copyData.Set("Resource", "/test.txt") copyData.Set("HostPath", ".") jsonData := bytes.NewBuffer(nil) if err := copyData.Encode(jsonData); err != nil { t.Fatal(err) } req, err := http.NewRequest("POST", "/containers/"+containerID+"/copy", jsonData) if err != nil { t.Fatal(err) } req.Header.Add("Content-Type", "application/json") if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) if r.Code != http.StatusOK { t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) } found := false for tarReader := tar.NewReader(r.Body); ; { h, err := tarReader.Next() if err != nil { if err == io.EOF { break } t.Fatal(err) } if h.Name == "test.txt" { found = true break } } if !found { t.Fatalf("The created test file has not been found in the copied output") } } func TestPostContainersCopyWhenContainerNotFound(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() r := httptest.NewRecorder() var copyData engine.Env copyData.Set("Resource", "/test.txt") copyData.Set("HostPath", ".") jsonData := bytes.NewBuffer(nil) if err := copyData.Encode(jsonData); err != nil { t.Fatal(err) } req, err := http.NewRequest("POST", "/containers/id_not_found/copy", jsonData) if err != nil { t.Fatal(err) } req.Header.Add("Content-Type", "application/json") if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusNotFound { t.Fatalf("404 expected for id_not_found Container, received %v", r.Code) } } // Mocked types for tests type NopConn struct { io.ReadCloser io.Writer } func (c *NopConn) LocalAddr() net.Addr { return nil } func (c *NopConn) RemoteAddr() net.Addr { return nil } func (c *NopConn) SetDeadline(t time.Time) error { return nil } func (c *NopConn) SetReadDeadline(t time.Time) error { return nil } func (c *NopConn) SetWriteDeadline(t time.Time) error { return nil } type hijackTester struct { *httptest.ResponseRecorder in io.ReadCloser out io.Writer } func (t *hijackTester) Hijack() (net.Conn, *bufio.ReadWriter, error) { bufrw := bufio.NewReadWriter(bufio.NewReader(t.in), bufio.NewWriter(t.out)) conn := &NopConn{ ReadCloser: t.in, Writer: t.out, } return conn, bufrw, nil } docker-0.9.1/integration/z_final_test.go0000644000175000017500000000055612314376205016444 0ustar tagtagpackage docker import ( "github.com/dotcloud/docker/utils" "runtime" "testing" ) func displayFdGoroutines(t *testing.T) { t.Logf("Fds: %d, Goroutines: %d", utils.GetTotalUsedFds(), runtime.NumGoroutine()) } func TestFinal(t *testing.T) { nuke(globalRuntime) t.Logf("Start Fds: %d, Start Goroutines: %d", startFds, 
startGoroutines) displayFdGoroutines(t) } docker-0.9.1/integration/runtime_test.go0000644000175000017500000005324012314376205016503 0ustar tagtagpackage docker import ( "bytes" "fmt" "github.com/dotcloud/docker" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/sysinit" "github.com/dotcloud/docker/utils" "io" "log" "net" "net/url" "os" "path/filepath" "runtime" "strconv" "strings" "syscall" "testing" "time" ) const ( unitTestImageName = "docker-test-image" unitTestImageID = "83599e29c455eb719f77d799bc7c51521b9551972f5a850d7ad265bc1b5292f6" // 1.0 unitTestImageIDShort = "83599e29c455" unitTestNetworkBridge = "testdockbr0" unitTestStoreBase = "/var/lib/docker/unit-tests" testDaemonAddr = "127.0.0.1:4270" testDaemonProto = "tcp" ) var ( // FIXME: globalRuntime is deprecated by globalEngine. All tests should be converted. globalRuntime *docker.Runtime globalEngine *engine.Engine startFds int startGoroutines int ) // FIXME: nuke() is deprecated by Runtime.Nuke() func nuke(runtime *docker.Runtime) error { return runtime.Nuke() } // FIXME: cleanup and nuke are redundant. func cleanup(eng *engine.Engine, t *testing.T) error { runtime := mkRuntimeFromEngine(eng, t) for _, container := range runtime.List() { container.Kill() runtime.Destroy(container) } job := eng.Job("images") images, err := job.Stdout.AddTable() if err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } for _, image := range images.Data { if image.Get("Id") != unitTestImageID { eng.Job("image_delete", image.Get("Id")).Run() } } return nil } func layerArchive(tarfile string) (io.Reader, error) { // FIXME: need to close f somewhere f, err := os.Open(tarfile) if err != nil { return nil, err } return f, nil } func init() { // Always use the same driver (vfs) for all integration tests. // To test other drivers, we need a dedicated driver validation suite. os.Setenv("DOCKER_DRIVER", "vfs") os.Setenv("TEST", "1") // Hack to run sys init during unit testing if selfPath := utils.SelfPath(); strings.Contains(selfPath, ".dockerinit") { sysinit.SysInit() return } if uid := syscall.Geteuid(); uid != 0 { log.Fatal("docker tests need to be run as root") } // Copy dockerinit into our current testing directory, if provided (so we can test a separate dockerinit binary) if dockerinit := os.Getenv("TEST_DOCKERINIT_PATH"); dockerinit != "" { src, err := os.Open(dockerinit) if err != nil { log.Fatalf("Unable to open TEST_DOCKERINIT_PATH: %s\n", err) } defer src.Close() dst, err := os.OpenFile(filepath.Join(filepath.Dir(utils.SelfPath()), "dockerinit"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0555) if err != nil { log.Fatalf("Unable to create dockerinit in test directory: %s\n", err) } defer dst.Close() if _, err := io.Copy(dst, src); err != nil { log.Fatalf("Unable to copy dockerinit to TEST_DOCKERINIT_PATH: %s\n", err) } dst.Close() src.Close() } // Setup the base runtime, which will be duplicated for each test. // (no tests are run directly in the base) setupBaseImage() // Create the "global runtime" with a long-running daemon for integration tests spawnGlobalDaemon() startFds, startGoroutines = utils.GetTotalUsedFds(), runtime.NumGoroutine() } func setupBaseImage() { eng := newTestEngine(log.New(os.Stderr, "", 0), false, unitTestStoreBase) job := eng.Job("inspect", unitTestImageName, "image") img, _ := job.Stdout.AddEnv() // If the unit test is not found, try to download it. 
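// The pull below only runs when the local graph does not already hold an image
// with the expected unitTestImageID, so a warm store skips the download.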
if err := job.Run(); err != nil || img.Get("id") != unitTestImageID { // Retrieve the Image job = eng.Job("pull", unitTestImageName) job.Stdout.Add(utils.NopWriteCloser(os.Stdout)) if err := job.Run(); err != nil { log.Fatalf("Unable to pull the test image: %s", err) } } } func spawnGlobalDaemon() { if globalRuntime != nil { utils.Debugf("Global runtime already exists. Skipping.") return } t := log.New(os.Stderr, "", 0) eng := NewTestEngine(t) globalEngine = eng globalRuntime = mkRuntimeFromEngine(eng, t) // Spawn a Daemon go func() { utils.Debugf("Spawning global daemon for integration tests") listenURL := &url.URL{ Scheme: testDaemonProto, Host: testDaemonAddr, } job := eng.Job("serveapi", listenURL.String()) job.SetenvBool("Logging", true) if err := job.Run(); err != nil { log.Fatalf("Unable to spawn the test daemon: %s", err) } }() // Give some time to ListenAndServer to actually start // FIXME: use inmem transports instead of tcp time.Sleep(time.Second) if err := eng.Job("acceptconnections").Run(); err != nil { log.Fatalf("Unable to accept connections for test api: %s", err) } } // FIXME: test that ImagePull(json=true) send correct json output func GetTestImage(runtime *docker.Runtime) *docker.Image { imgs, err := runtime.Graph().Map() if err != nil { log.Fatalf("Unable to get the test image: %s", err) } for _, image := range imgs { if image.ID == unitTestImageID { return image } } log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, runtime.Graph().Root, imgs) return nil } func TestRuntimeCreate(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) // Make sure we start we 0 containers if len(runtime.List()) != 0 { t.Errorf("Expected 0 containers, %v found", len(runtime.List())) } container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}, }, "", ) if err != nil { t.Fatal(err) } defer func() { if err := runtime.Destroy(container); err != nil { t.Error(err) } }() // Make sure we can find the newly created container with List() if len(runtime.List()) != 1 { t.Errorf("Expected 1 container, %v found", len(runtime.List())) } // Make sure the container List() returns is the right one if runtime.List()[0].ID != container.ID { t.Errorf("Unexpected container %v returned by List", runtime.List()[0]) } // Make sure we can get the container with Get() if runtime.Get(container.ID) == nil { t.Errorf("Unable to get newly created container") } // Make sure it is the right container if runtime.Get(container.ID) != container { t.Errorf("Get() returned the wrong container") } // Make sure Exists returns it as existing if !runtime.Exists(container.ID) { t.Errorf("Exists() returned false for a newly created container") } // Test that conflict error displays correct details testContainer, _, _ := runtime.Create( &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}, }, "conflictname", ) if _, _, err := runtime.Create(&runconfig.Config{Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}}, testContainer.Name); err == nil || !strings.Contains(err.Error(), utils.TruncateID(testContainer.ID)) { t.Fatalf("Name conflict error doesn't include the correct short id. 
Message was: %s", err.Error()) } // Make sure create with bad parameters returns an error if _, _, err = runtime.Create(&runconfig.Config{Image: GetTestImage(runtime).ID}, ""); err == nil { t.Fatal("Builder.Create should throw an error when Cmd is missing") } if _, _, err := runtime.Create( &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{}, }, "", ); err == nil { t.Fatal("Builder.Create should throw an error when Cmd is empty") } config := &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/ls"}, PortSpecs: []string{"80"}, } container, _, err = runtime.Create(config, "") _, err = runtime.Commit(container, "testrepo", "testtag", "", "", config) if err != nil { t.Error(err) } // test expose 80:8000 container, warnings, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}, PortSpecs: []string{"80:8000"}, }, "", ) if err != nil { t.Fatal(err) } if warnings == nil || len(warnings) != 1 { t.Error("Expected a warning, got none") } } func TestDestroy(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}, }, "") if err != nil { t.Fatal(err) } // Destroy if err := runtime.Destroy(container); err != nil { t.Error(err) } // Make sure runtime.Exists() behaves correctly if runtime.Exists("test_destroy") { t.Errorf("Exists() returned true") } // Make sure runtime.List() doesn't list the destroyed container if len(runtime.List()) != 0 { t.Errorf("Expected 0 container, %v found", len(runtime.List())) } // Make sure runtime.Get() refuses to return the unexisting container if runtime.Get(container.ID) != nil { t.Errorf("Unable to get newly created container") } // Test double destroy if err := runtime.Destroy(container); err == nil { // It should have failed t.Errorf("Double destroy did not fail") } } func TestGet(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container1, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) defer runtime.Destroy(container1) container2, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) defer runtime.Destroy(container2) container3, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) defer runtime.Destroy(container3) if runtime.Get(container1.ID) != container1 { t.Errorf("Get(test1) returned %v while expecting %v", runtime.Get(container1.ID), container1) } if runtime.Get(container2.ID) != container2 { t.Errorf("Get(test2) returned %v while expecting %v", runtime.Get(container2.ID), container2) } if runtime.Get(container3.ID) != container3 { t.Errorf("Get(test3) returned %v while expecting %v", runtime.Get(container3.ID), container3) } } func startEchoServerContainer(t *testing.T, proto string) (*docker.Runtime, *docker.Container, string) { var ( err error id string strPort string eng = NewTestEngine(t) runtime = mkRuntimeFromEngine(eng, t) port = 5554 p nat.Port ) defer func() { if err != nil { runtime.Nuke() } }() for { port += 1 strPort = strconv.Itoa(port) var cmd string if proto == "tcp" { cmd = "socat TCP-LISTEN:" + strPort + ",reuseaddr,fork EXEC:/bin/cat" } else if proto == "udp" { cmd = "socat UDP-RECVFROM:" + strPort + ",fork EXEC:/bin/cat" } else { t.Fatal(fmt.Errorf("Unknown protocol %v", proto)) } ep := make(map[nat.Port]struct{}, 1) p = nat.Port(fmt.Sprintf("%s/%s", strPort, proto)) ep[p] = struct{}{} jobCreate := eng.Job("create") jobCreate.Setenv("Image", unitTestImageID) jobCreate.SetenvList("Cmd", []string{"sh", "-c", 
cmd}) jobCreate.SetenvList("PortSpecs", []string{fmt.Sprintf("%s/%s", strPort, proto)}) jobCreate.SetenvJson("ExposedPorts", ep) jobCreate.Stdout.AddString(&id) if err := jobCreate.Run(); err != nil { t.Fatal(err) } // FIXME: this relies on the undocumented behavior of runtime.Create // which will return a nil error AND container if the exposed ports // are invalid. That behavior should be fixed! if id != "" { break } t.Logf("Port %v already in use, trying another one", strPort) } jobStart := eng.Job("start", id) portBindings := make(map[nat.Port][]nat.PortBinding) portBindings[p] = []nat.PortBinding{ {}, } if err := jobStart.SetenvJson("PortsBindings", portBindings); err != nil { t.Fatal(err) } if err := jobStart.Run(); err != nil { t.Fatal(err) } container := runtime.Get(id) if container == nil { t.Fatalf("Couldn't fetch test container %s", id) } setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() { for !container.State.IsRunning() { time.Sleep(10 * time.Millisecond) } }) // Even if the state is running, lets give some time to lxc to spawn the process container.WaitTimeout(500 * time.Millisecond) strPort = container.NetworkSettings.Ports[p][0].HostPort return runtime, container, strPort } // Run a container with a TCP port allocated, and test that it can receive connections on localhost func TestAllocateTCPPortLocalhost(t *testing.T) { runtime, container, port := startEchoServerContainer(t, "tcp") defer nuke(runtime) defer container.Kill() for i := 0; i != 10; i++ { conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%v", port)) if err != nil { t.Fatal(err) } defer conn.Close() input := bytes.NewBufferString("well hello there\n") _, err = conn.Write(input.Bytes()) if err != nil { t.Fatal(err) } buf := make([]byte, 16) read := 0 conn.SetReadDeadline(time.Now().Add(3 * time.Second)) read, err = conn.Read(buf) if err != nil { if err, ok := err.(*net.OpError); ok { if err.Err == syscall.ECONNRESET { t.Logf("Connection reset by the proxy, socat is probably not listening yet, trying again in a sec") conn.Close() time.Sleep(time.Second) continue } if err.Timeout() { t.Log("Timeout, trying again") conn.Close() continue } } t.Fatal(err) } output := string(buf[:read]) if !strings.Contains(output, "well hello there") { t.Fatal(fmt.Errorf("[%v] doesn't contain [well hello there]", output)) } else { return } } t.Fatal("No reply from the container") } // Run a container with an UDP port allocated, and test that it can receive connections on localhost func TestAllocateUDPPortLocalhost(t *testing.T) { runtime, container, port := startEchoServerContainer(t, "udp") defer nuke(runtime) defer container.Kill() conn, err := net.Dial("udp", fmt.Sprintf("localhost:%v", port)) if err != nil { t.Fatal(err) } defer conn.Close() input := bytes.NewBufferString("well hello there\n") buf := make([]byte, 16) // Try for a minute, for some reason the select in socat may take ages // to return even though everything on the path seems fine (i.e: the // UDPProxy forwards the traffic correctly and you can see the packets // on the interface from within the container). 
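	// 120 attempts with a 500ms read deadline each gives the container roughly
	// a minute to echo the payload back before the test is failed.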
for i := 0; i != 120; i++ { _, err := conn.Write(input.Bytes()) if err != nil { t.Fatal(err) } conn.SetReadDeadline(time.Now().Add(500 * time.Millisecond)) read, err := conn.Read(buf) if err == nil { output := string(buf[:read]) if strings.Contains(output, "well hello there") { return } } } t.Fatal("No reply from the container") } func TestRestore(t *testing.T) { eng := NewTestEngine(t) runtime1 := mkRuntimeFromEngine(eng, t) defer runtime1.Nuke() // Create a container with one instance of docker container1, _, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t) defer runtime1.Destroy(container1) // Create a second container meant to be killed container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t) defer runtime1.Destroy(container2) // Start the container non blocking if err := container2.Start(); err != nil { t.Fatal(err) } if !container2.State.IsRunning() { t.Fatalf("Container %v should appear as running but isn't", container2.ID) } // Simulate a crash/manual quit of dockerd: process dies, states stays 'Running' cStdin, _ := container2.StdinPipe() cStdin.Close() if err := container2.WaitTimeout(2 * time.Second); err != nil { t.Fatal(err) } container2.State.SetRunning(42) container2.ToDisk() if len(runtime1.List()) != 2 { t.Errorf("Expected 2 container, %v found", len(runtime1.List())) } if err := container1.Run(); err != nil { t.Fatal(err) } if !container2.State.IsRunning() { t.Fatalf("Container %v should appear as running but isn't", container2.ID) } // Here are are simulating a docker restart - that is, reloading all containers // from scratch eng = newTestEngine(t, false, eng.Root()) runtime2 := mkRuntimeFromEngine(eng, t) if len(runtime2.List()) != 2 { t.Errorf("Expected 2 container, %v found", len(runtime2.List())) } runningCount := 0 for _, c := range runtime2.List() { if c.State.IsRunning() { t.Errorf("Running container found: %v (%v)", c.ID, c.Path) runningCount++ } } if runningCount != 0 { t.Fatalf("Expected 0 container alive, %d found", runningCount) } container3 := runtime2.Get(container1.ID) if container3 == nil { t.Fatal("Unable to Get container") } if err := container3.Run(); err != nil { t.Fatal(err) } container2.State.SetStopped(0) } func TestDefaultContainerName(t *testing.T) { eng := NewTestEngine(t) runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } container := runtime.Get(createNamedTestContainer(eng, config, t, "some_name")) containerID := container.ID if container.Name != "/some_name" { t.Fatalf("Expect /some_name got %s", container.Name) } if c := runtime.Get("/some_name"); c == nil { t.Fatalf("Couldn't retrieve test container as /some_name") } else if c.ID != containerID { t.Fatalf("Container /some_name has ID %s instead of %s", c.ID, containerID) } } func TestRandomContainerName(t *testing.T) { eng := NewTestEngine(t) runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) config, _, _, err := runconfig.Parse([]string{GetTestImage(runtime).ID, "echo test"}, nil) if err != nil { t.Fatal(err) } container := runtime.Get(createTestContainer(eng, config, t)) containerID := container.ID if container.Name == "" { t.Fatalf("Expected not empty container name") } if c := runtime.Get(container.Name); c == nil { log.Fatalf("Could not lookup container %s by its name", container.Name) } else if c.ID != containerID { log.Fatalf("Looking up container name %s returned id %s instead of %s", container.Name, c.ID, containerID) } } 
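// Sketch: assumes runtime.Get resolves a truncated id the same way the shortID
// lookup in TestContainerNameValidation does. It creates a container through
// the engine and then fetches it again via utils.TruncateID(container.ID).
func TestGetByTruncatedID(t *testing.T) {
	eng := NewTestEngine(t)
	runtime := mkRuntimeFromEngine(eng, t)
	defer nuke(runtime)

	config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil)
	if err != nil {
		t.Fatal(err)
	}
	container := runtime.Get(createTestContainer(eng, config, t))

	// Look the container up again by its truncated id and make sure we get
	// the same container back.
	shortID := utils.TruncateID(container.ID)
	if c := runtime.Get(shortID); c == nil {
		t.Fatalf("Could not lookup container by truncated id %s", shortID)
	} else if c.ID != container.ID {
		t.Fatalf("Truncated id %s resolved to %s instead of %s", shortID, c.ID, container.ID)
	}
}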
func TestContainerNameValidation(t *testing.T) { eng := NewTestEngine(t) runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) for _, test := range []struct { Name string Valid bool }{ {"abc-123_AAA.1", true}, {"\000asdf", false}, } { config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) if err != nil { if !test.Valid { continue } t.Fatal(err) } var shortID string job := eng.Job("create", test.Name) if err := job.ImportEnv(config); err != nil { t.Fatal(err) } job.Stdout.AddString(&shortID) if err := job.Run(); err != nil { if !test.Valid { continue } t.Fatal(err) } container := runtime.Get(shortID) if container.Name != "/"+test.Name { t.Fatalf("Expect /%s got %s", test.Name, container.Name) } if c := runtime.Get("/" + test.Name); c == nil { t.Fatalf("Couldn't retrieve test container as /%s", test.Name) } else if c.ID != container.ID { t.Fatalf("Container /%s has ID %s instead of %s", test.Name, c.ID, container.ID) } } } func TestLinkChildContainer(t *testing.T) { eng := NewTestEngine(t) runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } container := runtime.Get(createNamedTestContainer(eng, config, t, "/webapp")) webapp, err := runtime.GetByName("/webapp") if err != nil { t.Fatal(err) } if webapp.ID != container.ID { t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID) } config, _, _, err = runconfig.Parse([]string{GetTestImage(runtime).ID, "echo test"}, nil) if err != nil { t.Fatal(err) } childContainer := runtime.Get(createTestContainer(eng, config, t)) if err := runtime.RegisterLink(webapp, childContainer, "db"); err != nil { t.Fatal(err) } // Get the child by it's new name db, err := runtime.GetByName("/webapp/db") if err != nil { t.Fatal(err) } if db.ID != childContainer.ID { t.Fatalf("Expect db id to match container id: %s != %s", db.ID, childContainer.ID) } } func TestGetAllChildren(t *testing.T) { eng := NewTestEngine(t) runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } container := runtime.Get(createNamedTestContainer(eng, config, t, "/webapp")) webapp, err := runtime.GetByName("/webapp") if err != nil { t.Fatal(err) } if webapp.ID != container.ID { t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID) } config, _, _, err = runconfig.Parse([]string{unitTestImageID, "echo test"}, nil) if err != nil { t.Fatal(err) } childContainer := runtime.Get(createTestContainer(eng, config, t)) if err := runtime.RegisterLink(webapp, childContainer, "db"); err != nil { t.Fatal(err) } children, err := runtime.Children("/webapp") if err != nil { t.Fatal(err) } if children == nil { t.Fatal("Children should not be nil") } if len(children) == 0 { t.Fatal("Children should not be empty") } for key, value := range children { if key != "/webapp/db" { t.Fatalf("Expected /webapp/db got %s", key) } if value.ID != childContainer.ID { t.Fatalf("Expected id %s got %s", childContainer.ID, value.ID) } } } func TestDestroyWithInitLayer(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"ls", "-al"}, }, "") if err != nil { t.Fatal(err) } // Destroy if err := runtime.Destroy(container); err != nil { t.Fatal(err) } // Make sure runtime.Exists() behaves correctly if 
runtime.Exists("test_destroy") { t.Fatalf("Exists() returned true") } // Make sure runtime.List() doesn't list the destroyed container if len(runtime.List()) != 0 { t.Fatalf("Expected 0 container, %v found", len(runtime.List())) } driver := runtime.Graph().Driver() // Make sure that the container does not exist in the driver if _, err := driver.Get(container.ID); err == nil { t.Fatal("Conttainer should not exist in the driver") } // Make sure that the init layer is removed from the driver if _, err := driver.Get(fmt.Sprintf("%s-init", container.ID)); err == nil { t.Fatal("Container's init layer should not exist in the driver") } } docker-0.9.1/integration/buildfile_test.go0000644000175000017500000005476712314376205016776 0ustar tagtagpackage docker import ( "fmt" "github.com/dotcloud/docker" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/utils" "io/ioutil" "net" "net/http" "net/http/httptest" "strings" "testing" ) // A testContextTemplate describes a build context and how to test it type testContextTemplate struct { // Contents of the Dockerfile dockerfile string // Additional files in the context, eg [][2]string{"./passwd", "gordon"} files [][2]string // Additional remote files to host on a local HTTP server. remoteFiles [][2]string } func (context testContextTemplate) Archive(dockerfile string, t *testing.T) archive.Archive { input := []string{"Dockerfile", dockerfile} for _, pair := range context.files { input = append(input, pair[0], pair[1]) } a, err := archive.Generate(input...) if err != nil { t.Fatal(err) } return a } // A table of all the contexts to build and test. // A new docker runtime will be created and torn down for each context. var testContexts = []testContextTemplate{ { ` from {IMAGE} run sh -c 'echo root:testpass > /tmp/passwd' run mkdir -p /var/run/sshd run [ "$(cat /tmp/passwd)" = "root:testpass" ] run [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ] `, nil, nil, }, // Exactly the same as above, except uses a line split with a \ to test // multiline support. { ` from {IMAGE} run sh -c 'echo root:testpass \ > /tmp/passwd' run mkdir -p /var/run/sshd run [ "$(cat /tmp/passwd)" = "root:testpass" ] run [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ] `, nil, nil, }, // Line containing literal "\n" { ` from {IMAGE} run sh -c 'echo root:testpass > /tmp/passwd' run echo "foo \n bar"; echo "baz" run mkdir -p /var/run/sshd run [ "$(cat /tmp/passwd)" = "root:testpass" ] run [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ] `, nil, nil, }, { ` from {IMAGE} add foo /usr/lib/bla/bar run [ "$(cat /usr/lib/bla/bar)" = 'hello' ] add http://{SERVERADDR}/baz /usr/lib/baz/quux run [ "$(cat /usr/lib/baz/quux)" = 'world!' 
] `, [][2]string{{"foo", "hello"}}, [][2]string{{"/baz", "world!"}}, }, { ` from {IMAGE} add f / run [ "$(cat /f)" = "hello" ] add f /abc run [ "$(cat /abc)" = "hello" ] add f /x/y/z run [ "$(cat /x/y/z)" = "hello" ] add f /x/y/d/ run [ "$(cat /x/y/d/f)" = "hello" ] add d / run [ "$(cat /ga)" = "bu" ] add d /somewhere run [ "$(cat /somewhere/ga)" = "bu" ] add d /anotherplace/ run [ "$(cat /anotherplace/ga)" = "bu" ] add d /somewheeeere/over/the/rainbooow run [ "$(cat /somewheeeere/over/the/rainbooow/ga)" = "bu" ] `, [][2]string{ {"f", "hello"}, {"d/ga", "bu"}, }, nil, }, { ` from {IMAGE} add http://{SERVERADDR}/x /a/b/c run [ "$(cat /a/b/c)" = "hello" ] add http://{SERVERADDR}/x?foo=bar / run [ "$(cat /x)" = "hello" ] add http://{SERVERADDR}/x /d/ run [ "$(cat /d/x)" = "hello" ] add http://{SERVERADDR} /e run [ "$(cat /e)" = "blah" ] `, nil, [][2]string{{"/x", "hello"}, {"/", "blah"}}, }, // Comments, shebangs, and executability, oh my! { ` FROM {IMAGE} # This is an ordinary comment. RUN { echo '#!/bin/sh'; echo 'echo hello world'; } > /hello.sh RUN [ ! -x /hello.sh ] RUN chmod +x /hello.sh RUN [ -x /hello.sh ] RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ] RUN [ "$(/hello.sh)" = "hello world" ] `, nil, nil, }, // Users and groups { ` FROM {IMAGE} # Make sure our defaults work RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ] # TODO decide if "args.user = strconv.Itoa(syscall.Getuid())" is acceptable behavior for changeUser in sysvinit instead of "return nil" when "USER" isn't specified (so that we get the proper group list even if that is the empty list, even in the default case of not supplying an explicit USER to run as, which implies USER 0) USER root RUN [ "$(id -G):$(id -Gn)" = '0:root' ] # Setup dockerio user and group RUN echo 'dockerio:x:1000:1000::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1000:' >> /etc/group # Make sure we can switch to our user and all the information is exactly as we expect it to be USER dockerio RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ] # Switch back to root and double check that worked exactly as we might expect it to USER root RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '0:0/root:root/0:root' ] # Add a "supplementary" group for our dockerio user RUN echo 'supplementary:x:1001:dockerio' >> /etc/group # ... 
and then go verify that we get it like we expect USER dockerio RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000 1001:dockerio supplementary' ] USER 1000 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000 1001:dockerio supplementary' ] # super test the new "user:group" syntax USER dockerio:dockerio RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ] USER 1000:dockerio RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ] USER dockerio:1000 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ] USER 1000:1000 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1000/dockerio:dockerio/1000:dockerio' ] USER dockerio:supplementary RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1001/dockerio:supplementary/1001:supplementary' ] USER dockerio:1001 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1001/dockerio:supplementary/1001:supplementary' ] USER 1000:supplementary RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1001/dockerio:supplementary/1001:supplementary' ] USER 1000:1001 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1000:1001/dockerio:supplementary/1001:supplementary' ] # make sure unknown uid/gid still works properly USER 1042:1043 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ] `, nil, nil, }, // Environment variable { ` from {IMAGE} env FOO BAR run [ "$FOO" = "BAR" ] `, nil, nil, }, // Environment overwriting { ` from {IMAGE} env FOO BAR run [ "$FOO" = "BAR" ] env FOO BAZ run [ "$FOO" = "BAZ" ] `, nil, nil, }, { ` from {IMAGE} ENTRYPOINT /bin/echo CMD Hello world `, nil, nil, }, { ` from {IMAGE} VOLUME /test CMD Hello world `, nil, nil, }, { ` from {IMAGE} env FOO /foo/baz env BAR /bar env BAZ $BAR env FOOPATH $PATH:$FOO run [ "$BAR" = "$BAZ" ] run [ "$FOOPATH" = "$PATH:/foo/baz" ] `, nil, nil, }, { ` from {IMAGE} env FOO /bar env TEST testdir env BAZ /foobar add testfile $BAZ/ add $TEST $FOO run [ "$(cat /foobar/testfile)" = "test1" ] run [ "$(cat /bar/withfile)" = "test2" ] `, [][2]string{ {"testfile", "test1"}, {"testdir/withfile", "test2"}, }, nil, }, // JSON! { ` FROM {IMAGE} RUN ["/bin/echo","hello","world"] CMD ["/bin/true"] ENTRYPOINT ["/bin/echo","your command -->"] `, nil, nil, }, { ` FROM {IMAGE} ADD test /test RUN ["chmod","+x","/test"] RUN ["/test"] RUN [ "$(cat /testfile)" = 'test!' ] `, [][2]string{ {"test", "#!/bin/sh\necho 'test!' > /testfile"}, }, nil, }, } // FIXME: test building with 2 successive overlapping ADD commands func constructDockerfile(template string, ip net.IP, port string) string { serverAddr := fmt.Sprintf("%s:%s", ip, port) replacer := strings.NewReplacer("{IMAGE}", unitTestImageID, "{SERVERADDR}", serverAddr) return replacer.Replace(template) } func mkTestingFileServer(files [][2]string) (*httptest.Server, error) { mux := http.NewServeMux() for _, file := range files { name, contents := file[0], file[1] mux.HandleFunc(name, func(w http.ResponseWriter, r *http.Request) { w.Write([]byte(contents)) }) } // This is how httptest.NewServer sets up a net.Listener, except that our listener must accept remote // connections (from the container). 
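	// Binding ":0" listens on every interface on a random free port, so the
	// containers built by the tests can reach this server over the docker
	// bridge; httptest's default listener binds 127.0.0.1 only.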
listener, err := net.Listen("tcp", ":0") if err != nil { return nil, err } s := httptest.NewUnstartedServer(mux) s.Listener = listener s.Start() return s, nil } func TestBuild(t *testing.T) { for _, ctx := range testContexts { _, err := buildImage(ctx, t, nil, true) if err != nil { t.Fatal(err) } } } func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, useCache bool) (*docker.Image, error) { if eng == nil { eng = NewTestEngine(t) runtime := mkRuntimeFromEngine(eng, t) // FIXME: we might not need runtime, why not simply nuke // the engine? defer nuke(runtime) } srv := mkServerFromEngine(eng, t) httpServer, err := mkTestingFileServer(context.remoteFiles) if err != nil { t.Fatal(err) } defer httpServer.Close() idx := strings.LastIndex(httpServer.URL, ":") if idx < 0 { t.Fatalf("could not get port from test http server address %s", httpServer.URL) } port := httpServer.URL[idx+1:] iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP") if iIP == nil { t.Fatal("Legacy bridgeIP field not set in engine") } ip, ok := iIP.(net.IP) if !ok { panic("Legacy bridgeIP field in engine does not cast to net.IP") } dockerfile := constructDockerfile(context.dockerfile, ip, port) buildfile := docker.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, useCache, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) id, err := buildfile.Build(context.Archive(dockerfile, t)) if err != nil { return nil, err } return srv.ImageInspect(id) } func TestVolume(t *testing.T) { img, err := buildImage(testContextTemplate{` from {IMAGE} volume /test cmd Hello world `, nil, nil}, t, nil, true) if err != nil { t.Fatal(err) } if len(img.Config.Volumes) == 0 { t.Fail() } for key := range img.Config.Volumes { if key != "/test" { t.Fail() } } } func TestBuildMaintainer(t *testing.T) { img, err := buildImage(testContextTemplate{` from {IMAGE} maintainer dockerio `, nil, nil}, t, nil, true) if err != nil { t.Fatal(err) } if img.Author != "dockerio" { t.Fail() } } func TestBuildUser(t *testing.T) { img, err := buildImage(testContextTemplate{` from {IMAGE} user dockerio `, nil, nil}, t, nil, true) if err != nil { t.Fatal(err) } if img.Config.User != "dockerio" { t.Fail() } } func TestBuildEnv(t *testing.T) { img, err := buildImage(testContextTemplate{` from {IMAGE} env port 4243 `, nil, nil}, t, nil, true) if err != nil { t.Fatal(err) } hasEnv := false for _, envVar := range img.Config.Env { if envVar == "port=4243" { hasEnv = true break } } if !hasEnv { t.Fail() } } func TestBuildCmd(t *testing.T) { img, err := buildImage(testContextTemplate{` from {IMAGE} cmd ["/bin/echo", "Hello World"] `, nil, nil}, t, nil, true) if err != nil { t.Fatal(err) } if img.Config.Cmd[0] != "/bin/echo" { t.Log(img.Config.Cmd[0]) t.Fail() } if img.Config.Cmd[1] != "Hello World" { t.Log(img.Config.Cmd[1]) t.Fail() } } func TestBuildExpose(t *testing.T) { img, err := buildImage(testContextTemplate{` from {IMAGE} expose 4243 `, nil, nil}, t, nil, true) if err != nil { t.Fatal(err) } if img.Config.PortSpecs[0] != "4243" { t.Fail() } } func TestBuildEntrypoint(t *testing.T) { img, err := buildImage(testContextTemplate{` from {IMAGE} entrypoint ["/bin/echo"] `, nil, nil}, t, nil, true) if err != nil { t.Fatal(err) } if img.Config.Entrypoint[0] != "/bin/echo" { t.Log(img.Config.Entrypoint[0]) t.Fail() } } // testing #1405 - config.Cmd does not get cleaned up if // utilizing cache func TestBuildEntrypointRunCleanup(t *testing.T) { eng := NewTestEngine(t) defer nuke(mkRuntimeFromEngine(eng, t)) img, err := 
buildImage(testContextTemplate{` from {IMAGE} run echo "hello" `, nil, nil}, t, eng, true) if err != nil { t.Fatal(err) } img, err = buildImage(testContextTemplate{` from {IMAGE} run echo "hello" add foo /foo entrypoint ["/bin/echo"] `, [][2]string{{"foo", "HEYO"}}, nil}, t, eng, true) if err != nil { t.Fatal(err) } if len(img.Config.Cmd) != 0 { t.Fail() } } func checkCacheBehavior(t *testing.T, template testContextTemplate, expectHit bool) (imageId string) { eng := NewTestEngine(t) defer nuke(mkRuntimeFromEngine(eng, t)) img, err := buildImage(template, t, eng, true) if err != nil { t.Fatal(err) } imageId = img.ID img, err = buildImage(template, t, eng, expectHit) if err != nil { t.Fatal(err) } if hit := imageId == img.ID; hit != expectHit { t.Fatalf("Cache misbehavior, got hit=%t, expected hit=%t: (first: %s, second %s)", hit, expectHit, imageId, img.ID) } return } func checkCacheBehaviorFromEngime(t *testing.T, template testContextTemplate, expectHit bool, eng *engine.Engine) (imageId string) { img, err := buildImage(template, t, eng, true) if err != nil { t.Fatal(err) } imageId = img.ID img, err = buildImage(template, t, eng, expectHit) if err != nil { t.Fatal(err) } if hit := imageId == img.ID; hit != expectHit { t.Fatalf("Cache misbehavior, got hit=%t, expected hit=%t: (first: %s, second %s)", hit, expectHit, imageId, img.ID) } return } func TestBuildImageWithCache(t *testing.T) { template := testContextTemplate{` from {IMAGE} maintainer dockerio `, nil, nil} checkCacheBehavior(t, template, true) } func TestBuildImageWithoutCache(t *testing.T) { template := testContextTemplate{` from {IMAGE} maintainer dockerio `, nil, nil} checkCacheBehavior(t, template, false) } func TestBuildADDLocalFileWithCache(t *testing.T) { template := testContextTemplate{` from {IMAGE} maintainer dockerio run echo "first" add foo /usr/lib/bla/bar run [ "$(cat /usr/lib/bla/bar)" = "hello" ] run echo "second" add . 
/src/ run [ "$(cat /src/foo)" = "hello" ] `, [][2]string{ {"foo", "hello"}, }, nil} eng := NewTestEngine(t) defer nuke(mkRuntimeFromEngine(eng, t)) id1 := checkCacheBehaviorFromEngime(t, template, true, eng) template.files = append(template.files, [2]string{"bar", "hello2"}) id2 := checkCacheBehaviorFromEngime(t, template, true, eng) if id1 == id2 { t.Fatal("The cache should have been invalided but hasn't.") } id3 := checkCacheBehaviorFromEngime(t, template, true, eng) if id2 != id3 { t.Fatal("The cache should have been used but hasn't.") } template.files[1][1] = "hello3" id4 := checkCacheBehaviorFromEngime(t, template, true, eng) if id3 == id4 { t.Fatal("The cache should have been invalided but hasn't.") } template.dockerfile += ` add ./bar /src2/ run ls /src2/bar ` id5 := checkCacheBehaviorFromEngime(t, template, true, eng) if id4 == id5 { t.Fatal("The cache should have been invalided but hasn't.") } template.files[1][1] = "hello4" id6 := checkCacheBehaviorFromEngime(t, template, true, eng) if id5 == id6 { t.Fatal("The cache should have been invalided but hasn't.") } template.dockerfile += ` add bar /src2/bar2 add /bar /src2/bar3 run ls /src2/bar2 /src2/bar3 ` id7 := checkCacheBehaviorFromEngime(t, template, true, eng) if id6 == id7 { t.Fatal("The cache should have been invalided but hasn't.") } template.files[1][1] = "hello5" id8 := checkCacheBehaviorFromEngime(t, template, true, eng) if id7 == id8 { t.Fatal("The cache should have been invalided but hasn't.") } } func TestBuildADDLocalFileWithoutCache(t *testing.T) { template := testContextTemplate{` from {IMAGE} maintainer dockerio run echo "first" add foo /usr/lib/bla/bar run echo "second" `, [][2]string{{"foo", "hello"}}, nil} checkCacheBehavior(t, template, false) } func TestBuildADDCurrentDirectoryWithCache(t *testing.T) { template := testContextTemplate{` from {IMAGE} maintainer dockerio add . /usr/lib/bla `, nil, nil} checkCacheBehavior(t, template, true) } func TestBuildADDCurrentDirectoryWithoutCache(t *testing.T) { template := testContextTemplate{` from {IMAGE} maintainer dockerio add . 
/usr/lib/bla `, nil, nil} checkCacheBehavior(t, template, false) } func TestBuildADDRemoteFileWithCache(t *testing.T) { template := testContextTemplate{` from {IMAGE} maintainer dockerio run echo "first" add http://{SERVERADDR}/baz /usr/lib/baz/quux run echo "second" `, nil, [][2]string{{"/baz", "world!"}}} checkCacheBehavior(t, template, true) } func TestBuildADDRemoteFileWithoutCache(t *testing.T) { template := testContextTemplate{` from {IMAGE} maintainer dockerio run echo "first" add http://{SERVERADDR}/baz /usr/lib/baz/quux run echo "second" `, nil, [][2]string{{"/baz", "world!"}}} checkCacheBehavior(t, template, false) } func TestBuildADDLocalAndRemoteFilesWithCache(t *testing.T) { template := testContextTemplate{` from {IMAGE} maintainer dockerio run echo "first" add foo /usr/lib/bla/bar add http://{SERVERADDR}/baz /usr/lib/baz/quux run echo "second" `, [][2]string{{"foo", "hello"}}, [][2]string{{"/baz", "world!"}}} checkCacheBehavior(t, template, true) } func TestBuildADDLocalAndRemoteFilesWithoutCache(t *testing.T) { template := testContextTemplate{` from {IMAGE} maintainer dockerio run echo "first" add foo /usr/lib/bla/bar add http://{SERVERADDR}/baz /usr/lib/baz/quux run echo "second" `, [][2]string{{"foo", "hello"}}, [][2]string{{"/baz", "world!"}}} checkCacheBehavior(t, template, false) } func TestForbiddenContextPath(t *testing.T) { eng := NewTestEngine(t) defer nuke(mkRuntimeFromEngine(eng, t)) srv := mkServerFromEngine(eng, t) context := testContextTemplate{` from {IMAGE} maintainer dockerio add ../../ test/ `, [][2]string{{"test.txt", "test1"}, {"other.txt", "other"}}, nil} httpServer, err := mkTestingFileServer(context.remoteFiles) if err != nil { t.Fatal(err) } defer httpServer.Close() idx := strings.LastIndex(httpServer.URL, ":") if idx < 0 { t.Fatalf("could not get port from test http server address %s", httpServer.URL) } port := httpServer.URL[idx+1:] iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP") if iIP == nil { t.Fatal("Legacy bridgeIP field not set in engine") } ip, ok := iIP.(net.IP) if !ok { panic("Legacy bridgeIP field in engine does not cast to net.IP") } dockerfile := constructDockerfile(context.dockerfile, ip, port) buildfile := docker.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) _, err = buildfile.Build(context.Archive(dockerfile, t)) if err == nil { t.Log("Error should not be nil") t.Fail() } if err.Error() != "Forbidden path outside the build context: ../../ (/)" { t.Logf("Error message is not expected: %s", err.Error()) t.Fail() } } func TestBuildADDFileNotFound(t *testing.T) { eng := NewTestEngine(t) defer nuke(mkRuntimeFromEngine(eng, t)) context := testContextTemplate{` from {IMAGE} add foo /usr/local/bar `, nil, nil} httpServer, err := mkTestingFileServer(context.remoteFiles) if err != nil { t.Fatal(err) } defer httpServer.Close() idx := strings.LastIndex(httpServer.URL, ":") if idx < 0 { t.Fatalf("could not get port from test http server address %s", httpServer.URL) } port := httpServer.URL[idx+1:] iIP := eng.Hack_GetGlobalVar("httpapi.bridgeIP") if iIP == nil { t.Fatal("Legacy bridgeIP field not set in engine") } ip, ok := iIP.(net.IP) if !ok { panic("Legacy bridgeIP field in engine does not cast to net.IP") } dockerfile := constructDockerfile(context.dockerfile, ip, port) buildfile := docker.NewBuildFile(mkServerFromEngine(eng, t), ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) _, err = 
buildfile.Build(context.Archive(dockerfile, t)) if err == nil { t.Log("Error should not be nil") t.Fail() } if err.Error() != "foo: no such file or directory" { t.Logf("Error message is not expected: %s", err.Error()) t.Fail() } } func TestBuildInheritance(t *testing.T) { eng := NewTestEngine(t) defer nuke(mkRuntimeFromEngine(eng, t)) img, err := buildImage(testContextTemplate{` from {IMAGE} expose 4243 `, nil, nil}, t, eng, true) if err != nil { t.Fatal(err) } img2, _ := buildImage(testContextTemplate{fmt.Sprintf(` from %s entrypoint ["/bin/echo"] `, img.ID), nil, nil}, t, eng, true) if err != nil { t.Fatal(err) } // from child if img2.Config.Entrypoint[0] != "/bin/echo" { t.Fail() } // from parent if img.Config.PortSpecs[0] != "4243" { t.Fail() } } func TestBuildFails(t *testing.T) { _, err := buildImage(testContextTemplate{` from {IMAGE} run sh -c "exit 23" `, nil, nil}, t, nil, true) if err == nil { t.Fatal("Error should not be nil") } sterr, ok := err.(*utils.JSONError) if !ok { t.Fatalf("Error should be utils.JSONError") } if sterr.Code != 23 { t.Fatalf("StatusCode %d unexpected, should be 23", sterr.Code) } } func TestBuildFailsDockerfileEmpty(t *testing.T) { _, err := buildImage(testContextTemplate{``, nil, nil}, t, nil, true) if err != docker.ErrDockerfileEmpty { t.Fatal("Expected: %v, got: %v", docker.ErrDockerfileEmpty, err) } } func TestBuildOnBuildTrigger(t *testing.T) { _, err := buildImage(testContextTemplate{` from {IMAGE} onbuild run echo here is the trigger onbuild run touch foobar `, nil, nil, }, t, nil, true, ) if err != nil { t.Fatal(err) } // FIXME: test that the 'foobar' file was created in the final build. } func TestBuildOnBuildForbiddenChainedTrigger(t *testing.T) { _, err := buildImage(testContextTemplate{` from {IMAGE} onbuild onbuild run echo test `, nil, nil, }, t, nil, true, ) if err == nil { t.Fatal("Error should not be nil") } } func TestBuildOnBuildForbiddenFromTrigger(t *testing.T) { _, err := buildImage(testContextTemplate{` from {IMAGE} onbuild from {IMAGE} `, nil, nil, }, t, nil, true, ) if err == nil { t.Fatal("Error should not be nil") } } func TestBuildOnBuildForbiddenMaintainerTrigger(t *testing.T) { _, err := buildImage(testContextTemplate{` from {IMAGE} onbuild maintainer test `, nil, nil, }, t, nil, true, ) if err == nil { t.Fatal("Error should not be nil") } } docker-0.9.1/integration/container_test.go0000644000175000017500000011764312314376205017012 0ustar tagtagpackage docker import ( "bufio" "fmt" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "io" "io/ioutil" "os" "path" "regexp" "sort" "strings" "testing" "time" ) func TestIDFormat(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container1, _, err := runtime.Create( &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/sh", "-c", "echo hello world"}, }, "", ) if err != nil { t.Fatal(err) } match, err := regexp.Match("^[0-9a-f]{64}$", []byte(container1.ID)) if err != nil { t.Fatal(err) } if !match { t.Fatalf("Invalid container ID: %s", container1.ID) } } func TestMultipleAttachRestart(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, _ := mkContainer( runtime, []string{"_", "/bin/sh", "-c", "i=1; while [ $i -le 5 ]; do i=`expr $i + 1`; echo hello; done"}, t, ) defer runtime.Destroy(container) // Simulate 3 client attaching to the container and stop/restart stdout1, err := container.StdoutPipe() if err != nil { t.Fatal(err) } stdout2, err := container.StdoutPipe() if err != nil { t.Fatal(err) } stdout3, err := 
container.StdoutPipe() if err != nil { t.Fatal(err) } if err := container.Start(); err != nil { t.Fatal(err) } l1, err := bufio.NewReader(stdout1).ReadString('\n') if err != nil { t.Fatal(err) } if strings.Trim(l1, " \r\n") != "hello" { t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l1) } l2, err := bufio.NewReader(stdout2).ReadString('\n') if err != nil { t.Fatal(err) } if strings.Trim(l2, " \r\n") != "hello" { t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l2) } l3, err := bufio.NewReader(stdout3).ReadString('\n') if err != nil { t.Fatal(err) } if strings.Trim(l3, " \r\n") != "hello" { t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l3) } if err := container.Stop(10); err != nil { t.Fatal(err) } stdout1, err = container.StdoutPipe() if err != nil { t.Fatal(err) } stdout2, err = container.StdoutPipe() if err != nil { t.Fatal(err) } stdout3, err = container.StdoutPipe() if err != nil { t.Fatal(err) } if err := container.Start(); err != nil { t.Fatal(err) } setTimeout(t, "Timeout reading from the process", 3*time.Second, func() { l1, err = bufio.NewReader(stdout1).ReadString('\n') if err != nil { t.Fatal(err) } if strings.Trim(l1, " \r\n") != "hello" { t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l1) } l2, err = bufio.NewReader(stdout2).ReadString('\n') if err != nil { t.Fatal(err) } if strings.Trim(l2, " \r\n") != "hello" { t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l2) } l3, err = bufio.NewReader(stdout3).ReadString('\n') if err != nil { t.Fatal(err) } if strings.Trim(l3, " \r\n") != "hello" { t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l3) } }) container.Wait() } func TestDiff(t *testing.T) { eng := NewTestEngine(t) runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) // Create a container and remove a file container1, _, _ := mkContainer(runtime, []string{"_", "/bin/rm", "/etc/passwd"}, t) defer runtime.Destroy(container1) // The changelog should be empty and not fail before run. 
See #1705 c, err := container1.Changes() if err != nil { t.Fatal(err) } if len(c) != 0 { t.Fatalf("Changelog should be empty before run") } if err := container1.Run(); err != nil { t.Fatal(err) } // Check the changelog c, err = container1.Changes() if err != nil { t.Fatal(err) } success := false for _, elem := range c { if elem.Path == "/etc/passwd" && elem.Kind == 2 { success = true } } if !success { t.Fatalf("/etc/passwd as been removed but is not present in the diff") } // Commit the container img, err := runtime.Commit(container1, "", "", "unit test commited image - diff", "", nil) if err != nil { t.Fatal(err) } // Create a new container from the commited image container2, _, _ := mkContainer(runtime, []string{img.ID, "cat", "/etc/passwd"}, t) defer runtime.Destroy(container2) if err := container2.Run(); err != nil { t.Fatal(err) } // Check the changelog c, err = container2.Changes() if err != nil { t.Fatal(err) } for _, elem := range c { if elem.Path == "/etc/passwd" { t.Fatalf("/etc/passwd should not be present in the diff after commit.") } } // Create a new container container3, _, _ := mkContainer(runtime, []string{"_", "rm", "/bin/httpd"}, t) defer runtime.Destroy(container3) if err := container3.Run(); err != nil { t.Fatal(err) } // Check the changelog c, err = container3.Changes() if err != nil { t.Fatal(err) } success = false for _, elem := range c { if elem.Path == "/bin/httpd" && elem.Kind == 2 { success = true } } if !success { t.Fatalf("/bin/httpd should be present in the diff after commit.") } } func TestCommitAutoRun(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container1, _, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t) defer runtime.Destroy(container1) if container1.State.IsRunning() { t.Errorf("Container shouldn't be running") } if err := container1.Run(); err != nil { t.Fatal(err) } if container1.State.IsRunning() { t.Errorf("Container shouldn't be running") } img, err := runtime.Commit(container1, "", "", "unit test commited image", "", &runconfig.Config{Cmd: []string{"cat", "/world"}}) if err != nil { t.Error(err) } // FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world container2, _, _ := mkContainer(runtime, []string{img.ID}, t) defer runtime.Destroy(container2) stdout, err := container2.StdoutPipe() if err != nil { t.Fatal(err) } stderr, err := container2.StderrPipe() if err != nil { t.Fatal(err) } if err := container2.Start(); err != nil { t.Fatal(err) } container2.Wait() output, err := ioutil.ReadAll(stdout) if err != nil { t.Fatal(err) } output2, err := ioutil.ReadAll(stderr) if err != nil { t.Fatal(err) } if err := stdout.Close(); err != nil { t.Fatal(err) } if err := stderr.Close(); err != nil { t.Fatal(err) } if string(output) != "hello\n" { t.Fatalf("Unexpected output. 
Expected %s, received: %s (err: %s)", "hello\n", output, output2) } } func TestCommitRun(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container1, _, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t) defer runtime.Destroy(container1) if container1.State.IsRunning() { t.Errorf("Container shouldn't be running") } if err := container1.Run(); err != nil { t.Fatal(err) } if container1.State.IsRunning() { t.Errorf("Container shouldn't be running") } img, err := runtime.Commit(container1, "", "", "unit test commited image", "", nil) if err != nil { t.Error(err) } // FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world container2, _, _ := mkContainer(runtime, []string{img.ID, "cat", "/world"}, t) defer runtime.Destroy(container2) stdout, err := container2.StdoutPipe() if err != nil { t.Fatal(err) } stderr, err := container2.StderrPipe() if err != nil { t.Fatal(err) } if err := container2.Start(); err != nil { t.Fatal(err) } container2.Wait() output, err := ioutil.ReadAll(stdout) if err != nil { t.Fatal(err) } output2, err := ioutil.ReadAll(stderr) if err != nil { t.Fatal(err) } if err := stdout.Close(); err != nil { t.Fatal(err) } if err := stderr.Close(); err != nil { t.Fatal(err) } if string(output) != "hello\n" { t.Fatalf("Unexpected output. Expected %s, received: %s (err: %s)", "hello\n", output, output2) } } func TestStart(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, _ := mkContainer(runtime, []string{"-i", "_", "/bin/cat"}, t) defer runtime.Destroy(container) cStdin, err := container.StdinPipe() if err != nil { t.Fatal(err) } if err := container.Start(); err != nil { t.Fatal(err) } // Give some time to the process to start container.WaitTimeout(500 * time.Millisecond) if !container.State.IsRunning() { t.Errorf("Container should be running") } if err := container.Start(); err == nil { t.Fatalf("A running container should be able to be started") } // Try to avoid the timeout in destroy. Best effort, don't check error cStdin.Close() container.WaitTimeout(2 * time.Second) } func TestCpuShares(t *testing.T) { _, err1 := os.Stat("/sys/fs/cgroup/cpuacct,cpu") _, err2 := os.Stat("/sys/fs/cgroup/cpu,cpuacct") if err1 == nil || err2 == nil { t.Skip("Fixme. Setting cpu cgroup shares doesn't work in dind on a Fedora host. The lxc utils are confused by the cpu,cpuacct mount.") } runtime := mkRuntime(t) defer nuke(runtime) container, _, _ := mkContainer(runtime, []string{"-m", "33554432", "-c", "1000", "-i", "_", "/bin/cat"}, t) defer runtime.Destroy(container) cStdin, err := container.StdinPipe() if err != nil { t.Fatal(err) } if err := container.Start(); err != nil { t.Fatal(err) } // Give some time to the process to start container.WaitTimeout(500 * time.Millisecond) if !container.State.IsRunning() { t.Errorf("Container should be running") } if err := container.Start(); err == nil { t.Fatalf("A running container should be able to be started") } // Try to avoid the timeout in destroy. 
Best effort, don't check error cStdin.Close() container.WaitTimeout(2 * time.Second) } func TestRun(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t) defer runtime.Destroy(container) if container.State.IsRunning() { t.Errorf("Container shouldn't be running") } if err := container.Run(); err != nil { t.Fatal(err) } if container.State.IsRunning() { t.Errorf("Container shouldn't be running") } } func TestOutput(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foobar"}, }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container) output, err := container.Output() if err != nil { t.Fatal(err) } if string(output) != "foobar" { t.Fatalf("%s != %s", string(output), "foobar") } } func TestKillDifferentUser(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat"}, OpenStdin: true, User: "daemon", }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container) // FIXME @shykes: this seems redundant, but is very old, I'm leaving it in case // there is a side effect I'm not seeing. // defer container.stdin.Close() if container.State.IsRunning() { t.Errorf("Container shouldn't be running") } if err := container.Start(); err != nil { t.Fatal(err) } setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() { for !container.State.IsRunning() { time.Sleep(10 * time.Millisecond) } }) setTimeout(t, "read/write assertion timed out", 2*time.Second, func() { out, _ := container.StdoutPipe() in, _ := container.StdinPipe() if err := assertPipe("hello\n", "hello", out, in, 150); err != nil { t.Fatal(err) } }) if err := container.Kill(); err != nil { t.Fatal(err) } if container.State.IsRunning() { t.Errorf("Container shouldn't be running") } container.Wait() if container.State.IsRunning() { t.Errorf("Container shouldn't be running") } // Try stopping twice if err := container.Kill(); err != nil { t.Fatal(err) } } // Test that creating a container with a volume doesn't crash. Regression test for #995. 
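// The container is created and started through the engine's "create" and
// "start" jobs, with the parsed Config and HostConfig imported into each job,
// so the volume code path is exercised through the engine job interface rather
// than by calling runtime.Create directly.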
func TestCreateVolume(t *testing.T) { eng := NewTestEngine(t) runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) config, hc, _, err := runconfig.Parse([]string{"-v", "/var/lib/data", unitTestImageID, "echo", "hello", "world"}, nil) if err != nil { t.Fatal(err) } jobCreate := eng.Job("create") if err := jobCreate.ImportEnv(config); err != nil { t.Fatal(err) } var id string jobCreate.Stdout.AddString(&id) if err := jobCreate.Run(); err != nil { t.Fatal(err) } jobStart := eng.Job("start", id) if err := jobStart.ImportEnv(hc); err != nil { t.Fatal(err) } if err := jobStart.Run(); err != nil { t.Fatal(err) } // FIXME: this hack can be removed once Wait is a job c := runtime.Get(id) if c == nil { t.Fatalf("Couldn't retrieve container %s from runtime", id) } c.WaitTimeout(500 * time.Millisecond) c.Wait() } func TestKill(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sleep", "2"}, }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container) if container.State.IsRunning() { t.Errorf("Container shouldn't be running") } if err := container.Start(); err != nil { t.Fatal(err) } // Give some time to lxc to spawn the process container.WaitTimeout(500 * time.Millisecond) if !container.State.IsRunning() { t.Errorf("Container should be running") } if err := container.Kill(); err != nil { t.Fatal(err) } if container.State.IsRunning() { t.Errorf("Container shouldn't be running") } container.Wait() if container.State.IsRunning() { t.Errorf("Container shouldn't be running") } // Try stopping twice if err := container.Kill(); err != nil { t.Fatal(err) } } func TestExitCode(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) trueContainer, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/true"}, }, "") if err != nil { t.Fatal(err) } defer runtime.Destroy(trueContainer) if err := trueContainer.Run(); err != nil { t.Fatal(err) } if code := trueContainer.State.GetExitCode(); code != 0 { t.Fatalf("Unexpected exit code %d (expected 0)", code) } falseContainer, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/false"}, }, "") if err != nil { t.Fatal(err) } defer runtime.Destroy(falseContainer) if err := falseContainer.Run(); err != nil { t.Fatal(err) } if code := falseContainer.State.GetExitCode(); code != 1 { t.Fatalf("Unexpected exit code %d (expected 1)", code) } } func TestRestart(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foobar"}, }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container) output, err := container.Output() if err != nil { t.Fatal(err) } if string(output) != "foobar" { t.Error(string(output)) } // Run the container again and check the output output, err = container.Output() if err != nil { t.Fatal(err) } if string(output) != "foobar" { t.Error(string(output)) } } func TestRestartStdin(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat"}, OpenStdin: true, }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container) stdin, err := container.StdinPipe() if err != nil { t.Fatal(err) } stdout, err := container.StdoutPipe() if err != nil { t.Fatal(err) } if err := container.Start(); err != nil { 
t.Fatal(err) } if _, err := io.WriteString(stdin, "hello world"); err != nil { t.Fatal(err) } if err := stdin.Close(); err != nil { t.Fatal(err) } container.Wait() output, err := ioutil.ReadAll(stdout) if err != nil { t.Fatal(err) } if err := stdout.Close(); err != nil { t.Fatal(err) } if string(output) != "hello world" { t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world", string(output)) } // Restart and try again stdin, err = container.StdinPipe() if err != nil { t.Fatal(err) } stdout, err = container.StdoutPipe() if err != nil { t.Fatal(err) } if err := container.Start(); err != nil { t.Fatal(err) } if _, err := io.WriteString(stdin, "hello world #2"); err != nil { t.Fatal(err) } if err := stdin.Close(); err != nil { t.Fatal(err) } container.Wait() output, err = ioutil.ReadAll(stdout) if err != nil { t.Fatal(err) } if err := stdout.Close(); err != nil { t.Fatal(err) } if string(output) != "hello world #2" { t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world #2", string(output)) } } func TestUser(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) // Default user must be root container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container) output, err := container.Output() if err != nil { t.Fatal(err) } if !strings.Contains(string(output), "uid=0(root) gid=0(root)") { t.Error(string(output)) } // Set a username container, _, err = runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, User: "root", }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container) output, err = container.Output() if code := container.State.GetExitCode(); err != nil || code != 0 { t.Fatal(err) } if !strings.Contains(string(output), "uid=0(root) gid=0(root)") { t.Error(string(output)) } // Set a UID container, _, err = runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, User: "0", }, "", ) if code := container.State.GetExitCode(); err != nil || code != 0 { t.Fatal(err) } defer runtime.Destroy(container) output, err = container.Output() if code := container.State.GetExitCode(); err != nil || code != 0 { t.Fatal(err) } if !strings.Contains(string(output), "uid=0(root) gid=0(root)") { t.Error(string(output)) } // Set a different user by uid container, _, err = runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, User: "1", }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container) output, err = container.Output() if err != nil { t.Fatal(err) } else if code := container.State.GetExitCode(); code != 0 { t.Fatalf("Container exit code is invalid: %d\nOutput:\n%s\n", code, output) } if !strings.Contains(string(output), "uid=1(daemon) gid=1(daemon)") { t.Error(string(output)) } // Set a different user by username container, _, err = runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, User: "daemon", }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container) output, err = container.Output() if code := container.State.GetExitCode(); err != nil || code != 0 { t.Fatal(err) } if !strings.Contains(string(output), "uid=1(daemon) gid=1(daemon)") { t.Error(string(output)) } // Test an wrong username container, _, err = runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"id"}, User: "unknownuser", }, "", ) if err != nil { t.Fatal(err) } defer 
runtime.Destroy(container) output, err = container.Output() if container.State.GetExitCode() == 0 { t.Fatal("Starting container with wrong uid should fail but it passed.") } } func TestMultipleContainers(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container1, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sleep", "2"}, }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container1) container2, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sleep", "2"}, }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container2) // Start both containers if err := container1.Start(); err != nil { t.Fatal(err) } if err := container2.Start(); err != nil { t.Fatal(err) } // Make sure they are running before trying to kill them container1.WaitTimeout(250 * time.Millisecond) container2.WaitTimeout(250 * time.Millisecond) // If we are here, both containers should be running if !container1.State.IsRunning() { t.Fatal("Container not running") } if !container2.State.IsRunning() { t.Fatal("Container not running") } // Kill them if err := container1.Kill(); err != nil { t.Fatal(err) } if err := container2.Kill(); err != nil { t.Fatal(err) } } func TestStdin(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat"}, OpenStdin: true, }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container) stdin, err := container.StdinPipe() if err != nil { t.Fatal(err) } stdout, err := container.StdoutPipe() if err != nil { t.Fatal(err) } if err := container.Start(); err != nil { t.Fatal(err) } defer stdin.Close() defer stdout.Close() if _, err := io.WriteString(stdin, "hello world"); err != nil { t.Fatal(err) } if err := stdin.Close(); err != nil { t.Fatal(err) } container.Wait() output, err := ioutil.ReadAll(stdout) if err != nil { t.Fatal(err) } if string(output) != "hello world" { t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world", string(output)) } } func TestTty(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat"}, OpenStdin: true, }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container) stdin, err := container.StdinPipe() if err != nil { t.Fatal(err) } stdout, err := container.StdoutPipe() if err != nil { t.Fatal(err) } if err := container.Start(); err != nil { t.Fatal(err) } defer stdin.Close() defer stdout.Close() if _, err := io.WriteString(stdin, "hello world"); err != nil { t.Fatal(err) } if err := stdin.Close(); err != nil { t.Fatal(err) } container.Wait() output, err := ioutil.ReadAll(stdout) if err != nil { t.Fatal(err) } if string(output) != "hello world" { t.Fatalf("Unexpected output. 
Expected %s, received: %s", "hello world", string(output)) } } func TestEnv(t *testing.T) { os.Setenv("TRUE", "false") os.Setenv("TRICKY", "tri\ncky\n") runtime := mkRuntime(t) defer nuke(runtime) config, _, _, err := runconfig.Parse([]string{"-e=FALSE=true", "-e=TRUE", "-e=TRICKY", GetTestImage(runtime).ID, "env"}, nil) if err != nil { t.Fatal(err) } container, _, err := runtime.Create(config, "") if err != nil { t.Fatal(err) } defer runtime.Destroy(container) stdout, err := container.StdoutPipe() if err != nil { t.Fatal(err) } defer stdout.Close() if err := container.Start(); err != nil { t.Fatal(err) } container.Wait() output, err := ioutil.ReadAll(stdout) if err != nil { t.Fatal(err) } actualEnv := strings.Split(string(output), "\n") if actualEnv[len(actualEnv)-1] == "" { actualEnv = actualEnv[:len(actualEnv)-1] } sort.Strings(actualEnv) goodEnv := []string{ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "HOME=/", "HOSTNAME=" + utils.TruncateID(container.ID), "FALSE=true", "TRUE=false", "TRICKY=tri", "cky", "", } sort.Strings(goodEnv) if len(goodEnv) != len(actualEnv) { t.Fatalf("Wrong environment: should be %d variables, not: '%s'\n", len(goodEnv), strings.Join(actualEnv, ", ")) } for i := range goodEnv { if actualEnv[i] != goodEnv[i] { t.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) } } } func TestEntrypoint(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( &runconfig.Config{ Image: GetTestImage(runtime).ID, Entrypoint: []string{"/bin/echo"}, Cmd: []string{"-n", "foobar"}, }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container) output, err := container.Output() if err != nil { t.Fatal(err) } if string(output) != "foobar" { t.Error(string(output)) } } func TestEntrypointNoCmd(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( &runconfig.Config{ Image: GetTestImage(runtime).ID, Entrypoint: []string{"/bin/echo", "foobar"}, }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container) output, err := container.Output() if err != nil { t.Fatal(err) } if strings.Trim(string(output), "\r\n") != "foobar" { t.Error(string(output)) } } func BenchmarkRunSequencial(b *testing.B) { runtime := mkRuntime(b) defer nuke(runtime) for i := 0; i < b.N; i++ { container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foo"}, }, "", ) if err != nil { b.Fatal(err) } defer runtime.Destroy(container) output, err := container.Output() if err != nil { b.Fatal(err) } if string(output) != "foo" { b.Fatalf("Unexpected output: %s", output) } if err := runtime.Destroy(container); err != nil { b.Fatal(err) } } } func BenchmarkRunParallel(b *testing.B) { runtime := mkRuntime(b) defer nuke(runtime) var tasks []chan error for i := 0; i < b.N; i++ { complete := make(chan error) tasks = append(tasks, complete) go func(i int, complete chan error) { container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foo"}, }, "", ) if err != nil { complete <- err return } defer runtime.Destroy(container) if err := container.Start(); err != nil { complete <- err return } if err := container.WaitTimeout(15 * time.Second); err != nil { complete <- err return } // if string(output) != "foo" { // complete <- fmt.Errorf("Unexecpted output: %v", string(output)) // } if err := runtime.Destroy(container); err != nil { complete <- err return } 
complete <- nil }(i, complete) } var errors []error for _, task := range tasks { err := <-task if err != nil { errors = append(errors, err) } } if len(errors) > 0 { b.Fatal(errors) } } func tempDir(t *testing.T) string { tmpDir, err := ioutil.TempDir("", "docker-test-container") if err != nil { t.Fatal(err) } return tmpDir } // Test for #1737 func TestCopyVolumeUidGid(t *testing.T) { eng := NewTestEngine(t) r := mkRuntimeFromEngine(eng, t) defer r.Nuke() // Add directory not owned by root container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && touch /hello/test.txt && chown daemon.daemon /hello"}, t) defer r.Destroy(container1) if container1.State.IsRunning() { t.Errorf("Container shouldn't be running") } if err := container1.Run(); err != nil { t.Fatal(err) } if container1.State.IsRunning() { t.Errorf("Container shouldn't be running") } img, err := r.Commit(container1, "", "", "unit test commited image", "", nil) if err != nil { t.Error(err) } // Test that the uid and gid is copied from the image to the volume tmpDir1 := tempDir(t) defer os.RemoveAll(tmpDir1) stdout1, _ := runContainer(eng, r, []string{"-v", "/hello", img.ID, "stat", "-c", "%U %G", "/hello"}, t) if !strings.Contains(stdout1, "daemon daemon") { t.Fatal("Container failed to transfer uid and gid to volume") } } // Test for #1582 func TestCopyVolumeContent(t *testing.T) { eng := NewTestEngine(t) r := mkRuntimeFromEngine(eng, t) defer r.Nuke() // Put some content in a directory of a container and commit it container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello/local && echo hello > /hello/local/world"}, t) defer r.Destroy(container1) if container1.State.IsRunning() { t.Errorf("Container shouldn't be running") } if err := container1.Run(); err != nil { t.Fatal(err) } if container1.State.IsRunning() { t.Errorf("Container shouldn't be running") } img, err := r.Commit(container1, "", "", "unit test commited image", "", nil) if err != nil { t.Error(err) } // Test that the content is copied from the image to the volume tmpDir1 := tempDir(t) defer os.RemoveAll(tmpDir1) stdout1, _ := runContainer(eng, r, []string{"-v", "/hello", img.ID, "find", "/hello"}, t) if !(strings.Contains(stdout1, "/hello/local/world") && strings.Contains(stdout1, "/hello/local")) { t.Fatal("Container failed to transfer content to volume") } } func TestBindMounts(t *testing.T) { eng := NewTestEngine(t) r := mkRuntimeFromEngine(eng, t) defer r.Nuke() tmpDir := tempDir(t) defer os.RemoveAll(tmpDir) writeFile(path.Join(tmpDir, "touch-me"), "", t) // Test reading from a read-only bind mount stdout, _ := runContainer(eng, r, []string{"-v", fmt.Sprintf("%s:/tmp:ro", tmpDir), "_", "ls", "/tmp"}, t) if !strings.Contains(stdout, "touch-me") { t.Fatal("Container failed to read from bind mount") } // test writing to bind mount runContainer(eng, r, []string{"-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "_", "touch", "/tmp/holla"}, t) readFile(path.Join(tmpDir, "holla"), t) // Will fail if the file doesn't exist // test mounting to an illegal destination directory if _, err := runContainer(eng, r, []string{"-v", fmt.Sprintf("%s:.", tmpDir), "_", "ls", "."}, nil); err == nil { t.Fatal("Container bind mounted illegal directory") } // test mount a file runContainer(eng, r, []string{"-v", fmt.Sprintf("%s/holla:/tmp/holla:rw", tmpDir), "_", "sh", "-c", "echo -n 'yotta' > /tmp/holla"}, t) content := readFile(path.Join(tmpDir, "holla"), t) // Will fail if the file doesn't exist if content != "yotta" { t.Fatal("Container failed 
to write to bind mount file") } } // Test that -volumes-from supports both read-only mounts func TestFromVolumesInReadonlyMode(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/echo", "-n", "foobar"}, Volumes: map[string]struct{}{"/test": {}}, }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container) _, err = container.Output() if err != nil { t.Fatal(err) } if !container.VolumesRW["/test"] { t.Fail() } container2, _, err := runtime.Create( &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/echo", "-n", "foobar"}, VolumesFrom: container.ID + ":ro", }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container2) _, err = container2.Output() if err != nil { t.Fatal(err) } if container.Volumes["/test"] != container2.Volumes["/test"] { t.Logf("container volumes do not match: %s | %s ", container.Volumes["/test"], container2.Volumes["/test"]) t.Fail() } _, exists := container2.VolumesRW["/test"] if !exists { t.Logf("container2 is missing '/test' volume: %s", container2.VolumesRW) t.Fail() } if container2.VolumesRW["/test"] != false { t.Log("'/test' volume mounted in read-write mode, expected read-only") t.Fail() } } // Test that VolumesRW values are copied to the new container. Regression test for #1201 func TestVolumesFromReadonlyMount(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/echo", "-n", "foobar"}, Volumes: map[string]struct{}{"/test": {}}, }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container) _, err = container.Output() if err != nil { t.Fatal(err) } if !container.VolumesRW["/test"] { t.Fail() } container2, _, err := runtime.Create( &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/echo", "-n", "foobar"}, VolumesFrom: container.ID, }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container2) _, err = container2.Output() if err != nil { t.Fatal(err) } if container.Volumes["/test"] != container2.Volumes["/test"] { t.Fail() } actual, exists := container2.VolumesRW["/test"] if !exists { t.Fail() } if container.VolumesRW["/test"] != actual { t.Fail() } } // Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819. 
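// The test below records container.Volumes["/test"] after the first run and asserts that a second run of the same container reports the identical host path.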
func TestRestartWithVolumes(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"echo", "-n", "foobar"}, Volumes: map[string]struct{}{"/test": {}}, }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container) for key := range container.Config.Volumes { if key != "/test" { t.Fail() } } _, err = container.Output() if err != nil { t.Fatal(err) } expected := container.Volumes["/test"] if expected == "" { t.Fail() } // Run the container again to verify the volume path persists _, err = container.Output() if err != nil { t.Fatal(err) } actual := container.Volumes["/test"] if expected != actual { t.Fatalf("Expected volume path: %s Actual path: %s", expected, actual) } } // Test for #1351 func TestVolumesFromWithVolumes(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"}, Volumes: map[string]struct{}{"/test": {}}, }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container) for key := range container.Config.Volumes { if key != "/test" { t.Fail() } } _, err = container.Output() if err != nil { t.Fatal(err) } expected := container.Volumes["/test"] if expected == "" { t.Fail() } container2, _, err := runtime.Create( &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"cat", "/test/foo"}, VolumesFrom: container.ID, Volumes: map[string]struct{}{"/test": {}}, }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container2) output, err := container2.Output() if err != nil { t.Fatal(err) } if string(output) != "bar" { t.Fail() } if container.Volumes["/test"] != container2.Volumes["/test"] { t.Fail() } // Ensure it restarts successfully _, err = container2.Output() if err != nil { t.Fatal(err) } } func TestContainerNetwork(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( &runconfig.Config{ Image: GetTestImage(runtime).ID, // If I change this to ping 8.8.8.8 it fails. Any idea why? 
- timthelion Cmd: []string{"ping", "-c", "1", "127.0.0.1"}, }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container) if err := container.Run(); err != nil { t.Fatal(err) } if code := container.State.GetExitCode(); code != 0 { t.Fatalf("Unexpected ping 127.0.0.1 exit code %d (expected 0)", code) } } // Issue #4681 func TestLoopbackFunctionsWhenNetworkingIsDissabled(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"ping", "-c", "1", "127.0.0.1"}, NetworkDisabled: true, }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container) if err := container.Run(); err != nil { t.Fatal(err) } if code := container.State.GetExitCode(); code != 0 { t.Fatalf("Unexpected ping 127.0.0.1 exit code %d (expected 0)", code) } } func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) { eng := NewTestEngine(t) runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) config, hc, _, err := runconfig.Parse([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show"}, nil) if err != nil { t.Fatal(err) } jobCreate := eng.Job("create") if err := jobCreate.ImportEnv(config); err != nil { t.Fatal(err) } var id string jobCreate.Stdout.AddString(&id) if err := jobCreate.Run(); err != nil { t.Fatal(err) } // FIXME: this hack can be removed once Wait is a job c := runtime.Get(id) if c == nil { t.Fatalf("Couldn't retrieve container %s from runtime", id) } stdout, err := c.StdoutPipe() if err != nil { t.Fatal(err) } jobStart := eng.Job("start", id) if err := jobStart.ImportEnv(hc); err != nil { t.Fatal(err) } if err := jobStart.Run(); err != nil { t.Fatal(err) } c.WaitTimeout(500 * time.Millisecond) c.Wait() output, err := ioutil.ReadAll(stdout) if err != nil { t.Fatal(err) } interfaces := regexp.MustCompile(`(?m)^[0-9]+: [a-zA-Z0-9]+`).FindAllString(string(output), -1) if len(interfaces) != 1 { t.Fatalf("Wrong interface count in test container: expected [*: lo], got %s", interfaces) } if !strings.HasSuffix(interfaces[0], ": lo") { t.Fatalf("Wrong interface in test container: expected [*: lo], got %s", interfaces) } } func TestPrivilegedCanMknod(t *testing.T) { eng := NewTestEngine(t) runtime := mkRuntimeFromEngine(eng, t) defer runtime.Nuke() if output, err := runContainer(eng, runtime, []string{"-privileged", "_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" { t.Fatalf("Could not mknod into privileged container %s %v", output, err) } } func TestPrivilegedCanMount(t *testing.T) { eng := NewTestEngine(t) runtime := mkRuntimeFromEngine(eng, t) defer runtime.Nuke() if output, _ := runContainer(eng, runtime, []string{"-privileged", "_", "sh", "-c", "mount -t tmpfs none /tmp && echo ok"}, t); output != "ok\n" { t.Fatal("Could not mount into privileged container") } } func TestPrivilegedCannotMknod(t *testing.T) { eng := NewTestEngine(t) runtime := mkRuntimeFromEngine(eng, t) defer runtime.Nuke() if output, _ := runContainer(eng, runtime, []string{"_", "sh", "-c", "mknod /tmp/sda b 8 0 || echo ok"}, t); output != "ok\n" { t.Fatal("Could mknod into secure container") } } func TestPrivilegedCannotMount(t *testing.T) { eng := NewTestEngine(t) runtime := mkRuntimeFromEngine(eng, t) defer runtime.Nuke() if output, _ := runContainer(eng, runtime, []string{"_", "sh", "-c", "mount -t tmpfs none /tmp || echo ok"}, t); output != "ok\n" { t.Fatal("Could mount into secure container") } } func TestMultipleVolumesFrom(t *testing.T) { runtime := mkRuntime(t) 
defer nuke(runtime) container, _, err := runtime.Create(&runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"}, Volumes: map[string]struct{}{"/test": {}}, }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container) for key := range container.Config.Volumes { if key != "/test" { t.Fail() } } _, err = container.Output() if err != nil { t.Fatal(err) } expected := container.Volumes["/test"] if expected == "" { t.Fail() } container2, _, err := runtime.Create( &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sh", "-c", "echo -n bar > /other/foo"}, Volumes: map[string]struct{}{"/other": {}}, }, "", ) if err != nil { t.Fatal(err) } defer runtime.Destroy(container2) for key := range container2.Config.Volumes { if key != "/other" { t.FailNow() } } if _, err := container2.Output(); err != nil { t.Fatal(err) } container3, _, err := runtime.Create( &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"/bin/echo", "-n", "foobar"}, VolumesFrom: strings.Join([]string{container.ID, container2.ID}, ","), }, "") if err != nil { t.Fatal(err) } defer runtime.Destroy(container3) if _, err := container3.Output(); err != nil { t.Fatal(err) } if container3.Volumes["/test"] != container.Volumes["/test"] { t.Fail() } if container3.Volumes["/other"] != container2.Volumes["/other"] { t.Fail() } } func TestRestartGhost(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) container, _, err := runtime.Create( &runconfig.Config{ Image: GetTestImage(runtime).ID, Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"}, Volumes: map[string]struct{}{"/test": {}}, }, "", ) if err != nil { t.Fatal(err) } if err := container.Kill(); err != nil { t.Fatal(err) } container.State.SetGhost(true) _, err = container.Output() if err != nil { t.Fatal(err) } } docker-0.9.1/sysinit/0000755000175000017500000000000012314376205012605 5ustar tagtagdocker-0.9.1/sysinit/sysinit.go0000644000175000017500000000450412314376205014641 0ustar tagtagpackage sysinit import ( "encoding/json" "flag" "fmt" "github.com/dotcloud/docker/execdriver" _ "github.com/dotcloud/docker/execdriver/lxc" _ "github.com/dotcloud/docker/execdriver/native" "io/ioutil" "log" "os" "strings" ) // Clear environment pollution introduced by lxc-start func setupEnv(args *execdriver.InitArgs) { os.Clearenv() for _, kv := range args.Env { parts := strings.SplitN(kv, "=", 2) if len(parts) == 1 { parts = append(parts, "") } os.Setenv(parts[0], parts[1]) } } func executeProgram(args *execdriver.InitArgs) error { setupEnv(args) dockerInitFct, err := execdriver.GetInitFunc(args.Driver) if err != nil { panic(err) } return dockerInitFct(args) } // Sys Init code // This code is run INSIDE the container and is responsible for setting // up the environment before running the actual process func SysInit() { if len(os.Args) <= 1 { fmt.Println("You should not invoke dockerinit manually") os.Exit(1) } var ( // Get cmdline arguments user = flag.String("u", "", "username or uid") gateway = flag.String("g", "", "gateway address") ip = flag.String("i", "", "ip address") workDir = flag.String("w", "", "workdir") privileged = flag.Bool("privileged", false, "privileged mode") mtu = flag.Int("mtu", 1500, "interface mtu") driver = flag.String("driver", "", "exec driver") pipe = flag.Int("pipe", 0, "sync pipe fd") console = flag.String("console", "", "console (pty slave) path") root = flag.String("root", ".", "root path for configuration files") ) flag.Parse() // Get env var env []string content, err := 
ioutil.ReadFile(".dockerenv") if err != nil { log.Fatalf("Unable to load environment variables: %v", err) } if err := json.Unmarshal(content, &env); err != nil { log.Fatalf("Unable to unmarshal environment variables: %v", err) } // Propagate the plugin-specific container env variable env = append(env, "container="+os.Getenv("container")) args := &execdriver.InitArgs{ User: *user, Gateway: *gateway, Ip: *ip, WorkDir: *workDir, Privileged: *privileged, Env: env, Args: flag.Args(), Mtu: *mtu, Driver: *driver, Console: *console, Pipe: *pipe, Root: *root, } if err := executeProgram(args); err != nil { log.Fatal(err) } } docker-0.9.1/.travis.yml0000644000175000017500000000200212314376205013206 0ustar tagtag# Note: right now we don't use go-specific features of travis. # Later we might automate "go test" etc. (or do it inside a docker container...?) language: go go: 1.2 # Disable the normal go build. install: true before_script: - env | sort - sudo apt-get update -qq - sudo apt-get install -qq python-yaml - git remote add upstream git://github.com/dotcloud/docker.git - upstream=master; if [ "$TRAVIS_PULL_REQUEST" != false ]; then upstream=$TRAVIS_BRANCH; fi; git fetch --append --no-tags upstream refs/heads/$upstream:refs/remotes/upstream/$upstream # sometimes we have upstream master already as origin/master (PRs), but other times we don't, so let's just make sure we have a completely unambiguous way to specify "upstream master" from here out # but if it's a PR against non-master, we need that upstream branch instead :) - sudo pip install -r docs/requirements.txt script: - hack/travis/dco.py - hack/travis/gofmt.py - make -sC docs SPHINXOPTS=-q docs man # vim:set sw=2 ts=2: docker-0.9.1/runconfig/0000755000175000017500000000000012314376205013075 5ustar tagtagdocker-0.9.1/runconfig/config_test.go0000644000175000017500000001070112314376205015727 0ustar tagtagpackage runconfig import ( "github.com/dotcloud/docker/nat" "testing" ) func TestCompare(t *testing.T) { volumes1 := make(map[string]struct{}) volumes1["/test1"] = struct{}{} config1 := Config{ Dns: []string{"1.1.1.1", "2.2.2.2"}, PortSpecs: []string{"1111:1111", "2222:2222"}, Env: []string{"VAR1=1", "VAR2=2"}, VolumesFrom: "11111111", Volumes: volumes1, } config2 := Config{ Dns: []string{"0.0.0.0", "2.2.2.2"}, PortSpecs: []string{"1111:1111", "2222:2222"}, Env: []string{"VAR1=1", "VAR2=2"}, VolumesFrom: "11111111", Volumes: volumes1, } config3 := Config{ Dns: []string{"1.1.1.1", "2.2.2.2"}, PortSpecs: []string{"0000:0000", "2222:2222"}, Env: []string{"VAR1=1", "VAR2=2"}, VolumesFrom: "11111111", Volumes: volumes1, } config4 := Config{ Dns: []string{"1.1.1.1", "2.2.2.2"}, PortSpecs: []string{"0000:0000", "2222:2222"}, Env: []string{"VAR1=1", "VAR2=2"}, VolumesFrom: "22222222", Volumes: volumes1, } volumes2 := make(map[string]struct{}) volumes2["/test2"] = struct{}{} config5 := Config{ Dns: []string{"1.1.1.1", "2.2.2.2"}, PortSpecs: []string{"0000:0000", "2222:2222"}, Env: []string{"VAR1=1", "VAR2=2"}, VolumesFrom: "11111111", Volumes: volumes2, } if Compare(&config1, &config2) { t.Fatalf("Compare should return false, Dns are different") } if Compare(&config1, &config3) { t.Fatalf("Compare should return false, PortSpecs are different") } if Compare(&config1, &config4) { t.Fatalf("Compare should return false, VolumesFrom are different") } if Compare(&config1, &config5) { t.Fatalf("Compare should return false, Volumes are different") } if !Compare(&config1, &config1) { t.Fatalf("Compare should return true") } } func TestMerge(t *testing.T) { 
volumesImage := make(map[string]struct{}) volumesImage["/test1"] = struct{}{} volumesImage["/test2"] = struct{}{} configImage := &Config{ Dns: []string{"1.1.1.1", "2.2.2.2"}, PortSpecs: []string{"1111:1111", "2222:2222"}, Env: []string{"VAR1=1", "VAR2=2"}, VolumesFrom: "1111", Volumes: volumesImage, } volumesUser := make(map[string]struct{}) volumesUser["/test3"] = struct{}{} configUser := &Config{ Dns: []string{"3.3.3.3"}, PortSpecs: []string{"3333:2222", "3333:3333"}, Env: []string{"VAR2=3", "VAR3=3"}, Volumes: volumesUser, } if err := Merge(configUser, configImage); err != nil { t.Error(err) } if len(configUser.Dns) != 3 { t.Fatalf("Expected 3 dns, 1.1.1.1, 2.2.2.2 and 3.3.3.3, found %d", len(configUser.Dns)) } for _, dns := range configUser.Dns { if dns != "1.1.1.1" && dns != "2.2.2.2" && dns != "3.3.3.3" { t.Fatalf("Expected 1.1.1.1 or 2.2.2.2 or 3.3.3.3, found %s", dns) } } if len(configUser.ExposedPorts) != 3 { t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) } for portSpecs := range configUser.ExposedPorts { if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs) } } if len(configUser.Env) != 3 { t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env)) } for _, env := range configUser.Env { if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" { t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env) } } if len(configUser.Volumes) != 3 { t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes)) } for v := range configUser.Volumes { if v != "/test1" && v != "/test2" && v != "/test3" { t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v) } } if configUser.VolumesFrom != "1111" { t.Fatalf("Expected VolumesFrom to be 1111, found %s", configUser.VolumesFrom) } ports, _, err := nat.ParsePortSpecs([]string{"0000"}) if err != nil { t.Error(err) } configImage2 := &Config{ ExposedPorts: ports, } if err := Merge(configUser, configImage2); err != nil { t.Error(err) } if len(configUser.ExposedPorts) != 4 { t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) } for portSpecs := range configUser.ExposedPorts { if portSpecs.Port() != "0000" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { t.Fatalf("Expected 0000 or 1111 or 2222 or 3333, found %s", portSpecs) } } } docker-0.9.1/runconfig/compare.go0000644000175000017500000000275012314376205015056 0ustar tagtagpackage runconfig // Compare two Config struct. 
Do not compare the "Image" nor "Hostname" fields // If OpenStdin is set, then it differs func Compare(a, b *Config) bool { if a == nil || b == nil || a.OpenStdin || b.OpenStdin { return false } if a.AttachStdout != b.AttachStdout || a.AttachStderr != b.AttachStderr || a.User != b.User || a.Memory != b.Memory || a.MemorySwap != b.MemorySwap || a.CpuShares != b.CpuShares || a.OpenStdin != b.OpenStdin || a.Tty != b.Tty || a.VolumesFrom != b.VolumesFrom { return false } if len(a.Cmd) != len(b.Cmd) || len(a.Dns) != len(b.Dns) || len(a.Env) != len(b.Env) || len(a.PortSpecs) != len(b.PortSpecs) || len(a.ExposedPorts) != len(b.ExposedPorts) || len(a.Entrypoint) != len(b.Entrypoint) || len(a.Volumes) != len(b.Volumes) { return false } for i := 0; i < len(a.Cmd); i++ { if a.Cmd[i] != b.Cmd[i] { return false } } for i := 0; i < len(a.Dns); i++ { if a.Dns[i] != b.Dns[i] { return false } } for i := 0; i < len(a.Env); i++ { if a.Env[i] != b.Env[i] { return false } } for i := 0; i < len(a.PortSpecs); i++ { if a.PortSpecs[i] != b.PortSpecs[i] { return false } } for k := range a.ExposedPorts { if _, exists := b.ExposedPorts[k]; !exists { return false } } for i := 0; i < len(a.Entrypoint); i++ { if a.Entrypoint[i] != b.Entrypoint[i] { return false } } for key := range a.Volumes { if _, exists := b.Volumes[key]; !exists { return false } } return true } docker-0.9.1/runconfig/hostconfig.go0000644000175000017500000000161612314376205015573 0ustar tagtagpackage runconfig import ( "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/nat" ) type HostConfig struct { Binds []string ContainerIDFile string LxcConf []KeyValuePair Privileged bool PortBindings nat.PortMap Links []string PublishAllPorts bool } type KeyValuePair struct { Key string Value string } func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { hostConfig := &HostConfig{ ContainerIDFile: job.Getenv("ContainerIDFile"), Privileged: job.GetenvBool("Privileged"), PublishAllPorts: job.GetenvBool("PublishAllPorts"), } job.GetenvJson("LxcConf", &hostConfig.LxcConf) job.GetenvJson("PortBindings", &hostConfig.PortBindings) if Binds := job.GetenvList("Binds"); Binds != nil { hostConfig.Binds = Binds } if Links := job.GetenvList("Links"); Links != nil { hostConfig.Links = Links } return hostConfig } docker-0.9.1/runconfig/merge.go0000644000175000017500000000625612314376205014534 0ustar tagtagpackage runconfig import ( "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/utils" "strings" ) func Merge(userConf, imageConf *Config) error { if userConf.User == "" { userConf.User = imageConf.User } if userConf.Memory == 0 { userConf.Memory = imageConf.Memory } if userConf.MemorySwap == 0 { userConf.MemorySwap = imageConf.MemorySwap } if userConf.CpuShares == 0 { userConf.CpuShares = imageConf.CpuShares } if userConf.ExposedPorts == nil || len(userConf.ExposedPorts) == 0 { userConf.ExposedPorts = imageConf.ExposedPorts } else if imageConf.ExposedPorts != nil { if userConf.ExposedPorts == nil { userConf.ExposedPorts = make(nat.PortSet) } for port := range imageConf.ExposedPorts { if _, exists := userConf.ExposedPorts[port]; !exists { userConf.ExposedPorts[port] = struct{}{} } } } if userConf.PortSpecs != nil && len(userConf.PortSpecs) > 0 { if userConf.ExposedPorts == nil { userConf.ExposedPorts = make(nat.PortSet) } ports, _, err := nat.ParsePortSpecs(userConf.PortSpecs) if err != nil { return err } for port := range ports { if _, exists := userConf.ExposedPorts[port]; !exists { userConf.ExposedPorts[port] = struct{}{} } } userConf.PortSpecs 
= nil } if imageConf.PortSpecs != nil && len(imageConf.PortSpecs) > 0 { // FIXME: I think we can safely remove this. Leaving it for now for the sake of reverse-compat paranoia. utils.Debugf("Migrating image port specs to containter: %s", strings.Join(imageConf.PortSpecs, ", ")) if userConf.ExposedPorts == nil { userConf.ExposedPorts = make(nat.PortSet) } ports, _, err := nat.ParsePortSpecs(imageConf.PortSpecs) if err != nil { return err } for port := range ports { if _, exists := userConf.ExposedPorts[port]; !exists { userConf.ExposedPorts[port] = struct{}{} } } } if !userConf.Tty { userConf.Tty = imageConf.Tty } if !userConf.OpenStdin { userConf.OpenStdin = imageConf.OpenStdin } if !userConf.StdinOnce { userConf.StdinOnce = imageConf.StdinOnce } if userConf.Env == nil || len(userConf.Env) == 0 { userConf.Env = imageConf.Env } else { for _, imageEnv := range imageConf.Env { found := false imageEnvKey := strings.Split(imageEnv, "=")[0] for _, userEnv := range userConf.Env { userEnvKey := strings.Split(userEnv, "=")[0] if imageEnvKey == userEnvKey { found = true } } if !found { userConf.Env = append(userConf.Env, imageEnv) } } } if userConf.Cmd == nil || len(userConf.Cmd) == 0 { userConf.Cmd = imageConf.Cmd } if userConf.Dns == nil || len(userConf.Dns) == 0 { userConf.Dns = imageConf.Dns } else { //duplicates aren't an issue here userConf.Dns = append(userConf.Dns, imageConf.Dns...) } if userConf.Entrypoint == nil || len(userConf.Entrypoint) == 0 { userConf.Entrypoint = imageConf.Entrypoint } if userConf.WorkingDir == "" { userConf.WorkingDir = imageConf.WorkingDir } if userConf.VolumesFrom == "" { userConf.VolumesFrom = imageConf.VolumesFrom } if userConf.Volumes == nil || len(userConf.Volumes) == 0 { userConf.Volumes = imageConf.Volumes } else { for k, v := range imageConf.Volumes { userConf.Volumes[k] = v } } return nil } docker-0.9.1/runconfig/parse.go0000644000175000017500000002117612314376205014545 0ustar tagtagpackage runconfig import ( "fmt" "github.com/dotcloud/docker/nat" flag "github.com/dotcloud/docker/pkg/mflag" "github.com/dotcloud/docker/pkg/opts" "github.com/dotcloud/docker/pkg/sysinfo" "github.com/dotcloud/docker/utils" "io/ioutil" "path" "strings" ) var ( ErrInvalidWorikingDirectory = fmt.Errorf("The working directory is invalid. It needs to be an absolute path.") ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d") ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: -rm and -d") ) //FIXME Only used in tests func Parse(args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) { cmd := flag.NewFlagSet("run", flag.ContinueOnError) cmd.SetOutput(ioutil.Discard) cmd.Usage = nil return parseRun(cmd, args, sysInfo) } // FIXME: this maps the legacy commands.go code. It should be merged with Parse to only expose a single parse function. func ParseSubcommand(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) { return parseRun(cmd, args, sysInfo) } func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) { var ( // FIXME: use utils.ListOpts for attach and volumes? 
flAttach = opts.NewListOpts(opts.ValidateAttach) flVolumes = opts.NewListOpts(opts.ValidatePath) flLinks = opts.NewListOpts(opts.ValidateLink) flEnv = opts.NewListOpts(opts.ValidateEnv) flPublish opts.ListOpts flExpose opts.ListOpts flDns opts.ListOpts flVolumesFrom opts.ListOpts flLxcOpts opts.ListOpts flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)") flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: Run container in the background, print new container id") flNetwork = cmd.Bool([]string{"n", "-networking"}, true, "Enable networking for this container") flPrivileged = cmd.Bool([]string{"#privileged", "-privileged"}, false, "Give extended privileges to this container") flPublishAll = cmd.Bool([]string{"P", "-publish-all"}, false, "Publish all exposed ports to the host interfaces") flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep stdin open even if not attached") flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-tty") flContainerIDFile = cmd.String([]string{"#cidfile", "-cidfile"}, "", "Write the container ID to the file") flEntrypoint = cmd.String([]string{"#entrypoint", "-entrypoint"}, "", "Overwrite the default entrypoint of the image") flHostname = cmd.String([]string{"h", "-hostname"}, "", "Container host name") flMemoryString = cmd.String([]string{"m", "-memory"}, "", "Memory limit (format: , where unit = b, k, m or g)") flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID") flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") // For documentation purpose _ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)") _ = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container") ) cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to stdin, stdout or stderr.") cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)") cmd.Var(&flLinks, []string{"#link", "-link"}, "Add link to another container (name:alias)") cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables") cmd.Var(&flPublish, []string{"p", "-publish"}, fmt.Sprintf("Publish a container's port to the host (format: %s) (use 'docker port' to see the actual mapping)", nat.PortSpecTemplateFormat)) cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port from the container without publishing it to your host") cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom dns servers") cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)") cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "Add custom lxc options -lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"") if err := cmd.Parse(args); err != nil { return nil, nil, cmd, err } // Check if the kernel supports memory limit cgroup. 
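// If the memory cgroup is unavailable, the -m value is silently discarded (reset to "") rather than treated as an error, so the run proceeds without a memory limit.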
if sysInfo != nil && *flMemoryString != "" && !sysInfo.MemoryLimit { *flMemoryString = "" } // Validate input params if *flDetach && flAttach.Len() > 0 { return nil, nil, cmd, ErrConflictAttachDetach } if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) { return nil, nil, cmd, ErrInvalidWorikingDirectory } if *flDetach && *flAutoRemove { return nil, nil, cmd, ErrConflictDetachAutoRemove } // If neither -d or -a are set, attach to everything by default if flAttach.Len() == 0 && !*flDetach { if !*flDetach { flAttach.Set("stdout") flAttach.Set("stderr") if *flStdin { flAttach.Set("stdin") } } } var flMemory int64 if *flMemoryString != "" { parsedMemory, err := utils.RAMInBytes(*flMemoryString) if err != nil { return nil, nil, cmd, err } flMemory = parsedMemory } var binds []string // add any bind targets to the list of container volumes for bind := range flVolumes.GetMap() { if arr := strings.Split(bind, ":"); len(arr) > 1 { if arr[0] == "/" { return nil, nil, cmd, fmt.Errorf("Invalid bind mount: source can't be '/'") } dstDir := arr[1] flVolumes.Set(dstDir) binds = append(binds, bind) flVolumes.Delete(bind) } else if bind == "/" { return nil, nil, cmd, fmt.Errorf("Invalid volume: path can't be '/'") } } var ( parsedArgs = cmd.Args() runCmd []string entrypoint []string image string ) if len(parsedArgs) >= 1 { image = cmd.Arg(0) } if len(parsedArgs) > 1 { runCmd = parsedArgs[1:] } if *flEntrypoint != "" { entrypoint = []string{*flEntrypoint} } lxcConf, err := parseLxcConfOpts(flLxcOpts) if err != nil { return nil, nil, cmd, err } var ( domainname string hostname = *flHostname parts = strings.SplitN(hostname, ".", 2) ) if len(parts) > 1 { hostname = parts[0] domainname = parts[1] } ports, portBindings, err := nat.ParsePortSpecs(flPublish.GetAll()) if err != nil { return nil, nil, cmd, err } // Merge in exposed ports to the map of published ports for _, e := range flExpose.GetAll() { if strings.Contains(e, ":") { return nil, nil, cmd, fmt.Errorf("Invalid port format for --expose: %s", e) } p := nat.NewPort(nat.SplitProtoPort(e)) if _, exists := ports[p]; !exists { ports[p] = struct{}{} } } config := &Config{ Hostname: hostname, Domainname: domainname, PortSpecs: nil, // Deprecated ExposedPorts: ports, User: *flUser, Tty: *flTty, NetworkDisabled: !*flNetwork, OpenStdin: *flStdin, Memory: flMemory, CpuShares: *flCpuShares, AttachStdin: flAttach.Get("stdin"), AttachStdout: flAttach.Get("stdout"), AttachStderr: flAttach.Get("stderr"), Env: flEnv.GetAll(), Cmd: runCmd, Dns: flDns.GetAll(), Image: image, Volumes: flVolumes.GetMap(), VolumesFrom: strings.Join(flVolumesFrom.GetAll(), ","), Entrypoint: entrypoint, WorkingDir: *flWorkingDir, } hostConfig := &HostConfig{ Binds: binds, ContainerIDFile: *flContainerIDFile, LxcConf: lxcConf, Privileged: *flPrivileged, PortBindings: portBindings, Links: flLinks.GetAll(), PublishAllPorts: *flPublishAll, } if sysInfo != nil && flMemory > 0 && !sysInfo.SwapLimit { //fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. 
Limitation discarded.\n") config.MemorySwap = -1 } // When allocating stdin in attached mode, close stdin at client disconnect if config.OpenStdin && config.AttachStdin { config.StdinOnce = true } return config, hostConfig, cmd, nil } func parseLxcConfOpts(opts opts.ListOpts) ([]KeyValuePair, error) { out := make([]KeyValuePair, opts.Len()) for i, o := range opts.GetAll() { k, v, err := parseLxcOpt(o) if err != nil { return nil, err } out[i] = KeyValuePair{Key: k, Value: v} } return out, nil } func parseLxcOpt(opt string) (string, string, error) { parts := strings.SplitN(opt, "=", 2) if len(parts) != 2 { return "", "", fmt.Errorf("Unable to parse lxc conf option: %s", opt) } return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil } docker-0.9.1/runconfig/parse_test.go0000644000175000017500000000050612314376205015576 0ustar tagtagpackage runconfig import ( "testing" ) func TestParseLxcConfOpt(t *testing.T) { opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "} for _, o := range opts { k, v, err := parseLxcOpt(o) if err != nil { t.FailNow() } if k != "lxc.utsname" { t.Fail() } if v != "docker" { t.Fail() } } } docker-0.9.1/runconfig/config.go0000644000175000017500000000524112314376205014673 0ustar tagtagpackage runconfig import ( "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/nat" ) // Note: the Config structure should hold only portable information about the container. // Here, "portable" means "independent from the host we are running on". // Non-portable information *should* appear in HostConfig. type Config struct { Hostname string Domainname string User string Memory int64 // Memory limit (in bytes) MemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap CpuShares int64 // CPU shares (relative weight vs. other containers) AttachStdin bool AttachStdout bool AttachStderr bool PortSpecs []string // Deprecated - Can be in the format of 8080/tcp ExposedPorts map[nat.Port]struct{} Tty bool // Attach standard streams to a tty, including stdin if it is not closed. OpenStdin bool // Open stdin StdinOnce bool // If true, close stdin after the 1 attached client disconnects. Env []string Cmd []string Dns []string Image string // Name of the image as it was passed by the operator (eg. 
could be symbolic) Volumes map[string]struct{} VolumesFrom string WorkingDir string Entrypoint []string NetworkDisabled bool OnBuild []string } func ContainerConfigFromJob(job *engine.Job) *Config { config := &Config{ Hostname: job.Getenv("Hostname"), Domainname: job.Getenv("Domainname"), User: job.Getenv("User"), Memory: job.GetenvInt64("Memory"), MemorySwap: job.GetenvInt64("MemorySwap"), CpuShares: job.GetenvInt64("CpuShares"), AttachStdin: job.GetenvBool("AttachStdin"), AttachStdout: job.GetenvBool("AttachStdout"), AttachStderr: job.GetenvBool("AttachStderr"), Tty: job.GetenvBool("Tty"), OpenStdin: job.GetenvBool("OpenStdin"), StdinOnce: job.GetenvBool("StdinOnce"), Image: job.Getenv("Image"), VolumesFrom: job.Getenv("VolumesFrom"), WorkingDir: job.Getenv("WorkingDir"), NetworkDisabled: job.GetenvBool("NetworkDisabled"), } job.GetenvJson("ExposedPorts", &config.ExposedPorts) job.GetenvJson("Volumes", &config.Volumes) if PortSpecs := job.GetenvList("PortSpecs"); PortSpecs != nil { config.PortSpecs = PortSpecs } if Env := job.GetenvList("Env"); Env != nil { config.Env = Env } if Cmd := job.GetenvList("Cmd"); Cmd != nil { config.Cmd = Cmd } if Dns := job.GetenvList("Dns"); Dns != nil { config.Dns = Dns } if Entrypoint := job.GetenvList("Entrypoint"); Entrypoint != nil { config.Entrypoint = Entrypoint } return config } docker-0.9.1/pkg/0000755000175000017500000000000012314376205011664 5ustar tagtagdocker-0.9.1/pkg/listenbuffer/0000755000175000017500000000000012314376205014354 5ustar tagtagdocker-0.9.1/pkg/listenbuffer/buffer.go0000644000175000017500000000215312314376205016155 0ustar tagtag/* Package to allow go applications to immediately start listening on a socket, unix, tcp, udp but hold connections until the application has booted and is ready to accept them */ package listenbuffer import "net" // NewListenBuffer returns a listener listening on addr with the protocol. func NewListenBuffer(proto, addr string, activate chan struct{}) (net.Listener, error) { wrapped, err := net.Listen(proto, addr) if err != nil { return nil, err } return &defaultListener{ wrapped: wrapped, activate: activate, }, nil } type defaultListener struct { wrapped net.Listener // the real listener to wrap ready bool // is the listner ready to start accpeting connections activate chan struct{} } func (l *defaultListener) Close() error { return l.wrapped.Close() } func (l *defaultListener) Addr() net.Addr { return l.wrapped.Addr() } func (l *defaultListener) Accept() (net.Conn, error) { // if the listen has been told it is ready then we can go ahead and // start returning connections if l.ready { return l.wrapped.Accept() } <-l.activate l.ready = true return l.Accept() } docker-0.9.1/pkg/opts/0000755000175000017500000000000012314376205012651 5ustar tagtagdocker-0.9.1/pkg/opts/opts.go0000644000175000017500000000640012314376205014165 0ustar tagtagpackage opts import ( "fmt" "github.com/dotcloud/docker/utils" "os" "path/filepath" "regexp" "strings" ) // ListOpts type type ListOpts struct { values []string validator ValidatorFctType } func NewListOpts(validator ValidatorFctType) ListOpts { return ListOpts{ validator: validator, } } func (opts *ListOpts) String() string { return fmt.Sprintf("%v", []string(opts.values)) } // Set validates if needed the input value and add it to the // internal slice. 
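// The validator may also normalize the value before it is stored: for example, ValidateEnv below expands a bare KEY into KEY=VALUE from the caller's environment, and ValidatePath cleans the container-side path.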
func (opts *ListOpts) Set(value string) error { if opts.validator != nil { v, err := opts.validator(value) if err != nil { return err } value = v } opts.values = append(opts.values, value) return nil } // Delete remove the given element from the slice. func (opts *ListOpts) Delete(key string) { for i, k := range opts.values { if k == key { opts.values = append(opts.values[:i], opts.values[i+1:]...) return } } } // GetMap returns the content of values in a map in order to avoid // duplicates. // FIXME: can we remove this? func (opts *ListOpts) GetMap() map[string]struct{} { ret := make(map[string]struct{}) for _, k := range opts.values { ret[k] = struct{}{} } return ret } // GetAll returns the values' slice. // FIXME: Can we remove this? func (opts *ListOpts) GetAll() []string { return opts.values } // Get checks the existence of the given key. func (opts *ListOpts) Get(key string) bool { for _, k := range opts.values { if k == key { return true } } return false } // Len returns the amount of element in the slice. func (opts *ListOpts) Len() int { return len(opts.values) } // Validators type ValidatorFctType func(val string) (string, error) func ValidateAttach(val string) (string, error) { if val != "stdin" && val != "stdout" && val != "stderr" { return val, fmt.Errorf("Unsupported stream name: %s", val) } return val, nil } func ValidateLink(val string) (string, error) { if _, err := parseLink(val); err != nil { return val, err } return val, nil } // FIXME: this is a duplicate of docker.utils.parseLink. // it can't be moved to a separate links/ package because // links depends on Container which is defined in the core. // // Links come in the format of // name:alias func parseLink(rawLink string) (map[string]string, error) { return utils.PartParser("name:alias", rawLink) } func ValidatePath(val string) (string, error) { var containerPath string if strings.Count(val, ":") > 2 { return val, fmt.Errorf("bad format for volumes: %s", val) } splited := strings.SplitN(val, ":", 2) if len(splited) == 1 { containerPath = splited[0] val = filepath.Clean(splited[0]) } else { containerPath = splited[1] val = fmt.Sprintf("%s:%s", splited[0], filepath.Clean(splited[1])) } if !filepath.IsAbs(containerPath) { return val, fmt.Errorf("%s is not an absolute path", containerPath) } return val, nil } func ValidateEnv(val string) (string, error) { arr := strings.Split(val, "=") if len(arr) > 1 { return val, nil } return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil } func ValidateIp4Address(val string) (string, error) { re := regexp.MustCompile(`^(([0-9]+\.){3}([0-9]+))\s*$`) var ns = re.FindSubmatch([]byte(val)) if len(ns) > 0 { return string(ns[1]), nil } return "", fmt.Errorf("%s is not an ip4 address", val) } docker-0.9.1/pkg/opts/opts_test.go0000644000175000017500000000123012314376205015220 0ustar tagtagpackage opts import ( "testing" ) func TestValidateIP4(t *testing.T) { if ret, err := ValidateIp4Address(`1.2.3.4`); err != nil || ret == "" { t.Fatalf("ValidateIp4Address(`1.2.3.4`) got %s %s", ret, err) } if ret, err := ValidateIp4Address(`127.0.0.1`); err != nil || ret == "" { t.Fatalf("ValidateIp4Address(`127.0.0.1`) got %s %s", ret, err) } if ret, err := ValidateIp4Address(`127`); err == nil || ret != "" { t.Fatalf("ValidateIp4Address(`127`) got %s %s", ret, err) } if ret, err := ValidateIp4Address(`random invalid string`); err == nil || ret != "" { t.Fatalf("ValidateIp4Address(`random invalid string`) got %s %s", ret, err) } } 
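// Illustrative sketch of typical ListOpts usage with a validator, mirroring runconfig/parse.go (kept as a comment here since mflag is not imported in this test file): // flEnv := NewListOpts(ValidateEnv) // cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables") // ... // env := flEnv.GetAll()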
docker-0.9.1/pkg/libcontainer/0000755000175000017500000000000012314376205014335 5ustar tagtagdocker-0.9.1/pkg/libcontainer/utils/0000755000175000017500000000000012314376205015475 5ustar tagtagdocker-0.9.1/pkg/libcontainer/utils/utils.go0000644000175000017500000000135012314376205017163 0ustar tagtagpackage utils import ( "crypto/rand" "encoding/hex" "io" "path/filepath" ) // GenerateRandomName returns a new name joined with a prefix. This size // specified is used to truncate the randomly generated value func GenerateRandomName(prefix string, size int) (string, error) { id := make([]byte, 32) if _, err := io.ReadFull(rand.Reader, id); err != nil { return "", err } return prefix + hex.EncodeToString(id)[:size], nil } // ResolveRootfs ensures that the current working directory is // not a symlink and returns the absolute path to the rootfs func ResolveRootfs(uncleanRootfs string) (string, error) { rootfs, err := filepath.Abs(uncleanRootfs) if err != nil { return "", err } return filepath.EvalSymlinks(rootfs) } docker-0.9.1/pkg/libcontainer/types_linux.go0000644000175000017500000000071412314376205017251 0ustar tagtagpackage libcontainer import ( "syscall" ) func init() { namespaceList = Namespaces{ {Key: "NEWNS", Value: syscall.CLONE_NEWNS, File: "mnt"}, {Key: "NEWUTS", Value: syscall.CLONE_NEWUTS, File: "uts"}, {Key: "NEWIPC", Value: syscall.CLONE_NEWIPC, File: "ipc"}, {Key: "NEWUSER", Value: syscall.CLONE_NEWUSER, File: "user"}, {Key: "NEWPID", Value: syscall.CLONE_NEWPID, File: "pid"}, {Key: "NEWNET", Value: syscall.CLONE_NEWNET, File: "net"}, } } docker-0.9.1/pkg/libcontainer/nsinit/0000755000175000017500000000000012314376205015641 5ustar tagtagdocker-0.9.1/pkg/libcontainer/nsinit/init.go0000644000175000017500000001066112314376205017137 0ustar tagtag// +build linux package nsinit import ( "fmt" "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/libcontainer/apparmor" "github.com/dotcloud/docker/pkg/libcontainer/capabilities" "github.com/dotcloud/docker/pkg/libcontainer/network" "github.com/dotcloud/docker/pkg/libcontainer/utils" "github.com/dotcloud/docker/pkg/system" "github.com/dotcloud/docker/pkg/user" "os" "syscall" ) // Init is the init process that first runs inside a new namespace to setup mounts, users, networking, // and other options required for the new container. 
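// The sequence is: sync with the parent over the pipe to receive the network context, attach the console (if any) and start a new session, arrange for SIGTERM on parent death, set up the mount namespace and networking, set the hostname, drop capabilities and switch to the requested user and working directory, apply the AppArmor profile, and finally exec the command.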
func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, console string, syncPipe *SyncPipe, args []string) error { rootfs, err := utils.ResolveRootfs(uncleanRootfs) if err != nil { return err } // We always read this as it is a way to sync with the parent as well context, err := syncPipe.ReadFromParent() if err != nil { syncPipe.Close() return err } syncPipe.Close() if console != "" { slave, err := system.OpenTerminal(console, syscall.O_RDWR) if err != nil { return fmt.Errorf("open terminal %s", err) } if err := dupSlave(slave); err != nil { return fmt.Errorf("dup2 slave %s", err) } } if _, err := system.Setsid(); err != nil { return fmt.Errorf("setsid %s", err) } if console != "" { if err := system.Setctty(); err != nil { return fmt.Errorf("setctty %s", err) } } // this is our best effort to let the process know that the parent has died and that it // should it should act on it how it sees fit if err := system.ParentDeathSignal(uintptr(syscall.SIGTERM)); err != nil { return fmt.Errorf("parent death signal %s", err) } if err := setupNewMountNamespace(rootfs, console, container.ReadonlyFs, container.NoPivotRoot); err != nil { return fmt.Errorf("setup mount namespace %s", err) } if err := setupNetwork(container, context); err != nil { return fmt.Errorf("setup networking %s", err) } if err := system.Sethostname(container.Hostname); err != nil { return fmt.Errorf("sethostname %s", err) } if err := finalizeNamespace(container); err != nil { return fmt.Errorf("finalize namespace %s", err) } if err := apparmor.ApplyProfile(os.Getpid(), container.Context["apparmor_profile"]); err != nil { return err } return system.Execv(args[0], args[0:], container.Env) } func setupUser(container *libcontainer.Container) error { switch container.User { case "root", "": if err := system.Setgroups(nil); err != nil { return err } if err := system.Setresgid(0, 0, 0); err != nil { return err } if err := system.Setresuid(0, 0, 0); err != nil { return err } default: uid, gid, suppGids, err := user.GetUserGroupSupplementary(container.User, syscall.Getuid(), syscall.Getgid()) if err != nil { return err } if err := system.Setgroups(suppGids); err != nil { return err } if err := system.Setgid(gid); err != nil { return err } if err := system.Setuid(uid); err != nil { return err } } return nil } // dupSlave dup2 the pty slave's fd into stdout and stdin and ensures that // the slave's fd is 0, or stdin func dupSlave(slave *os.File) error { if err := system.Dup2(slave.Fd(), 0); err != nil { return err } if err := system.Dup2(slave.Fd(), 1); err != nil { return err } if err := system.Dup2(slave.Fd(), 2); err != nil { return err } return nil } // setupVethNetwork uses the Network config if it is not nil to initialize // the new veth interface inside the container for use by changing the name to eth0 // setting the MTU and IP address along with the default gateway func setupNetwork(container *libcontainer.Container, context libcontainer.Context) error { for _, config := range container.Networks { strategy, err := network.GetStrategy(config.Type) if err != nil { return err } err1 := strategy.Initialize(config, context) if err1 != nil { return err1 } } return nil } // finalizeNamespace drops the caps and sets the correct user // and working dir before execing the command inside the namespace func finalizeNamespace(container *libcontainer.Container) error { if err := capabilities.DropCapabilities(container); err != nil { return fmt.Errorf("drop capabilities %s", err) } if err := setupUser(container); err != nil { 
return fmt.Errorf("setup user %s", err) } if container.WorkingDir != "" { if err := system.Chdir(container.WorkingDir); err != nil { return fmt.Errorf("chdir to %s %s", container.WorkingDir, err) } } return nil } docker-0.9.1/pkg/libcontainer/nsinit/exec.go0000644000175000017500000000456312314376205017124 0ustar tagtag// +build linux package nsinit import ( "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/libcontainer/network" "github.com/dotcloud/docker/pkg/system" "os" "os/exec" "syscall" ) // Exec performes setup outside of a namespace so that a container can be // executed. Exec is a high level function for working with container namespaces. func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args []string) (int, error) { var ( master *os.File console string err error ) // create a pipe so that we can syncronize with the namespaced process and // pass the veth name to the child syncPipe, err := NewSyncPipe() if err != nil { return -1, err } if container.Tty { master, console, err = system.CreateMasterAndConsole() if err != nil { return -1, err } term.SetMaster(master) } command := ns.commandFactory.Create(container, console, syncPipe.child, args) if err := term.Attach(command); err != nil { return -1, err } defer term.Close() if err := command.Start(); err != nil { return -1, err } if err := ns.stateWriter.WritePid(command.Process.Pid); err != nil { command.Process.Kill() return -1, err } defer ns.stateWriter.DeletePid() // Do this before syncing with child so that no children // can escape the cgroup if err := ns.SetupCgroups(container, command.Process.Pid); err != nil { command.Process.Kill() return -1, err } if err := ns.InitializeNetworking(container, command.Process.Pid, syncPipe); err != nil { command.Process.Kill() return -1, err } // Sync with child syncPipe.Close() if err := command.Wait(); err != nil { if _, ok := err.(*exec.ExitError); !ok { return -1, err } } return command.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil } func (ns *linuxNs) SetupCgroups(container *libcontainer.Container, nspid int) error { if container.Cgroups != nil { if err := container.Cgroups.Apply(nspid); err != nil { return err } } return nil } func (ns *linuxNs) InitializeNetworking(container *libcontainer.Container, nspid int, pipe *SyncPipe) error { context := libcontainer.Context{} for _, config := range container.Networks { strategy, err := network.GetStrategy(config.Type) if err != nil { return err } if err := strategy.Create(config, nspid, context); err != nil { return err } } return pipe.SendToChild(context) } docker-0.9.1/pkg/libcontainer/nsinit/nsinit/0000755000175000017500000000000012314376205017145 5ustar tagtagdocker-0.9.1/pkg/libcontainer/nsinit/nsinit/main.go0000644000175000017500000000462512314376205020427 0ustar tagtagpackage main import ( "encoding/json" "flag" "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/libcontainer/nsinit" "io/ioutil" "log" "os" "path/filepath" "strconv" ) var ( root, console string pipeFd int ) func registerFlags() { flag.StringVar(&console, "console", "", "console (pty slave) path") flag.IntVar(&pipeFd, "pipe", 0, "sync pipe fd") flag.StringVar(&root, "root", ".", "root for storing configuration data") flag.Parse() } func main() { registerFlags() if flag.NArg() < 1 { log.Fatalf("wrong number of argments %d", flag.NArg()) } container, err := loadContainer() if err != nil { log.Fatal(err) } ns, err := newNsInit() if err != nil { log.Fatal(err) } switch flag.Arg(0) { case 
"exec": // this is executed outside of the namespace in the cwd var exitCode int nspid, err := readPid() if err != nil { if !os.IsNotExist(err) { log.Fatal(err) } } if nspid > 0 { exitCode, err = ns.ExecIn(container, nspid, flag.Args()[1:]) } else { term := nsinit.NewTerminal(os.Stdin, os.Stdout, os.Stderr, container.Tty) exitCode, err = ns.Exec(container, term, flag.Args()[1:]) } if err != nil { log.Fatal(err) } os.Exit(exitCode) case "init": // this is executed inside of the namespace to setup the container cwd, err := os.Getwd() if err != nil { log.Fatal(err) } if flag.NArg() < 2 { log.Fatalf("wrong number of argments %d", flag.NArg()) } syncPipe, err := nsinit.NewSyncPipeFromFd(0, uintptr(pipeFd)) if err != nil { log.Fatal(err) } if err := ns.Init(container, cwd, console, syncPipe, flag.Args()[1:]); err != nil { log.Fatal(err) } default: log.Fatalf("command not supported for nsinit %s", flag.Arg(0)) } } func loadContainer() (*libcontainer.Container, error) { f, err := os.Open(filepath.Join(root, "container.json")) if err != nil { return nil, err } defer f.Close() var container *libcontainer.Container if err := json.NewDecoder(f).Decode(&container); err != nil { return nil, err } return container, nil } func readPid() (int, error) { data, err := ioutil.ReadFile(filepath.Join(root, "pid")) if err != nil { return -1, err } pid, err := strconv.Atoi(string(data)) if err != nil { return -1, err } return pid, nil } func newNsInit() (nsinit.NsInit, error) { return nsinit.NewNsInit(&nsinit.DefaultCommandFactory{root}, &nsinit.DefaultStateWriter{root}), nil } docker-0.9.1/pkg/libcontainer/nsinit/state.go0000644000175000017500000000112612314376205017310 0ustar tagtagpackage nsinit import ( "fmt" "io/ioutil" "os" "path/filepath" ) // StateWriter handles writing and deleting the pid file // on disk type StateWriter interface { WritePid(pid int) error DeletePid() error } type DefaultStateWriter struct { Root string } // writePidFile writes the namespaced processes pid to pid in the rootfs for the container func (d *DefaultStateWriter) WritePid(pid int) error { return ioutil.WriteFile(filepath.Join(d.Root, "pid"), []byte(fmt.Sprint(pid)), 0655) } func (d *DefaultStateWriter) DeletePid() error { return os.Remove(filepath.Join(d.Root, "pid")) } docker-0.9.1/pkg/libcontainer/nsinit/execin.go0000644000175000017500000000460012314376205017443 0ustar tagtag// +build linux package nsinit import ( "fmt" "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/system" "os" "path/filepath" "strconv" "syscall" ) // ExecIn uses an existing pid and joins the pid's namespaces with the new command. 
func (ns *linuxNs) ExecIn(container *libcontainer.Container, nspid int, args []string) (int, error) { for _, nsv := range container.Namespaces { // skip the PID namespace on unshare because it it not supported if nsv.Key != "NEWPID" { if err := system.Unshare(nsv.Value); err != nil { return -1, err } } } fds, err := ns.getNsFds(nspid, container) closeFds := func() { for _, f := range fds { system.Closefd(f) } } if err != nil { closeFds() return -1, err } // foreach namespace fd, use setns to join an existing container's namespaces for _, fd := range fds { if fd > 0 { if err := system.Setns(fd, 0); err != nil { closeFds() return -1, fmt.Errorf("setns %s", err) } } system.Closefd(fd) } // if the container has a new pid and mount namespace we need to // remount proc and sys to pick up the changes if container.Namespaces.Contains("NEWNS") && container.Namespaces.Contains("NEWPID") { pid, err := system.Fork() if err != nil { return -1, err } if pid == 0 { // TODO: make all raw syscalls to be fork safe if err := system.Unshare(syscall.CLONE_NEWNS); err != nil { return -1, err } if err := remountProc(); err != nil { return -1, fmt.Errorf("remount proc %s", err) } if err := remountSys(); err != nil { return -1, fmt.Errorf("remount sys %s", err) } goto dropAndExec } proc, err := os.FindProcess(pid) if err != nil { return -1, err } state, err := proc.Wait() if err != nil { return -1, err } os.Exit(state.Sys().(syscall.WaitStatus).ExitStatus()) } dropAndExec: if err := finalizeNamespace(container); err != nil { return -1, err } if err := system.Execv(args[0], args[0:], container.Env); err != nil { return -1, err } panic("unreachable") } func (ns *linuxNs) getNsFds(pid int, container *libcontainer.Container) ([]uintptr, error) { fds := make([]uintptr, len(container.Namespaces)) for i, ns := range container.Namespaces { f, err := os.OpenFile(filepath.Join("/proc/", strconv.Itoa(pid), "ns", ns.File), os.O_RDONLY, 0) if err != nil { return fds, err } fds[i] = f.Fd() } return fds, nil } docker-0.9.1/pkg/libcontainer/nsinit/command.go0000644000175000017500000000267112314376205017614 0ustar tagtagpackage nsinit import ( "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/system" "os" "os/exec" ) // CommandFactory takes the container's configuration and options passed by the // parent processes and creates an *exec.Cmd that will be used to fork/exec the // namespaced init process type CommandFactory interface { Create(container *libcontainer.Container, console string, syncFd *os.File, args []string) *exec.Cmd } type DefaultCommandFactory struct { Root string } // Create will return an exec.Cmd with the Cloneflags set to the proper namespaces // defined on the container's configuration and use the current binary as the init with the // args provided func (c *DefaultCommandFactory) Create(container *libcontainer.Container, console string, pipe *os.File, args []string) *exec.Cmd { // get our binary name from arg0 so we can always reexec ourself command := exec.Command(os.Args[0], append([]string{ "-console", console, "-pipe", "3", "-root", c.Root, "init"}, args...)...) 
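// the sync pipe handed over via ExtraFiles below shows up in the child as fd 3
// (ExtraFiles are numbered after stdin, stdout and stderr), which is why the
// -pipe flag above is hard coded to 3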
system.SetCloneFlags(command, uintptr(GetNamespaceFlags(container.Namespaces))) command.Env = container.Env command.ExtraFiles = []*os.File{pipe} return command } // GetNamespaceFlags parses the container's Namespaces options to set the correct // flags on clone, unshare, and setns func GetNamespaceFlags(namespaces libcontainer.Namespaces) (flag int) { for _, ns := range namespaces { flag |= ns.Value } return flag } docker-0.9.1/pkg/libcontainer/nsinit/term.go0000644000175000017500000000434512314376205017145 0ustar tagtagpackage nsinit import ( "github.com/dotcloud/docker/pkg/term" "io" "os" "os/exec" ) type Terminal interface { io.Closer SetMaster(*os.File) Attach(*exec.Cmd) error Resize(h, w int) error } func NewTerminal(stdin io.Reader, stdout, stderr io.Writer, tty bool) Terminal { if tty { return &TtyTerminal{ stdin: stdin, stdout: stdout, stderr: stderr, } } return &StdTerminal{ stdin: stdin, stdout: stdout, stderr: stderr, } } type TtyTerminal struct { stdin io.Reader stdout, stderr io.Writer master *os.File state *term.State } func (t *TtyTerminal) Resize(h, w int) error { return term.SetWinsize(t.master.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)}) } func (t *TtyTerminal) SetMaster(master *os.File) { t.master = master } func (t *TtyTerminal) Attach(command *exec.Cmd) error { go io.Copy(t.stdout, t.master) go io.Copy(t.master, t.stdin) state, err := t.setupWindow(t.master, os.Stdin) if err != nil { command.Process.Kill() return err } t.state = state return err } // SetupWindow gets the parent window size and sets the master // pty to the current size and set the parents mode to RAW func (t *TtyTerminal) setupWindow(master, parent *os.File) (*term.State, error) { ws, err := term.GetWinsize(parent.Fd()) if err != nil { return nil, err } if err := term.SetWinsize(master.Fd(), ws); err != nil { return nil, err } return term.SetRawTerminal(parent.Fd()) } func (t *TtyTerminal) Close() error { term.RestoreTerminal(os.Stdin.Fd(), t.state) return t.master.Close() } type StdTerminal struct { stdin io.Reader stdout, stderr io.Writer } func (s *StdTerminal) SetMaster(*os.File) { // no need to set master on non tty } func (s *StdTerminal) Close() error { return nil } func (s *StdTerminal) Resize(h, w int) error { return nil } func (s *StdTerminal) Attach(command *exec.Cmd) error { inPipe, err := command.StdinPipe() if err != nil { return err } outPipe, err := command.StdoutPipe() if err != nil { return err } errPipe, err := command.StderrPipe() if err != nil { return err } go func() { defer inPipe.Close() io.Copy(inPipe, s.stdin) }() go io.Copy(s.stdout, outPipe) go io.Copy(s.stderr, errPipe) return nil } docker-0.9.1/pkg/libcontainer/nsinit/unsupported.go0000644000175000017500000000105412314376205020560 0ustar tagtag// +build !linux package nsinit import ( "github.com/dotcloud/docker/pkg/libcontainer" ) func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args []string) (int, error) { return -1, libcontainer.ErrUnsupported } func (ns *linuxNs) ExecIn(container *libcontainer.Container, nspid int, args []string) (int, error) { return -1, libcontainer.ErrUnsupported } func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, console string, syncPipe *SyncPipe, args []string) error { return libcontainer.ErrUnsupported } docker-0.9.1/pkg/libcontainer/nsinit/mount.go0000644000175000017500000001731212314376205017336 0ustar tagtag// +build linux package nsinit import ( "fmt" "github.com/dotcloud/docker/pkg/system" "io/ioutil" "os" "path/filepath" 
"syscall" ) // default mount point flags const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV // setupNewMountNamespace is used to initialize a new mount namespace for an new // container in the rootfs that is specified. // // There is no need to unmount the new mounts because as soon as the mount namespace // is no longer in use, the mounts will be removed automatically func setupNewMountNamespace(rootfs, console string, readonly, noPivotRoot bool) error { flag := syscall.MS_PRIVATE if noPivotRoot { flag = syscall.MS_SLAVE } if err := system.Mount("", "/", "", uintptr(flag|syscall.MS_REC), ""); err != nil { return fmt.Errorf("mounting / as slave %s", err) } if err := system.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil { return fmt.Errorf("mouting %s as bind %s", rootfs, err) } if readonly { if err := system.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, ""); err != nil { return fmt.Errorf("mounting %s as readonly %s", rootfs, err) } } if err := mountSystem(rootfs); err != nil { return fmt.Errorf("mount system %s", err) } if err := copyDevNodes(rootfs); err != nil { return fmt.Errorf("copy dev nodes %s", err) } // In non-privileged mode, this fails. Discard the error. setupLoopbackDevices(rootfs) if err := setupDev(rootfs); err != nil { return err } if err := setupPtmx(rootfs, console); err != nil { return err } if err := system.Chdir(rootfs); err != nil { return fmt.Errorf("chdir into %s %s", rootfs, err) } if noPivotRoot { if err := rootMsMove(rootfs); err != nil { return err } } else { if err := rootPivot(rootfs); err != nil { return err } } system.Umask(0022) return nil } // use a pivot root to setup the rootfs func rootPivot(rootfs string) error { pivotDir, err := ioutil.TempDir(rootfs, ".pivot_root") if err != nil { return fmt.Errorf("can't create pivot_root dir %s", pivotDir, err) } if err := system.Pivotroot(rootfs, pivotDir); err != nil { return fmt.Errorf("pivot_root %s", err) } if err := system.Chdir("/"); err != nil { return fmt.Errorf("chdir / %s", err) } // path to pivot dir now changed, update pivotDir = filepath.Join("/", filepath.Base(pivotDir)) if err := system.Unmount(pivotDir, syscall.MNT_DETACH); err != nil { return fmt.Errorf("unmount pivot_root dir %s", err) } if err := os.Remove(pivotDir); err != nil { return fmt.Errorf("remove pivot_root dir %s", err) } return nil } // use MS_MOVE and chroot to setup the rootfs func rootMsMove(rootfs string) error { if err := system.Mount(rootfs, "/", "", syscall.MS_MOVE, ""); err != nil { return fmt.Errorf("mount move %s into / %s", rootfs, err) } if err := system.Chroot("."); err != nil { return fmt.Errorf("chroot . 
%s", err) } if err := system.Chdir("/"); err != nil { return fmt.Errorf("chdir / %s", err) } return nil } // copyDevNodes mknods the hosts devices so the new container has access to them func copyDevNodes(rootfs string) error { oldMask := system.Umask(0000) defer system.Umask(oldMask) for _, node := range []string{ "null", "zero", "full", "random", "urandom", "tty", } { if err := copyDevNode(rootfs, node); err != nil { return err } } return nil } func setupLoopbackDevices(rootfs string) error { for i := 0; ; i++ { if err := copyDevNode(rootfs, fmt.Sprintf("loop%d", i)); err != nil { if !os.IsNotExist(err) { return err } break } } return nil } func copyDevNode(rootfs, node string) error { stat, err := os.Stat(filepath.Join("/dev", node)) if err != nil { return err } var ( dest = filepath.Join(rootfs, "dev", node) st = stat.Sys().(*syscall.Stat_t) ) if err := system.Mknod(dest, st.Mode, int(st.Rdev)); err != nil && !os.IsExist(err) { return fmt.Errorf("copy %s %s", node, err) } return nil } // setupDev symlinks the current processes pipes into the // appropriate destination on the containers rootfs func setupDev(rootfs string) error { for _, link := range []struct { from string to string }{ {"/proc/kcore", "/dev/core"}, {"/proc/self/fd", "/dev/fd"}, {"/proc/self/fd/0", "/dev/stdin"}, {"/proc/self/fd/1", "/dev/stdout"}, {"/proc/self/fd/2", "/dev/stderr"}, } { dest := filepath.Join(rootfs, link.to) if err := os.Remove(dest); err != nil && !os.IsNotExist(err) { return fmt.Errorf("remove %s %s", dest, err) } if err := os.Symlink(link.from, dest); err != nil { return fmt.Errorf("symlink %s %s", dest, err) } } return nil } // setupConsole ensures that the container has a proper /dev/console setup func setupConsole(rootfs, console string) error { oldMask := system.Umask(0000) defer system.Umask(oldMask) stat, err := os.Stat(console) if err != nil { return fmt.Errorf("stat console %s %s", console, err) } var ( st = stat.Sys().(*syscall.Stat_t) dest = filepath.Join(rootfs, "dev/console") ) if err := os.Remove(dest); err != nil && !os.IsNotExist(err) { return fmt.Errorf("remove %s %s", dest, err) } if err := os.Chmod(console, 0600); err != nil { return err } if err := os.Chown(console, 0, 0); err != nil { return err } if err := system.Mknod(dest, (st.Mode&^07777)|0600, int(st.Rdev)); err != nil { return fmt.Errorf("mknod %s %s", dest, err) } if err := system.Mount(console, dest, "bind", syscall.MS_BIND, ""); err != nil { return fmt.Errorf("bind %s to %s %s", console, dest, err) } return nil } // mountSystem sets up linux specific system mounts like sys, proc, shm, and devpts // inside the mount namespace func mountSystem(rootfs string) error { for _, m := range []struct { source string path string device string flags int data string }{ {source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags}, {source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: defaultMountFlags}, {source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: "mode=1777,size=65536k"}, {source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: "newinstance,ptmxmode=0666,mode=620,gid=5"}, } { if err := os.MkdirAll(m.path, 0755); err != nil && !os.IsExist(err) { return fmt.Errorf("mkdirall %s %s", m.path, err) } if err := system.Mount(m.source, m.path, m.device, uintptr(m.flags), m.data); err != nil { return fmt.Errorf("mounting %s into %s %s", m.source, 
m.path, err) } } return nil } // setupPtmx adds a symlink to pts/ptmx for /dev/ptmx and // finishes setting up /dev/console func setupPtmx(rootfs, console string) error { ptmx := filepath.Join(rootfs, "dev/ptmx") if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) { return err } if err := os.Symlink("pts/ptmx", ptmx); err != nil { return fmt.Errorf("symlink dev ptmx %s", err) } if console != "" { if err := setupConsole(rootfs, console); err != nil { return err } } return nil } // remountProc is used to detach and remount the proc filesystem // commonly needed with running a new process inside an existing container func remountProc() error { if err := system.Unmount("/proc", syscall.MNT_DETACH); err != nil { return err } if err := system.Mount("proc", "/proc", "proc", uintptr(defaultMountFlags), ""); err != nil { return err } return nil } func remountSys() error { if err := system.Unmount("/sys", syscall.MNT_DETACH); err != nil { if err != syscall.EINVAL { return err } } else { if err := system.Mount("sysfs", "/sys", "sysfs", uintptr(defaultMountFlags), ""); err != nil { return err } } return nil } docker-0.9.1/pkg/libcontainer/nsinit/sync_pipe.go0000644000175000017500000000266112314376205020166 0ustar tagtagpackage nsinit import ( "encoding/json" "fmt" "github.com/dotcloud/docker/pkg/libcontainer" "io/ioutil" "os" ) // SyncPipe allows communication to and from the child processes // to it's parent and allows the two independent processes to // syncronize their state. type SyncPipe struct { parent, child *os.File } func NewSyncPipe() (s *SyncPipe, err error) { s = &SyncPipe{} s.child, s.parent, err = os.Pipe() if err != nil { return nil, err } return s, nil } func NewSyncPipeFromFd(parendFd, childFd uintptr) (*SyncPipe, error) { s := &SyncPipe{} if parendFd > 0 { s.parent = os.NewFile(parendFd, "parendPipe") } else if childFd > 0 { s.child = os.NewFile(childFd, "childPipe") } else { return nil, fmt.Errorf("no valid sync pipe fd specified") } return s, nil } func (s *SyncPipe) SendToChild(context libcontainer.Context) error { data, err := json.Marshal(context) if err != nil { return err } s.parent.Write(data) return nil } func (s *SyncPipe) ReadFromParent() (libcontainer.Context, error) { data, err := ioutil.ReadAll(s.child) if err != nil { return nil, fmt.Errorf("error reading from sync pipe %s", err) } var context libcontainer.Context if len(data) > 0 { if err := json.Unmarshal(data, &context); err != nil { return nil, err } } return context, nil } func (s *SyncPipe) Close() error { if s.parent != nil { s.parent.Close() } if s.child != nil { s.child.Close() } return nil } docker-0.9.1/pkg/libcontainer/nsinit/nsinit.go0000644000175000017500000000134712314376205017501 0ustar tagtagpackage nsinit import ( "github.com/dotcloud/docker/pkg/libcontainer" ) // NsInit is an interface with the public facing methods to provide high level // exec operations on a container type NsInit interface { Exec(container *libcontainer.Container, term Terminal, args []string) (int, error) ExecIn(container *libcontainer.Container, nspid int, args []string) (int, error) Init(container *libcontainer.Container, uncleanRootfs, console string, syncPipe *SyncPipe, args []string) error } type linuxNs struct { root string commandFactory CommandFactory stateWriter StateWriter } func NewNsInit(command CommandFactory, state StateWriter) NsInit { return &linuxNs{ commandFactory: command, stateWriter: state, } } docker-0.9.1/pkg/libcontainer/container.json0000644000175000017500000000204412314376205017212 0ustar 
tagtag{ "hostname": "koye", "tty": true, "environment": [ "HOME=/", "PATH=PATH=$PATH:/bin:/usr/bin:/sbin:/usr/sbin", "container=docker", "TERM=xterm-256color" ], "namespaces": [ "NEWIPC", "NEWNS", "NEWPID", "NEWUTS", "NEWNET" ], "capabilities": [ "SETPCAP", "SYS_MODULE", "SYS_RAWIO", "SYS_PACCT", "SYS_ADMIN", "SYS_NICE", "SYS_RESOURCE", "SYS_TIME", "SYS_TTY_CONFIG", "MKNOD", "AUDIT_WRITE", "AUDIT_CONTROL", "MAC_OVERRIDE", "MAC_ADMIN", "NET_ADMIN" ], "networks": [{ "type": "veth", "context": { "bridge": "docker0", "prefix": "dock" }, "address": "172.17.0.100/16", "gateway": "172.17.42.1", "mtu": 1500 } ], "cgroups": { "name": "docker-koye", "parent": "docker", "memory": 5248000 } } docker-0.9.1/pkg/libcontainer/network/0000755000175000017500000000000012314376205016026 5ustar tagtagdocker-0.9.1/pkg/libcontainer/network/strategy.go0000644000175000017500000000152512314376205020222 0ustar tagtagpackage network import ( "errors" "github.com/dotcloud/docker/pkg/libcontainer" ) var ( ErrNotValidStrategyType = errors.New("not a valid network strategy type") ) var strategies = map[string]NetworkStrategy{ "veth": &Veth{}, "loopback": &Loopback{}, } // NetworkStrategy represents a specific network configuration for // a container's networking stack type NetworkStrategy interface { Create(*libcontainer.Network, int, libcontainer.Context) error Initialize(*libcontainer.Network, libcontainer.Context) error } // GetStrategy returns the specific network strategy for the // provided type. If no strategy is registered for the type an // ErrNotValidStrategyType is returned. func GetStrategy(tpe string) (NetworkStrategy, error) { s, exists := strategies[tpe] if !exists { return nil, ErrNotValidStrategyType } return s, nil } docker-0.9.1/pkg/libcontainer/network/network.go0000644000175000017500000000313512314376205020050 0ustar tagtagpackage network import ( "github.com/dotcloud/docker/pkg/netlink" "net" ) func InterfaceUp(name string) error { iface, err := net.InterfaceByName(name) if err != nil { return err } return netlink.NetworkLinkUp(iface) } func InterfaceDown(name string) error { iface, err := net.InterfaceByName(name) if err != nil { return err } return netlink.NetworkLinkDown(iface) } func ChangeInterfaceName(old, newName string) error { iface, err := net.InterfaceByName(old) if err != nil { return err } return netlink.NetworkChangeName(iface, newName) } func CreateVethPair(name1, name2 string) error { return netlink.NetworkCreateVethPair(name1, name2) } func SetInterfaceInNamespacePid(name string, nsPid int) error { iface, err := net.InterfaceByName(name) if err != nil { return err } return netlink.NetworkSetNsPid(iface, nsPid) } func SetInterfaceMaster(name, master string) error { iface, err := net.InterfaceByName(name) if err != nil { return err } masterIface, err := net.InterfaceByName(master) if err != nil { return err } return netlink.NetworkSetMaster(iface, masterIface) } func SetDefaultGateway(ip string) error { return netlink.AddDefaultGw(net.ParseIP(ip)) } func SetInterfaceIp(name string, rawIp string) error { iface, err := net.InterfaceByName(name) if err != nil { return err } ip, ipNet, err := net.ParseCIDR(rawIp) if err != nil { return err } return netlink.NetworkLinkAddIp(iface, ip, ipNet) } func SetMtu(name string, mtu int) error { iface, err := net.InterfaceByName(name) if err != nil { return err } return netlink.NetworkSetMTU(iface, mtu) } docker-0.9.1/pkg/libcontainer/network/loopback.go0000644000175000017500000000113412314376205020146 0ustar tagtagpackage network import ( "fmt" 
"github.com/dotcloud/docker/pkg/libcontainer" ) // Loopback is a network strategy that provides a basic loopback device type Loopback struct { } func (l *Loopback) Create(n *libcontainer.Network, nspid int, context libcontainer.Context) error { return nil } func (l *Loopback) Initialize(config *libcontainer.Network, context libcontainer.Context) error { if err := SetMtu("lo", config.Mtu); err != nil { return fmt.Errorf("set lo mtu to %d %s", config.Mtu, err) } if err := InterfaceUp("lo"); err != nil { return fmt.Errorf("lo up %s", err) } return nil } docker-0.9.1/pkg/libcontainer/network/veth.go0000644000175000017500000000503312314376205017324 0ustar tagtagpackage network import ( "fmt" "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/libcontainer/utils" ) // Veth is a network strategy that uses a bridge and creates // a veth pair, one that stays outside on the host and the other // is placed inside the container's namespace type Veth struct { } func (v *Veth) Create(n *libcontainer.Network, nspid int, context libcontainer.Context) error { var ( bridge string prefix string exists bool ) if bridge, exists = n.Context["bridge"]; !exists { return fmt.Errorf("bridge does not exist in network context") } if prefix, exists = n.Context["prefix"]; !exists { return fmt.Errorf("veth prefix does not exist in network context") } name1, name2, err := createVethPair(prefix) if err != nil { return err } context["veth-host"] = name1 context["veth-child"] = name2 if err := SetInterfaceMaster(name1, bridge); err != nil { return err } if err := SetMtu(name1, n.Mtu); err != nil { return err } if err := InterfaceUp(name1); err != nil { return err } if err := SetInterfaceInNamespacePid(name2, nspid); err != nil { return err } return nil } func (v *Veth) Initialize(config *libcontainer.Network, context libcontainer.Context) error { var ( vethChild string exists bool ) if vethChild, exists = context["veth-child"]; !exists { return fmt.Errorf("vethChild does not exist in network context") } if err := InterfaceDown(vethChild); err != nil { return fmt.Errorf("interface down %s %s", vethChild, err) } if err := ChangeInterfaceName(vethChild, "eth0"); err != nil { return fmt.Errorf("change %s to eth0 %s", vethChild, err) } if err := SetInterfaceIp("eth0", config.Address); err != nil { return fmt.Errorf("set eth0 ip %s", err) } if err := SetMtu("eth0", config.Mtu); err != nil { return fmt.Errorf("set eth0 mtu to %d %s", config.Mtu, err) } if err := InterfaceUp("eth0"); err != nil { return fmt.Errorf("eth0 up %s", err) } if config.Gateway != "" { if err := SetDefaultGateway(config.Gateway); err != nil { return fmt.Errorf("set gateway to %s %s", config.Gateway, err) } } return nil } // createVethPair will automatically generage two random names for // the veth pair and ensure that they have been created func createVethPair(prefix string) (name1 string, name2 string, err error) { name1, err = utils.GenerateRandomName(prefix, 4) if err != nil { return } name2, err = utils.GenerateRandomName(prefix, 4) if err != nil { return } if err = CreateVethPair(name1, name2); err != nil { return } return } docker-0.9.1/pkg/libcontainer/MAINTAINERS0000644000175000017500000000016112314376205016030 0ustar tagtagMichael Crosby (@crosbymichael) Guillaume Charmes (@creack) docker-0.9.1/pkg/libcontainer/container.go0000644000175000017500000000375012314376205016653 0ustar tagtagpackage libcontainer import ( "github.com/dotcloud/docker/pkg/cgroups" ) // Context is a generic key value pair that allows // arbatrary 
data to be sent type Context map[string]string // Container defines configuration options for how a // container is setup inside a directory and how a process should be executed type Container struct { Hostname string `json:"hostname,omitempty"` // hostname ReadonlyFs bool `json:"readonly_fs,omitempty"` // set the containers rootfs as readonly NoPivotRoot bool `json:"no_pivot_root,omitempty"` // this can be enabled if you are running in ramdisk User string `json:"user,omitempty"` // user to execute the process as WorkingDir string `json:"working_dir,omitempty"` // current working directory Env []string `json:"environment,omitempty"` // environment to set Tty bool `json:"tty,omitempty"` // setup a proper tty or not Namespaces Namespaces `json:"namespaces,omitempty"` // namespaces to apply Capabilities Capabilities `json:"capabilities,omitempty"` // capabilities to drop Networks []*Network `json:"networks,omitempty"` // nil for host's network stack Cgroups *cgroups.Cgroup `json:"cgroups,omitempty"` // cgroups Context Context `json:"context,omitempty"` // generic context for specific options (apparmor, selinux) } // Network defines configuration for a container's networking stack // // The network configuration can be omited from a container causing the // container to be setup with the host's networking stack type Network struct { Type string `json:"type,omitempty"` // type of networking to setup i.e. veth, macvlan, etc Context Context `json:"context,omitempty"` // generic context for type specific networking options Address string `json:"address,omitempty"` Gateway string `json:"gateway,omitempty"` Mtu int `json:"mtu,omitempty"` } docker-0.9.1/pkg/libcontainer/types_test.go0000644000175000017500000000125012314376205017065 0ustar tagtagpackage libcontainer import ( "testing" ) func TestNamespacesContains(t *testing.T) { ns := Namespaces{ GetNamespace("NEWPID"), GetNamespace("NEWNS"), GetNamespace("NEWUTS"), } if ns.Contains("NEWNET") { t.Fatal("namespaces should not contain NEWNET") } if !ns.Contains("NEWPID") { t.Fatal("namespaces should contain NEWPID but does not") } } func TestCapabilitiesContains(t *testing.T) { caps := Capabilities{ GetCapability("MKNOD"), GetCapability("SETPCAP"), } if caps.Contains("SYS_ADMIN") { t.Fatal("capabilities should not contain SYS_ADMIN") } if !caps.Contains("MKNOD") { t.Fatal("capabilities should container MKNOD but does not") } } docker-0.9.1/pkg/libcontainer/TODO.md0000644000175000017500000000146012314376205015425 0ustar tagtag#### goals * small and simple - line count is not everything but less code is better * clean lines between what we do in the pkg * provide primitives for working with namespaces not cater to every option * extend via configuration not by features - host networking, no networking, veth network can be accomplished via adjusting the container.json, nothing to do with code #### tasks * proper tty for a new process in an existing container * use exec or raw syscalls for new process in existing container * setup proper user in namespace if specified * implement hook or clean interface for cgroups * example configs for different setups (host networking, boot init) * improve pkg documentation with comments * testing - this is hard in a low level pkg but we could do some, maybe * pivot root * selinux * apparmor docker-0.9.1/pkg/libcontainer/README.md0000644000175000017500000000657012314376205015624 0ustar tagtag## libcontainer - reference implementation for containers #### background libcontainer specifies configuration options for what a 
container is. It provides a native Go implementation for using linux namespaces with no external dependencies. libcontainer provides many convience functions for working with namespaces, networking, and management. #### container A container is a self contained directory that is able to run one or more processes without affecting the host system. The directory is usually a full system tree. Inside the directory a `container.json` file is placed with the runtime configuration for how the processes should be contained and ran. Environment, networking, and different capabilities for the process are specified in this file. The configuration is used for each process executed inside the container. Sample `container.json` file: ```json { "hostname": "koye", "tty": true, "environment": [ "HOME=/", "PATH=PATH=$PATH:/bin:/usr/bin:/sbin:/usr/sbin", "container=docker", "TERM=xterm-256color" ], "namespaces": [ "NEWIPC", "NEWNS", "NEWPID", "NEWUTS", "NEWNET" ], "capabilities": [ "SETPCAP", "SYS_MODULE", "SYS_RAWIO", "SYS_PACCT", "SYS_ADMIN", "SYS_NICE", "SYS_RESOURCE", "SYS_TIME", "SYS_TTY_CONFIG", "MKNOD", "AUDIT_WRITE", "AUDIT_CONTROL", "MAC_OVERRIDE", "MAC_ADMIN", "NET_ADMIN" ], "networks": [{ "type": "veth", "context": { "bridge": "docker0", "prefix": "dock" }, "address": "172.17.0.100/16", "gateway": "172.17.42.1", "mtu": 1500 } ], "cgroups": { "name": "docker-koye", "parent": "docker", "memory": 5248000 } } ``` Using this configuration and the current directory holding the rootfs for a process, one can use libcontainer to exec the container. Running the life of the namespace, a `pid` file is written to the current directory with the pid of the namespaced process to the external world. A client can use this pid to wait, kill, or perform other operation with the container. If a user tries to run an new process inside an existing container with a live namespace the namespace will be joined by the new process. You may also specify an alternate root place where the `container.json` file is read and where the `pid` file will be saved. #### nsinit `nsinit` is a cli application used as the reference implementation of libcontainer. It is able to spawn or join new containers giving the current directory. To use `nsinit` cd into a linux rootfs and copy a `container.json` file into the directory with your specified configuration. To execute `/bin/bash` in the current directory as a container just run: ```bash nsinit exec /bin/bash ``` If you wish to spawn another process inside the container while your current bash session is running just run the exact same command again to get another bash shell or change the command. If the original process dies, PID 1, all other processes spawned inside the container will also be killed and the namespace will be removed. You can identify if a process is running in a container by looking to see if `pid` is in the root of the directory. 
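
For reference, the same flow can be driven directly from Go. The sketch below is a minimal, untested example that mirrors what the `nsinit` command does internally: it decodes `container.json` from the current directory, builds an `NsInit` with the default command factory and state writer, and execs a shell inside the new namespaces. Error handling is reduced to `log.Fatal` for brevity, and as with `nsinit` it is expected to be run from inside the rootfs directory.

```go
package main

import (
	"encoding/json"
	"log"
	"os"

	"github.com/dotcloud/docker/pkg/libcontainer"
	"github.com/dotcloud/docker/pkg/libcontainer/nsinit"
)

func main() {
	// read the runtime configuration for the container from the rootfs
	f, err := os.Open("container.json")
	if err != nil {
		log.Fatal(err)
	}
	var container *libcontainer.Container
	if err := json.NewDecoder(f).Decode(&container); err != nil {
		log.Fatal(err)
	}
	f.Close()

	// "." is used as the root, so the pid file is written next to container.json
	ns := nsinit.NewNsInit(&nsinit.DefaultCommandFactory{"."}, &nsinit.DefaultStateWriter{"."})
	term := nsinit.NewTerminal(os.Stdin, os.Stdout, os.Stderr, container.Tty)

	exitCode, err := ns.Exec(container, term, []string{"/bin/bash"})
	if err != nil {
		log.Fatal(err)
	}
	os.Exit(exitCode)
}
```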
docker-0.9.1/pkg/libcontainer/types.go0000644000175000017500000000570712314376205016041 0ustar tagtagpackage libcontainer import ( "encoding/json" "errors" "github.com/syndtr/gocapability/capability" ) var ( ErrUnkownNamespace = errors.New("Unknown namespace") ErrUnkownCapability = errors.New("Unknown capability") ErrUnsupported = errors.New("Unsupported method") ) // namespaceList is used to convert the libcontainer types // into the names of the files located in /proc//ns/* for // each namespace var ( namespaceList = Namespaces{} capabilityList = Capabilities{ {Key: "SETPCAP", Value: capability.CAP_SETPCAP}, {Key: "SYS_MODULE", Value: capability.CAP_SYS_MODULE}, {Key: "SYS_RAWIO", Value: capability.CAP_SYS_RAWIO}, {Key: "SYS_PACCT", Value: capability.CAP_SYS_PACCT}, {Key: "SYS_ADMIN", Value: capability.CAP_SYS_ADMIN}, {Key: "SYS_NICE", Value: capability.CAP_SYS_NICE}, {Key: "SYS_RESOURCE", Value: capability.CAP_SYS_RESOURCE}, {Key: "SYS_TIME", Value: capability.CAP_SYS_TIME}, {Key: "SYS_TTY_CONFIG", Value: capability.CAP_SYS_TTY_CONFIG}, {Key: "MKNOD", Value: capability.CAP_MKNOD}, {Key: "AUDIT_WRITE", Value: capability.CAP_AUDIT_WRITE}, {Key: "AUDIT_CONTROL", Value: capability.CAP_AUDIT_CONTROL}, {Key: "MAC_OVERRIDE", Value: capability.CAP_MAC_OVERRIDE}, {Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN}, {Key: "NET_ADMIN", Value: capability.CAP_NET_ADMIN}, } ) type ( Namespace struct { Key string Value int File string } Namespaces []*Namespace ) func (ns *Namespace) String() string { return ns.Key } func (ns *Namespace) MarshalJSON() ([]byte, error) { return json.Marshal(ns.Key) } func (ns *Namespace) UnmarshalJSON(src []byte) error { var nsName string if err := json.Unmarshal(src, &nsName); err != nil { return err } ret := GetNamespace(nsName) if ret == nil { return ErrUnkownNamespace } *ns = *ret return nil } func GetNamespace(key string) *Namespace { for _, ns := range namespaceList { if ns.Key == key { return ns } } return nil } // Contains returns true if the specified Namespace is // in the slice func (n Namespaces) Contains(ns string) bool { for _, nsp := range n { if nsp.Key == ns { return true } } return false } type ( Capability struct { Key string Value capability.Cap } Capabilities []*Capability ) func (c *Capability) String() string { return c.Key } func (c *Capability) MarshalJSON() ([]byte, error) { return json.Marshal(c.Key) } func (c *Capability) UnmarshalJSON(src []byte) error { var capName string if err := json.Unmarshal(src, &capName); err != nil { return err } ret := GetCapability(capName) if ret == nil { return ErrUnkownCapability } *c = *ret return nil } func GetCapability(key string) *Capability { for _, capp := range capabilityList { if capp.Key == key { return capp } } return nil } // Contains returns true if the specified Capability is // in the slice func (c Capabilities) Contains(capp string) bool { for _, cap := range c { if cap.Key == capp { return true } } return false } docker-0.9.1/pkg/libcontainer/apparmor/0000755000175000017500000000000012314376205016156 5ustar tagtagdocker-0.9.1/pkg/libcontainer/apparmor/setup.go0000644000175000017500000000545412314376205017655 0ustar tagtagpackage apparmor import ( "fmt" "io/ioutil" "os" "os/exec" "path" ) const DefaultProfilePath = "/etc/apparmor.d/docker" const DefaultProfile = ` # AppArmor profile from lxc for containers. 
@{HOME}=@{HOMEDIRS}/*/ /root/ @{HOMEDIRS}=/home/ #@{HOMEDIRS}+= @{multiarch}=*-linux-gnu* @{PROC}=/proc/ profile docker-default flags=(attach_disconnected,mediate_deleted) { network, capability, file, umount, # ignore DENIED message on / remount deny mount options=(ro, remount) -> /, # allow tmpfs mounts everywhere mount fstype=tmpfs, # allow mqueue mounts everywhere mount fstype=mqueue, # allow fuse mounts everywhere mount fstype=fuse.*, # allow bind mount of /lib/init/fstab for lxcguest mount options=(rw, bind) /lib/init/fstab.lxc/ -> /lib/init/fstab/, # deny writes in /proc/sys/fs but allow binfmt_misc to be mounted mount fstype=binfmt_misc -> /proc/sys/fs/binfmt_misc/, deny @{PROC}/sys/fs/** wklx, # allow efivars to be mounted, writing to it will be blocked though mount fstype=efivarfs -> /sys/firmware/efi/efivars/, # block some other dangerous paths deny @{PROC}/sysrq-trigger rwklx, deny @{PROC}/mem rwklx, deny @{PROC}/kmem rwklx, deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx, deny @{PROC}/sys/kernel/*/** wklx, # deny writes in /sys except for /sys/fs/cgroup, also allow # fusectl, securityfs and debugfs to be mounted there (read-only) mount fstype=fusectl -> /sys/fs/fuse/connections/, mount fstype=securityfs -> /sys/kernel/security/, mount fstype=debugfs -> /sys/kernel/debug/, deny mount fstype=debugfs -> /var/lib/ureadahead/debugfs/, mount fstype=proc -> /proc/, mount fstype=sysfs -> /sys/, deny /sys/[^f]*/** wklx, deny /sys/f[^s]*/** wklx, deny /sys/fs/[^c]*/** wklx, deny /sys/fs/c[^g]*/** wklx, deny /sys/fs/cg[^r]*/** wklx, deny /sys/firmware/efi/efivars/** rwklx, deny /sys/kernel/security/** rwklx, mount options=(move) /sys/fs/cgroup/cgmanager/ -> /sys/fs/cgroup/cgmanager.lower/, # the container may never be allowed to mount devpts. If it does, it # will remount the host's devpts. We could allow it to do it with # the newinstance option (but, right now, we don't). deny mount fstype=devpts, } ` func InstallDefaultProfile() error { if !IsEnabled() { return nil } // If the profile already exists, let it be. 
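// An existing file at DefaultProfilePath is treated as authoritative and is
// never overwritten, so locally customized profiles are preserved.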
if _, err := os.Stat(DefaultProfilePath); err == nil { return nil } // Make sure /etc/apparmor.d exists if err := os.MkdirAll(path.Dir(DefaultProfilePath), 0755); err != nil { return err } if err := ioutil.WriteFile(DefaultProfilePath, []byte(DefaultProfile), 0644); err != nil { return err } output, err := exec.Command("/lib/init/apparmor-profile-load", "docker").CombinedOutput() if err != nil { return fmt.Errorf("Error loading docker profile: %s (%s)", err, output) } return nil } docker-0.9.1/pkg/libcontainer/apparmor/apparmor_disabled.go0000644000175000017500000000024712314376205022160 0ustar tagtag// +build !apparmor !linux !amd64 package apparmor import () func IsEnabled() bool { return false } func ApplyProfile(pid int, name string) error { return nil } docker-0.9.1/pkg/libcontainer/apparmor/apparmor.go0000644000175000017500000000107512314376205020331 0ustar tagtag// +build apparmor,linux,amd64 package apparmor // #cgo LDFLAGS: -lapparmor // #include // #include import "C" import ( "io/ioutil" "unsafe" ) func IsEnabled() bool { buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled") return err == nil && len(buf) > 1 && buf[0] == 'Y' } func ApplyProfile(pid int, name string) error { if !IsEnabled() || name == "" { return nil } cName := C.CString(name) defer C.free(unsafe.Pointer(cName)) if _, err := C.aa_change_onexec(cName); err != nil { return err } return nil } docker-0.9.1/pkg/libcontainer/capabilities/0000755000175000017500000000000012314376205016766 5ustar tagtagdocker-0.9.1/pkg/libcontainer/capabilities/capabilities.go0000644000175000017500000000155212314376205021751 0ustar tagtagpackage capabilities import ( "github.com/dotcloud/docker/pkg/libcontainer" "github.com/syndtr/gocapability/capability" "os" ) // DropCapabilities drops capabilities for the current process based // on the container's configuration. func DropCapabilities(container *libcontainer.Container) error { if drop := getCapabilities(container); len(drop) > 0 { c, err := capability.NewPid(os.Getpid()) if err != nil { return err } c.Unset(capability.CAPS|capability.BOUNDS, drop...) 
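// Unset only updates the library's in-memory copy of the capability sets; the
// Apply call below is what writes the new bounding and CAPS
// (effective/permitted/inheritable) sets back to the kernel for this process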
if err := c.Apply(capability.CAPS | capability.BOUNDS); err != nil { return err } } return nil } // getCapabilities returns the specific cap values for the libcontainer types func getCapabilities(container *libcontainer.Container) []capability.Cap { drop := []capability.Cap{} for _, c := range container.Capabilities { drop = append(drop, c.Value) } return drop } docker-0.9.1/pkg/mount/0000755000175000017500000000000012314376205013026 5ustar tagtagdocker-0.9.1/pkg/mount/flags_unsupported.go0000644000175000017500000000016612314376205017124 0ustar tagtag// +build !linux !amd64 package mount func parseOptions(options string) (int, string) { panic("Not implemented") } docker-0.9.1/pkg/mount/MAINTAINERS0000644000175000017500000000007412314376205014524 0ustar tagtagMichael Crosby (@crosbymichael) docker-0.9.1/pkg/mount/mounter_unsupported.go0000644000175000017500000000033312314376205017515 0ustar tagtag// +build !linux !amd64 package mount func mount(device, target, mType string, flag uintptr, data string) error { panic("Not implemented") } func unmount(target string, flag int) error { panic("Not implemented") } docker-0.9.1/pkg/mount/flags_linux.go0000644000175000017500000000354512314376205015677 0ustar tagtag// +build amd64 package mount import ( "strings" "syscall" ) // Parse fstab type mount options into mount() flags // and device specific data func parseOptions(options string) (int, string) { var ( flag int data []string ) flags := map[string]struct { clear bool flag int }{ "defaults": {false, 0}, "ro": {false, syscall.MS_RDONLY}, "rw": {true, syscall.MS_RDONLY}, "suid": {true, syscall.MS_NOSUID}, "nosuid": {false, syscall.MS_NOSUID}, "dev": {true, syscall.MS_NODEV}, "nodev": {false, syscall.MS_NODEV}, "exec": {true, syscall.MS_NOEXEC}, "noexec": {false, syscall.MS_NOEXEC}, "sync": {false, syscall.MS_SYNCHRONOUS}, "async": {true, syscall.MS_SYNCHRONOUS}, "dirsync": {false, syscall.MS_DIRSYNC}, "remount": {false, syscall.MS_REMOUNT}, "mand": {false, syscall.MS_MANDLOCK}, "nomand": {true, syscall.MS_MANDLOCK}, "atime": {true, syscall.MS_NOATIME}, "noatime": {false, syscall.MS_NOATIME}, "diratime": {true, syscall.MS_NODIRATIME}, "nodiratime": {false, syscall.MS_NODIRATIME}, "bind": {false, syscall.MS_BIND}, "rbind": {false, syscall.MS_BIND | syscall.MS_REC}, "private": {false, syscall.MS_PRIVATE}, "relatime": {false, syscall.MS_RELATIME}, "norelatime": {true, syscall.MS_RELATIME}, "strictatime": {false, syscall.MS_STRICTATIME}, "nostrictatime": {true, syscall.MS_STRICTATIME}, } for _, o := range strings.Split(options, ",") { // If the option does not exist in the flags table then it is a // data value for a specific fs type if f, exists := flags[o]; exists { if f.clear { flag &= ^f.flag } else { flag |= f.flag } } else { data = append(data, o) } } return flag, strings.Join(data, ",") } docker-0.9.1/pkg/mount/mounter_linux.go0000644000175000017500000000104312314376205016263 0ustar tagtag// +build amd64 package mount import ( "syscall" ) func mount(device, target, mType string, flag uintptr, data string) error { if err := syscall.Mount(device, target, mType, flag, data); err != nil { return err } // If we have a bind mount or remount, remount... 
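// The kernel ignores MS_RDONLY on the initial bind mount, so making a bind
// mount read only requires this second call with MS_REMOUNT set.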
if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY { return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data) } return nil } func unmount(target string, flag int) error { return syscall.Unmount(target, flag) } docker-0.9.1/pkg/mount/mountinfo.go0000644000175000017500000000431612314376205015377 0ustar tagtagpackage mount import ( "bufio" "fmt" "io" "os" "strings" ) const ( /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) (1) mount ID: unique identifier of the mount (may be reused after umount) (2) parent ID: ID of parent (or of self for the top of the mount tree) (3) major:minor: value of st_dev for files on filesystem (4) root: root of the mount within the filesystem (5) mount point: mount point relative to the process's root (6) mount options: per mount options (7) optional fields: zero or more fields of the form "tag[:value]" (8) separator: marks the end of the optional fields (9) filesystem type: name of filesystem of the form "type[.subtype]" (10) mount source: filesystem specific information or "none" (11) super options: per super block options*/ mountinfoFormat = "%d %d %d:%d %s %s %s " ) type MountInfo struct { Id, Parent, Major, Minor int Root, Mountpoint, Opts string Fstype, Source, VfsOpts string } // Parse /proc/self/mountinfo because comparing Dev and ino does not work from bind mounts func parseMountTable() ([]*MountInfo, error) { f, err := os.Open("/proc/self/mountinfo") if err != nil { return nil, err } defer f.Close() return parseInfoFile(f) } func parseInfoFile(r io.Reader) ([]*MountInfo, error) { var ( s = bufio.NewScanner(r) out = []*MountInfo{} ) for s.Scan() { if err := s.Err(); err != nil { return nil, err } var ( p = &MountInfo{} text = s.Text() ) if _, err := fmt.Sscanf(text, mountinfoFormat, &p.Id, &p.Parent, &p.Major, &p.Minor, &p.Root, &p.Mountpoint, &p.Opts); err != nil { return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) } // Safe as mountinfo encodes mountpoints with spaces as \040. 
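// The optional fields (7) are variable length, so the fstype, source and super
// options are located by scanning for the " - " separator instead of relying
// on a fixed field offset.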
index := strings.Index(text, " - ") postSeparatorFields := strings.Fields(text[index+3:]) if len(postSeparatorFields) != 3 { return nil, fmt.Errorf("Error did not find 3 fields post '-' in '%s'", text) } p.Fstype = postSeparatorFields[0] p.Source = postSeparatorFields[1] p.VfsOpts = postSeparatorFields[2] out = append(out, p) } return out, nil } docker-0.9.1/pkg/mount/mountinfo_test.go0000644000175000017500000016750312314376205016446 0ustar tagtagpackage mount import ( "bytes" "testing" ) const ( fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw 16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel 17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755 18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw 19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw 20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel 21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000 22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755 23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755 24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw 26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children 27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children 28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children 29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children 30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children 31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children 32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children 33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children 34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children 35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered 36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct 37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel 38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel 39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel 40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw 41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw 42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw 43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw 45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered 46 35 253:4 / /home rw,relatime shared:31 - ext4 
/dev/mapper/ssd-home rw,seclabel,data=ordered 47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered 48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered 121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000 124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw 165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered 167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered 171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered 175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered 179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered 183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered 187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered 191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered 195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered 199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered 203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered 207 35 253:25 / 
/var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered 211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered 215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered 219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered 223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered 227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered 231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered 235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered 239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered 243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered 247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered` ubuntuMountInfo = `15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw 16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw 17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=1015140k,nr_inodes=253785,mode=755 18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 19 20 0:15 / /run rw,nosuid,noexec,relatime - tmpfs tmpfs rw,size=205044k,mode=755 20 1 253:0 / / rw,relatime - ext4 /dev/disk/by-label/DOROOT rw,errors=remount-ro,data=ordered 21 15 0:16 / 
/sys/fs/cgroup rw,relatime - tmpfs none rw,size=4k,mode=755 22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw 23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw 24 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw 25 19 0:18 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k 26 21 0:19 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset,clone_children 27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw 28 21 0:21 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu 29 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 30 15 0:23 / /sys/fs/pstore rw,relatime - pstore none rw 31 21 0:24 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct 32 21 0:25 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory 33 21 0:26 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices 34 21 0:27 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer 35 21 0:28 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio 36 21 0:29 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event 37 21 0:30 / /sys/fs/cgroup/hugetlb rw,relatime - cgroup cgroup rw,hugetlb 38 21 0:31 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd 39 20 0:32 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=caafa54fdc06525 40 20 0:33 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8-init rw,relatime - aufs none rw,si=caafa54f882b525 41 20 0:34 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8 rw,relatime - aufs none rw,si=caafa54f8829525 42 20 0:35 / /var/lib/docker/aufs/mnt/16f4d7e96dd612903f425bfe856762f291ff2e36a8ecd55a2209b7d7cd81c30b rw,relatime - aufs none rw,si=caafa54f882d525 43 20 0:36 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e-init rw,relatime - aufs none rw,si=caafa54f882f525 44 20 0:37 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e rw,relatime - aufs none rw,si=caafa54f88ba525 45 20 0:38 / /var/lib/docker/aufs/mnt/283f35a910233c756409313be71ecd8fcfef0df57108b8d740b61b3e88860452 rw,relatime - aufs none rw,si=caafa54f88b8525 46 20 0:39 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1-init rw,relatime - aufs none rw,si=caafa54f88be525 47 20 0:40 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1 rw,relatime - aufs none rw,si=caafa54f882c525 48 20 0:41 / /var/lib/docker/aufs/mnt/de2b538c97d6366cc80e8658547c923ea1d042f85580df379846f36a4df7049d rw,relatime - aufs none rw,si=caafa54f85bb525 49 20 0:42 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49-init rw,relatime - aufs none rw,si=caafa54fdc00525 50 20 0:43 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49 rw,relatime - aufs none rw,si=caafa54fbaec525 51 20 0:44 / /var/lib/docker/aufs/mnt/6ac1cace985c9fc9bea32234de8b36dba49bdd5e29a2972b327ff939d78a6274 rw,relatime - aufs none rw,si=caafa54f8e1a525 52 20 0:45 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b-init rw,relatime - aufs none rw,si=caafa54f8e1d525 53 20 0:46 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b 
rw,relatime - aufs none rw,si=caafa54f8e1b525 54 20 0:47 / /var/lib/docker/aufs/mnt/cabb117d997f0f93519185aea58389a9762770b7496ed0b74a3e4a083fa45902 rw,relatime - aufs none rw,si=caafa54f810a525 55 20 0:48 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33-init rw,relatime - aufs none rw,si=caafa54f8529525 56 20 0:49 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33 rw,relatime - aufs none rw,si=caafa54f852f525 57 20 0:50 / /var/lib/docker/aufs/mnt/16a1526fa445b84ce84f89506d219e87fa488a814063baf045d88b02f21166b3 rw,relatime - aufs none rw,si=caafa54f9e1d525 58 20 0:51 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f-init rw,relatime - aufs none rw,si=caafa54f854d525 59 20 0:52 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f rw,relatime - aufs none rw,si=caafa54f854e525 60 20 0:53 / /var/lib/docker/aufs/mnt/e370c3e286bea027917baa0e4d251262681a472a87056e880dfd0513516dffd9 rw,relatime - aufs none rw,si=caafa54f840a525 61 20 0:54 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e-init rw,relatime - aufs none rw,si=caafa54f8408525 62 20 0:55 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e rw,relatime - aufs none rw,si=caafa54f8409525 63 20 0:56 / /var/lib/docker/aufs/mnt/abd0b5ea5d355a67f911475e271924a5388ee60c27185fcd60d095afc4a09dc7 rw,relatime - aufs none rw,si=caafa54f9eb1525 64 20 0:57 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2-init rw,relatime - aufs none rw,si=caafa54f85bf525 65 20 0:58 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2 rw,relatime - aufs none rw,si=caafa54f85b8525 66 20 0:59 / /var/lib/docker/aufs/mnt/912e1bf28b80a09644503924a8a1a4fb8ed10b808ca847bda27a369919aa52fa rw,relatime - aufs none rw,si=caafa54fbaea525 67 20 0:60 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576-init rw,relatime - aufs none rw,si=caafa54f8472525 68 20 0:61 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576 rw,relatime - aufs none rw,si=caafa54f8474525 69 20 0:62 / /var/lib/docker/aufs/mnt/5aaebb79ef3097dfca377889aeb61a0c9d5e3795117d2b08d0751473c671dfb2 rw,relatime - aufs none rw,si=caafa54f8c5e525 70 20 0:63 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2-init rw,relatime - aufs none rw,si=caafa54f8c3b525 71 20 0:64 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2 rw,relatime - aufs none rw,si=caafa54f8c3d525 72 20 0:65 / /var/lib/docker/aufs/mnt/2777f0763da4de93f8bebbe1595cc77f739806a158657b033eca06f827b6028a rw,relatime - aufs none rw,si=caafa54f8c3e525 73 20 0:66 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e-init rw,relatime - aufs none rw,si=caafa54f8c39525 74 20 0:67 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e rw,relatime - aufs none rw,si=caafa54f854f525 75 20 0:68 / /var/lib/docker/aufs/mnt/06400b526ec18b66639c96efc41a84f4ae0b117cb28dafd56be420651b4084a0 rw,relatime - aufs none rw,si=caafa54f840b525 76 20 0:69 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785-init rw,relatime - aufs none rw,si=caafa54fdddf525 77 20 0:70 / 
/var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785 rw,relatime - aufs none rw,si=caafa54f854b525 78 20 0:71 / /var/lib/docker/aufs/mnt/1ff414fa93fd61ec81b0ab7b365a841ff6545accae03cceac702833aaeaf718f rw,relatime - aufs none rw,si=caafa54f8d85525 79 20 0:72 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8-init rw,relatime - aufs none rw,si=caafa54f8da3525 80 20 0:73 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8 rw,relatime - aufs none rw,si=caafa54f8da2525 81 20 0:74 / /var/lib/docker/aufs/mnt/b68b1d4fe4d30016c552398e78b379a39f651661d8e1fa5f2460c24a5e723420 rw,relatime - aufs none rw,si=caafa54f8d81525 82 20 0:75 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739-init rw,relatime - aufs none rw,si=caafa54f8da1525 83 20 0:76 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739 rw,relatime - aufs none rw,si=caafa54f8da0525 84 20 0:77 / /var/lib/docker/aufs/mnt/53e10b0329afc0e0d3322d31efaed4064139dc7027fe6ae445cffd7104bcc94f rw,relatime - aufs none rw,si=caafa54f8c35525 85 20 0:78 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494-init rw,relatime - aufs none rw,si=caafa54f8db8525 86 20 0:79 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494 rw,relatime - aufs none rw,si=caafa54f8dba525 87 20 0:80 / /var/lib/docker/aufs/mnt/90fdd2c03eeaf65311f88f4200e18aef6d2772482712d9aea01cd793c64781b5 rw,relatime - aufs none rw,si=caafa54f8315525 88 20 0:81 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f-init rw,relatime - aufs none rw,si=caafa54f8fc6525 89 20 0:82 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f rw,relatime - aufs none rw,si=caafa54f8468525 90 20 0:83 / /var/lib/docker/aufs/mnt/8cf9a993f50f3305abad3da268c0fc44ff78a1e7bba595ef9de963497496c3f9 rw,relatime - aufs none rw,si=caafa54f8c59525 91 20 0:84 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173-init rw,relatime - aufs none rw,si=caafa54f846a525 92 20 0:85 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173 rw,relatime - aufs none rw,si=caafa54f846b525 93 20 0:86 / /var/lib/docker/aufs/mnt/d8c8288ec920439a48b5796bab5883ee47a019240da65e8d8f33400c31bac5df rw,relatime - aufs none rw,si=caafa54f8dbf525 94 20 0:87 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6-init rw,relatime - aufs none rw,si=caafa54f810f525 95 20 0:88 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6 rw,relatime - aufs none rw,si=caafa54fbae9525 96 20 0:89 / /var/lib/docker/aufs/mnt/befc1c67600df449dddbe796c0d06da7caff1d2bbff64cde1f0ba82d224996b5 rw,relatime - aufs none rw,si=caafa54f8dab525 97 20 0:90 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562-init rw,relatime - aufs none rw,si=caafa54fdc02525 98 20 0:91 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562 rw,relatime - aufs none rw,si=caafa54f9eb0525 99 20 0:92 / /var/lib/docker/aufs/mnt/2a31f10029f04ff9d4381167a9b739609853d7220d55a56cb654779a700ee246 rw,relatime - aufs none rw,si=caafa54f8c37525 100 20 0:93 / 
/var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927-init rw,relatime - aufs none rw,si=caafa54fd173525 101 20 0:94 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927 rw,relatime - aufs none rw,si=caafa54f8108525 102 20 0:95 / /var/lib/docker/aufs/mnt/eaa0f57403a3dc685268f91df3fbcd7a8423cee50e1a9ee5c3e1688d9d676bb4 rw,relatime - aufs none rw,si=caafa54f852d525 103 20 0:96 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b-init rw,relatime - aufs none rw,si=caafa54f8d80525 104 20 0:97 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b rw,relatime - aufs none rw,si=caafa54f8fc3525 105 20 0:98 / /var/lib/docker/aufs/mnt/d1b322ae17613c6adee84e709641a9244ac56675244a89a64dc0075075fcbb83 rw,relatime - aufs none rw,si=caafa54f8c58525 106 20 0:99 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd-init rw,relatime - aufs none rw,si=caafa54f8c63525 107 20 0:100 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd rw,relatime - aufs none rw,si=caafa54f8c67525 108 20 0:101 / /var/lib/docker/aufs/mnt/bc9d2a264158f83a617a069bf17cbbf2a2ba453db7d3951d9dc63cc1558b1c2b rw,relatime - aufs none rw,si=caafa54f8dbe525 109 20 0:102 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99-init rw,relatime - aufs none rw,si=caafa54f9e0d525 110 20 0:103 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99 rw,relatime - aufs none rw,si=caafa54f9e1b525 111 20 0:104 / /var/lib/docker/aufs/mnt/d4dca7b02569c732e740071e1c654d4ad282de5c41edb619af1f0aafa618be26 rw,relatime - aufs none rw,si=caafa54f8dae525 112 20 0:105 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7-init rw,relatime - aufs none rw,si=caafa54f8c5c525 113 20 0:106 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7 rw,relatime - aufs none rw,si=caafa54fd172525 114 20 0:107 / /var/lib/docker/aufs/mnt/e60c57499c0b198a6734f77f660cdbbd950a5b78aa23f470ca4f0cfcc376abef rw,relatime - aufs none rw,si=caafa54909c4525 115 20 0:108 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35-init rw,relatime - aufs none rw,si=caafa54909c3525 116 20 0:109 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35 rw,relatime - aufs none rw,si=caafa54909c7525 117 20 0:110 / /var/lib/docker/aufs/mnt/2997be666d58b9e71469759bcb8bd9608dad0e533a1a7570a896919ba3388825 rw,relatime - aufs none rw,si=caafa54f8557525 118 20 0:111 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93-init rw,relatime - aufs none rw,si=caafa54c6e88525 119 20 0:112 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93 rw,relatime - aufs none rw,si=caafa54c6e8e525 120 20 0:113 / /var/lib/docker/aufs/mnt/a672a1e2f2f051f6e19ed1dfbe80860a2d774174c49f7c476695f5dd1d5b2f67 rw,relatime - aufs none rw,si=caafa54c6e15525 121 20 0:114 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420-init rw,relatime - aufs none rw,si=caafa54f8dad525 122 20 0:115 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420 rw,relatime - aufs none rw,si=caafa54f8d84525 123 20 0:116 / 
/var/lib/docker/aufs/mnt/2abc86007aca46fb4a817a033e2a05ccacae40b78ea4b03f8ea616b9ada40e2e rw,relatime - aufs none rw,si=caafa54c6e8b525 124 20 0:117 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374-init rw,relatime - aufs none rw,si=caafa54c6e8d525 125 20 0:118 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374 rw,relatime - aufs none rw,si=caafa54f8c34525 126 20 0:119 / /var/lib/docker/aufs/mnt/2f95ca1a629cea8363b829faa727dd52896d5561f2c96ddee4f697ea2fc872c2 rw,relatime - aufs none rw,si=caafa54c6e8a525 127 20 0:120 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2-init rw,relatime - aufs none rw,si=caafa54f8e19525 128 20 0:121 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2 rw,relatime - aufs none rw,si=caafa54fa8c6525 129 20 0:122 / /var/lib/docker/aufs/mnt/c1d04dfdf8cccb3676d5a91e84e9b0781ce40623d127d038bcfbe4c761b27401 rw,relatime - aufs none rw,si=caafa54f8c30525 130 20 0:123 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a-init rw,relatime - aufs none rw,si=caafa54c6e1a525 131 20 0:124 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a rw,relatime - aufs none rw,si=caafa54c6e1c525 132 20 0:125 / /var/lib/docker/aufs/mnt/5ae3b6fccb1539fc02d420e86f3e9637bef5b711fed2ca31a2f426c8f5deddbf rw,relatime - aufs none rw,si=caafa54c4fea525 133 20 0:126 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0-init rw,relatime - aufs none rw,si=caafa54c6e1e525 134 20 0:127 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0 rw,relatime - aufs none rw,si=caafa54fa8c0525 135 20 0:128 / /var/lib/docker/aufs/mnt/f382bd5aaccaf2d04a59089ac7cb12ec87efd769fd0c14d623358fbfd2a3f896 rw,relatime - aufs none rw,si=caafa54c4fec525 136 20 0:129 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735-init rw,relatime - aufs none rw,si=caafa54c4fef525 137 20 0:130 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735 rw,relatime - aufs none rw,si=caafa54c4feb525 138 20 0:131 / /var/lib/docker/aufs/mnt/a9c5ee0854dc083b6bf62b7eb1e5291aefbb10702289a446471ce73aba0d5d7d rw,relatime - aufs none rw,si=caafa54909c6525 139 20 0:134 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0-init rw,relatime - aufs none rw,si=caafa54804fe525 140 20 0:135 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0 rw,relatime - aufs none rw,si=caafa54804fa525 141 20 0:136 / /var/lib/docker/aufs/mnt/7ec3277e5c04c907051caf9c9c35889f5fcd6463e5485971b25404566830bb70 rw,relatime - aufs none rw,si=caafa54804f9525 142 20 0:139 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8-init rw,relatime - aufs none rw,si=caafa54c6ef6525 143 20 0:140 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8 rw,relatime - aufs none rw,si=caafa54c6ef5525 144 20 0:356 / /var/lib/docker/aufs/mnt/e6ecde9e2c18cd3c75f424c67b6d89685cfee0fc67abf2cb6bdc0867eb998026 rw,relatime - aufs none rw,si=caafa548068e525` gentooMountinfo = `15 1 8:6 / / rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered 16 15 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw 17 15 0:14 / /run rw,nosuid,nodev,relatime - tmpfs tmpfs 
rw,size=3292172k,mode=755 18 15 0:5 / /dev rw,nosuid,relatime - devtmpfs udev rw,size=10240k,nr_inodes=4106451,mode=755 19 18 0:12 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw 20 18 0:10 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 21 18 0:15 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw 22 15 0:16 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw 23 22 0:7 / /sys/kernel/debug rw,nosuid,nodev,noexec,relatime - debugfs debugfs rw 24 22 0:17 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs cgroup_root rw,size=10240k,mode=755 25 24 0:18 / /sys/fs/cgroup/openrc rw,nosuid,nodev,noexec,relatime - cgroup openrc rw,release_agent=/lib64/rc/sh/cgroup-release-agent.sh,name=openrc 26 24 0:19 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cpuset rw,cpuset,clone_children 27 24 0:20 / /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cpu rw,cpu,clone_children 28 24 0:21 / /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cpuacct rw,cpuacct,clone_children 29 24 0:22 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup memory rw,memory,clone_children 30 24 0:23 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup devices rw,devices,clone_children 31 24 0:24 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup freezer rw,freezer,clone_children 32 24 0:25 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup blkio rw,blkio,clone_children 33 15 8:1 / /boot rw,noatime,nodiratime - vfat /dev/sda1 rw,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro 34 15 8:18 / /mnt/xfs rw,noatime,nodiratime - xfs /dev/sdb2 rw,attr2,inode64,noquota 35 15 0:26 / /tmp rw,relatime - tmpfs tmpfs rw 36 16 0:27 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw 42 15 0:33 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs rpc_pipefs rw 43 16 0:34 / /proc/fs/nfsd rw,nosuid,nodev,noexec,relatime - nfsd nfsd rw 44 15 0:35 / /home/tianon/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=1000,group_id=1000 68 15 0:3336 / /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd rw,relatime - aufs none rw,si=9b4a7640128db39c 85 68 8:6 /var/lib/docker/init/dockerinit-0.7.2-dev//deleted /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerinit rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered 86 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/config.env /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerenv rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered 87 68 8:6 /etc/resolv.conf /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/resolv.conf rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered 88 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hostname /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hostname rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered 89 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hosts /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hosts rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered 38 15 0:3384 / 
/var/lib/docker/aufs/mnt/0292005a9292401bb5197657f2b682d97d8edcb3b72b5e390d2a680139985b55 rw,relatime - aufs none rw,si=9b4a7642b584939c 39 15 0:3385 / /var/lib/docker/aufs/mnt/59db98c889de5f71b70cfb82c40cbe47b64332f0f56042a2987a9e5df6e5e3aa rw,relatime - aufs none rw,si=9b4a7642b584e39c 40 15 0:3386 / /var/lib/docker/aufs/mnt/0545f0f2b6548eb9601d08f35a08f5a0a385407d36027a28f58e06e9f61e0278 rw,relatime - aufs none rw,si=9b4a7642b584b39c 41 15 0:3387 / /var/lib/docker/aufs/mnt/d882cfa16d1aa8fe0331a36e79be3d80b151e49f24fc39a39c3fed1735d5feb5 rw,relatime - aufs none rw,si=9b4a76453040039c 45 15 0:3388 / /var/lib/docker/aufs/mnt/055ca3befcb1626e74f5344b3398724ff05c0de0e20021683d04305c9e70a3f6 rw,relatime - aufs none rw,si=9b4a76453040739c 46 15 0:3389 / /var/lib/docker/aufs/mnt/b899e4567a351745d4285e7f1c18fdece75d877deb3041981cd290be348b7aa6 rw,relatime - aufs none rw,si=9b4a7647def4039c 47 15 0:3390 / /var/lib/docker/aufs/mnt/067ca040292c58954c5129f953219accfae0d40faca26b4d05e76ca76a998f16 rw,relatime - aufs none rw,si=9b4a7647def4239c 48 15 0:3391 / /var/lib/docker/aufs/mnt/8c995e7cb6e5082742daeea720e340b021d288d25d92e0412c03d200df308a11 rw,relatime - aufs none rw,si=9b4a764479c1639c 49 15 0:3392 / /var/lib/docker/aufs/mnt/07cc54dfae5b45300efdacdd53cc72c01b9044956a86ce7bff42d087e426096d rw,relatime - aufs none rw,si=9b4a764479c1739c 50 15 0:3393 / /var/lib/docker/aufs/mnt/0a9c95cf4c589c05b06baa79150b0cc1d8e7102759fe3ce4afaabb8247ca4f85 rw,relatime - aufs none rw,si=9b4a7644059c839c 51 15 0:3394 / /var/lib/docker/aufs/mnt/468fa98cececcf4e226e8370f18f4f848d63faf287fb8321a07f73086441a3a0 rw,relatime - aufs none rw,si=9b4a7644059ca39c 52 15 0:3395 / /var/lib/docker/aufs/mnt/0b826192231c5ce066fffb5beff4397337b5fc19a377aa7c6282c7c0ce7f111f rw,relatime - aufs none rw,si=9b4a764479c1339c 53 15 0:3396 / /var/lib/docker/aufs/mnt/93b8ba1b772fbe79709b909c43ea4b2c30d712e53548f467db1ffdc7a384f196 rw,relatime - aufs none rw,si=9b4a7640798a739c 54 15 0:3397 / /var/lib/docker/aufs/mnt/0c0d0acfb506859b12ef18cdfef9ebed0b43a611482403564224bde9149d373c rw,relatime - aufs none rw,si=9b4a7640798a039c 55 15 0:3398 / /var/lib/docker/aufs/mnt/33648c39ab6c7c74af0243d6d6a81b052e9e25ad1e04b19892eb2dde013e358b rw,relatime - aufs none rw,si=9b4a7644b439b39c 56 15 0:3399 / /var/lib/docker/aufs/mnt/0c12bea97a1c958a3c739fb148536c1c89351d48e885ecda8f0499b5cc44407e rw,relatime - aufs none rw,si=9b4a7640798a239c 57 15 0:3400 / /var/lib/docker/aufs/mnt/ed443988ce125f172d7512e84a4de2627405990fd767a16adefa8ce700c19ce8 rw,relatime - aufs none rw,si=9b4a7644c8ed339c 59 15 0:3402 / /var/lib/docker/aufs/mnt/f61612c324ff3c924d3f7a82fb00a0f8d8f73c248c41897061949e9f5ab7e3b1 rw,relatime - aufs none rw,si=9b4a76442810c39c 60 15 0:3403 / /var/lib/docker/aufs/mnt/0f1ee55c6c4e25027b80de8e64b8b6fb542b3b41aa0caab9261da75752e22bfd rw,relatime - aufs none rw,si=9b4a76442810e39c 61 15 0:3404 / /var/lib/docker/aufs/mnt/956f6cc4af5785cb3ee6963dcbca668219437d9b28f513290b1453ac64a34f97 rw,relatime - aufs none rw,si=9b4a7644303ec39c 62 15 0:3405 / /var/lib/docker/aufs/mnt/1099769158c4b4773e2569e38024e8717e400f87a002c41d8cf47cb81b051ba6 rw,relatime - aufs none rw,si=9b4a7644303ee39c 63 15 0:3406 / /var/lib/docker/aufs/mnt/11890ceb98d4442595b676085cd7b21550ab85c5df841e0fba997ff54e3d522d rw,relatime - aufs none rw,si=9b4a7644303ed39c 64 15 0:3407 / /var/lib/docker/aufs/mnt/acdb90dc378e8ed2420b43a6d291f1c789a081cd1904018780cc038fcd7aae53 rw,relatime - aufs none rw,si=9b4a76434be2139c 65 15 0:3408 / 
/var/lib/docker/aufs/mnt/120e716f19d4714fbe63cc1ed246204f2c1106eefebc6537ba2587d7e7711959 rw,relatime - aufs none rw,si=9b4a76434be2339c 66 15 0:3409 / /var/lib/docker/aufs/mnt/b197b7fffb61d89e0ba1c40de9a9fc0d912e778b3c1bd828cf981ff37c1963bc rw,relatime - aufs none rw,si=9b4a76434be2039c 70 15 0:3412 / /var/lib/docker/aufs/mnt/1434b69d2e1bb18a9f0b96b9cdac30132b2688f5d1379f68a39a5e120c2f93eb rw,relatime - aufs none rw,si=9b4a76434be2639c 71 15 0:3413 / /var/lib/docker/aufs/mnt/16006e83caf33ab5eb0cd6afc92ea2ee8edeff897496b0bb3ec3a75b767374b3 rw,relatime - aufs none rw,si=9b4a7644d790439c 72 15 0:3414 / /var/lib/docker/aufs/mnt/55bfa5f44e94d27f91f79ba901b118b15098449165c87abf1b53ffff147ff164 rw,relatime - aufs none rw,si=9b4a7644d790239c 73 15 0:3415 / /var/lib/docker/aufs/mnt/1912b97a07ab21ccd98a2a27bc779bf3cf364a3138afa3c3e6f7f169a3c3eab5 rw,relatime - aufs none rw,si=9b4a76441822739c 76 15 0:3418 / /var/lib/docker/aufs/mnt/1a7c3292e8879bd91ffd9282e954f643b1db5683093574c248ff14a9609f2f56 rw,relatime - aufs none rw,si=9b4a76438cb7239c 77 15 0:3419 / /var/lib/docker/aufs/mnt/bb1faaf0d076ddba82c2318305a85f490dafa4e8a8640a8db8ed657c439120cc rw,relatime - aufs none rw,si=9b4a76438cb7339c 78 15 0:3420 / /var/lib/docker/aufs/mnt/1ab869f21d2241a73ac840c7f988490313f909ac642eba71d092204fec66dd7c rw,relatime - aufs none rw,si=9b4a76438cb7639c 79 15 0:3421 / /var/lib/docker/aufs/mnt/fd7245b2cfe3890fa5f5b452260e4edf9e7fb7746532ed9d83f7a0d7dbaa610e rw,relatime - aufs none rw,si=9b4a7644bdc0139c 80 15 0:3422 / /var/lib/docker/aufs/mnt/1e5686c5301f26b9b3cd24e322c608913465cc6c5d0dcd7c5e498d1314747d61 rw,relatime - aufs none rw,si=9b4a7644bdc0639c 81 15 0:3423 / /var/lib/docker/aufs/mnt/52edf6ee6e40bfec1e9301a4d4a92ab83d144e2ae4ce5099e99df6138cb844bf rw,relatime - aufs none rw,si=9b4a7644bdc0239c 82 15 0:3424 / /var/lib/docker/aufs/mnt/1ea10fb7085d28cda4904657dff0454e52598d28e1d77e4f2965bbc3666e808f rw,relatime - aufs none rw,si=9b4a76438cb7139c 83 15 0:3425 / /var/lib/docker/aufs/mnt/9c03e98c3593946dbd4087f8d83f9ca262f4a2efdc952ce60690838b9ba6c526 rw,relatime - aufs none rw,si=9b4a76443020639c 84 15 0:3426 / /var/lib/docker/aufs/mnt/220a2344d67437602c6d2cee9a98c46be13f82c2a8063919dd2fad52bf2fb7dd rw,relatime - aufs none rw,si=9b4a76434bff339c 94 15 0:3427 / /var/lib/docker/aufs/mnt/3b32876c5b200312c50baa476ff342248e88c8ea96e6a1032cd53a88738a1cf2 rw,relatime - aufs none rw,si=9b4a76434bff139c 95 15 0:3428 / /var/lib/docker/aufs/mnt/23ee2b8b0d4ae8db6f6d1e168e2c6f79f8a18f953b09f65e0d22cc1e67a3a6fa rw,relatime - aufs none rw,si=9b4a7646c305c39c 96 15 0:3429 / /var/lib/docker/aufs/mnt/e86e6daa70b61b57945fa178222615f3c3d6bcef12c9f28e9f8623d44dc2d429 rw,relatime - aufs none rw,si=9b4a7646c305f39c 97 15 0:3430 / /var/lib/docker/aufs/mnt/2413d07623e80860bb2e9e306fbdee699afd07525785c025c591231e864aa162 rw,relatime - aufs none rw,si=9b4a76434bff039c 98 15 0:3431 / /var/lib/docker/aufs/mnt/adfd622eb22340fc80b429e5564b125668e260bf9068096c46dd59f1386a4b7d rw,relatime - aufs none rw,si=9b4a7646a7a1039c 102 15 0:3435 / /var/lib/docker/aufs/mnt/27cd92e7a91d02e2d6b44d16679a00fb6d169b19b88822891084e7fd1a84882d rw,relatime - aufs none rw,si=9b4a7646f25ec39c 103 15 0:3436 / /var/lib/docker/aufs/mnt/27dfdaf94cfbf45055c748293c37dd68d9140240bff4c646cb09216015914a88 rw,relatime - aufs none rw,si=9b4a7646732f939c 104 15 0:3437 / /var/lib/docker/aufs/mnt/5ed7524aff68dfbf0fc601cbaeac01bab14391850a973dabf3653282a627920f rw,relatime - aufs none rw,si=9b4a7646732f839c 105 15 0:3438 / 
/var/lib/docker/aufs/mnt/2a0d4767e536beb5785b60e071e3ac8e5e812613ab143a9627bee77d0c9ab062 rw,relatime - aufs none rw,si=9b4a7646732fe39c 106 15 0:3439 / /var/lib/docker/aufs/mnt/dea3fc045d9f4ae51ba952450b948a822cf85c39411489ca5224f6d9a8d02bad rw,relatime - aufs none rw,si=9b4a764012ad839c 107 15 0:3440 / /var/lib/docker/aufs/mnt/2d140a787160798da60cb67c21b1210054ad4dafecdcf832f015995b9aa99cfd rw,relatime - aufs none rw,si=9b4a764012add39c 108 15 0:3441 / /var/lib/docker/aufs/mnt/cb190b2a8e984475914430fbad2382e0d20b9b659f8ef83ae8d170cc672e519c rw,relatime - aufs none rw,si=9b4a76454d9c239c 109 15 0:3442 / /var/lib/docker/aufs/mnt/2f4a012d5a7ffd90256a6e9aa479054b3dddbc3c6a343f26dafbf3196890223b rw,relatime - aufs none rw,si=9b4a76454d9c439c 110 15 0:3443 / /var/lib/docker/aufs/mnt/63cc77904b80c4ffbf49cb974c5d8733dc52ad7640d3ae87554b325d7312d87f rw,relatime - aufs none rw,si=9b4a76454d9c339c 111 15 0:3444 / /var/lib/docker/aufs/mnt/30333e872c451482ea2d235ff2192e875bd234006b238ae2bdde3b91a86d7522 rw,relatime - aufs none rw,si=9b4a76422cebf39c 112 15 0:3445 / /var/lib/docker/aufs/mnt/6c54fc1125da3925cae65b5c9a98f3be55b0a2c2666082e5094a4ba71beb5bff rw,relatime - aufs none rw,si=9b4a7646dd5a439c 113 15 0:3446 / /var/lib/docker/aufs/mnt/3087d48cb01cda9d0a83a9ca301e6ea40e8593d18c4921be4794c91a420ab9a3 rw,relatime - aufs none rw,si=9b4a7646dd5a739c 114 15 0:3447 / /var/lib/docker/aufs/mnt/cc2607462a8f55b179a749b144c3fdbb50678e1a4f3065ea04e283e9b1f1d8e2 rw,relatime - aufs none rw,si=9b4a7646dd5a239c 117 15 0:3450 / /var/lib/docker/aufs/mnt/310c5e8392b29e8658a22e08d96d63936633b7e2c38e8d220047928b00a03d24 rw,relatime - aufs none rw,si=9b4a7647932d739c 118 15 0:3451 / /var/lib/docker/aufs/mnt/38a1f0029406ba9c3b6058f2f406d8a1d23c855046cf355c91d87d446fcc1460 rw,relatime - aufs none rw,si=9b4a76445abc939c 119 15 0:3452 / /var/lib/docker/aufs/mnt/42e109ab7914ae997a11ccd860fd18e4d488c50c044c3240423ce15774b8b62e rw,relatime - aufs none rw,si=9b4a76445abca39c 120 15 0:3453 / /var/lib/docker/aufs/mnt/365d832af0402d052b389c1e9c0d353b48487533d20cd4351df8e24ec4e4f9d8 rw,relatime - aufs none rw,si=9b4a7644066aa39c 121 15 0:3454 / /var/lib/docker/aufs/mnt/d3fa8a24d695b6cda9b64f96188f701963d28bef0473343f8b212df1a2cf1d2b rw,relatime - aufs none rw,si=9b4a7644066af39c 122 15 0:3455 / /var/lib/docker/aufs/mnt/37d4f491919abc49a15d0c7a7cc8383f087573525d7d288accd14f0b4af9eae0 rw,relatime - aufs none rw,si=9b4a7644066ad39c 123 15 0:3456 / /var/lib/docker/aufs/mnt/93902707fe12cbdd0068ce73f2baad4b3a299189b1b19cb5f8a2025e106ae3f5 rw,relatime - aufs none rw,si=9b4a76444445f39c 126 15 0:3459 / /var/lib/docker/aufs/mnt/3b49291670a625b9bbb329ffba99bf7fa7abff80cefef040f8b89e2b3aad4f9f rw,relatime - aufs none rw,si=9b4a7640798a339c 127 15 0:3460 / /var/lib/docker/aufs/mnt/8d9c7b943cc8f854f4d0d4ec19f7c16c13b0cc4f67a41472a072648610cecb59 rw,relatime - aufs none rw,si=9b4a76427383039c 128 15 0:3461 / /var/lib/docker/aufs/mnt/3b6c90036526c376307df71d49c9f5fce334c01b926faa6a78186842de74beac rw,relatime - aufs none rw,si=9b4a7644badd439c 130 15 0:3463 / /var/lib/docker/aufs/mnt/7b24158eeddfb5d31b7e932e406ea4899fd728344335ff8e0765e89ddeb351dd rw,relatime - aufs none rw,si=9b4a7644badd539c 131 15 0:3464 / /var/lib/docker/aufs/mnt/3ead6dd5773765c74850cf6c769f21fe65c29d622ffa712664f9f5b80364ce27 rw,relatime - aufs none rw,si=9b4a7642f469939c 132 15 0:3465 / /var/lib/docker/aufs/mnt/3f825573b29547744a37b65597a9d6d15a8350be4429b7038d126a4c9a8e178f rw,relatime - aufs none rw,si=9b4a7642f469c39c 133 15 0:3466 / 
/var/lib/docker/aufs/mnt/f67aaaeb3681e5dcb99a41f847087370bd1c206680cb8c7b6a9819fd6c97a331 rw,relatime - aufs none rw,si=9b4a7647cc25939c 134 15 0:3467 / /var/lib/docker/aufs/mnt/41afe6cfb3c1fc2280b869db07699da88552786e28793f0bc048a265c01bd942 rw,relatime - aufs none rw,si=9b4a7647cc25c39c 135 15 0:3468 / /var/lib/docker/aufs/mnt/b8092ea59da34a40b120e8718c3ae9fa8436996edc4fc50e4b99c72dfd81e1af rw,relatime - aufs none rw,si=9b4a76445abc439c 136 15 0:3469 / /var/lib/docker/aufs/mnt/42c69d2cc179e2684458bb8596a9da6dad182c08eae9b74d5f0e615b399f75a5 rw,relatime - aufs none rw,si=9b4a76455ddbe39c 137 15 0:3470 / /var/lib/docker/aufs/mnt/ea0871954acd2d62a211ac60e05969622044d4c74597870c4f818fbb0c56b09b rw,relatime - aufs none rw,si=9b4a76455ddbf39c 138 15 0:3471 / /var/lib/docker/aufs/mnt/4307906b275ab3fc971786b3841ae3217ac85b6756ddeb7ad4ba09cd044c2597 rw,relatime - aufs none rw,si=9b4a76455ddb839c 139 15 0:3472 / /var/lib/docker/aufs/mnt/4390b872928c53500a5035634f3421622ed6299dc1472b631fc45de9f56dc180 rw,relatime - aufs none rw,si=9b4a76402f2fd39c 140 15 0:3473 / /var/lib/docker/aufs/mnt/6bb41e78863b85e4aa7da89455314855c8c3bda64e52a583bab15dc1fa2e80c2 rw,relatime - aufs none rw,si=9b4a76402f2fa39c 141 15 0:3474 / /var/lib/docker/aufs/mnt/4444f583c2a79c66608f4673a32c9c812154f027045fbd558c2d69920c53f835 rw,relatime - aufs none rw,si=9b4a764479dbd39c 142 15 0:3475 / /var/lib/docker/aufs/mnt/6f11883af4a05ea362e0c54df89058da4859f977efd07b6f539e1f55c1d2a668 rw,relatime - aufs none rw,si=9b4a76402f30b39c 143 15 0:3476 / /var/lib/docker/aufs/mnt/453490dd32e7c2e9ef906f995d8fb3c2753923d1a5e0ba3fd3296e2e4dc238e7 rw,relatime - aufs none rw,si=9b4a76402f30c39c 144 15 0:3477 / /var/lib/docker/aufs/mnt/45e5945735ee102b5e891c91650c57ec4b52bb53017d68f02d50ea8a6e230610 rw,relatime - aufs none rw,si=9b4a76423260739c 147 15 0:3480 / /var/lib/docker/aufs/mnt/4727a64a5553a1125f315b96bed10d3073d6988225a292cce732617c925b56ab rw,relatime - aufs none rw,si=9b4a76443030339c 150 15 0:3483 / /var/lib/docker/aufs/mnt/4e348b5187b9a567059306afc72d42e0ec5c893b0d4abd547526d5f9b6fb4590 rw,relatime - aufs none rw,si=9b4a7644f5d8c39c 151 15 0:3484 / /var/lib/docker/aufs/mnt/4efc616bfbc3f906718b052da22e4335f8e9f91ee9b15866ed3a8029645189ef rw,relatime - aufs none rw,si=9b4a7644f5d8939c 152 15 0:3485 / /var/lib/docker/aufs/mnt/83e730ae9754d5adb853b64735472d98dfa17136b8812ac9cfcd1eba7f4e7d2d rw,relatime - aufs none rw,si=9b4a76469aa7139c 153 15 0:3486 / /var/lib/docker/aufs/mnt/4fc5ba8a5b333be2b7eefacccb626772eeec0ae8a6975112b56c9fb36c0d342f rw,relatime - aufs none rw,si=9b4a7640128dc39c 154 15 0:3487 / /var/lib/docker/aufs/mnt/50200d5edff5dfe8d1ef3c78b0bbd709793ac6e936aa16d74ff66f7ea577b6f9 rw,relatime - aufs none rw,si=9b4a7640128da39c 155 15 0:3488 / /var/lib/docker/aufs/mnt/51e5e51604361448f0b9777f38329f414bc5ba9cf238f26d465ff479bd574b61 rw,relatime - aufs none rw,si=9b4a76444f68939c 156 15 0:3489 / /var/lib/docker/aufs/mnt/52a142149aa98bba83df8766bbb1c629a97b9799944ead90dd206c4bdf0b8385 rw,relatime - aufs none rw,si=9b4a76444f68b39c 157 15 0:3490 / /var/lib/docker/aufs/mnt/52dd21a94a00f58a1ed489312fcfffb91578089c76c5650364476f1d5de031bc rw,relatime - aufs none rw,si=9b4a76444f68f39c 158 15 0:3491 / /var/lib/docker/aufs/mnt/ee562415ddaad353ed22c88d0ca768a0c74bfba6333b6e25c46849ee22d990da rw,relatime - aufs none rw,si=9b4a7640128d839c 159 15 0:3492 / /var/lib/docker/aufs/mnt/db47a9e87173f7554f550c8a01891de79cf12acdd32e01f95c1a527a08bdfb2c rw,relatime - aufs none rw,si=9b4a764405a1d39c 160 15 0:3493 / 
/var/lib/docker/aufs/mnt/55e827bf6d44d930ec0b827c98356eb8b68c3301e2d60d1429aa72e05b4c17df rw,relatime - aufs none rw,si=9b4a764405a1a39c 162 15 0:3495 / /var/lib/docker/aufs/mnt/578dc4e0a87fc37ec081ca098430499a59639c09f6f12a8f48de29828a091aa6 rw,relatime - aufs none rw,si=9b4a76406d7d439c 163 15 0:3496 / /var/lib/docker/aufs/mnt/728cc1cb04fa4bc6f7bf7a90980beda6d8fc0beb71630874c0747b994efb0798 rw,relatime - aufs none rw,si=9b4a76444f20e39c 164 15 0:3497 / /var/lib/docker/aufs/mnt/5850cc4bd9b55aea46c7ad598f1785117607974084ea643580f58ce3222e683a rw,relatime - aufs none rw,si=9b4a7644a824239c 165 15 0:3498 / /var/lib/docker/aufs/mnt/89443b3f766d5a37bc8b84e29da8b84e6a3ea8486d3cf154e2aae1816516e4a8 rw,relatime - aufs none rw,si=9b4a7644a824139c 166 15 0:3499 / /var/lib/docker/aufs/mnt/f5ae8fd5a41a337907d16515bc3162525154b59c32314c695ecd092c3b47943d rw,relatime - aufs none rw,si=9b4a7644a824439c 167 15 0:3500 / /var/lib/docker/aufs/mnt/5a430854f2a03a9e5f7cbc9f3fb46a8ebca526a5b3f435236d8295e5998798f5 rw,relatime - aufs none rw,si=9b4a7647fc82439c 168 15 0:3501 / /var/lib/docker/aufs/mnt/eda16901ae4cead35070c39845cbf1e10bd6b8cb0ffa7879ae2d8a186e460f91 rw,relatime - aufs none rw,si=9b4a76441e0df39c 169 15 0:3502 / /var/lib/docker/aufs/mnt/5a593721430c2a51b119ff86a7e06ea2b37e3b4131f8f1344d402b61b0c8d868 rw,relatime - aufs none rw,si=9b4a764248bad39c 170 15 0:3503 / /var/lib/docker/aufs/mnt/d662ad0a30fbfa902e0962108685b9330597e1ee2abb16dc9462eb5a67fdd23f rw,relatime - aufs none rw,si=9b4a764248bae39c 171 15 0:3504 / /var/lib/docker/aufs/mnt/5bc9de5c79812843fb36eee96bef1ddba812407861f572e33242f4ee10da2c15 rw,relatime - aufs none rw,si=9b4a764248ba839c 172 15 0:3505 / /var/lib/docker/aufs/mnt/5e763de8e9b0f7d58d2e12a341e029ab4efb3b99788b175090d8209e971156c1 rw,relatime - aufs none rw,si=9b4a764248baa39c 173 15 0:3506 / /var/lib/docker/aufs/mnt/b4431dc2739936f1df6387e337f5a0c99cf051900c896bd7fd46a870ce61c873 rw,relatime - aufs none rw,si=9b4a76401263539c 174 15 0:3507 / /var/lib/docker/aufs/mnt/5f37830e5a02561ab8c67ea3113137ba69f67a60e41c05cb0e7a0edaa1925b24 rw,relatime - aufs none rw,si=9b4a76401263639c 184 15 0:3508 / /var/lib/docker/aufs/mnt/62ea10b957e6533538a4633a1e1d678502f50ddcdd354b2ca275c54dd7a7793a rw,relatime - aufs none rw,si=9b4a76401263039c 187 15 0:3509 / /var/lib/docker/aufs/mnt/d56ee9d44195fe390e042fda75ec15af5132adb6d5c69468fa8792f4e54a6953 rw,relatime - aufs none rw,si=9b4a76401263239c 188 15 0:3510 / /var/lib/docker/aufs/mnt/6a300930673174549c2b62f36c933f0332a20735978c007c805a301f897146c5 rw,relatime - aufs none rw,si=9b4a76455d4c539c 189 15 0:3511 / /var/lib/docker/aufs/mnt/64496c45c84d348c24d410015456d101601c30cab4d1998c395591caf7e57a70 rw,relatime - aufs none rw,si=9b4a76455d4c639c 190 15 0:3512 / /var/lib/docker/aufs/mnt/65a6a645883fe97a7422cd5e71ebe0bc17c8e6302a5361edf52e89747387e908 rw,relatime - aufs none rw,si=9b4a76455d4c039c 191 15 0:3513 / /var/lib/docker/aufs/mnt/672be40695f7b6e13b0a3ed9fc996c73727dede3481f58155950fcfad57ed616 rw,relatime - aufs none rw,si=9b4a76455d4c239c 192 15 0:3514 / /var/lib/docker/aufs/mnt/d42438acb2bfb2169e1c0d8e917fc824f7c85d336dadb0b0af36dfe0f001b3ba rw,relatime - aufs none rw,si=9b4a7642bfded39c 193 15 0:3515 / /var/lib/docker/aufs/mnt/b48a54abf26d01cb2ddd908b1ed6034d17397c1341bf0eb2b251a3e5b79be854 rw,relatime - aufs none rw,si=9b4a7642bfdee39c 194 15 0:3516 / /var/lib/docker/aufs/mnt/76f27134491f052bfb87f59092126e53ef875d6851990e59195a9da16a9412f8 rw,relatime - aufs none rw,si=9b4a7642bfde839c 195 15 0:3517 / 
/var/lib/docker/aufs/mnt/6bd626a5462b4f8a8e1cc7d10351326dca97a59b2758e5ea549a4f6350ce8a90 rw,relatime - aufs none rw,si=9b4a7642bfdea39c 196 15 0:3518 / /var/lib/docker/aufs/mnt/f1fe3549dbd6f5ca615e9139d9b53f0c83a3b825565df37628eacc13e70cbd6d rw,relatime - aufs none rw,si=9b4a7642bfdf539c 197 15 0:3519 / /var/lib/docker/aufs/mnt/6d0458c8426a9e93d58d0625737e6122e725c9408488ed9e3e649a9984e15c34 rw,relatime - aufs none rw,si=9b4a7642bfdf639c 198 15 0:3520 / /var/lib/docker/aufs/mnt/6e4c97db83aa82145c9cf2bafc20d500c0b5389643b689e3ae84188c270a48c5 rw,relatime - aufs none rw,si=9b4a7642bfdf039c 199 15 0:3521 / /var/lib/docker/aufs/mnt/eb94d6498f2c5969eaa9fa11ac2934f1ab90ef88e2d002258dca08e5ba74ea27 rw,relatime - aufs none rw,si=9b4a7642bfdf239c 200 15 0:3522 / /var/lib/docker/aufs/mnt/fe3f88f0c511608a2eec5f13a98703aa16e55dbf930309723d8a37101f539fe1 rw,relatime - aufs none rw,si=9b4a7642bfc3539c 201 15 0:3523 / /var/lib/docker/aufs/mnt/6f40c229fb9cad85fabf4b64a2640a5403ec03fe5ac1a57d0609fb8b606b9c83 rw,relatime - aufs none rw,si=9b4a7642bfc3639c 202 15 0:3524 / /var/lib/docker/aufs/mnt/7513e9131f7a8acf58ff15248237feb767c78732ca46e159f4d791e6ef031dbc rw,relatime - aufs none rw,si=9b4a7642bfc3039c 203 15 0:3525 / /var/lib/docker/aufs/mnt/79f48b00aa713cdf809c6bb7c7cb911b66e9a8076c81d6c9d2504139984ea2da rw,relatime - aufs none rw,si=9b4a7642bfc3239c 204 15 0:3526 / /var/lib/docker/aufs/mnt/c3680418350d11358f0a96c676bc5aa74fa00a7c89e629ef5909d3557b060300 rw,relatime - aufs none rw,si=9b4a7642f47cd39c 205 15 0:3527 / /var/lib/docker/aufs/mnt/7a1744dd350d7fcc0cccb6f1757ca4cbe5453f203a5888b0f1014d96ad5a5ef9 rw,relatime - aufs none rw,si=9b4a7642f47ce39c 206 15 0:3528 / /var/lib/docker/aufs/mnt/7fa99662db046be9f03c33c35251afda9ccdc0085636bbba1d90592cec3ff68d rw,relatime - aufs none rw,si=9b4a7642f47c839c 207 15 0:3529 / /var/lib/docker/aufs/mnt/f815021ef20da9c9b056bd1d52d8aaf6e2c0c19f11122fc793eb2b04eb995e35 rw,relatime - aufs none rw,si=9b4a7642f47ca39c 208 15 0:3530 / /var/lib/docker/aufs/mnt/801086ae3110192d601dfcebdba2db92e86ce6b6a9dba6678ea04488e4513669 rw,relatime - aufs none rw,si=9b4a7642dc6dd39c 209 15 0:3531 / /var/lib/docker/aufs/mnt/822ba7db69f21daddda87c01cfbfbf73013fc03a879daf96d16cdde6f9b1fbd6 rw,relatime - aufs none rw,si=9b4a7642dc6de39c 210 15 0:3532 / /var/lib/docker/aufs/mnt/834227c1a950fef8cae3827489129d0dd220541e60c6b731caaa765bf2e6a199 rw,relatime - aufs none rw,si=9b4a7642dc6d839c 211 15 0:3533 / /var/lib/docker/aufs/mnt/83dccbc385299bd1c7cf19326e791b33a544eea7b4cdfb6db70ea94eed4389fb rw,relatime - aufs none rw,si=9b4a7642dc6da39c 212 15 0:3534 / /var/lib/docker/aufs/mnt/f1b8e6f0e7c8928b5dcdab944db89306ebcae3e0b32f9ff40d2daa8329f21600 rw,relatime - aufs none rw,si=9b4a7645a126039c 213 15 0:3535 / /var/lib/docker/aufs/mnt/970efb262c7a020c2404cbcc5b3259efba0d110a786079faeef05bc2952abf3a rw,relatime - aufs none rw,si=9b4a7644c8ed139c 214 15 0:3536 / /var/lib/docker/aufs/mnt/84b6d73af7450f3117a77e15a5ca1255871fea6182cd8e8a7be6bc744be18c2c rw,relatime - aufs none rw,si=9b4a76406559139c 215 15 0:3537 / /var/lib/docker/aufs/mnt/88be2716e026bc681b5e63fe7942068773efbd0b6e901ca7ba441412006a96b6 rw,relatime - aufs none rw,si=9b4a76406559339c 216 15 0:3538 / /var/lib/docker/aufs/mnt/c81939aa166ce50cd8bca5cfbbcc420a78e0318dd5cd7c755209b9166a00a752 rw,relatime - aufs none rw,si=9b4a76406559239c 217 15 0:3539 / /var/lib/docker/aufs/mnt/e0f241645d64b7dc5ff6a8414087cca226be08fb54ce987d1d1f6350c57083aa rw,relatime - aufs none rw,si=9b4a7647cfc0f39c 218 15 0:3540 / 
/var/lib/docker/aufs/mnt/e10e2bf75234ed51d8a6a4bb39e465404fecbe318e54400d3879cdb2b0679c78 rw,relatime - aufs none rw,si=9b4a7647cfc0939c 219 15 0:3541 / /var/lib/docker/aufs/mnt/8f71d74c8cfc3228b82564aa9f09b2e576cff0083ddfb6aa5cb350346063f080 rw,relatime - aufs none rw,si=9b4a7647cfc0a39c 220 15 0:3542 / /var/lib/docker/aufs/mnt/9159f1eba2aef7f5205cc18d015cda7f5933cd29bba3b1b8aed5ccb5824c69ee rw,relatime - aufs none rw,si=9b4a76468cedd39c 221 15 0:3543 / /var/lib/docker/aufs/mnt/932cad71e652e048e500d9fbb5b8ea4fc9a269d42a3134ce527ceef42a2be56b rw,relatime - aufs none rw,si=9b4a76468cede39c 222 15 0:3544 / /var/lib/docker/aufs/mnt/bf1e1b5f529e8943cc0144ee86dbaaa37885c1ddffcef29537e0078ee7dd316a rw,relatime - aufs none rw,si=9b4a76468ced839c 223 15 0:3545 / /var/lib/docker/aufs/mnt/949d93ecf3322e09f858ce81d5f4b434068ec44ff84c375de03104f7b45ee955 rw,relatime - aufs none rw,si=9b4a76468ceda39c 224 15 0:3546 / /var/lib/docker/aufs/mnt/d65c6087f92dc2a3841b5251d2fe9ca07d4c6e5b021597692479740816e4e2a1 rw,relatime - aufs none rw,si=9b4a7645a126239c 225 15 0:3547 / /var/lib/docker/aufs/mnt/98a0153119d0651c193d053d254f6e16a68345a141baa80c87ae487e9d33f290 rw,relatime - aufs none rw,si=9b4a7640787cf39c 226 15 0:3548 / /var/lib/docker/aufs/mnt/99daf7fe5847c017392f6e59aa9706b3dfdd9e6d1ba11dae0f7fffde0a60b5e5 rw,relatime - aufs none rw,si=9b4a7640787c839c 227 15 0:3549 / /var/lib/docker/aufs/mnt/9ad1f2fe8a5599d4e10c5a6effa7f03d932d4e92ee13149031a372087a359079 rw,relatime - aufs none rw,si=9b4a7640787ca39c 228 15 0:3550 / /var/lib/docker/aufs/mnt/c26d64494da782ddac26f8370d86ac93e7c1666d88a7b99110fc86b35ea6a85d rw,relatime - aufs none rw,si=9b4a7642fc6b539c 229 15 0:3551 / /var/lib/docker/aufs/mnt/a49e4a8275133c230ec640997f35f172312eb0ea5bd2bbe10abf34aae98f30eb rw,relatime - aufs none rw,si=9b4a7642fc6b639c 230 15 0:3552 / /var/lib/docker/aufs/mnt/b5e2740c867ed843025f49d84e8d769de9e8e6039b3c8cb0735b5bf358994bc7 rw,relatime - aufs none rw,si=9b4a7642fc6b039c 231 15 0:3553 / /var/lib/docker/aufs/mnt/a826fdcf3a7039b30570054579b65763db605a314275d7aef31b872c13311b4b rw,relatime - aufs none rw,si=9b4a7642fc6b239c 232 15 0:3554 / /var/lib/docker/aufs/mnt/addf3025babf5e43b5a3f4a0da7ad863dda3c01fb8365c58fd8d28bb61dc11bc rw,relatime - aufs none rw,si=9b4a76407871d39c 233 15 0:3555 / /var/lib/docker/aufs/mnt/c5b6c6813ab3e5ebdc6d22cb2a3d3106a62095f2c298be52b07a3b0fa20ff690 rw,relatime - aufs none rw,si=9b4a76407871e39c 234 15 0:3556 / /var/lib/docker/aufs/mnt/af0609eaaf64e2392060cb46f5a9f3d681a219bb4c651d4f015bf573fbe6c4cf rw,relatime - aufs none rw,si=9b4a76407871839c 235 15 0:3557 / /var/lib/docker/aufs/mnt/e7f20e3c37ecad39cd90a97cd3549466d0d106ce4f0a930b8495442634fa4a1f rw,relatime - aufs none rw,si=9b4a76407871a39c 237 15 0:3559 / /var/lib/docker/aufs/mnt/b57a53d440ffd0c1295804fa68cdde35d2fed5409484627e71b9c37e4249fd5c rw,relatime - aufs none rw,si=9b4a76444445a39c 238 15 0:3560 / /var/lib/docker/aufs/mnt/b5e7d7b8f35e47efbba3d80c5d722f5e7bd43e54c824e54b4a4b351714d36d42 rw,relatime - aufs none rw,si=9b4a7647932d439c 239 15 0:3561 / /var/lib/docker/aufs/mnt/f1b136def157e9465640658f277f3347de593c6ae76412a2e79f7002f091cae2 rw,relatime - aufs none rw,si=9b4a76445abcd39c 240 15 0:3562 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=9b4a7644403b339c 241 15 0:3563 / /var/lib/docker/aufs/mnt/b89b140cdbc95063761864e0a23346207fa27ee4c5c63a1ae85c9069a9d9cf1d rw,relatime - aufs none rw,si=9b4a7644aa19739c 242 15 0:3564 / 
/var/lib/docker/aufs/mnt/bc6a69ed51c07f5228f6b4f161c892e6a949c0e7e86a9c3432049d4c0e5cd298 rw,relatime - aufs none rw,si=9b4a7644aa19139c 243 15 0:3565 / /var/lib/docker/aufs/mnt/be4e2ba3f136933e239f7cf3d136f484fb9004f1fbdfee24a62a2c7b0ab30670 rw,relatime - aufs none rw,si=9b4a7644aa19339c 244 15 0:3566 / /var/lib/docker/aufs/mnt/e04ca1a4a5171e30d20f0c92f90a50b8b6f8600af5459c4b4fb25e42e864dfe1 rw,relatime - aufs none rw,si=9b4a7647932d139c 245 15 0:3567 / /var/lib/docker/aufs/mnt/be61576b31db893129aaffcd3dcb5ce35e49c4b71b30c392a78609a45c7323d8 rw,relatime - aufs none rw,si=9b4a7642d85f739c 246 15 0:3568 / /var/lib/docker/aufs/mnt/dda42c191e56becf672327658ab84fcb563322db3764b91c2fefe4aaef04c624 rw,relatime - aufs none rw,si=9b4a7642d85f139c 247 15 0:3569 / /var/lib/docker/aufs/mnt/c0a7995053330f3d88969247a2e72b07e2dd692133f5668a4a35ea3905561072 rw,relatime - aufs none rw,si=9b4a7642d85f339c 249 15 0:3571 / /var/lib/docker/aufs/mnt/c3594b2e5f08c59ff5ed338a1ba1eceeeb1f7fc5d180068338110c00b1eb8502 rw,relatime - aufs none rw,si=9b4a7642738c739c 250 15 0:3572 / /var/lib/docker/aufs/mnt/c58dce03a0ab0a7588393880379dc3bce9f96ec08ed3f99cf1555260ff0031e8 rw,relatime - aufs none rw,si=9b4a7642738c139c 251 15 0:3573 / /var/lib/docker/aufs/mnt/c73e9f1d109c9d14cb36e1c7489df85649be3911116d76c2fd3648ec8fd94e23 rw,relatime - aufs none rw,si=9b4a7642738c339c 252 15 0:3574 / /var/lib/docker/aufs/mnt/c9eef28c344877cd68aa09e543c0710ab2b305a0ff96dbb859bfa7808c3e8d01 rw,relatime - aufs none rw,si=9b4a7642d85f439c 253 15 0:3575 / /var/lib/docker/aufs/mnt/feb67148f548d70cb7484f2aaad2a86051cd6867a561741a2f13b552457d666e rw,relatime - aufs none rw,si=9b4a76468c55739c 254 15 0:3576 / /var/lib/docker/aufs/mnt/cdf1f96c36d35a96041a896bf398ec0f7dc3b0fb0643612a0f4b6ff96e04e1bb rw,relatime - aufs none rw,si=9b4a76468c55139c 255 15 0:3577 / /var/lib/docker/aufs/mnt/ec6e505872353268451ac4bc034c1df00f3bae4a3ea2261c6e48f7bd5417c1b3 rw,relatime - aufs none rw,si=9b4a76468c55339c 256 15 0:3578 / /var/lib/docker/aufs/mnt/d6dc8aca64efd90e0bc10274001882d0efb310d42ccbf5712b99b169053b8b1a rw,relatime - aufs none rw,si=9b4a7642738c439c 257 15 0:3579 / /var/lib/docker/aufs/mnt/d712594e2ff6eaeb895bfd150d694bd1305fb927e7a186b2dab7df2ea95f8f81 rw,relatime - aufs none rw,si=9b4a76401268f39c 259 15 0:3581 / /var/lib/docker/aufs/mnt/dbfa1174cd78cde2d7410eae442af0b416c4a0e6f87ed4ff1e9f169a0029abc0 rw,relatime - aufs none rw,si=9b4a76401268b39c 260 15 0:3582 / /var/lib/docker/aufs/mnt/e883f5a82316d7856fbe93ee8c0af5a920b7079619dd95c4ffd88bbd309d28dd rw,relatime - aufs none rw,si=9b4a76468c55439c 261 15 0:3583 / /var/lib/docker/aufs/mnt/fdec3eff581c4fc2b09f87befa2fa021f3f2d373bea636a87f1fb5b367d6347a rw,relatime - aufs none rw,si=9b4a7644aa1af39c 262 15 0:3584 / /var/lib/docker/aufs/mnt/ef764e26712184653067ecf7afea18a80854c41331ca0f0ef03e1bacf90a6ffc rw,relatime - aufs none rw,si=9b4a7644aa1a939c 263 15 0:3585 / /var/lib/docker/aufs/mnt/f3176b40c41fce8ce6942936359a2001a6f1b5c1bb40ee224186db0789ec2f76 rw,relatime - aufs none rw,si=9b4a7644aa1ab39c 264 15 0:3586 / /var/lib/docker/aufs/mnt/f5daf06785d3565c6dd18ea7d953d9a8b9606107781e63270fe0514508736e6a rw,relatime - aufs none rw,si=9b4a76401268c39c 58 15 0:3587 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8-init rw,relatime - aufs none rw,si=9b4a76444445839c 67 15 0:3588 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8 rw,relatime - aufs none rw,si=9b4a7644badd339c 265 15 0:3610 / 
/var/lib/docker/aufs/mnt/e812472cd2c8c4748d1ef71fac4e77e50d661b9349abe66ce3e23511ed44f414 rw,relatime - aufs none rw,si=9b4a76427937d39c 270 15 0:3615 / /var/lib/docker/aufs/mnt/997636e7c5c9d0d1376a217e295c14c205350b62bc12052804fb5f90abe6f183 rw,relatime - aufs none rw,si=9b4a76406540739c 273 15 0:3618 / /var/lib/docker/aufs/mnt/d5794d080417b6e52e69227c3873e0e4c1ff0d5a845ebe3860ec2f89a47a2a1e rw,relatime - aufs none rw,si=9b4a76454814039c 278 15 0:3623 / /var/lib/docker/aufs/mnt/586bdd48baced671bb19bc4d294ec325f26c55545ae267db426424f157d59c48 rw,relatime - aufs none rw,si=9b4a7644b439f39c 281 15 0:3626 / /var/lib/docker/aufs/mnt/69739d022f89f8586908bbd5edbbdd95ea5256356f177f9ffcc6ef9c0ea752d2 rw,relatime - aufs none rw,si=9b4a7644a0f1b39c 286 15 0:3631 / /var/lib/docker/aufs/mnt/ff28c27d5f894363993622de26d5dd352dba072f219e4691d6498c19bbbc15a9 rw,relatime - aufs none rw,si=9b4a7642265b339c 289 15 0:3634 / /var/lib/docker/aufs/mnt/aa128fe0e64fdede333aa48fd9de39530c91a9244a0f0649a3c411c61e372daa rw,relatime - aufs none rw,si=9b4a764012ada39c 99 15 8:33 / /media/REMOVE\040ME rw,nosuid,nodev,relatime - fuseblk /dev/sdc1 rw,user_id=0,group_id=0,allow_other,blksize=4096` ) func TestParseFedoraMountinfo(t *testing.T) { r := bytes.NewBuffer([]byte(fedoraMountinfo)) _, err := parseInfoFile(r) if err != nil { t.Fatal(err) } } func TestParseUbuntuMountinfo(t *testing.T) { r := bytes.NewBuffer([]byte(ubuntuMountInfo)) _, err := parseInfoFile(r) if err != nil { t.Fatal(err) } } func TestParseGentooMountinfo(t *testing.T) { r := bytes.NewBuffer([]byte(gentooMountinfo)) _, err := parseInfoFile(r) if err != nil { t.Fatal(err) } } docker-0.9.1/pkg/mount/mount.go0000644000175000017500000000311312314376205014515 0ustar tagtagpackage mount import ( "time" ) func GetMounts() ([]*MountInfo, error) { return parseMountTable() } // Looks at /proc/self/mountinfo to determine if the specified // mountpoint has been mounted func Mounted(mountpoint string) (bool, error) { entries, err := parseMountTable() if err != nil { return false, err } // Search the table for the mountpoint for _, e := range entries { if e.Mountpoint == mountpoint { return true, nil } } return false, nil } // Mount the specified options at the target path only if // the target is not mounted // Options must be specified in fstab style func Mount(device, target, mType, options string) error { if mounted, err := Mounted(target); err != nil || mounted { return err } return ForceMount(device, target, mType, options) } // Mount the specified options at the target path // regardless of whether the target is mounted or not // Options must be specified in fstab style func ForceMount(device, target, mType, options string) error { flag, data := parseOptions(options) if err := mount(device, target, mType, uintptr(flag), data); err != nil { return err } return nil } // Unmount the target only if it is mounted func Unmount(target string) error { if mounted, err := Mounted(target); err != nil || !mounted { return err } return ForceUnmount(target) } // Unmount the target regardless of whether it is mounted or not func ForceUnmount(target string) (err error) { // Simple retry logic for unmount for i := 0; i < 10; i++ { if err = unmount(target, 0); err == nil { return nil } time.Sleep(100 * time.Millisecond) } return } docker-0.9.1/pkg/mount/mount_test.go0000644000175000017500000000374512314376205015567 0ustar tagtagpackage mount import ( "os" "path" "syscall" "testing" ) func TestMountOptionsParsing(t *testing.T) { options := "bind,ro,size=10k" flag, data := 
parseOptions(options) if data != "size=10k" { t.Fatalf("Expected size=10 got %s", data) } expectedFlag := syscall.MS_BIND | syscall.MS_RDONLY if flag != expectedFlag { t.Fatalf("Expected %d got %d", expectedFlag, flag) } } func TestMounted(t *testing.T) { tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) var ( sourcePath = path.Join(tmp, "sourcefile.txt") targetPath = path.Join(tmp, "targetfile.txt") ) f, err := os.Create(sourcePath) if err != nil { t.Fatal(err) } f.WriteString("hello") f.Close() f, err = os.Create(targetPath) if err != nil { t.Fatal(err) } f.Close() if err := Mount(sourcePath, targetPath, "none", "bind,rw"); err != nil { t.Fatal(err) } defer func() { if err := Unmount(targetPath); err != nil { t.Fatal(err) } }() mounted, err := Mounted(targetPath) if err != nil { t.Fatal(err) } if !mounted { t.Fatalf("Expected %s to be mounted", targetPath) } if _, err := os.Stat(targetPath); err != nil { t.Fatal(err) } } func TestMountReadonly(t *testing.T) { tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) var ( sourcePath = path.Join(tmp, "sourcefile.txt") targetPath = path.Join(tmp, "targetfile.txt") ) f, err := os.Create(sourcePath) if err != nil { t.Fatal(err) } f.WriteString("hello") f.Close() f, err = os.Create(targetPath) if err != nil { t.Fatal(err) } f.Close() if err := Mount(sourcePath, targetPath, "none", "bind,ro"); err != nil { t.Fatal(err) } defer func() { if err := Unmount(targetPath); err != nil { t.Fatal(err) } }() f, err = os.OpenFile(targetPath, os.O_RDWR, 0777) if err == nil { t.Fatal("Should not be able to open a ro file as rw") } } docker-0.9.1/pkg/cgroups/0000755000175000017500000000000012314376205013346 5ustar tagtagdocker-0.9.1/pkg/cgroups/cgroups_test.go0000644000175000017500000000063712314376205016424 0ustar tagtagpackage cgroups import ( "bytes" "testing" ) const ( cgroupsContents = `11:hugetlb:/ 10:perf_event:/ 9:blkio:/ 8:net_cls:/ 7:freezer:/ 6:devices:/ 5:memory:/ 4:cpuacct,cpu:/ 3:cpuset:/ 2:name=systemd:/user.slice/user-1000.slice/session-16.scope` ) func TestParseCgroups(t *testing.T) { r := bytes.NewBuffer([]byte(cgroupsContents)) _, err := parseCgroupFile("blkio", r) if err != nil { t.Fatal(err) } } docker-0.9.1/pkg/cgroups/cgroups.go0000644000175000017500000001335612314376205015367 0ustar tagtagpackage cgroups import ( "bufio" "fmt" "github.com/dotcloud/docker/pkg/mount" "io" "io/ioutil" "os" "path/filepath" "strconv" "strings" ) type Cgroup struct { Name string `json:"name,omitempty"` Parent string `json:"parent,omitempty"` DeviceAccess bool `json:"device_access,omitempty"` // name of parent cgroup or slice Memory int64 `json:"memory,omitempty"` // Memory limit (in bytes) MemorySwap int64 `json:"memory_swap,omitempty"` // Total memory usage (memory + swap); set `-1' to disable swap CpuShares int64 `json:"cpu_shares,omitempty"` // CPU shares (relative weight vs. other containers) } // https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt func FindCgroupMountpoint(subsystem string) (string, error) { mounts, err := mount.GetMounts() if err != nil { return "", err } for _, mount := range mounts { if mount.Fstype == "cgroup" { for _, opt := range strings.Split(mount.VfsOpts, ",") { if opt == subsystem { return mount.Mountpoint, nil } } } } return "", fmt.Errorf("cgroup mountpoint not found for %s", subsystem) } // Returns the relative path to the cgroup docker is running in. 
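// For example (an illustrative sketch, not part of the original source), the relative
// path returned here can be joined with the subsystem mountpoint from
// FindCgroupMountpoint to obtain the absolute cgroup directory of the current process:
//
//	mnt, _ := FindCgroupMountpoint("memory") // e.g. "/sys/fs/cgroup/memory"
//	rel, _ := GetThisCgroupDir("memory")     // e.g. "/docker/<container-id>" (hypothetical value)
//	dir := filepath.Join(mnt, rel)           // absolute path to this process's memory cgroup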
func GetThisCgroupDir(subsystem string) (string, error) { f, err := os.Open("/proc/self/cgroup") if err != nil { return "", err } defer f.Close() return parseCgroupFile(subsystem, f) } func GetInitCgroupDir(subsystem string) (string, error) { f, err := os.Open("/proc/1/cgroup") if err != nil { return "", err } defer f.Close() return parseCgroupFile(subsystem, f) } func (c *Cgroup) Path(root, subsystem string) (string, error) { cgroup := c.Name if c.Parent != "" { cgroup = filepath.Join(c.Parent, cgroup) } initPath, err := GetInitCgroupDir(subsystem) if err != nil { return "", err } return filepath.Join(root, subsystem, initPath, cgroup), nil } func (c *Cgroup) Join(root, subsystem string, pid int) (string, error) { path, err := c.Path(root, subsystem) if err != nil { return "", err } if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { return "", err } if err := writeFile(path, "tasks", strconv.Itoa(pid)); err != nil { return "", err } return path, nil } func (c *Cgroup) Cleanup(root string) error { get := func(subsystem string) string { path, _ := c.Path(root, subsystem) return path } for _, path := range []string{ get("memory"), get("devices"), get("cpu"), } { os.RemoveAll(path) } return nil } func parseCgroupFile(subsystem string, r io.Reader) (string, error) { s := bufio.NewScanner(r) for s.Scan() { if err := s.Err(); err != nil { return "", err } text := s.Text() parts := strings.Split(text, ":") for _, subs := range strings.Split(parts[1], ",") { if subs == subsystem { return parts[2], nil } } } return "", fmt.Errorf("cgroup '%s' not found in /proc/self/cgroup", subsystem) } func writeFile(dir, file, data string) error { return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700) } func (c *Cgroup) Apply(pid int) error { // We have two implementations of cgroups support, one is based on // systemd and the dbus api, and one is based on raw cgroup fs operations // following the pre-single-writer model docs at: // http://www.freedesktop.org/wiki/Software/systemd/PaxControlGroups/ // // we can pick any subsystem to find the root cgroupRoot, err := FindCgroupMountpoint("cpu") if err != nil { return err } cgroupRoot = filepath.Dir(cgroupRoot) if _, err := os.Stat(cgroupRoot); err != nil { return fmt.Errorf("cgroups fs not found") } if err := c.setupDevices(cgroupRoot, pid); err != nil { return err } if err := c.setupMemory(cgroupRoot, pid); err != nil { return err } if err := c.setupCpu(cgroupRoot, pid); err != nil { return err } return nil } func (c *Cgroup) setupDevices(cgroupRoot string, pid int) (err error) { if !c.DeviceAccess { dir, err := c.Join(cgroupRoot, "devices", pid) if err != nil { return err } defer func() { if err != nil { os.RemoveAll(dir) } }() if err := writeFile(dir, "devices.deny", "a"); err != nil { return err } allow := []string{ // /dev/null, zero, full "c 1:3 rwm", "c 1:5 rwm", "c 1:7 rwm", // consoles "c 5:1 rwm", "c 5:0 rwm", "c 4:0 rwm", "c 4:1 rwm", // /dev/urandom,/dev/random "c 1:9 rwm", "c 1:8 rwm", // /dev/pts/ - pts namespaces are "coming soon" "c 136:* rwm", "c 5:2 rwm", // tuntap "c 10:200 rwm", } for _, val := range allow { if err := writeFile(dir, "devices.allow", val); err != nil { return err } } } return nil } func (c *Cgroup) setupMemory(cgroupRoot string, pid int) (err error) { if c.Memory != 0 || c.MemorySwap != 0 { dir, err := c.Join(cgroupRoot, "memory", pid) if err != nil { return err } defer func() { if err != nil { os.RemoveAll(dir) } }() if c.Memory != 0 { if err := writeFile(dir, "memory.limit_in_bytes", 
strconv.FormatInt(c.Memory, 10)); err != nil { return err } if err := writeFile(dir, "memory.soft_limit_in_bytes", strconv.FormatInt(c.Memory, 10)); err != nil { return err } } // By default, MemorySwap is set to twice the size of RAM. // If you want to omit MemorySwap, set it to `-1'. if c.MemorySwap != -1 { if err := writeFile(dir, "memory.memsw.limit_in_bytes", strconv.FormatInt(c.Memory*2, 10)); err != nil { return err } } } return nil } func (c *Cgroup) setupCpu(cgroupRoot string, pid int) (err error) { // We always want to join the cpu group, to allow fair cpu scheduling // on a container basis dir, err := c.Join(cgroupRoot, "cpu", pid) if err != nil { return err } if c.CpuShares != 0 { if err := writeFile(dir, "cpu.shares", strconv.FormatInt(c.CpuShares, 10)); err != nil { return err } } return nil } docker-0.9.1/pkg/cgroups/MAINTAINERS0000644000175000017500000000007412314376205015044 0ustar tagtagMichael Crosby (@crosbymichael) docker-0.9.1/pkg/user/0000755000175000017500000000000012314376205012642 5ustar tagtagdocker-0.9.1/pkg/user/user_test.go0000644000175000017500000000512212314376205015206 0ustar tagtagpackage user import ( "strings" "testing" ) func TestUserParseLine(t *testing.T) { var ( a, b string c []string d int ) parseLine("", &a, &b) if a != "" || b != "" { t.Fatalf("a and b should be empty ('%v', '%v')", a, b) } parseLine("a", &a, &b) if a != "a" || b != "" { t.Fatalf("a should be 'a' and b should be empty ('%v', '%v')", a, b) } parseLine("bad boys:corny cows", &a, &b) if a != "bad boys" || b != "corny cows" { t.Fatalf("a should be 'bad boys' and b should be 'corny cows' ('%v', '%v')", a, b) } parseLine("", &c) if len(c) != 0 { t.Fatalf("c should be empty (%#v)", c) } parseLine("d,e,f:g:h:i,j,k", &c, &a, &b, &c) if a != "g" || b != "h" || len(c) != 3 || c[0] != "i" || c[1] != "j" || c[2] != "k" { t.Fatalf("a should be 'g', b should be 'h', and c should be ['i','j','k'] ('%v', '%v', '%#v')", a, b, c) } parseLine("::::::::::", &a, &b, &c) if a != "" || b != "" || len(c) != 0 { t.Fatalf("a, b, and c should all be empty ('%v', '%v', '%#v')", a, b, c) } parseLine("not a number", &d) if d != 0 { t.Fatalf("d should be 0 (%v)", d) } parseLine("b:12:c", &a, &d, &b) if a != "b" || b != "c" || d != 12 { t.Fatalf("a should be 'b' and b should be 'c', and d should be 12 ('%v', '%v', %v)", a, b, d) } } func TestUserParsePasswd(t *testing.T) { users, err := parsePasswdFile(strings.NewReader(` root:x:0:0:root:/root:/bin/bash adm:x:3:4:adm:/var/adm:/bin/false this is just some garbage data `), nil) if err != nil { t.Fatalf("Unexpected error: %v", err) } if len(users) != 3 { t.Fatalf("Expected 3 users, got %v", len(users)) } if users[0].Uid != 0 || users[0].Name != "root" { t.Fatalf("Expected users[0] to be 0 - root, got %v - %v", users[0].Uid, users[0].Name) } if users[1].Uid != 3 || users[1].Name != "adm" { t.Fatalf("Expected users[1] to be 3 - adm, got %v - %v", users[1].Uid, users[1].Name) } } func TestUserParseGroup(t *testing.T) { groups, err := parseGroupFile(strings.NewReader(` root:x:0:root adm:x:4:root,adm,daemon this is just some garbage data `), nil) if err != nil { t.Fatalf("Unexpected error: %v", err) } if len(groups) != 3 { t.Fatalf("Expected 3 groups, got %v", len(groups)) } if groups[0].Gid != 0 || groups[0].Name != "root" || len(groups[0].List) != 1 { t.Fatalf("Expected groups[0] to be 0 - root - 1 member, got %v - %v - %v", groups[0].Gid, groups[0].Name, len(groups[0].List)) } if groups[1].Gid != 4 || groups[1].Name != "adm" || len(groups[1].List) != 3 { 
t.Fatalf("Expected groups[1] to be 4 - adm - 3 members, got %v - %v - %v", groups[1].Gid, groups[1].Name, len(groups[1].List)) } } docker-0.9.1/pkg/user/MAINTAINERS0000644000175000017500000000005512314376205014337 0ustar tagtagTianon Gravi (@tianon) docker-0.9.1/pkg/user/user.go0000644000175000017500000001264412314376205014156 0ustar tagtagpackage user import ( "bufio" "fmt" "io" "os" "strconv" "strings" ) type User struct { Name string Pass string Uid int Gid int Gecos string Home string Shell string } type Group struct { Name string Pass string Gid int List []string } func parseLine(line string, v ...interface{}) { if line == "" { return } parts := strings.Split(line, ":") for i, p := range parts { if len(v) <= i { // if we have more "parts" than we have places to put them, bail for great "tolerance" of naughty configuration files break } switch e := v[i].(type) { case *string: // "root", "adm", "/bin/bash" *e = p case *int: // "0", "4", "1000" // ignore string to int conversion errors, for great "tolerance" of naughty configuration files *e, _ = strconv.Atoi(p) case *[]string: // "", "root", "root,adm,daemon" if p != "" { *e = strings.Split(p, ",") } else { *e = []string{} } default: // panic, because this is a programming/logic error, not a runtime one panic("parseLine expects only pointers! argument " + strconv.Itoa(i) + " is not a pointer!") } } } func ParsePasswd() ([]*User, error) { return ParsePasswdFilter(nil) } func ParsePasswdFilter(filter func(*User) bool) ([]*User, error) { f, err := os.Open("/etc/passwd") if err != nil { return nil, err } defer f.Close() return parsePasswdFile(f, filter) } func parsePasswdFile(r io.Reader, filter func(*User) bool) ([]*User, error) { var ( s = bufio.NewScanner(r) out = []*User{} ) for s.Scan() { if err := s.Err(); err != nil { return nil, err } text := strings.TrimSpace(s.Text()) if text == "" { continue } // see: man 5 passwd // name:password:UID:GID:GECOS:directory:shell // Name:Pass:Uid:Gid:Gecos:Home:Shell // root:x:0:0:root:/root:/bin/bash // adm:x:3:4:adm:/var/adm:/bin/false p := &User{} parseLine( text, &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell, ) if filter == nil || filter(p) { out = append(out, p) } } return out, nil } func ParseGroup() ([]*Group, error) { return ParseGroupFilter(nil) } func ParseGroupFilter(filter func(*Group) bool) ([]*Group, error) { f, err := os.Open("/etc/group") if err != nil { return nil, err } defer f.Close() return parseGroupFile(f, filter) } func parseGroupFile(r io.Reader, filter func(*Group) bool) ([]*Group, error) { var ( s = bufio.NewScanner(r) out = []*Group{} ) for s.Scan() { if err := s.Err(); err != nil { return nil, err } text := s.Text() if text == "" { continue } // see: man 5 group // group_name:password:GID:user_list // Name:Pass:Gid:List // root:x:0:root // adm:x:4:root,adm,daemon p := &Group{} parseLine( text, &p.Name, &p.Pass, &p.Gid, &p.List, ) if filter == nil || filter(p) { out = append(out, p) } } return out, nil } // Given a string like "user", "1000", "user:group", "1000:1000", returns the uid, gid, and list of supplementary group IDs, if possible. 
func GetUserGroupSupplementary(userSpec string, defaultUid int, defaultGid int) (int, int, []int, error) { var ( uid = defaultUid gid = defaultGid suppGids = []int{} userArg, groupArg string ) // allow for userArg to have either "user" syntax, or optionally "user:group" syntax parseLine(userSpec, &userArg, &groupArg) users, err := ParsePasswdFilter(func(u *User) bool { if userArg == "" { return u.Uid == uid } return u.Name == userArg || strconv.Itoa(u.Uid) == userArg }) if err != nil && !os.IsNotExist(err) { if userArg == "" { userArg = strconv.Itoa(uid) } return 0, 0, nil, fmt.Errorf("Unable to find user %v: %v", userArg, err) } haveUser := users != nil && len(users) > 0 if haveUser { // if we found any user entries that matched our filter, let's take the first one as "correct" uid = users[0].Uid gid = users[0].Gid } else if userArg != "" { // we asked for a user but didn't find them... let's check to see if we wanted a numeric user uid, err = strconv.Atoi(userArg) if err != nil { // not numeric - we have to bail return 0, 0, nil, fmt.Errorf("Unable to find user %v", userArg) } // if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit } if groupArg != "" || (haveUser && users[0].Name != "") { groups, err := ParseGroupFilter(func(g *Group) bool { if groupArg != "" { return g.Name == groupArg || strconv.Itoa(g.Gid) == groupArg } for _, u := range g.List { if u == users[0].Name { return true } } return false }) if err != nil && !os.IsNotExist(err) { return 0, 0, nil, fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err) } haveGroup := groups != nil && len(groups) > 0 if groupArg != "" { if haveGroup { // if we found any group entries that matched our filter, let's take the first one as "correct" gid = groups[0].Gid } else { // we asked for a group but didn't find id... let's check to see if we wanted a numeric group gid, err = strconv.Atoi(groupArg) if err != nil { // not numeric - we have to bail return 0, 0, nil, fmt.Errorf("Unable to find group %v", groupArg) } // if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit } } else if haveGroup { suppGids = make([]int, len(groups)) for i, group := range groups { suppGids[i] = group.Gid } } } return uid, gid, suppGids, nil } docker-0.9.1/pkg/namesgenerator/0000755000175000017500000000000012314376205014676 5ustar tagtagdocker-0.9.1/pkg/namesgenerator/names-generator_test.go0000644000175000017500000000216212314376205021354 0ustar tagtagpackage namesgenerator import ( "testing" ) type FalseChecker struct{} func (n *FalseChecker) Exists(name string) bool { return false } type TrueChecker struct{} func (n *TrueChecker) Exists(name string) bool { return true } func TestGenerateRandomName(t *testing.T) { if _, err := GenerateRandomName(&FalseChecker{}); err != nil { t.Error(err) } if _, err := GenerateRandomName(&TrueChecker{}); err == nil { t.Error("An error was expected") } } // Make sure the generated names are awesome func TestGenerateAwesomeNames(t *testing.T) { name, err := GenerateRandomName(&FalseChecker{}) if err != nil { t.Error(err) } if !isAwesome(name) { t.Fatalf("Generated name '%s' is not awesome.", name) } } // To be awesome, a container name must involve cool inventors, be easy to remember, // be at least mildly funny, and always be politically correct for enterprise adoption. 
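// (For context: GenerateRandomName in names-generator.go produces names of the
// form <adjective>_<surname>, e.g. "happy_torvalds", so the properties below
// are really properties of the two word lists it draws from.)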
func isAwesome(name string) bool { coolInventorNames := true easyToRemember := true mildlyFunnyOnOccasion := true politicallyCorrect := true return coolInventorNames && easyToRemember && mildlyFunnyOnOccasion && politicallyCorrect } docker-0.9.1/pkg/namesgenerator/names-generator.go0000644000175000017500000001261712314376205020323 0ustar tagtagpackage namesgenerator import ( "fmt" "math/rand" "time" ) type NameChecker interface { Exists(name string) bool } var ( left = [...]string{"happy", "jolly", "dreamy", "sad", "angry", "pensive", "focused", "sleepy", "grave", "distracted", "determined", "stoic", "stupefied", "sharp", "agitated", "cocky", "tender", "goofy", "furious", "desperate", "hopeful", "compassionate", "silly", "lonely", "condescending", "naughty", "kickass", "drunk", "boring", "nostalgic", "ecstatic", "insane", "cranky", "mad", "jovial", "sick", "hungry", "thirsty", "elegant", "backstabbing", "clever", "trusting", "loving", "suspicious", "berserk", "high", "romantic", "prickly", "evil"} // Docker 0.7.x generates names from notable scientists and hackers. // // Ada Lovelace invented the first algorithm. http://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) // Alan Turing was a founding father of computer science. http://en.wikipedia.org/wiki/Alan_Turing. // Albert Einstein invented the general theory of relativity. http://en.wikipedia.org/wiki/Albert_Einstein // Ambroise Pare invented modern surgery. http://en.wikipedia.org/wiki/Ambroise_Par%C3%A9 // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. http://en.wikipedia.org/wiki/Archimedes // Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod. // Charles Babbage invented the concept of a programmable computer. http://en.wikipedia.org/wiki/Charles_Babbage. // Charles Darwin established the principles of natural evolution. http://en.wikipedia.org/wiki/Charles_Darwin. // Dennis Ritchie and Ken Thompson created UNIX and the C programming language. http://en.wikipedia.org/wiki/Dennis_Ritchie http://en.wikipedia.org/wiki/Ken_Thompson // Douglas Engelbart gave the mother of all demos: http://en.wikipedia.org/wiki/Douglas_Engelbart // Emmett Brown invented time travel. http://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) // Enrico Fermi invented the first nuclear reactor. http://en.wikipedia.org/wiki/Enrico_Fermi. // Euclid invented geometry. http://en.wikipedia.org/wiki/Euclid // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. http://en.wikipedia.org/wiki/Galileo_Galilei // Henry Poincare made fundamental contributions in several fields of mathematics. http://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 // Isaac Newton invented classic mechanics and modern optics. http://en.wikipedia.org/wiki/Isaac_Newton // John McCarthy invented LISP: http://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist) // Leonardo Da Vinci invented too many things to list here. http://en.wikipedia.org/wiki/Leonardo_da_Vinci. // Linus Torvalds invented Linux and Git. http://en.wikipedia.org/wiki/Linus_Torvalds // Louis Pasteur discovered vaccination, fermentation and pasteurization. http://en.wikipedia.org/wiki/Louis_Pasteur. // Malcolm McLean invented the modern shipping container: http://en.wikipedia.org/wiki/Malcom_McLean // Marie Curie discovered radioactivity. http://en.wikipedia.org/wiki/Marie_Curie. 
// Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. http://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB // Niels Bohr is the father of quantum theory. http://en.wikipedia.org/wiki/Niels_Bohr. // Nikola Tesla invented the AC electric system and every gaget ever used by a James Bond villain. http://en.wikipedia.org/wiki/Nikola_Tesla // Pierre de Fermat pioneered several aspects of modern mathematics. http://en.wikipedia.org/wiki/Pierre_de_Fermat // Richard Feynman was a key contributor to quantum mechanics and particle physics. http://en.wikipedia.org/wiki/Richard_Feynman // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. http://en.wikipedia.org/wiki/Rob_Pike // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. http://en.wikipedia.org/wiki/Stephen_Hawking // Steve Wozniak invented the Apple I and Apple II. http://en.wikipedia.org/wiki/Steve_Wozniak // Werner Heisenberg was a founding father of quantum mechanics. http://en.wikipedia.org/wiki/Werner_Heisenberg // William Shockley, Walter Houser Brattain and John Bardeen co-invented the transistor (thanks Brian Goff). // http://en.wikipedia.org/wiki/John_Bardeen // http://en.wikipedia.org/wiki/Walter_Houser_Brattain // http://en.wikipedia.org/wiki/William_Shockley right = [...]string{"lovelace", "franklin", "tesla", "einstein", "bohr", "davinci", "pasteur", "nobel", "curie", "darwin", "turing", "ritchie", "torvalds", "pike", "thompson", "wozniak", "galileo", "euclid", "newton", "fermat", "archimedes", "poincare", "heisenberg", "feynman", "hawking", "fermi", "pare", "mccarthy", "engelbart", "babbage", "albattani", "ptolemy", "bell", "wright", "lumiere", "morse", "mclean", "brown", "bardeen", "brattain", "shockley"} ) func GenerateRandomName(checker NameChecker) (string, error) { retry := 5 rand.Seed(time.Now().UnixNano()) name := fmt.Sprintf("%s_%s", left[rand.Intn(len(left))], right[rand.Intn(len(right))]) for checker != nil && checker.Exists(name) && retry > 0 { name = fmt.Sprintf("%s%d", name, rand.Intn(10)) retry = retry - 1 } if retry == 0 { return name, fmt.Errorf("Error generating random name") } return name, nil } docker-0.9.1/pkg/sysinfo/0000755000175000017500000000000012314376205013356 5ustar tagtagdocker-0.9.1/pkg/sysinfo/sysinfo.go0000644000175000017500000000236512314376205015405 0ustar tagtagpackage sysinfo import ( "github.com/dotcloud/docker/pkg/cgroups" "io/ioutil" "log" "os" "path" ) type SysInfo struct { MemoryLimit bool SwapLimit bool IPv4ForwardingDisabled bool AppArmor bool } func New(quiet bool) *SysInfo { sysInfo := &SysInfo{} if cgroupMemoryMountpoint, err := cgroups.FindCgroupMountpoint("memory"); err != nil { if !quiet { log.Printf("WARNING: %s\n", err) } } else { _, err1 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.limit_in_bytes")) _, err2 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.soft_limit_in_bytes")) sysInfo.MemoryLimit = err1 == nil && err2 == nil if !sysInfo.MemoryLimit && !quiet { log.Printf("WARNING: Your kernel does not support cgroup memory limit.") } _, err = ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.memsw.limit_in_bytes")) sysInfo.SwapLimit = err == nil if !sysInfo.SwapLimit && !quiet { log.Printf("WARNING: Your kernel does not support cgroup swap limit.") } } // Check if AppArmor seems to be enabled on this system. 
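	// When the AppArmor LSM is built in and enabled, the kernel exposes
	// /sys/kernel/security/apparmor via securityfs, so the directory's
	// absence is taken here to mean AppArmor support is unavailable.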
if _, err := os.Stat("/sys/kernel/security/apparmor"); os.IsNotExist(err) { sysInfo.AppArmor = false } else { sysInfo.AppArmor = true } return sysInfo } docker-0.9.1/pkg/sysinfo/MAINTAINERS0000644000175000017500000000017212314376205015053 0ustar tagtagMichael Crosby (@crosbymichael) Guillaume J. Charmes (@creack) docker-0.9.1/pkg/netlink/0000755000175000017500000000000012314376205013330 5ustar tagtagdocker-0.9.1/pkg/netlink/netlink_unsupported.go0000644000175000017500000000221612314376205017774 0ustar tagtag// +build !linux !amd64 package netlink import ( "errors" "net" ) var ( ErrNotImplemented = errors.New("not implemented") ) func NetworkGetRoutes() ([]Route, error) { return nil, ErrNotImplemented } func NetworkLinkAdd(name string, linkType string) error { return ErrNotImplemented } func NetworkLinkUp(iface *net.Interface) error { return ErrNotImplemented } func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { return ErrNotImplemented } func AddDefaultGw(ip net.IP) error { return ErrNotImplemented } func NetworkSetMTU(iface *net.Interface, mtu int) error { return ErrNotImplemented } func NetworkCreateVethPair(name1, name2 string) error { return ErrNotImplemented } func NetworkChangeName(iface *net.Interface, newName string) error { return ErrNotImplemented } func NetworkSetNsFd(iface *net.Interface, fd int) error { return ErrNotImplemented } func NetworkSetNsPid(iface *net.Interface, nspid int) error { return ErrNotImplemented } func NetworkSetMaster(iface, master *net.Interface) error { return ErrNotImplemented } func NetworkLinkDown(iface *net.Interface) error { return ErrNotImplemented } docker-0.9.1/pkg/netlink/MAINTAINERS0000644000175000017500000000016112314376205015023 0ustar tagtagMichael Crosby (@crosbymichael) Guillaume Charmes (@creack) docker-0.9.1/pkg/netlink/netlink_linux.go0000644000175000017500000004031212314376205016542 0ustar tagtag// +build amd64 package netlink import ( "encoding/binary" "fmt" "net" "syscall" "unsafe" ) const ( IFNAMSIZ = 16 DEFAULT_CHANGE = 0xFFFFFFFF IFLA_INFO_KIND = 1 IFLA_INFO_DATA = 2 VETH_INFO_PEER = 1 IFLA_NET_NS_FD = 28 ) var nextSeqNr int func nativeEndian() binary.ByteOrder { var x uint32 = 0x01020304 if *(*byte)(unsafe.Pointer(&x)) == 0x01 { return binary.BigEndian } return binary.LittleEndian } func getSeq() int { nextSeqNr = nextSeqNr + 1 return nextSeqNr } func getIpFamily(ip net.IP) int { if len(ip) <= net.IPv4len { return syscall.AF_INET } if ip.To4() != nil { return syscall.AF_INET } return syscall.AF_INET6 } type NetlinkRequestData interface { Len() int ToWireFormat() []byte } type IfInfomsg struct { syscall.IfInfomsg } func newIfInfomsg(family int) *IfInfomsg { return &IfInfomsg{ IfInfomsg: syscall.IfInfomsg{ Family: uint8(family), }, } } func newIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg { msg := newIfInfomsg(family) parent.children = append(parent.children, msg) return msg } func (msg *IfInfomsg) ToWireFormat() []byte { native := nativeEndian() length := syscall.SizeofIfInfomsg b := make([]byte, length) b[0] = msg.Family b[1] = 0 native.PutUint16(b[2:4], msg.Type) native.PutUint32(b[4:8], uint32(msg.Index)) native.PutUint32(b[8:12], msg.Flags) native.PutUint32(b[12:16], msg.Change) return b } func (msg *IfInfomsg) Len() int { return syscall.SizeofIfInfomsg } type IfAddrmsg struct { syscall.IfAddrmsg } func newIfAddrmsg(family int) *IfAddrmsg { return &IfAddrmsg{ IfAddrmsg: syscall.IfAddrmsg{ Family: uint8(family), }, } } func (msg *IfAddrmsg) ToWireFormat() []byte { native := nativeEndian() 
length := syscall.SizeofIfAddrmsg b := make([]byte, length) b[0] = msg.Family b[1] = msg.Prefixlen b[2] = msg.Flags b[3] = msg.Scope native.PutUint32(b[4:8], msg.Index) return b } func (msg *IfAddrmsg) Len() int { return syscall.SizeofIfAddrmsg } type RtMsg struct { syscall.RtMsg } func newRtMsg(family int) *RtMsg { return &RtMsg{ RtMsg: syscall.RtMsg{ Family: uint8(family), Table: syscall.RT_TABLE_MAIN, Scope: syscall.RT_SCOPE_UNIVERSE, Protocol: syscall.RTPROT_BOOT, Type: syscall.RTN_UNICAST, }, } } func (msg *RtMsg) ToWireFormat() []byte { native := nativeEndian() length := syscall.SizeofRtMsg b := make([]byte, length) b[0] = msg.Family b[1] = msg.Dst_len b[2] = msg.Src_len b[3] = msg.Tos b[4] = msg.Table b[5] = msg.Protocol b[6] = msg.Scope b[7] = msg.Type native.PutUint32(b[8:12], msg.Flags) return b } func (msg *RtMsg) Len() int { return syscall.SizeofRtMsg } func rtaAlignOf(attrlen int) int { return (attrlen + syscall.RTA_ALIGNTO - 1) & ^(syscall.RTA_ALIGNTO - 1) } type RtAttr struct { syscall.RtAttr Data []byte children []NetlinkRequestData } func newRtAttr(attrType int, data []byte) *RtAttr { return &RtAttr{ RtAttr: syscall.RtAttr{ Type: uint16(attrType), }, children: []NetlinkRequestData{}, Data: data, } } func newRtAttrChild(parent *RtAttr, attrType int, data []byte) *RtAttr { attr := newRtAttr(attrType, data) parent.children = append(parent.children, attr) return attr } func (a *RtAttr) Len() int { l := 0 for _, child := range a.children { l += child.Len() + syscall.SizeofRtAttr } if l == 0 { l++ } return rtaAlignOf(l + len(a.Data)) } func (a *RtAttr) ToWireFormat() []byte { native := nativeEndian() length := a.Len() buf := make([]byte, rtaAlignOf(length+syscall.SizeofRtAttr)) if a.Data != nil { copy(buf[4:], a.Data) } else { next := 4 for _, child := range a.children { childBuf := child.ToWireFormat() copy(buf[next:], childBuf) next += rtaAlignOf(len(childBuf)) } } if l := uint16(rtaAlignOf(length)); l != 0 { native.PutUint16(buf[0:2], l+1) } native.PutUint16(buf[2:4], a.Type) return buf } type NetlinkRequest struct { syscall.NlMsghdr Data []NetlinkRequestData } func (rr *NetlinkRequest) ToWireFormat() []byte { native := nativeEndian() length := rr.Len dataBytes := make([][]byte, len(rr.Data)) for i, data := range rr.Data { dataBytes[i] = data.ToWireFormat() length += uint32(len(dataBytes[i])) } b := make([]byte, length) native.PutUint32(b[0:4], length) native.PutUint16(b[4:6], rr.Type) native.PutUint16(b[6:8], rr.Flags) native.PutUint32(b[8:12], rr.Seq) native.PutUint32(b[12:16], rr.Pid) next := 16 for _, data := range dataBytes { copy(b[next:], data) next += len(data) } return b } func (rr *NetlinkRequest) AddData(data NetlinkRequestData) { if data != nil { rr.Data = append(rr.Data, data) } } func newNetlinkRequest(proto, flags int) *NetlinkRequest { return &NetlinkRequest{ NlMsghdr: syscall.NlMsghdr{ Len: uint32(syscall.NLMSG_HDRLEN), Type: uint16(proto), Flags: syscall.NLM_F_REQUEST | uint16(flags), Seq: uint32(getSeq()), }, } } type NetlinkSocket struct { fd int lsa syscall.SockaddrNetlink } func getNetlinkSocket() (*NetlinkSocket, error) { fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, syscall.NETLINK_ROUTE) if err != nil { return nil, err } s := &NetlinkSocket{ fd: fd, } s.lsa.Family = syscall.AF_NETLINK if err := syscall.Bind(fd, &s.lsa); err != nil { syscall.Close(fd) return nil, err } return s, nil } func (s *NetlinkSocket) Close() { syscall.Close(s.fd) } func (s *NetlinkSocket) Send(request *NetlinkRequest) error { if err := syscall.Sendto(s.fd, 
request.ToWireFormat(), 0, &s.lsa); err != nil { return err } return nil } func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) { rb := make([]byte, syscall.Getpagesize()) nr, _, err := syscall.Recvfrom(s.fd, rb, 0) if err != nil { return nil, err } if nr < syscall.NLMSG_HDRLEN { return nil, ErrShortResponse } rb = rb[:nr] return syscall.ParseNetlinkMessage(rb) } func (s *NetlinkSocket) GetPid() (uint32, error) { lsa, err := syscall.Getsockname(s.fd) if err != nil { return 0, err } switch v := lsa.(type) { case *syscall.SockaddrNetlink: return v.Pid, nil } return 0, ErrWrongSockType } func (s *NetlinkSocket) HandleAck(seq uint32) error { native := nativeEndian() pid, err := s.GetPid() if err != nil { return err } done: for { msgs, err := s.Receive() if err != nil { return err } for _, m := range msgs { if m.Header.Seq != seq { return fmt.Errorf("Wrong Seq nr %d, expected %d", m.Header.Seq, seq) } if m.Header.Pid != pid { return fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid) } if m.Header.Type == syscall.NLMSG_DONE { break done } if m.Header.Type == syscall.NLMSG_ERROR { error := int32(native.Uint32(m.Data[0:4])) if error == 0 { break done } return syscall.Errno(-error) } } } return nil } // Add a new default gateway. Identical to: // ip route add default via $ip func AddDefaultGw(ip net.IP) error { s, err := getNetlinkSocket() if err != nil { return err } defer s.Close() family := getIpFamily(ip) wb := newNetlinkRequest(syscall.RTM_NEWROUTE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) msg := newRtMsg(family) wb.AddData(msg) var ipData []byte if family == syscall.AF_INET { ipData = ip.To4() } else { ipData = ip.To16() } gateway := newRtAttr(syscall.RTA_GATEWAY, ipData) wb.AddData(gateway) if err := s.Send(wb); err != nil { return err } return s.HandleAck(wb.Seq) } // Bring up a particular network interface func NetworkLinkUp(iface *net.Interface) error { s, err := getNetlinkSocket() if err != nil { return err } defer s.Close() wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) msg := newIfInfomsg(syscall.AF_UNSPEC) msg.Change = syscall.IFF_UP msg.Flags = syscall.IFF_UP msg.Index = int32(iface.Index) wb.AddData(msg) if err := s.Send(wb); err != nil { return err } return s.HandleAck(wb.Seq) } func NetworkLinkDown(iface *net.Interface) error { s, err := getNetlinkSocket() if err != nil { return err } defer s.Close() wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) msg := newIfInfomsg(syscall.AF_UNSPEC) msg.Change = syscall.IFF_UP msg.Flags = 0 & ^syscall.IFF_UP msg.Index = int32(iface.Index) wb.AddData(msg) if err := s.Send(wb); err != nil { return err } return s.HandleAck(wb.Seq) } func NetworkSetMTU(iface *net.Interface, mtu int) error { s, err := getNetlinkSocket() if err != nil { return err } defer s.Close() wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) msg := newIfInfomsg(syscall.AF_UNSPEC) msg.Type = syscall.RTM_SETLINK msg.Flags = syscall.NLM_F_REQUEST msg.Index = int32(iface.Index) msg.Change = DEFAULT_CHANGE wb.AddData(msg) var ( b = make([]byte, 4) native = nativeEndian() ) native.PutUint32(b, uint32(mtu)) data := newRtAttr(syscall.IFLA_MTU, b) wb.AddData(data) if err := s.Send(wb); err != nil { return err } return s.HandleAck(wb.Seq) } // same as ip link set $name master $master func NetworkSetMaster(iface, master *net.Interface) error { s, err := getNetlinkSocket() if err != nil { return err } defer s.Close() wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) msg := 
newIfInfomsg(syscall.AF_UNSPEC) msg.Type = syscall.RTM_SETLINK msg.Flags = syscall.NLM_F_REQUEST msg.Index = int32(iface.Index) msg.Change = DEFAULT_CHANGE wb.AddData(msg) var ( b = make([]byte, 4) native = nativeEndian() ) native.PutUint32(b, uint32(master.Index)) data := newRtAttr(syscall.IFLA_MASTER, b) wb.AddData(data) if err := s.Send(wb); err != nil { return err } return s.HandleAck(wb.Seq) } func NetworkSetNsPid(iface *net.Interface, nspid int) error { s, err := getNetlinkSocket() if err != nil { return err } defer s.Close() wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) msg := newIfInfomsg(syscall.AF_UNSPEC) msg.Type = syscall.RTM_SETLINK msg.Flags = syscall.NLM_F_REQUEST msg.Index = int32(iface.Index) msg.Change = DEFAULT_CHANGE wb.AddData(msg) var ( b = make([]byte, 4) native = nativeEndian() ) native.PutUint32(b, uint32(nspid)) data := newRtAttr(syscall.IFLA_NET_NS_PID, b) wb.AddData(data) if err := s.Send(wb); err != nil { return err } return s.HandleAck(wb.Seq) } func NetworkSetNsFd(iface *net.Interface, fd int) error { s, err := getNetlinkSocket() if err != nil { return err } defer s.Close() wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) msg := newIfInfomsg(syscall.AF_UNSPEC) msg.Type = syscall.RTM_SETLINK msg.Flags = syscall.NLM_F_REQUEST msg.Index = int32(iface.Index) msg.Change = DEFAULT_CHANGE wb.AddData(msg) var ( b = make([]byte, 4) native = nativeEndian() ) native.PutUint32(b, uint32(fd)) data := newRtAttr(IFLA_NET_NS_FD, b) wb.AddData(data) if err := s.Send(wb); err != nil { return err } return s.HandleAck(wb.Seq) } // Add an Ip address to an interface. This is identical to: // ip addr add $ip/$ipNet dev $iface func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { s, err := getNetlinkSocket() if err != nil { return err } defer s.Close() family := getIpFamily(ip) wb := newNetlinkRequest(syscall.RTM_NEWADDR, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) msg := newIfAddrmsg(family) msg.Index = uint32(iface.Index) prefixLen, _ := ipNet.Mask.Size() msg.Prefixlen = uint8(prefixLen) wb.AddData(msg) var ipData []byte if family == syscall.AF_INET { ipData = ip.To4() } else { ipData = ip.To16() } localData := newRtAttr(syscall.IFA_LOCAL, ipData) wb.AddData(localData) addrData := newRtAttr(syscall.IFA_ADDRESS, ipData) wb.AddData(addrData) if err := s.Send(wb); err != nil { return err } return s.HandleAck(wb.Seq) } func zeroTerminated(s string) []byte { return []byte(s + "\000") } func nonZeroTerminated(s string) []byte { return []byte(s) } // Add a new network link of a specified type. 
This is identical to // running: ip add link $name type $linkType func NetworkLinkAdd(name string, linkType string) error { s, err := getNetlinkSocket() if err != nil { return err } defer s.Close() wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) msg := newIfInfomsg(syscall.AF_UNSPEC) wb.AddData(msg) if name != "" { nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name)) wb.AddData(nameData) } kindData := newRtAttr(IFLA_INFO_KIND, nonZeroTerminated(linkType)) infoData := newRtAttr(syscall.IFLA_LINKINFO, kindData.ToWireFormat()) wb.AddData(infoData) if err := s.Send(wb); err != nil { return err } return s.HandleAck(wb.Seq) } // Returns an array of IPNet for all the currently routed subnets on ipv4 // This is similar to the first column of "ip route" output func NetworkGetRoutes() ([]Route, error) { native := nativeEndian() s, err := getNetlinkSocket() if err != nil { return nil, err } defer s.Close() wb := newNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_DUMP) msg := newIfInfomsg(syscall.AF_UNSPEC) wb.AddData(msg) if err := s.Send(wb); err != nil { return nil, err } pid, err := s.GetPid() if err != nil { return nil, err } res := make([]Route, 0) done: for { msgs, err := s.Receive() if err != nil { return nil, err } for _, m := range msgs { if m.Header.Seq != wb.Seq { return nil, fmt.Errorf("Wrong Seq nr %d, expected 1", m.Header.Seq) } if m.Header.Pid != pid { return nil, fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid) } if m.Header.Type == syscall.NLMSG_DONE { break done } if m.Header.Type == syscall.NLMSG_ERROR { error := int32(native.Uint32(m.Data[0:4])) if error == 0 { break done } return nil, syscall.Errno(-error) } if m.Header.Type != syscall.RTM_NEWROUTE { continue } var r Route msg := (*RtMsg)(unsafe.Pointer(&m.Data[0:syscall.SizeofRtMsg][0])) if msg.Flags&syscall.RTM_F_CLONED != 0 { // Ignore cloned routes continue } if msg.Table != syscall.RT_TABLE_MAIN { // Ignore non-main tables continue } if msg.Family != syscall.AF_INET { // Ignore non-ipv4 routes continue } if msg.Dst_len == 0 { // Default routes r.Default = true } attrs, err := syscall.ParseNetlinkRouteAttr(&m) if err != nil { return nil, err } for _, attr := range attrs { switch attr.Attr.Type { case syscall.RTA_DST: ip := attr.Value r.IPNet = &net.IPNet{ IP: ip, Mask: net.CIDRMask(int(msg.Dst_len), 8*len(ip)), } case syscall.RTA_OIF: index := int(native.Uint32(attr.Value[0:4])) r.Iface, _ = net.InterfaceByIndex(index) } } if r.Default || r.IPNet != nil { res = append(res, r) } } } return res, nil } func getIfSocket() (fd int, err error) { for _, socket := range []int{ syscall.AF_INET, syscall.AF_PACKET, syscall.AF_INET6, } { if fd, err = syscall.Socket(socket, syscall.SOCK_DGRAM, 0); err == nil { break } } if err == nil { return fd, nil } return -1, err } func NetworkChangeName(iface *net.Interface, newName string) error { fd, err := getIfSocket() if err != nil { return err } defer syscall.Close(fd) data := [IFNAMSIZ * 2]byte{} // the "-1"s here are very important for ensuring we get proper null // termination of our new C strings copy(data[:IFNAMSIZ-1], iface.Name) copy(data[IFNAMSIZ:IFNAMSIZ*2-1], newName) if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), syscall.SIOCSIFNAME, uintptr(unsafe.Pointer(&data[0]))); errno != 0 { return errno } return nil } func NetworkCreateVethPair(name1, name2 string) error { s, err := getNetlinkSocket() if err != nil { return err } defer s.Close() wb := newNetlinkRequest(syscall.RTM_NEWLINK, 
syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) msg := newIfInfomsg(syscall.AF_UNSPEC) wb.AddData(msg) nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name1)) wb.AddData(nameData) nest1 := newRtAttr(syscall.IFLA_LINKINFO, nil) newRtAttrChild(nest1, IFLA_INFO_KIND, zeroTerminated("veth")) nest2 := newRtAttrChild(nest1, IFLA_INFO_DATA, nil) nest3 := newRtAttrChild(nest2, VETH_INFO_PEER, nil) newIfInfomsgChild(nest3, syscall.AF_UNSPEC) newRtAttrChild(nest3, syscall.IFLA_IFNAME, zeroTerminated(name2)) wb.AddData(nest1) if err := s.Send(wb); err != nil { return err } return s.HandleAck(wb.Seq) } docker-0.9.1/pkg/netlink/netlink.go0000644000175000017500000000072112314376205015323 0ustar tagtag// Packet netlink provide access to low level Netlink sockets and messages. // // Actual implementations are in: // netlink_linux.go // netlink_darwin.go package netlink import ( "errors" "net" ) var ( ErrWrongSockType = errors.New("Wrong socket type") ErrShortResponse = errors.New("Got short response from netlink") ) // A Route is a subnet associated with the interface to reach it. type Route struct { *net.IPNet Iface *net.Interface Default bool } docker-0.9.1/pkg/mflag/0000755000175000017500000000000012314376205012752 5ustar tagtagdocker-0.9.1/pkg/mflag/flag_test.go0000644000175000017500000002372312314376205015260 0ustar tagtag// Copyright 2014 The Docker & Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package mflag_test import ( "bytes" "fmt" . "github.com/dotcloud/docker/pkg/mflag" "os" "sort" "strings" "testing" "time" ) // ResetForTesting clears all flag state and sets the usage function as directed. // After calling ResetForTesting, parse errors in flag handling will not // exit the program. func ResetForTesting(usage func()) { CommandLine = NewFlagSet(os.Args[0], ContinueOnError) Usage = usage } func boolString(s string) string { if s == "0" { return "false" } return "true" } func TestEverything(t *testing.T) { ResetForTesting(nil) Bool([]string{"test_bool"}, false, "bool value") Int([]string{"test_int"}, 0, "int value") Int64([]string{"test_int64"}, 0, "int64 value") Uint([]string{"test_uint"}, 0, "uint value") Uint64([]string{"test_uint64"}, 0, "uint64 value") String([]string{"test_string"}, "0", "string value") Float64([]string{"test_float64"}, 0, "float64 value") Duration([]string{"test_duration"}, 0, "time.Duration value") m := make(map[string]*Flag) desired := "0" visitor := func(f *Flag) { for _, name := range f.Names { if len(name) > 5 && name[0:5] == "test_" { m[name] = f ok := false switch { case f.Value.String() == desired: ok = true case name == "test_bool" && f.Value.String() == boolString(desired): ok = true case name == "test_duration" && f.Value.String() == desired+"s": ok = true } if !ok { t.Error("Visit: bad value", f.Value.String(), "for", name) } } } } VisitAll(visitor) if len(m) != 8 { t.Error("VisitAll misses some flags") for k, v := range m { t.Log(k, *v) } } m = make(map[string]*Flag) Visit(visitor) if len(m) != 0 { t.Errorf("Visit sees unset flags") for k, v := range m { t.Log(k, *v) } } // Now set all flags Set("test_bool", "true") Set("test_int", "1") Set("test_int64", "1") Set("test_uint", "1") Set("test_uint64", "1") Set("test_string", "1") Set("test_float64", "1") Set("test_duration", "1s") desired = "1" Visit(visitor) if len(m) != 8 { t.Error("Visit fails after set") for k, v := range m { t.Log(k, *v) } } // Now test they're visited in sort order. 
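	// (sortFlags in flag.go orders flags by their primary name, i.e. Names[0]
	// with any leading "#" stripped, so the names collected below should come
	// back already sorted.)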
var flagNames []string Visit(func(f *Flag) { for _, name := range f.Names { flagNames = append(flagNames, name) } }) if !sort.StringsAreSorted(flagNames) { t.Errorf("flag names not sorted: %v", flagNames) } } func TestGet(t *testing.T) { ResetForTesting(nil) Bool([]string{"test_bool"}, true, "bool value") Int([]string{"test_int"}, 1, "int value") Int64([]string{"test_int64"}, 2, "int64 value") Uint([]string{"test_uint"}, 3, "uint value") Uint64([]string{"test_uint64"}, 4, "uint64 value") String([]string{"test_string"}, "5", "string value") Float64([]string{"test_float64"}, 6, "float64 value") Duration([]string{"test_duration"}, 7, "time.Duration value") visitor := func(f *Flag) { for _, name := range f.Names { if len(name) > 5 && name[0:5] == "test_" { g, ok := f.Value.(Getter) if !ok { t.Errorf("Visit: value does not satisfy Getter: %T", f.Value) return } switch name { case "test_bool": ok = g.Get() == true case "test_int": ok = g.Get() == int(1) case "test_int64": ok = g.Get() == int64(2) case "test_uint": ok = g.Get() == uint(3) case "test_uint64": ok = g.Get() == uint64(4) case "test_string": ok = g.Get() == "5" case "test_float64": ok = g.Get() == float64(6) case "test_duration": ok = g.Get() == time.Duration(7) } if !ok { t.Errorf("Visit: bad value %T(%v) for %s", g.Get(), g.Get(), name) } } } } VisitAll(visitor) } func TestUsage(t *testing.T) { called := false ResetForTesting(func() { called = true }) if CommandLine.Parse([]string{"-x"}) == nil { t.Error("parse did not fail for unknown flag") } if !called { t.Error("did not call Usage for unknown flag") } } func testParse(f *FlagSet, t *testing.T) { if f.Parsed() { t.Error("f.Parse() = true before Parse") } boolFlag := f.Bool([]string{"bool"}, false, "bool value") bool2Flag := f.Bool([]string{"bool2"}, false, "bool2 value") intFlag := f.Int([]string{"-int"}, 0, "int value") int64Flag := f.Int64([]string{"-int64"}, 0, "int64 value") uintFlag := f.Uint([]string{"uint"}, 0, "uint value") uint64Flag := f.Uint64([]string{"-uint64"}, 0, "uint64 value") stringFlag := f.String([]string{"string"}, "0", "string value") float64Flag := f.Float64([]string{"float64"}, 0, "float64 value") durationFlag := f.Duration([]string{"duration"}, 5*time.Second, "time.Duration value") extra := "one-extra-argument" args := []string{ "-bool", "-bool2=true", "--int", "22", "--int64", "0x23", "-uint", "24", "--uint64", "25", "-string", "hello", "-float64", "2718e28", "-duration", "2m", extra, } if err := f.Parse(args); err != nil { t.Fatal(err) } if !f.Parsed() { t.Error("f.Parse() = false after Parse") } if *boolFlag != true { t.Error("bool flag should be true, is ", *boolFlag) } if *bool2Flag != true { t.Error("bool2 flag should be true, is ", *bool2Flag) } if *intFlag != 22 { t.Error("int flag should be 22, is ", *intFlag) } if *int64Flag != 0x23 { t.Error("int64 flag should be 0x23, is ", *int64Flag) } if *uintFlag != 24 { t.Error("uint flag should be 24, is ", *uintFlag) } if *uint64Flag != 25 { t.Error("uint64 flag should be 25, is ", *uint64Flag) } if *stringFlag != "hello" { t.Error("string flag should be `hello`, is ", *stringFlag) } if *float64Flag != 2718e28 { t.Error("float64 flag should be 2718e28, is ", *float64Flag) } if *durationFlag != 2*time.Minute { t.Error("duration flag should be 2m, is ", *durationFlag) } if len(f.Args()) != 1 { t.Error("expected one argument, got", len(f.Args())) } else if f.Args()[0] != extra { t.Errorf("expected argument %q got %q", extra, f.Args()[0]) } } func testPanic(f *FlagSet, t *testing.T) { 
f.Int([]string{"-int"}, 0, "int value") if f.Parsed() { t.Error("f.Parse() = true before Parse") } args := []string{ "-int", "21", } f.Parse(args) } func TestParsePanic(t *testing.T) { ResetForTesting(func() {}) testPanic(CommandLine, t) } func TestParse(t *testing.T) { ResetForTesting(func() { t.Error("bad parse") }) testParse(CommandLine, t) } func TestFlagSetParse(t *testing.T) { testParse(NewFlagSet("test", ContinueOnError), t) } // Declare a user-defined flag type. type flagVar []string func (f *flagVar) String() string { return fmt.Sprint([]string(*f)) } func (f *flagVar) Set(value string) error { *f = append(*f, value) return nil } func TestUserDefined(t *testing.T) { var flags FlagSet flags.Init("test", ContinueOnError) var v flagVar flags.Var(&v, []string{"v"}, "usage") if err := flags.Parse([]string{"-v", "1", "-v", "2", "-v=3"}); err != nil { t.Error(err) } if len(v) != 3 { t.Fatal("expected 3 args; got ", len(v)) } expect := "[1 2 3]" if v.String() != expect { t.Errorf("expected value %q got %q", expect, v.String()) } } // Declare a user-defined boolean flag type. type boolFlagVar struct { count int } func (b *boolFlagVar) String() string { return fmt.Sprintf("%d", b.count) } func (b *boolFlagVar) Set(value string) error { if value == "true" { b.count++ } return nil } func (b *boolFlagVar) IsBoolFlag() bool { return b.count < 4 } func TestUserDefinedBool(t *testing.T) { var flags FlagSet flags.Init("test", ContinueOnError) var b boolFlagVar var err error flags.Var(&b, []string{"b"}, "usage") if err = flags.Parse([]string{"-b", "-b", "-b", "-b=true", "-b=false", "-b", "barg", "-b"}); err != nil { if b.count < 4 { t.Error(err) } } if b.count != 4 { t.Errorf("want: %d; got: %d", 4, b.count) } if err == nil { t.Error("expected error; got none") } } func TestSetOutput(t *testing.T) { var flags FlagSet var buf bytes.Buffer flags.SetOutput(&buf) flags.Init("test", ContinueOnError) flags.Parse([]string{"-unknown"}) if out := buf.String(); !strings.Contains(out, "-unknown") { t.Logf("expected output mentioning unknown; got %q", out) } } // This tests that one can reset the flags. This still works but not well, and is // superseded by FlagSet. func TestChangingArgs(t *testing.T) { ResetForTesting(func() { t.Fatal("bad parse") }) oldArgs := os.Args defer func() { os.Args = oldArgs }() os.Args = []string{"cmd", "-before", "subcmd", "-after", "args"} before := Bool([]string{"before"}, false, "") if err := CommandLine.Parse(os.Args[1:]); err != nil { t.Fatal(err) } cmd := Arg(0) os.Args = Args() after := Bool([]string{"after"}, false, "") Parse() args := Args() if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" { t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args) } } // Test that -help invokes the usage message and returns ErrHelp. func TestHelp(t *testing.T) { var helpCalled = false fs := NewFlagSet("help test", ContinueOnError) fs.Usage = func() { helpCalled = true } var flag bool fs.BoolVar(&flag, []string{"flag"}, false, "regular flag") // Regular flag invocation should work err := fs.Parse([]string{"-flag=true"}) if err != nil { t.Fatal("expected no error; got ", err) } if !flag { t.Error("flag was not set by -flag") } if helpCalled { t.Error("help called for regular flag") helpCalled = false // reset for next test } // Help flag should work as expected. 
err = fs.Parse([]string{"-help"}) if err == nil { t.Fatal("error expected") } if err != ErrHelp { t.Fatal("expected ErrHelp; got ", err) } if !helpCalled { t.Fatal("help was not called") } // If we define a help flag, that should override. var help bool fs.BoolVar(&help, []string{"help"}, false, "help flag") helpCalled = false err = fs.Parse([]string{"-help"}) if err != nil { t.Fatal("expected no error for defined -help; got ", err) } if helpCalled { t.Fatal("help was called; should not have been for defined help flag") } } docker-0.9.1/pkg/mflag/LICENSE0000644000175000017500000000272012314376205013760 0ustar tagtagCopyright (c) 2014 The Docker & Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. docker-0.9.1/pkg/mflag/flag.go0000644000175000017500000007165012314376205014223 0ustar tagtag// Copyright 2014 The Docker & Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. /* Package flag implements command-line flag parsing. Usage: Define flags using flag.String(), Bool(), Int(), etc. This declares an integer flag, -f or --flagname, stored in the pointer ip, with type *int. import "flag" var ip = flag.Int([]string{"f", "-flagname"}, 1234, "help message for flagname") If you like, you can bind the flag to a variable using the Var() functions. var flagvar int func init() { // -flaghidden will work, but will be hidden from the usage flag.IntVar(&flagvar, []string{"f", "#flaghidden", "-flagname"}, 1234, "help message for flagname") } Or you can create custom flags that satisfy the Value interface (with pointer receivers) and couple them to flag parsing by flag.Var(&flagVal, []string{"name"}, "help message for flagname") For such flags, the default value is just the initial value of the variable. After all flags are defined, call flag.Parse() to parse the command line into the defined flags. Flags may then be used directly. If you're using the flags themselves, they are all pointers; if you bind to variables, they're values. 
fmt.Println("ip has value ", *ip) fmt.Println("flagvar has value ", flagvar) After parsing, the arguments after the flag are available as the slice flag.Args() or individually as flag.Arg(i). The arguments are indexed from 0 through flag.NArg()-1. Command line flag syntax: -flag -flag=x -flag x // non-boolean flags only One or two minus signs may be used; they are equivalent. The last form is not permitted for boolean flags because the meaning of the command cmd -x * will change if there is a file called 0, false, etc. You must use the -flag=false form to turn off a boolean flag. Flag parsing stops just before the first non-flag argument ("-" is a non-flag argument) or after the terminator "--". Integer flags accept 1234, 0664, 0x1234 and may be negative. Boolean flags may be 1, 0, t, f, true, false, TRUE, FALSE, True, False. Duration flags accept any input valid for time.ParseDuration. The default set of command-line flags is controlled by top-level functions. The FlagSet type allows one to define independent sets of flags, such as to implement subcommands in a command-line interface. The methods of FlagSet are analogous to the top-level functions for the command-line flag set. */ package mflag import ( "errors" "fmt" "io" "os" "sort" "strconv" "strings" "time" ) // ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. var ErrHelp = errors.New("flag: help requested") // ErrRetry is the error returned if you need to try letter by letter var ErrRetry = errors.New("flag: retry") // -- bool Value type boolValue bool func newBoolValue(val bool, p *bool) *boolValue { *p = val return (*boolValue)(p) } func (b *boolValue) Set(s string) error { v, err := strconv.ParseBool(s) *b = boolValue(v) return err } func (b *boolValue) Get() interface{} { return bool(*b) } func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } func (b *boolValue) IsBoolFlag() bool { return true } // optional interface to indicate boolean flags that can be // supplied without "=value" text type boolFlag interface { Value IsBoolFlag() bool } // -- int Value type intValue int func newIntValue(val int, p *int) *intValue { *p = val return (*intValue)(p) } func (i *intValue) Set(s string) error { v, err := strconv.ParseInt(s, 0, 64) *i = intValue(v) return err } func (i *intValue) Get() interface{} { return int(*i) } func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } // -- int64 Value type int64Value int64 func newInt64Value(val int64, p *int64) *int64Value { *p = val return (*int64Value)(p) } func (i *int64Value) Set(s string) error { v, err := strconv.ParseInt(s, 0, 64) *i = int64Value(v) return err } func (i *int64Value) Get() interface{} { return int64(*i) } func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } // -- uint Value type uintValue uint func newUintValue(val uint, p *uint) *uintValue { *p = val return (*uintValue)(p) } func (i *uintValue) Set(s string) error { v, err := strconv.ParseUint(s, 0, 64) *i = uintValue(v) return err } func (i *uintValue) Get() interface{} { return uint(*i) } func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } // -- uint64 Value type uint64Value uint64 func newUint64Value(val uint64, p *uint64) *uint64Value { *p = val return (*uint64Value)(p) } func (i *uint64Value) Set(s string) error { v, err := strconv.ParseUint(s, 0, 64) *i = uint64Value(v) return err } func (i *uint64Value) Get() interface{} { return uint64(*i) } func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } // -- 
string Value type stringValue string func newStringValue(val string, p *string) *stringValue { *p = val return (*stringValue)(p) } func (s *stringValue) Set(val string) error { *s = stringValue(val) return nil } func (s *stringValue) Get() interface{} { return string(*s) } func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } // -- float64 Value type float64Value float64 func newFloat64Value(val float64, p *float64) *float64Value { *p = val return (*float64Value)(p) } func (f *float64Value) Set(s string) error { v, err := strconv.ParseFloat(s, 64) *f = float64Value(v) return err } func (f *float64Value) Get() interface{} { return float64(*f) } func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } // -- time.Duration Value type durationValue time.Duration func newDurationValue(val time.Duration, p *time.Duration) *durationValue { *p = val return (*durationValue)(p) } func (d *durationValue) Set(s string) error { v, err := time.ParseDuration(s) *d = durationValue(v) return err } func (d *durationValue) Get() interface{} { return time.Duration(*d) } func (d *durationValue) String() string { return (*time.Duration)(d).String() } // Value is the interface to the dynamic value stored in a flag. // (The default value is represented as a string.) // // If a Value has an IsBoolFlag() bool method returning true, // the command-line parser makes -name equivalent to -name=true // rather than using the next command-line argument. type Value interface { String() string Set(string) error } // Getter is an interface that allows the contents of a Value to be retrieved. // It wraps the Value interface, rather than being part of it, because it // appeared after Go 1 and its compatibility rules. All Value types provided // by this package satisfy the Getter interface. type Getter interface { Value Get() interface{} } // ErrorHandling defines how to handle flag parsing errors. type ErrorHandling int const ( ContinueOnError ErrorHandling = iota ExitOnError PanicOnError ) // A FlagSet represents a set of defined flags. The zero value of a FlagSet // has no name and has ContinueOnError error handling. type FlagSet struct { // Usage is the function called when an error occurs while parsing flags. // The field is a function (not a method) that may be changed to point to // a custom error handler. Usage func() name string parsed bool actual map[string]*Flag formal map[string]*Flag args []string // arguments after flags errorHandling ErrorHandling output io.Writer // nil means stderr; use out() accessor } // A Flag represents the state of a flag. type Flag struct { Names []string // name as it appears on command line Usage string // help message Value Value // value as set DefValue string // default value (as text); for usage message } // sortFlags returns the flags as a slice in lexicographical sorted order. func sortFlags(flags map[string]*Flag) []*Flag { var list sort.StringSlice for _, f := range flags { fName := strings.TrimPrefix(f.Names[0], "#") if len(f.Names) == 1 { list = append(list, fName) continue } found := false for _, name := range list { if name == fName { found = true break } } if !found { list = append(list, fName) } } list.Sort() result := make([]*Flag, len(list)) for i, name := range list { result[i] = flags[name] } return result } func (f *FlagSet) out() io.Writer { if f.output == nil { return os.Stderr } return f.output } // SetOutput sets the destination for usage and error messages. // If output is nil, os.Stderr is used. 
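//
// For example (an illustrative sketch, mirroring the pattern used by
// TestSetOutput in flag_test.go):
//
//	var buf bytes.Buffer
//	fs := NewFlagSet("example", ContinueOnError)
//	fs.SetOutput(&buf)
//	fs.Parse([]string{"-unknown"}) // usage and error text now land in buf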
func (f *FlagSet) SetOutput(output io.Writer) { f.output = output } // VisitAll visits the flags in lexicographical order, calling fn for each. // It visits all flags, even those not set. func (f *FlagSet) VisitAll(fn func(*Flag)) { for _, flag := range sortFlags(f.formal) { fn(flag) } } // VisitAll visits the command-line flags in lexicographical order, calling // fn for each. It visits all flags, even those not set. func VisitAll(fn func(*Flag)) { CommandLine.VisitAll(fn) } // Visit visits the flags in lexicographical order, calling fn for each. // It visits only those flags that have been set. func (f *FlagSet) Visit(fn func(*Flag)) { for _, flag := range sortFlags(f.actual) { fn(flag) } } // Visit visits the command-line flags in lexicographical order, calling fn // for each. It visits only those flags that have been set. func Visit(fn func(*Flag)) { CommandLine.Visit(fn) } // Lookup returns the Flag structure of the named flag, returning nil if none exists. func (f *FlagSet) Lookup(name string) *Flag { return f.formal[name] } // Lookup returns the Flag structure of the named command-line flag, // returning nil if none exists. func Lookup(name string) *Flag { return CommandLine.formal[name] } // Set sets the value of the named flag. func (f *FlagSet) Set(name, value string) error { flag, ok := f.formal[name] if !ok { return fmt.Errorf("no such flag -%v", name) } err := flag.Value.Set(value) if err != nil { return err } if f.actual == nil { f.actual = make(map[string]*Flag) } f.actual[name] = flag return nil } // Set sets the value of the named command-line flag. func Set(name, value string) error { return CommandLine.Set(name, value) } // PrintDefaults prints, to standard error unless configured // otherwise, the default values of all defined flags in the set. func (f *FlagSet) PrintDefaults() { f.VisitAll(func(flag *Flag) { format := " -%s=%s: %s\n" if _, ok := flag.Value.(*stringValue); ok { // put quotes on the value format = " -%s=%q: %s\n" } names := []string{} for _, name := range flag.Names { if name[0] != '#' { names = append(names, name) } } if len(names) > 0 { fmt.Fprintf(f.out(), format, strings.Join(names, ", -"), flag.DefValue, flag.Usage) } }) } // PrintDefaults prints to standard error the default values of all defined command-line flags. func PrintDefaults() { CommandLine.PrintDefaults() } // defaultUsage is the default function to print a usage message. func defaultUsage(f *FlagSet) { if f.name == "" { fmt.Fprintf(f.out(), "Usage:\n") } else { fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) } f.PrintDefaults() } // NOTE: Usage is not just defaultUsage(CommandLine) // because it serves (via godoc flag Usage) as the example // for how to write your own usage function. // Usage prints to standard error a usage message documenting all defined command-line flags. // The function is a variable that may be changed to point to a custom function. var Usage = func() { fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) PrintDefaults() } // NFlag returns the number of flags that have been set. func (f *FlagSet) NFlag() int { return len(f.actual) } // NFlag returns the number of command-line flags that have been set. func NFlag() int { return len(CommandLine.actual) } // Arg returns the i'th argument. Arg(0) is the first remaining argument // after flags have been processed. func (f *FlagSet) Arg(i int) string { if i < 0 || i >= len(f.args) { return "" } return f.args[i] } // Arg returns the i'th command-line argument. 
Arg(0) is the first remaining argument // after flags have been processed. func Arg(i int) string { return CommandLine.Arg(i) } // NArg is the number of arguments remaining after flags have been processed. func (f *FlagSet) NArg() int { return len(f.args) } // NArg is the number of arguments remaining after flags have been processed. func NArg() int { return len(CommandLine.args) } // Args returns the non-flag arguments. func (f *FlagSet) Args() []string { return f.args } // Args returns the non-flag command-line arguments. func Args() []string { return CommandLine.args } // BoolVar defines a bool flag with specified name, default value, and usage string. // The argument p points to a bool variable in which to store the value of the flag. func (f *FlagSet) BoolVar(p *bool, names []string, value bool, usage string) { f.Var(newBoolValue(value, p), names, usage) } // BoolVar defines a bool flag with specified name, default value, and usage string. // The argument p points to a bool variable in which to store the value of the flag. func BoolVar(p *bool, names []string, value bool, usage string) { CommandLine.Var(newBoolValue(value, p), names, usage) } // Bool defines a bool flag with specified name, default value, and usage string. // The return value is the address of a bool variable that stores the value of the flag. func (f *FlagSet) Bool(names []string, value bool, usage string) *bool { p := new(bool) f.BoolVar(p, names, value, usage) return p } // Bool defines a bool flag with specified name, default value, and usage string. // The return value is the address of a bool variable that stores the value of the flag. func Bool(names []string, value bool, usage string) *bool { return CommandLine.Bool(names, value, usage) } // IntVar defines an int flag with specified name, default value, and usage string. // The argument p points to an int variable in which to store the value of the flag. func (f *FlagSet) IntVar(p *int, names []string, value int, usage string) { f.Var(newIntValue(value, p), names, usage) } // IntVar defines an int flag with specified name, default value, and usage string. // The argument p points to an int variable in which to store the value of the flag. func IntVar(p *int, names []string, value int, usage string) { CommandLine.Var(newIntValue(value, p), names, usage) } // Int defines an int flag with specified name, default value, and usage string. // The return value is the address of an int variable that stores the value of the flag. func (f *FlagSet) Int(names []string, value int, usage string) *int { p := new(int) f.IntVar(p, names, value, usage) return p } // Int defines an int flag with specified name, default value, and usage string. // The return value is the address of an int variable that stores the value of the flag. func Int(names []string, value int, usage string) *int { return CommandLine.Int(names, value, usage) } // Int64Var defines an int64 flag with specified name, default value, and usage string. // The argument p points to an int64 variable in which to store the value of the flag. func (f *FlagSet) Int64Var(p *int64, names []string, value int64, usage string) { f.Var(newInt64Value(value, p), names, usage) } // Int64Var defines an int64 flag with specified name, default value, and usage string. // The argument p points to an int64 variable in which to store the value of the flag. 
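//
// Illustrative usage (a sketch; the flag names and default value below are
// invented for the example):
//
//	var size int64
//	Int64Var(&size, []string{"s", "-size"}, 1024, "size in bytes")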
func Int64Var(p *int64, names []string, value int64, usage string) { CommandLine.Var(newInt64Value(value, p), names, usage) } // Int64 defines an int64 flag with specified name, default value, and usage string. // The return value is the address of an int64 variable that stores the value of the flag. func (f *FlagSet) Int64(names []string, value int64, usage string) *int64 { p := new(int64) f.Int64Var(p, names, value, usage) return p } // Int64 defines an int64 flag with specified name, default value, and usage string. // The return value is the address of an int64 variable that stores the value of the flag. func Int64(names []string, value int64, usage string) *int64 { return CommandLine.Int64(names, value, usage) } // UintVar defines a uint flag with specified name, default value, and usage string. // The argument p points to a uint variable in which to store the value of the flag. func (f *FlagSet) UintVar(p *uint, names []string, value uint, usage string) { f.Var(newUintValue(value, p), names, usage) } // UintVar defines a uint flag with specified name, default value, and usage string. // The argument p points to a uint variable in which to store the value of the flag. func UintVar(p *uint, names []string, value uint, usage string) { CommandLine.Var(newUintValue(value, p), names, usage) } // Uint defines a uint flag with specified name, default value, and usage string. // The return value is the address of a uint variable that stores the value of the flag. func (f *FlagSet) Uint(names []string, value uint, usage string) *uint { p := new(uint) f.UintVar(p, names, value, usage) return p } // Uint defines a uint flag with specified name, default value, and usage string. // The return value is the address of a uint variable that stores the value of the flag. func Uint(names []string, value uint, usage string) *uint { return CommandLine.Uint(names, value, usage) } // Uint64Var defines a uint64 flag with specified name, default value, and usage string. // The argument p points to a uint64 variable in which to store the value of the flag. func (f *FlagSet) Uint64Var(p *uint64, names []string, value uint64, usage string) { f.Var(newUint64Value(value, p), names, usage) } // Uint64Var defines a uint64 flag with specified name, default value, and usage string. // The argument p points to a uint64 variable in which to store the value of the flag. func Uint64Var(p *uint64, names []string, value uint64, usage string) { CommandLine.Var(newUint64Value(value, p), names, usage) } // Uint64 defines a uint64 flag with specified name, default value, and usage string. // The return value is the address of a uint64 variable that stores the value of the flag. func (f *FlagSet) Uint64(names []string, value uint64, usage string) *uint64 { p := new(uint64) f.Uint64Var(p, names, value, usage) return p } // Uint64 defines a uint64 flag with specified name, default value, and usage string. // The return value is the address of a uint64 variable that stores the value of the flag. func Uint64(names []string, value uint64, usage string) *uint64 { return CommandLine.Uint64(names, value, usage) } // StringVar defines a string flag with specified name, default value, and usage string. // The argument p points to a string variable in which to store the value of the flag. func (f *FlagSet) StringVar(p *string, names []string, value string, usage string) { f.Var(newStringValue(value, p), names, usage) } // StringVar defines a string flag with specified name, default value, and usage string. 
// The argument p points to a string variable in which to store the value of the flag. func StringVar(p *string, names []string, value string, usage string) { CommandLine.Var(newStringValue(value, p), names, usage) } // String defines a string flag with specified name, default value, and usage string. // The return value is the address of a string variable that stores the value of the flag. func (f *FlagSet) String(names []string, value string, usage string) *string { p := new(string) f.StringVar(p, names, value, usage) return p } // String defines a string flag with specified name, default value, and usage string. // The return value is the address of a string variable that stores the value of the flag. func String(names []string, value string, usage string) *string { return CommandLine.String(names, value, usage) } // Float64Var defines a float64 flag with specified name, default value, and usage string. // The argument p points to a float64 variable in which to store the value of the flag. func (f *FlagSet) Float64Var(p *float64, names []string, value float64, usage string) { f.Var(newFloat64Value(value, p), names, usage) } // Float64Var defines a float64 flag with specified name, default value, and usage string. // The argument p points to a float64 variable in which to store the value of the flag. func Float64Var(p *float64, names []string, value float64, usage string) { CommandLine.Var(newFloat64Value(value, p), names, usage) } // Float64 defines a float64 flag with specified name, default value, and usage string. // The return value is the address of a float64 variable that stores the value of the flag. func (f *FlagSet) Float64(names []string, value float64, usage string) *float64 { p := new(float64) f.Float64Var(p, names, value, usage) return p } // Float64 defines a float64 flag with specified name, default value, and usage string. // The return value is the address of a float64 variable that stores the value of the flag. func Float64(names []string, value float64, usage string) *float64 { return CommandLine.Float64(names, value, usage) } // DurationVar defines a time.Duration flag with specified name, default value, and usage string. // The argument p points to a time.Duration variable in which to store the value of the flag. func (f *FlagSet) DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { f.Var(newDurationValue(value, p), names, usage) } // DurationVar defines a time.Duration flag with specified name, default value, and usage string. // The argument p points to a time.Duration variable in which to store the value of the flag. func DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { CommandLine.Var(newDurationValue(value, p), names, usage) } // Duration defines a time.Duration flag with specified name, default value, and usage string. // The return value is the address of a time.Duration variable that stores the value of the flag. func (f *FlagSet) Duration(names []string, value time.Duration, usage string) *time.Duration { p := new(time.Duration) f.DurationVar(p, names, value, usage) return p } // Duration defines a time.Duration flag with specified name, default value, and usage string. // The return value is the address of a time.Duration variable that stores the value of the flag. func Duration(names []string, value time.Duration, usage string) *time.Duration { return CommandLine.Duration(names, value, usage) } // Var defines a flag with the specified name and usage string. 
The type and // value of the flag are represented by the first argument, of type Value, which // typically holds a user-defined implementation of Value. For instance, the // caller could create a flag that turns a comma-separated string into a slice // of strings by giving the slice the methods of Value; in particular, Set would // decompose the comma-separated string into the slice. func (f *FlagSet) Var(value Value, names []string, usage string) { // Remember the default value as a string; it won't change. flag := &Flag{names, usage, value, value.String()} for _, name := range names { name = strings.TrimPrefix(name, "#") _, alreadythere := f.formal[name] if alreadythere { var msg string if f.name == "" { msg = fmt.Sprintf("flag redefined: %s", name) } else { msg = fmt.Sprintf("%s flag redefined: %s", f.name, name) } fmt.Fprintln(f.out(), msg) panic(msg) // Happens only if flags are declared with identical names } if f.formal == nil { f.formal = make(map[string]*Flag) } f.formal[name] = flag } } // Var defines a flag with the specified name and usage string. The type and // value of the flag are represented by the first argument, of type Value, which // typically holds a user-defined implementation of Value. For instance, the // caller could create a flag that turns a comma-separated string into a slice // of strings by giving the slice the methods of Value; in particular, Set would // decompose the comma-separated string into the slice. func Var(value Value, names []string, usage string) { CommandLine.Var(value, names, usage) } // failf prints to standard error a formatted error and usage message and // returns the error. func (f *FlagSet) failf(format string, a ...interface{}) error { err := fmt.Errorf(format, a...) fmt.Fprintln(f.out(), err) f.usage() return err } // usage calls the Usage method for the flag set, or the usage function if // the flag set is CommandLine. func (f *FlagSet) usage() { if f == CommandLine { Usage() } else if f.Usage == nil { defaultUsage(f) } else { f.Usage() } } // parseOne parses one flag. It reports whether a flag was seen. func (f *FlagSet) parseOne() (bool, string, error) { if len(f.args) == 0 { return false, "", nil } s := f.args[0] if len(s) == 0 || s[0] != '-' || len(s) == 1 { return false, "", nil } if s[1] == '-' && len(s) == 2 { // "--" terminates the flags f.args = f.args[1:] return false, "", nil } name := s[1:] if len(name) == 0 || name[0] == '=' { return false, "", f.failf("bad flag syntax: %s", s) } // it's a flag. does it have an argument? f.args = f.args[1:] has_value := false value := "" for i := 1; i < len(name); i++ { // equals cannot be first if name[i] == '=' { value = name[i+1:] has_value = true name = name[0:i] break } } m := f.formal flag, alreadythere := m[name] // BUG if !alreadythere { if name == "-help" || name == "help" || name == "h" { // special case for nice help message. f.usage() return false, "", ErrHelp } if len(name) > 0 && name[0] == '-' { return false, "", f.failf("flag provided but not defined: -%s", name) } return false, name, ErrRetry } if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg if has_value { if err := fv.Set(value); err != nil { return false, "", f.failf("invalid boolean value %q for -%s: %v", value, name, err) } } else { fv.Set("true") } } else { // It must have a value, which might be the next argument. 
if !has_value && len(f.args) > 0 { // value is the next arg has_value = true value, f.args = f.args[0], f.args[1:] } if !has_value { return false, "", f.failf("flag needs an argument: -%s", name) } if err := flag.Value.Set(value); err != nil { return false, "", f.failf("invalid value %q for flag -%s: %v", value, name, err) } } if f.actual == nil { f.actual = make(map[string]*Flag) } f.actual[name] = flag for i, n := range flag.Names { if n == fmt.Sprintf("#%s", name) { replacement := "" for j := i; j < len(flag.Names); j++ { if flag.Names[j][0] != '#' { replacement = flag.Names[j] break } } if replacement != "" { fmt.Fprintf(f.out(), "Warning: '-%s' is deprecated, it will be replaced by '-%s' soon. See usage.\n", name, replacement) } else { fmt.Fprintf(f.out(), "Warning: '-%s' is deprecated, it will be removed soon. See usage.\n", name) } } } return true, "", nil } // Parse parses flag definitions from the argument list, which should not // include the command name. Must be called after all flags in the FlagSet // are defined and before flags are accessed by the program. // The return value will be ErrHelp if -help was set but not defined. func (f *FlagSet) Parse(arguments []string) error { f.parsed = true f.args = arguments for { seen, name, err := f.parseOne() if seen { continue } if err == nil { break } if err == ErrRetry { if len(name) > 1 { err = nil for _, letter := range strings.Split(name, "") { f.args = append([]string{"-" + letter}, f.args...) seen2, _, err2 := f.parseOne() if seen2 { continue } if err2 != nil { err = f.failf("flag provided but not defined: -%s", name) break } } if err == nil { continue } } else { err = f.failf("flag provided but not defined: -%s", name) } } switch f.errorHandling { case ContinueOnError: return err case ExitOnError: os.Exit(2) case PanicOnError: panic(err) } } return nil } // Parsed reports whether f.Parse has been called. func (f *FlagSet) Parsed() bool { return f.parsed } // Parse parses the command-line flags from os.Args[1:]. Must be called // after all flags are defined and before flags are accessed by the program. func Parse() { // Ignore errors; CommandLine is set for ExitOnError. CommandLine.Parse(os.Args[1:]) } // Parsed returns true if the command-line flags have been parsed. func Parsed() bool { return CommandLine.Parsed() } // CommandLine is the default set of command-line flags, parsed from os.Args. // The top-level functions such as BoolVar, Arg, and on are wrappers for the // methods of CommandLine. var CommandLine = NewFlagSet(os.Args[0], ExitOnError) // NewFlagSet returns a new, empty flag set with the specified name and // error handling property. func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { f := &FlagSet{ name: name, errorHandling: errorHandling, } return f } // Init sets the name and error handling property for a flag set. // By default, the zero FlagSet uses an empty name and the // ContinueOnError error handling policy. 
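// A minimal usage sketch for a standalone flag set (the names and arguments here
// are hypothetical, not taken from this package):
//
//	fs := NewFlagSet("example", ContinueOnError)
//	verbose := fs.Bool([]string{"v", "-verbose"}, false, "enable verbose output")
//	if err := fs.Parse([]string{"--verbose", "input.txt"}); err != nil {
//		// handle the parse error
//	}
//	fmt.Println(*verbose, fs.Args()) // true [input.txt]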
func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { f.name = name f.errorHandling = errorHandling } docker-0.9.1/pkg/mflag/example/0000755000175000017500000000000012314376205014405 5ustar tagtagdocker-0.9.1/pkg/mflag/example/example.go0000644000175000017500000000177012314376205016374 0ustar tagtagpackage main import ( "fmt" flag "github.com/dotcloud/docker/pkg/mflag" ) var ( i int str string b, b2, h bool ) func init() { flag.Bool([]string{"#hp", "#-halp"}, false, "display the halp") flag.BoolVar(&b, []string{"b", "#bal", "#bol", "-bal"}, false, "a simple bool") flag.BoolVar(&b, []string{"g", "#gil"}, false, "a simple bool") flag.BoolVar(&b2, []string{"#-bool"}, false, "a simple bool") flag.IntVar(&i, []string{"-integer", "-number"}, -1, "a simple integer") flag.StringVar(&str, []string{"s", "#hidden", "-string"}, "", "a simple string") //-s -hidden and --string will work, but -hidden won't be in the usage flag.BoolVar(&h, []string{"h", "#help", "-help"}, false, "display the help") flag.Parse() } func main() { if h { flag.PrintDefaults() } fmt.Printf("s/#hidden/-string: %s\n", str) fmt.Printf("b: %t\n", b) fmt.Printf("-bool: %t\n", b2) fmt.Printf("s/#hidden/-string(via lookup): %s\n", flag.Lookup("s").Value.String()) fmt.Printf("ARGS: %v\n", flag.Args()) } docker-0.9.1/pkg/mflag/README.md0000644000175000017500000000151712314376205014235 0ustar tagtagPackage mflag (aka multiple-flag) implements command-line flag parsing. It's a **hacky** fork of the [official golang package](http://golang.org/pkg/flag/) It adds: * both short and long flag versions `./example -s red` `./example --string blue` * multiple names for the same option ``` $>./example -h Usage of example: -s, --string="": a simple string ``` ___ It is very flexible on purpose, so you can do things like: ``` $>./example -h Usage of example: -s, -string, --string="": a simple string ``` Or: ``` $>./example -h Usage of example: -oldflag, --newflag="": a simple string ``` You can also hide some flags from the usage, so if we want only `--newflag`: ``` $>./example -h Usage of example: --newflag="": a simple string $>./example -oldflag str str ``` See [example.go](example/example.go) for more details. docker-0.9.1/pkg/collections/0000755000175000017500000000000012314376205014202 5ustar tagtagdocker-0.9.1/pkg/collections/orderedintset.go0000644000175000017500000000355312314376205017412 0ustar tagtagpackage collections import ( "sync" ) // OrderedIntSet is a thread-safe sorted set and a stack. type OrderedIntSet struct { sync.RWMutex set []int } // NewOrderedIntSet returns an initialized OrderedIntSet func NewOrderedIntSet() *OrderedIntSet { return &OrderedIntSet{} } // Push adds an int to the set. If the element already exists, it has no effect. func (s *OrderedIntSet) Push(elem int) { s.RLock() for _, e := range s.set { if e == elem { s.RUnlock() return } } s.RUnlock() s.Lock() // Make sure the list is always sorted for i, e := range s.set { if elem < e { s.set = append(s.set[:i], append([]int{elem}, s.set[i:]...)...) s.Unlock() return } } // If we reach here, then elem is the biggest elem of the list. s.set = append(s.set, elem) s.Unlock() } // Pop is an alias to PopFront() func (s *OrderedIntSet) Pop() int { return s.PopFront() } // PopFront returns the first element from the list and removes it. // If the list is empty, it returns 0 func (s *OrderedIntSet) PopFront() int { s.RLock() for i, e := range s.set { ret := e s.RUnlock() s.Lock() s.set = append(s.set[:i], s.set[i+1:]...)
s.Unlock() return ret } s.RUnlock() return 0 } // PullBack retrieve the last element of the list. // The element is not removed. // If the list is empty, an empty element is returned. func (s *OrderedIntSet) PullBack() int { if len(s.set) == 0 { return 0 } return s.set[len(s.set)-1] } // Exists checks if the given element present in the list. func (s *OrderedIntSet) Exists(elem int) bool { for _, e := range s.set { if e == elem { return true } } return false } // Remove removes an element from the list. // If the element is not found, it has no effect. func (s *OrderedIntSet) Remove(elem int) { for i, e := range s.set { if e == elem { s.set = append(s.set[:i], s.set[i+1:]...) return } } } docker-0.9.1/pkg/version/0000755000175000017500000000000012314376205013351 5ustar tagtagdocker-0.9.1/pkg/version/version.go0000644000175000017500000000167112314376205015372 0ustar tagtagpackage version import ( "strconv" "strings" ) type Version string func (me Version) compareTo(other string) int { var ( meTab = strings.Split(string(me), ".") otherTab = strings.Split(other, ".") ) for i, s := range meTab { var meInt, otherInt int meInt, _ = strconv.Atoi(s) if len(otherTab) > i { otherInt, _ = strconv.Atoi(otherTab[i]) } if meInt > otherInt { return 1 } if otherInt > meInt { return -1 } } if len(otherTab) > len(meTab) { return -1 } return 0 } func (me Version) LessThan(other string) bool { return me.compareTo(other) == -1 } func (me Version) LessThanOrEqualTo(other string) bool { return me.compareTo(other) <= 0 } func (me Version) GreaterThan(other string) bool { return me.compareTo(other) == 1 } func (me Version) GreaterThanOrEqualTo(other string) bool { return me.compareTo(other) >= 0 } func (me Version) Equal(other string) bool { return me.compareTo(other) == 0 } docker-0.9.1/pkg/version/version_test.go0000644000175000017500000000124512314376205016426 0ustar tagtagpackage version import ( "testing" ) func assertVersion(t *testing.T, a, b string, result int) { if r := Version(a).compareTo(b); r != result { t.Fatalf("Unexpected version comparison result. Found %d, expected %d", r, result) } } func TestCompareVersion(t *testing.T) { assertVersion(t, "1.12", "1.12", 0) assertVersion(t, "1.05.00.0156", "1.0.221.9289", 1) assertVersion(t, "1", "1.0.1", -1) assertVersion(t, "1.0.1", "1", 1) assertVersion(t, "1.0.1", "1.0.2", -1) assertVersion(t, "1.0.2", "1.0.3", -1) assertVersion(t, "1.0.3", "1.1", -1) assertVersion(t, "1.1", "1.1.1", -1) assertVersion(t, "1.1.1", "1.1.2", -1) assertVersion(t, "1.1.2", "1.2", -1) } docker-0.9.1/pkg/systemd/0000755000175000017500000000000012314376205013354 5ustar tagtagdocker-0.9.1/pkg/systemd/activation/0000755000175000017500000000000012314376205015515 5ustar tagtagdocker-0.9.1/pkg/systemd/activation/listeners.go0000644000175000017500000000203512314376205020054 0ustar tagtag/* Copyright 2014 CoreOS Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package activation import ( "fmt" "net" ) // Listeners returns net.Listeners for all socket activated fds passed to this process. 
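// A hedged usage sketch (it assumes the process was launched by systemd with at
// least one activated socket; everything outside this package is illustrative):
//
//	listeners, err := activation.Listeners(false)
//	if err != nil || len(listeners) == 0 {
//		// no activated sockets: fall back to listening manually
//	}
//	// serve on listeners[0] ...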
func Listeners(unsetEnv bool) ([]net.Listener, error) { files := Files(unsetEnv) listeners := make([]net.Listener, len(files)) for i, f := range files { var err error listeners[i], err = net.FileListener(f) if err != nil { return nil, fmt.Errorf("Error setting up FileListener for fd %d: %s", f.Fd(), err.Error()) } } return listeners, nil } docker-0.9.1/pkg/systemd/activation/files.go0000644000175000017500000000264112314376205017151 0ustar tagtag/* Copyright 2013 CoreOS Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package activation implements primitives for systemd socket activation. package activation import ( "os" "strconv" "syscall" ) // based on: https://gist.github.com/alberts/4640792 const ( listenFdsStart = 3 ) func Files(unsetEnv bool) []*os.File { if unsetEnv { // there is no way to unset env in golang os package for now // https://code.google.com/p/go/issues/detail?id=6423 defer os.Setenv("LISTEN_PID", "") defer os.Setenv("LISTEN_FDS", "") } pid, err := strconv.Atoi(os.Getenv("LISTEN_PID")) if err != nil || pid != os.Getpid() { return nil } nfds, err := strconv.Atoi(os.Getenv("LISTEN_FDS")) if err != nil || nfds == 0 { return nil } var files []*os.File for fd := listenFdsStart; fd < listenFdsStart+nfds; fd++ { syscall.CloseOnExec(fd) files = append(files, os.NewFile(uintptr(fd), "LISTEN_FD_"+strconv.Itoa(fd))) } return files } docker-0.9.1/pkg/systemd/MAINTAINERS0000644000175000017500000000007012314376205015046 0ustar tagtagBrandon Philips (@philips) docker-0.9.1/pkg/systemd/sd_notify.go0000644000175000017500000000102612314376205015700 0ustar tagtagpackage systemd import ( "errors" "net" "os" ) var SdNotifyNoSocket = errors.New("No socket") // Send a message to the init daemon. It is common to ignore the error. func SdNotify(state string) error { socketAddr := &net.UnixAddr{ Name: os.Getenv("NOTIFY_SOCKET"), Net: "unixgram", } if socketAddr.Name == "" { return SdNotifyNoSocket } conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr) if err != nil { return err } _, err = conn.Write([]byte(state)) if err != nil { return err } return nil } docker-0.9.1/pkg/systemd/listendfd.go0000644000175000017500000000154712314376205015666 0ustar tagtagpackage systemd import ( "errors" "net" "strconv" "github.com/dotcloud/docker/pkg/systemd/activation" ) // ListenFD returns the specified socket activated files as a slice of // net.Listeners or all of the activated files if "*" is given. 
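// A sketch of the two accepted forms of addr (illustrative only):
//
//	all, err := ListenFD("*") // every socket passed in by systemd
//	one, err := ListenFD("3") // only the first activated fd (fd 3)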
func ListenFD(addr string) ([]net.Listener, error) { // socket activation listeners, err := activation.Listeners(false) if err != nil { return nil, err } if listeners == nil || len(listeners) == 0 { return nil, errors.New("No sockets found") } // default to all fds just like unix:// and tcp:// if addr == "" { addr = "*" } fdNum, _ := strconv.Atoi(addr) fdOffset := fdNum - 3 if (addr != "*") && (len(listeners) < int(fdOffset)+1) { return nil, errors.New("Too few socket activated files passed in") } if addr == "*" { return listeners, nil } return []net.Listener{listeners[fdOffset]}, nil } docker-0.9.1/pkg/proxy/0000755000175000017500000000000012314376205013045 5ustar tagtagdocker-0.9.1/pkg/proxy/udp_proxy.go0000644000175000017500000000761512314376205015436 0ustar tagtagpackage proxy import ( "encoding/binary" "log" "net" "strings" "sync" "syscall" "time" ) const ( UDPConnTrackTimeout = 90 * time.Second UDPBufSize = 2048 ) // A net.Addr where the IP is split into two fields so you can use it as a key // in a map: type connTrackKey struct { IPHigh uint64 IPLow uint64 Port int } func newConnTrackKey(addr *net.UDPAddr) *connTrackKey { if len(addr.IP) == net.IPv4len { return &connTrackKey{ IPHigh: 0, IPLow: uint64(binary.BigEndian.Uint32(addr.IP)), Port: addr.Port, } } return &connTrackKey{ IPHigh: binary.BigEndian.Uint64(addr.IP[:8]), IPLow: binary.BigEndian.Uint64(addr.IP[8:]), Port: addr.Port, } } type connTrackMap map[connTrackKey]*net.UDPConn type UDPProxy struct { listener *net.UDPConn frontendAddr *net.UDPAddr backendAddr *net.UDPAddr connTrackTable connTrackMap connTrackLock sync.Mutex } func NewUDPProxy(frontendAddr, backendAddr *net.UDPAddr) (*UDPProxy, error) { listener, err := net.ListenUDP("udp", frontendAddr) if err != nil { return nil, err } return &UDPProxy{ listener: listener, frontendAddr: listener.LocalAddr().(*net.UDPAddr), backendAddr: backendAddr, connTrackTable: make(connTrackMap), }, nil } func (proxy *UDPProxy) replyLoop(proxyConn *net.UDPConn, clientAddr *net.UDPAddr, clientKey *connTrackKey) { defer func() { proxy.connTrackLock.Lock() delete(proxy.connTrackTable, *clientKey) proxy.connTrackLock.Unlock() proxyConn.Close() }() readBuf := make([]byte, UDPBufSize) for { proxyConn.SetReadDeadline(time.Now().Add(UDPConnTrackTimeout)) again: read, err := proxyConn.Read(readBuf) if err != nil { if err, ok := err.(*net.OpError); ok && err.Err == syscall.ECONNREFUSED { // This will happen if the last write failed // (e.g: nothing is actually listening on the // proxied port on the container), ignore it // and continue until UDPConnTrackTimeout // expires: goto again } return } for i := 0; i != read; { written, err := proxy.listener.WriteToUDP(readBuf[i:read], clientAddr) if err != nil { return } i += written } } } func (proxy *UDPProxy) Run() { readBuf := make([]byte, UDPBufSize) for { read, from, err := proxy.listener.ReadFromUDP(readBuf) if err != nil { // NOTE: Apparently ReadFrom doesn't return // ECONNREFUSED like Read do (see comment in // UDPProxy.replyLoop) if !isClosedError(err) { log.Printf("Stopping proxy on udp/%v for udp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err) } break } fromKey := newConnTrackKey(from) proxy.connTrackLock.Lock() proxyConn, hit := proxy.connTrackTable[*fromKey] if !hit { proxyConn, err = net.DialUDP("udp", nil, proxy.backendAddr) if err != nil { log.Printf("Can't proxy a datagram to udp/%s: %s\n", proxy.backendAddr, err) continue } proxy.connTrackTable[*fromKey] = proxyConn go proxy.replyLoop(proxyConn, from, fromKey) } 
proxy.connTrackLock.Unlock() for i := 0; i != read; { written, err := proxyConn.Write(readBuf[i:read]) if err != nil { log.Printf("Can't proxy a datagram to udp/%s: %s\n", proxy.backendAddr, err) break } i += written } } } func (proxy *UDPProxy) Close() { proxy.listener.Close() proxy.connTrackLock.Lock() defer proxy.connTrackLock.Unlock() for _, conn := range proxy.connTrackTable { conn.Close() } } func (proxy *UDPProxy) FrontendAddr() net.Addr { return proxy.frontendAddr } func (proxy *UDPProxy) BackendAddr() net.Addr { return proxy.backendAddr } func isClosedError(err error) bool { /* This comparison is ugly, but unfortunately, net.go doesn't export errClosing. * See: * http://golang.org/src/pkg/net/net.go * https://code.google.com/p/go/issues/detail?id=4337 * https://groups.google.com/forum/#!msg/golang-nuts/0_aaCvBmOcM/SptmDyX1XJMJ */ return strings.HasSuffix(err.Error(), "use of closed network connection") } docker-0.9.1/pkg/proxy/proxy.go0000644000175000017500000000131412314376205014554 0ustar tagtagpackage proxy import ( "fmt" "net" ) type Proxy interface { // Start forwarding traffic back and forth the front and back-end // addresses. Run() // Stop forwarding traffic and close both ends of the Proxy. Close() // Return the address on which the proxy is listening. FrontendAddr() net.Addr // Return the proxied address. BackendAddr() net.Addr } func NewProxy(frontendAddr, backendAddr net.Addr) (Proxy, error) { switch frontendAddr.(type) { case *net.UDPAddr: return NewUDPProxy(frontendAddr.(*net.UDPAddr), backendAddr.(*net.UDPAddr)) case *net.TCPAddr: return NewTCPProxy(frontendAddr.(*net.TCPAddr), backendAddr.(*net.TCPAddr)) default: panic(fmt.Errorf("Unsupported protocol")) } } docker-0.9.1/pkg/proxy/MAINTAINERS0000644000175000017500000000007412314376205014543 0ustar tagtagMichael Crosby (@crosbymichael) docker-0.9.1/pkg/proxy/tcp_proxy.go0000644000175000017500000000420612314376205015425 0ustar tagtagpackage proxy import ( "io" "log" "net" "syscall" ) type TCPProxy struct { listener *net.TCPListener frontendAddr *net.TCPAddr backendAddr *net.TCPAddr } func NewTCPProxy(frontendAddr, backendAddr *net.TCPAddr) (*TCPProxy, error) { listener, err := net.ListenTCP("tcp", frontendAddr) if err != nil { return nil, err } // If the port in frontendAddr was 0 then ListenTCP will have a picked // a port to listen on, hence the call to Addr to get that actual port: return &TCPProxy{ listener: listener, frontendAddr: listener.Addr().(*net.TCPAddr), backendAddr: backendAddr, }, nil } func (proxy *TCPProxy) clientLoop(client *net.TCPConn, quit chan bool) { backend, err := net.DialTCP("tcp", nil, proxy.backendAddr) if err != nil { log.Printf("Can't forward traffic to backend tcp/%v: %s\n", proxy.backendAddr, err) client.Close() return } event := make(chan int64) var broker = func(to, from *net.TCPConn) { written, err := io.Copy(to, from) if err != nil { // If the socket we are writing to is shutdown with // SHUT_WR, forward it to the other end of the pipe: if err, ok := err.(*net.OpError); ok && err.Err == syscall.EPIPE { from.CloseWrite() } } to.CloseRead() event <- written } go broker(client, backend) go broker(backend, client) var transferred int64 = 0 for i := 0; i < 2; i++ { select { case written := <-event: transferred += written case <-quit: // Interrupt the two brokers and "join" them. 
client.Close() backend.Close() for ; i < 2; i++ { transferred += <-event } return } } client.Close() backend.Close() } func (proxy *TCPProxy) Run() { quit := make(chan bool) defer close(quit) for { client, err := proxy.listener.Accept() if err != nil { log.Printf("Stopping proxy on tcp/%v for tcp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err) return } go proxy.clientLoop(client.(*net.TCPConn), quit) } } func (proxy *TCPProxy) Close() { proxy.listener.Close() } func (proxy *TCPProxy) FrontendAddr() net.Addr { return proxy.frontendAddr } func (proxy *TCPProxy) BackendAddr() net.Addr { return proxy.backendAddr } docker-0.9.1/pkg/proxy/network_proxy_test.go0000644000175000017500000001255412314376205017374 0ustar tagtagpackage proxy import ( "bytes" "fmt" "io" "net" "strings" "testing" "time" ) var testBuf = []byte("Buffalo buffalo Buffalo buffalo buffalo buffalo Buffalo buffalo") var testBufSize = len(testBuf) type EchoServer interface { Run() Close() LocalAddr() net.Addr } type TCPEchoServer struct { listener net.Listener testCtx *testing.T } type UDPEchoServer struct { conn net.PacketConn testCtx *testing.T } func NewEchoServer(t *testing.T, proto, address string) EchoServer { var server EchoServer if strings.HasPrefix(proto, "tcp") { listener, err := net.Listen(proto, address) if err != nil { t.Fatal(err) } server = &TCPEchoServer{listener: listener, testCtx: t} } else { socket, err := net.ListenPacket(proto, address) if err != nil { t.Fatal(err) } server = &UDPEchoServer{conn: socket, testCtx: t} } return server } func (server *TCPEchoServer) Run() { go func() { for { client, err := server.listener.Accept() if err != nil { return } go func(client net.Conn) { if _, err := io.Copy(client, client); err != nil { server.testCtx.Logf("can't echo to the client: %v\n", err.Error()) } client.Close() }(client) } }() } func (server *TCPEchoServer) LocalAddr() net.Addr { return server.listener.Addr() } func (server *TCPEchoServer) Close() { server.listener.Addr() } func (server *UDPEchoServer) Run() { go func() { readBuf := make([]byte, 1024) for { read, from, err := server.conn.ReadFrom(readBuf) if err != nil { return } for i := 0; i != read; { written, err := server.conn.WriteTo(readBuf[i:read], from) if err != nil { break } i += written } } }() } func (server *UDPEchoServer) LocalAddr() net.Addr { return server.conn.LocalAddr() } func (server *UDPEchoServer) Close() { server.conn.Close() } func testProxyAt(t *testing.T, proto string, proxy Proxy, addr string) { defer proxy.Close() go proxy.Run() client, err := net.Dial(proto, addr) if err != nil { t.Fatalf("Can't connect to the proxy: %v", err) } defer client.Close() client.SetDeadline(time.Now().Add(10 * time.Second)) if _, err = client.Write(testBuf); err != nil { t.Fatal(err) } recvBuf := make([]byte, testBufSize) if _, err = client.Read(recvBuf); err != nil { t.Fatal(err) } if !bytes.Equal(testBuf, recvBuf) { t.Fatal(fmt.Errorf("Expected [%v] but got [%v]", testBuf, recvBuf)) } } func testProxy(t *testing.T, proto string, proxy Proxy) { testProxyAt(t, proto, proxy, proxy.FrontendAddr().String()) } func TestTCP4Proxy(t *testing.T) { backend := NewEchoServer(t, "tcp", "127.0.0.1:0") defer backend.Close() backend.Run() frontendAddr := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) if err != nil { t.Fatal(err) } testProxy(t, "tcp", proxy) } func TestTCP6Proxy(t *testing.T) { backend := NewEchoServer(t, "tcp", "[::1]:0") defer backend.Close() backend.Run() frontendAddr := 
&net.TCPAddr{IP: net.IPv6loopback, Port: 0} proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) if err != nil { t.Fatal(err) } testProxy(t, "tcp", proxy) } func TestTCPDualStackProxy(t *testing.T) { // If I understand `godoc -src net favoriteAddrFamily` (used by the // net.Listen* functions) correctly this should work, but it doesn't. t.Skip("No support for dual stack yet") backend := NewEchoServer(t, "tcp", "[::1]:0") defer backend.Close() backend.Run() frontendAddr := &net.TCPAddr{IP: net.IPv6loopback, Port: 0} proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) if err != nil { t.Fatal(err) } ipv4ProxyAddr := &net.TCPAddr{ IP: net.IPv4(127, 0, 0, 1), Port: proxy.FrontendAddr().(*net.TCPAddr).Port, } testProxyAt(t, "tcp", proxy, ipv4ProxyAddr.String()) } func TestUDP4Proxy(t *testing.T) { backend := NewEchoServer(t, "udp", "127.0.0.1:0") defer backend.Close() backend.Run() frontendAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) if err != nil { t.Fatal(err) } testProxy(t, "udp", proxy) } func TestUDP6Proxy(t *testing.T) { backend := NewEchoServer(t, "udp", "[::1]:0") defer backend.Close() backend.Run() frontendAddr := &net.UDPAddr{IP: net.IPv6loopback, Port: 0} proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) if err != nil { t.Fatal(err) } testProxy(t, "udp", proxy) } func TestUDPWriteError(t *testing.T) { frontendAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} // Hopefully, this port will be free: */ backendAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 25587} proxy, err := NewProxy(frontendAddr, backendAddr) if err != nil { t.Fatal(err) } defer proxy.Close() go proxy.Run() client, err := net.Dial("udp", "127.0.0.1:25587") if err != nil { t.Fatalf("Can't connect to the proxy: %v", err) } defer client.Close() // Make sure the proxy doesn't stop when there is no actual backend: client.Write(testBuf) client.Write(testBuf) backend := NewEchoServer(t, "udp", "127.0.0.1:25587") defer backend.Close() backend.Run() client.SetDeadline(time.Now().Add(10 * time.Second)) if _, err = client.Write(testBuf); err != nil { t.Fatal(err) } recvBuf := make([]byte, testBufSize) if _, err = client.Read(recvBuf); err != nil { t.Fatal(err) } if !bytes.Equal(testBuf, recvBuf) { t.Fatal(fmt.Errorf("Expected [%v] but got [%v]", testBuf, recvBuf)) } } docker-0.9.1/pkg/proxy/stub_proxy.go0000644000175000017500000000076512314376205015622 0ustar tagtagpackage proxy import ( "net" ) type StubProxy struct { frontendAddr net.Addr backendAddr net.Addr } func (p *StubProxy) Run() {} func (p *StubProxy) Close() {} func (p *StubProxy) FrontendAddr() net.Addr { return p.frontendAddr } func (p *StubProxy) BackendAddr() net.Addr { return p.backendAddr } func NewStubProxy(frontendAddr, backendAddr net.Addr) (Proxy, error) { return &StubProxy{ frontendAddr: frontendAddr, backendAddr: backendAddr, }, nil } docker-0.9.1/pkg/term/0000755000175000017500000000000012314376205012633 5ustar tagtagdocker-0.9.1/pkg/term/MAINTAINERS0000644000175000017500000000014412314376205014327 0ustar tagtagGuillaume Charmes (@creack) Solomon Hykes (@shykes) docker-0.9.1/pkg/term/termios_linux.go0000644000175000017500000000223712314376205016067 0ustar tagtagpackage term import ( "syscall" "unsafe" ) const ( getTermios = syscall.TCGETS setTermios = syscall.TCSETS ) type Termios struct { Iflag uint32 Oflag uint32 Cflag uint32 Lflag uint32 Cc [20]byte Ispeed uint32 Ospeed uint32 } // MakeRaw put the terminal connected to the given file descriptor into 
raw // mode and returns the previous state of the terminal so that it can be // restored. func MakeRaw(fd uintptr) (*State, error) { var oldState State if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { return nil, err } newState := oldState.termios newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON) newState.Oflag &^= syscall.OPOST newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) newState.Cflag |= syscall.CS8 if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { return nil, err } return &oldState, nil } docker-0.9.1/pkg/term/term.go0000644000175000017500000000432212314376205014132 0ustar tagtagpackage term import ( "errors" "os" "os/signal" "syscall" "unsafe" ) var ( ErrInvalidState = errors.New("Invalid terminal state") ) type State struct { termios Termios } type Winsize struct { Height uint16 Width uint16 x uint16 y uint16 } func GetWinsize(fd uintptr) (*Winsize, error) { ws := &Winsize{} _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws))) // Skip errno = 0 (no error) if err == 0 { return ws, nil } return ws, err } func SetWinsize(fd uintptr, ws *Winsize) error { _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws))) // Skip errno = 0 (no error) if err == 0 { return nil } return err } // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal(fd uintptr) bool { var termios Termios _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&termios))) return err == 0 } // RestoreTerminal restores the terminal connected to the given file descriptor to a // previous state.
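// A sketched pattern for pairing SetRawTerminal with RestoreTerminal (the use of
// os.Stdin here is only an example, and the package is assumed to be imported as term):
//
//	oldState, err := term.SetRawTerminal(os.Stdin.Fd())
//	if err == nil {
//		defer term.RestoreTerminal(os.Stdin.Fd(), oldState)
//	}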
func RestoreTerminal(fd uintptr, state *State) error { if state == nil { return ErrInvalidState } _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&state.termios))) if err != 0 { return err } return nil } func SaveState(fd uintptr) (*State, error) { var oldState State if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { return nil, err } return &oldState, nil } func DisableEcho(fd uintptr, state *State) error { newState := state.termios newState.Lflag &^= syscall.ECHO if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { return err } handleInterrupt(fd, state) return nil } func SetRawTerminal(fd uintptr) (*State, error) { oldState, err := MakeRaw(fd) if err != nil { return nil, err } handleInterrupt(fd, oldState) return oldState, err } func handleInterrupt(fd uintptr, state *State) { sigchan := make(chan os.Signal, 1) signal.Notify(sigchan, os.Interrupt) go func() { _ = <-sigchan RestoreTerminal(fd, state) os.Exit(0) }() } docker-0.9.1/pkg/term/termios_darwin.go0000644000175000017500000000304012314376205016205 0ustar tagtagpackage term import ( "syscall" "unsafe" ) const ( getTermios = syscall.TIOCGETA setTermios = syscall.TIOCSETA IGNBRK = syscall.IGNBRK PARMRK = syscall.PARMRK INLCR = syscall.INLCR IGNCR = syscall.IGNCR ECHONL = syscall.ECHONL CSIZE = syscall.CSIZE ICRNL = syscall.ICRNL ISTRIP = syscall.ISTRIP PARENB = syscall.PARENB ECHO = syscall.ECHO ICANON = syscall.ICANON ISIG = syscall.ISIG IXON = syscall.IXON BRKINT = syscall.BRKINT INPCK = syscall.INPCK OPOST = syscall.OPOST CS8 = syscall.CS8 IEXTEN = syscall.IEXTEN ) type Termios struct { Iflag uint64 Oflag uint64 Cflag uint64 Lflag uint64 Cc [20]byte Ispeed uint64 Ospeed uint64 } // MakeRaw put the terminal connected to the given file descriptor into raw // mode and returns the previous state of the terminal so that it can be // restored. func MakeRaw(fd uintptr) (*State, error) { var oldState State if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { return nil, err } newState := oldState.termios newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) newState.Oflag &^= OPOST newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) newState.Cflag &^= (CSIZE | PARENB) newState.Cflag |= CS8 newState.Cc[syscall.VMIN] = 1 newState.Cc[syscall.VTIME] = 0 if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { return nil, err } return &oldState, nil } docker-0.9.1/pkg/signal/0000755000175000017500000000000012314376205013141 5ustar tagtagdocker-0.9.1/pkg/signal/signal.go0000644000175000017500000000045312314376205014747 0ustar tagtagpackage signal import ( "os" "os/signal" ) func CatchAll(sigc chan os.Signal) { handledSigs := []os.Signal{} for _, s := range SignalMap { handledSigs = append(handledSigs, s) } signal.Notify(sigc, handledSigs...) 
} func StopCatch(sigc chan os.Signal) { signal.Stop(sigc) close(sigc) } docker-0.9.1/pkg/signal/signal_darwin.go0000644000175000017500000000171412314376205016314 0ustar tagtagpackage signal import ( "syscall" ) var SignalMap = map[string]syscall.Signal{ "ABRT": syscall.SIGABRT, "ALRM": syscall.SIGALRM, "BUG": syscall.SIGBUS, "CHLD": syscall.SIGCHLD, "CONT": syscall.SIGCONT, "EMT": syscall.SIGEMT, "FPE": syscall.SIGFPE, "HUP": syscall.SIGHUP, "ILL": syscall.SIGILL, "INFO": syscall.SIGINFO, "INT": syscall.SIGINT, "IO": syscall.SIGIO, "IOT": syscall.SIGIOT, "KILL": syscall.SIGKILL, "PIPE": syscall.SIGPIPE, "PROF": syscall.SIGPROF, "QUIT": syscall.SIGQUIT, "SEGV": syscall.SIGSEGV, "STOP": syscall.SIGSTOP, "SYS": syscall.SIGSYS, "TERM": syscall.SIGTERM, "TRAP": syscall.SIGTRAP, "TSTP": syscall.SIGTSTP, "TTIN": syscall.SIGTTIN, "TTOU": syscall.SIGTTOU, "URG": syscall.SIGURG, "USR1": syscall.SIGUSR1, "USR2": syscall.SIGUSR2, "VTALRM": syscall.SIGVTALRM, "WINCH": syscall.SIGWINCH, "XCPU": syscall.SIGXCPU, "XFSZ": syscall.SIGXFSZ, } docker-0.9.1/pkg/signal/signal_unsupported.go0000644000175000017500000000016612314376205017420 0ustar tagtag// +build !linux,!darwin,!freebsd package signal import ( "syscall" ) var SignalMap = map[string]syscall.Signal{} docker-0.9.1/pkg/signal/signal_freebsd.go0000644000175000017500000000202512314376205016436 0ustar tagtagpackage signal import ( "os" "os/signal" "syscall" ) var SignalMap = map[string]syscall.Signal{ "ABRT": syscall.SIGABRT, "ALRM": syscall.SIGALRM, "BUF": syscall.SIGBUS, "CHLD": syscall.SIGCHLD, "CONT": syscall.SIGCONT, "EMT": syscall.SIGEMT, "FPE": syscall.SIGFPE, "HUP": syscall.SIGHUP, "ILL": syscall.SIGILL, "INFO": syscall.SIGINFO, "INT": syscall.SIGINT, "IO": syscall.SIGIO, "IOT": syscall.SIGIOT, "KILL": syscall.SIGKILL, "LWP": syscall.SIGLWP, "PIPE": syscall.SIGPIPE, "PROF": syscall.SIGPROF, "QUIT": syscall.SIGQUIT, "SEGV": syscall.SIGSEGV, "STOP": syscall.SIGSTOP, "SYS": syscall.SIGSYS, "TERM": syscall.SIGTERM, "THR": syscall.SIGTHR, "TRAP": syscall.SIGTRAP, "TSTP": syscall.SIGTSTP, "TTIN": syscall.SIGTTIN, "TTOU": syscall.SIGTTOU, "URG": syscall.SIGURG, "USR1": syscall.SIGUSR1, "USR2": syscall.SIGUSR2, "VTALRM": syscall.SIGVTALRM, "WINCH": syscall.SIGWINCH, "XCPU": syscall.SIGXCPU, "XFSZ": syscall.SIGXFSZ, } docker-0.9.1/pkg/signal/signal_linux.go0000644000175000017500000000204312314376205016163 0ustar tagtagpackage signal import ( "syscall" ) var SignalMap = map[string]syscall.Signal{ "ABRT": syscall.SIGABRT, "ALRM": syscall.SIGALRM, "BUS": syscall.SIGBUS, "CHLD": syscall.SIGCHLD, "CLD": syscall.SIGCLD, "CONT": syscall.SIGCONT, "FPE": syscall.SIGFPE, "HUP": syscall.SIGHUP, "ILL": syscall.SIGILL, "INT": syscall.SIGINT, "IO": syscall.SIGIO, "IOT": syscall.SIGIOT, "KILL": syscall.SIGKILL, "PIPE": syscall.SIGPIPE, "POLL": syscall.SIGPOLL, "PROF": syscall.SIGPROF, "PWR": syscall.SIGPWR, "QUIT": syscall.SIGQUIT, "SEGV": syscall.SIGSEGV, "STKFLT": syscall.SIGSTKFLT, "STOP": syscall.SIGSTOP, "SYS": syscall.SIGSYS, "TERM": syscall.SIGTERM, "TRAP": syscall.SIGTRAP, "TSTP": syscall.SIGTSTP, "TTIN": syscall.SIGTTIN, "TTOU": syscall.SIGTTOU, "UNUSED": syscall.SIGUNUSED, "URG": syscall.SIGURG, "USR1": syscall.SIGUSR1, "USR2": syscall.SIGUSR2, "VTALRM": syscall.SIGVTALRM, "WINCH": syscall.SIGWINCH, "XCPU": syscall.SIGXCPU, "XFSZ": syscall.SIGXFSZ, } docker-0.9.1/pkg/system/0000755000175000017500000000000012314376205013210 5ustar tagtagdocker-0.9.1/pkg/system/stat_linux.go0000644000175000017500000000031612314376205015731 0ustar tagtagpackage system import ( 
"syscall" ) func GetLastAccess(stat *syscall.Stat_t) syscall.Timespec { return stat.Atim } func GetLastModification(stat *syscall.Stat_t) syscall.Timespec { return stat.Mtim } docker-0.9.1/pkg/system/errors.go0000644000175000017500000000020212314376205015045 0ustar tagtagpackage system import ( "errors" ) var ( ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") ) docker-0.9.1/pkg/system/xattrs_linux.go0000644000175000017500000000312112314376205016300 0ustar tagtagpackage system import ( "syscall" "unsafe" ) // Returns a nil slice and nil error if the xattr is not set func Lgetxattr(path string, attr string) ([]byte, error) { pathBytes, err := syscall.BytePtrFromString(path) if err != nil { return nil, err } attrBytes, err := syscall.BytePtrFromString(attr) if err != nil { return nil, err } dest := make([]byte, 128) destBytes := unsafe.Pointer(&dest[0]) sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) if errno == syscall.ENODATA { return nil, nil } if errno == syscall.ERANGE { dest = make([]byte, sz) destBytes := unsafe.Pointer(&dest[0]) sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) } if errno != 0 { return nil, errno } return dest[:sz], nil } var _zero uintptr func Lsetxattr(path string, attr string, data []byte, flags int) error { pathBytes, err := syscall.BytePtrFromString(path) if err != nil { return err } attrBytes, err := syscall.BytePtrFromString(attr) if err != nil { return err } var dataBytes unsafe.Pointer if len(data) > 0 { dataBytes = unsafe.Pointer(&data[0]) } else { dataBytes = unsafe.Pointer(&_zero) } _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) if errno != 0 { return errno } return nil } docker-0.9.1/pkg/system/utimes_unsupported.go0000644000175000017500000000036212314376205017516 0ustar tagtag// +build !linux package system import "syscall" func LUtimesNano(path string, ts []syscall.Timespec) error { return ErrNotSupportedPlatform } func UtimesNano(path string, ts []syscall.Timespec) error { return ErrNotSupportedPlatform } docker-0.9.1/pkg/system/utimes_linux.go0000644000175000017500000000127012314376205016264 0ustar tagtagpackage system import ( "syscall" "unsafe" ) func LUtimesNano(path string, ts []syscall.Timespec) error { // These are not currently available in syscall AT_FDCWD := -100 AT_SYMLINK_NOFOLLOW := 0x100 var _path *byte _path, err := syscall.BytePtrFromString(path) if err != nil { return err } if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(AT_FDCWD), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(AT_SYMLINK_NOFOLLOW), 0, 0); err != 0 && err != syscall.ENOSYS { return err } return nil } func UtimesNano(path string, ts []syscall.Timespec) error { if err := syscall.UtimesNano(path, ts); err != nil { return err } return nil } docker-0.9.1/pkg/system/setns_linux.go0000644000175000017500000000123512314376205016113 0ustar tagtagpackage system import ( "fmt" "runtime" "syscall" ) // Via http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=7b21fddd087678a70ad64afc0f632e0f1071b092 // // We need different setns values for the different platforms and arch // We are declaring the macro here 
because the SETNS syscall does not exist in the stdlib var setNsMap = map[string]uintptr{ "linux/amd64": 308, } func Setns(fd uintptr, flags uintptr) error { ns, exists := setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)] if !exists { return ErrNotSupportedPlatform } _, _, err := syscall.RawSyscall(ns, fd, flags, 0) if err != 0 { return err } return nil } docker-0.9.1/pkg/system/stat_unsupported.go0000644000175000017500000000034512314376205017164 0ustar tagtag// +build !linux package system import "syscall" func GetLastAccess(stat *syscall.Stat_t) syscall.Timespec { return stat.Atimespec } func GetLastModification(stat *syscall.Stat_t) syscall.Timespec { return stat.Mtimespec } docker-0.9.1/pkg/system/pty_linux.go0000644000175000017500000000334412314376205015576 0ustar tagtagpackage system import ( "fmt" "os" "syscall" "unsafe" ) // Unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. // Unlockpt should be called before opening the slave side of a pseudoterminal. func Unlockpt(f *os.File) error { var u int return Ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) } // Ptsname retrieves the name of the first available pts for the given master. func Ptsname(f *os.File) (string, error) { var n int if err := Ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil { return "", err } return fmt.Sprintf("/dev/pts/%d", n), nil } // CreateMasterAndConsole will open /dev/ptmx on the host and retrieve the // pts name for use as the pty slave inside the container func CreateMasterAndConsole() (*os.File, string, error) { master, err := os.OpenFile("/dev/ptmx", syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0) if err != nil { return nil, "", err } console, err := Ptsname(master) if err != nil { return nil, "", err } if err := Unlockpt(master); err != nil { return nil, "", err } return master, console, nil } // OpenPtmx opens /dev/ptmx, i.e. the PTY master. func OpenPtmx() (*os.File, error) { // O_NOCTTY and O_CLOEXEC are not present in os package so we use the syscall's one for all.
return os.OpenFile("/dev/ptmx", syscall.O_RDONLY|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0) } // OpenTerminal is a clone of os.OpenFile without the O_CLOEXEC // used to open the pty slave inside the container namespace func OpenTerminal(name string, flag int) (*os.File, error) { r, e := syscall.Open(name, flag, 0) if e != nil { return nil, &os.PathError{"open", name, e} } return os.NewFile(uintptr(r), name), nil } docker-0.9.1/pkg/system/unsupported.go0000644000175000017500000000027212314376205016130 0ustar tagtag// +build !linux package system import ( "os/exec" ) func SetCloneFlags(cmd *exec.Cmd, flag uintptr) { } func UsetCloseOnExec(fd uintptr) error { return ErrNotSupportedPlatform } docker-0.9.1/pkg/system/xattrs_unsupported.go0000644000175000017500000000036012314376205017533 0ustar tagtag// +build !linux package system func Lgetxattr(path string, attr string) ([]byte, error) { return nil, ErrNotSupportedPlatform } func Lsetxattr(path string, attr string, data []byte, flags int) error { return ErrNotSupportedPlatform } docker-0.9.1/pkg/system/calls_linux.go0000644000175000017500000000561012314376205016056 0ustar tagtagpackage system import ( "os/exec" "syscall" ) func Chroot(dir string) error { return syscall.Chroot(dir) } func Chdir(dir string) error { return syscall.Chdir(dir) } func Exec(cmd string, args []string, env []string) error { return syscall.Exec(cmd, args, env) } func Execv(cmd string, args []string, env []string) error { name, err := exec.LookPath(cmd) if err != nil { return err } return Exec(name, args, env) } func Fork() (int, error) { syscall.ForkLock.Lock() pid, _, err := syscall.Syscall(syscall.SYS_FORK, 0, 0, 0) syscall.ForkLock.Unlock() if err != 0 { return -1, err } return int(pid), nil } func Mount(source, target, fstype string, flags uintptr, data string) error { return syscall.Mount(source, target, fstype, flags, data) } func Unmount(target string, flags int) error { return syscall.Unmount(target, flags) } func Pivotroot(newroot, putold string) error { return syscall.PivotRoot(newroot, putold) } func Unshare(flags int) error { return syscall.Unshare(flags) } func Clone(flags uintptr) (int, error) { syscall.ForkLock.Lock() pid, _, err := syscall.RawSyscall(syscall.SYS_CLONE, flags, 0, 0) syscall.ForkLock.Unlock() if err != 0 { return -1, err } return int(pid), nil } func UsetCloseOnExec(fd uintptr) error { if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, fd, syscall.F_SETFD, 0); err != 0 { return err } return nil } func Setgroups(gids []int) error { return syscall.Setgroups(gids) } func Setresgid(rgid, egid, sgid int) error { return syscall.Setresgid(rgid, egid, sgid) } func Setresuid(ruid, euid, suid int) error { return syscall.Setresuid(ruid, euid, suid) } func Setgid(gid int) error { return syscall.Setgid(gid) } func Setuid(uid int) error { return syscall.Setuid(uid) } func Sethostname(name string) error { return syscall.Sethostname([]byte(name)) } func Setsid() (int, error) { return syscall.Setsid() } func Ioctl(fd uintptr, flag, data uintptr) error { if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, flag, data); err != 0 { return err } return nil } func Closefd(fd uintptr) error { return syscall.Close(int(fd)) } func Dup2(fd1, fd2 uintptr) error { return syscall.Dup2(int(fd1), int(fd2)) } func Mknod(path string, mode uint32, dev int) error { return syscall.Mknod(path, mode, dev) } func ParentDeathSignal(sig uintptr) error { if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, sig, 0); err != 0 { return err } return nil } func 
Setctty() error { if _, _, err := syscall.RawSyscall(syscall.SYS_IOCTL, 0, uintptr(syscall.TIOCSCTTY), 0); err != 0 { return err } return nil } func Mkfifo(name string, mode uint32) error { return syscall.Mkfifo(name, mode) } func Umask(mask int) int { return syscall.Umask(mask) } func SetCloneFlags(cmd *exec.Cmd, flag uintptr) { if cmd.SysProcAttr == nil { cmd.SysProcAttr = &syscall.SysProcAttr{} } cmd.SysProcAttr.Cloneflags = flag } docker-0.9.1/pkg/iptables/0000755000175000017500000000000012314376205013467 5ustar tagtagdocker-0.9.1/pkg/iptables/MAINTAINERS0000644000175000017500000000007412314376205015165 0ustar tagtagMichael Crosby (@crosbymichael) docker-0.9.1/pkg/iptables/iptables.go0000644000175000017500000000757512314376205015637 0ustar tagtagpackage iptables import ( "errors" "fmt" "net" "os" "os/exec" "strconv" "strings" ) type Action string const ( Add Action = "-A" Delete Action = "-D" ) var ( ErrIptablesNotFound = errors.New("Iptables not found") nat = []string{"-t", "nat"} ) type Chain struct { Name string Bridge string } func NewChain(name, bridge string) (*Chain, error) { if output, err := Raw("-t", "nat", "-N", name); err != nil { return nil, err } else if len(output) != 0 { return nil, fmt.Errorf("Error creating new iptables chain: %s", output) } chain := &Chain{ Name: name, Bridge: bridge, } if err := chain.Prerouting(Add, "-m", "addrtype", "--dst-type", "LOCAL"); err != nil { return nil, fmt.Errorf("Failed to inject docker in PREROUTING chain: %s", err) } if err := chain.Output(Add, "-m", "addrtype", "--dst-type", "LOCAL", "!", "--dst", "127.0.0.0/8"); err != nil { return nil, fmt.Errorf("Failed to inject docker in OUTPUT chain: %s", err) } return chain, nil } func RemoveExistingChain(name string) error { chain := &Chain{ Name: name, } return chain.Remove() } func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr string, dest_port int) error { daddr := ip.String() if ip.IsUnspecified() { // iptables interprets "0.0.0.0" as "0.0.0.0/32", whereas we // want "0.0.0.0/0". "0/0" is correctly interpreted as "any // value" by both iptables and ip6tables. daddr = "0/0" } if output, err := Raw("-t", "nat", fmt.Sprint(action), c.Name, "-p", proto, "-d", daddr, "--dport", strconv.Itoa(port), "!", "-i", c.Bridge, "-j", "DNAT", "--to-destination", net.JoinHostPort(dest_addr, strconv.Itoa(dest_port))); err != nil { return err } else if len(output) != 0 { return fmt.Errorf("Error iptables forward: %s", output) } fAction := action if fAction == Add { fAction = "-I" } if output, err := Raw(string(fAction), "FORWARD", "!", "-i", c.Bridge, "-o", c.Bridge, "-p", proto, "-d", dest_addr, "--dport", strconv.Itoa(dest_port), "-j", "ACCEPT"); err != nil { return err } else if len(output) != 0 { return fmt.Errorf("Error iptables forward: %s", output) } return nil } func (c *Chain) Prerouting(action Action, args ...string) error { a := append(nat, fmt.Sprint(action), "PREROUTING") if len(args) > 0 { a = append(a, args...) } if output, err := Raw(append(a, "-j", c.Name)...); err != nil { return err } else if len(output) != 0 { return fmt.Errorf("Error iptables prerouting: %s", output) } return nil } func (c *Chain) Output(action Action, args ...string) error { a := append(nat, fmt.Sprint(action), "OUTPUT") if len(args) > 0 { a = append(a, args...) 
} if output, err := Raw(append(a, "-j", c.Name)...); err != nil { return err } else if len(output) != 0 { return fmt.Errorf("Error iptables output: %s", output) } return nil } func (c *Chain) Remove() error { // Ignore errors - This could mean the chains were never set up c.Prerouting(Delete, "-m", "addrtype", "--dst-type", "LOCAL") c.Output(Delete, "-m", "addrtype", "--dst-type", "LOCAL", "!", "--dst", "127.0.0.0/8") c.Output(Delete, "-m", "addrtype", "--dst-type", "LOCAL") // Created in versions <= 0.1.6 c.Prerouting(Delete) c.Output(Delete) Raw("-t", "nat", "-F", c.Name) Raw("-t", "nat", "-X", c.Name) return nil } // Check if an existing rule exists func Exists(args ...string) bool { if _, err := Raw(append([]string{"-C"}, args...)...); err != nil { return false } return true } func Raw(args ...string) ([]byte, error) { path, err := exec.LookPath("iptables") if err != nil { return nil, ErrIptablesNotFound } if os.Getenv("DEBUG") != "" { fmt.Printf("[DEBUG] [iptables]: %s, %v\n", path, args) } output, err := exec.Command(path, args...).CombinedOutput() if err != nil { return nil, fmt.Errorf("iptables failed: iptables %v: %s (%s)", strings.Join(args, " "), output, err) } return output, err } docker-0.9.1/pkg/README.md0000644000175000017500000000150312314376205013142 0ustar tagtagpkg/ is a collection of utility packages used by the Docker project without being specific to its internals. Utility packages are kept separate from the docker core codebase to keep it as small and concise as possible. If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the Docker organization, to facilitate re-use by other projects. However that is not the priority. The directory `pkg` is named after the same directory in the camlistore project. Since Brad is a core Go maintainer, we thought it made sense to copy his methods for organizing Go code :) Thanks Brad! Because utility packages are small and neatly separated from the rest of the codebase, they are a good place to start for aspiring maintainers and contributors. Get in touch if you want to help maintain them! 
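The iptables package above shells out to the iptables binary to manage a per-bridge NAT chain. The following is a minimal, hypothetical sketch of how its exported API might be driven; it assumes root privileges, an iptables binary on the PATH, a bridge named docker0, and an invented container address, none of which come from the source above.

package main

import (
	"log"
	"net"

	"github.com/dotcloud/docker/pkg/iptables"
)

func main() {
	// Create a NAT chain tied to the docker0 bridge (both names are made up for this sketch).
	chain, err := iptables.NewChain("DOCKER-DEMO", "docker0")
	if err != nil {
		log.Fatal(err)
	}
	// DNAT host port 8080/tcp on any address to a hypothetical container at 172.17.0.2:80.
	if err := chain.Forward(iptables.Add, net.ParseIP("0.0.0.0"), 8080, "tcp", "172.17.0.2", 80); err != nil {
		log.Fatal(err)
	}
	// Flush and delete the chain when finished.
	if err := chain.Remove(); err != nil {
		log.Fatal(err)
	}
}

Note that Chain.Remove deliberately ignores errors from rules that were never installed, so teardown like this is safe to call unconditionally.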
docker-0.9.1/pkg/graphdb/0000755000175000017500000000000012314376205013273 5ustar tagtagdocker-0.9.1/pkg/graphdb/conn_unsupported.go0000644000175000017500000000017312314376205017230 0ustar tagtag// +build !linux !amd64 package graphdb func NewSqliteConn(root string) (*Database, error) { panic("Not implemented") } docker-0.9.1/pkg/graphdb/graphdb.go0000644000175000017500000002446712314376205015246 0ustar tagtagpackage graphdb import ( "database/sql" "fmt" "path" "strings" "sync" ) const ( createEntityTable = ` CREATE TABLE IF NOT EXISTS entity ( id text NOT NULL PRIMARY KEY );` createEdgeTable = ` CREATE TABLE IF NOT EXISTS edge ( "entity_id" text NOT NULL, "parent_id" text NULL, "name" text NOT NULL, CONSTRAINT "parent_fk" FOREIGN KEY ("parent_id") REFERENCES "entity" ("id"), CONSTRAINT "entity_fk" FOREIGN KEY ("entity_id") REFERENCES "entity" ("id") ); ` createEdgeIndices = ` CREATE UNIQUE INDEX IF NOT EXISTS "name_parent_ix" ON "edge" (parent_id, name); ` ) // Entity with a unique id type Entity struct { id string } // An Edge connects two entities together type Edge struct { EntityID string Name string ParentID string } type Entities map[string]*Entity type Edges []*Edge type WalkFunc func(fullPath string, entity *Entity) error // Graph database for storing entities and their relationships type Database struct { conn *sql.DB mux sync.RWMutex } func IsNonUniqueNameError(err error) bool { str := err.Error() // sqlite 3.7.17-1ubuntu1 returns: // Set failure: Abort due to constraint violation: columns parent_id, name are not unique if strings.HasSuffix(str, "name are not unique") { return true } // sqlite-3.8.3-1.fc20 returns: // Set failure: Abort due to constraint violation: UNIQUE constraint failed: edge.parent_id, edge.name if strings.Contains(str, "UNIQUE constraint failed") && strings.Contains(str, "edge.name") { return true } return false } // Create a new graph database initialized with a root entity func NewDatabase(conn *sql.DB, init bool) (*Database, error) { if conn == nil { return nil, fmt.Errorf("Database connection cannot be nil") } db := &Database{conn: conn} if init { if _, err := conn.Exec(createEntityTable); err != nil { return nil, err } if _, err := conn.Exec(createEdgeTable); err != nil { return nil, err } if _, err := conn.Exec(createEdgeIndices); err != nil { return nil, err } rollback := func() { conn.Exec("ROLLBACK") } // Create root entities if _, err := conn.Exec("BEGIN"); err != nil { return nil, err } if _, err := conn.Exec("INSERT INTO entity (id) VALUES (?);", "0"); err != nil { rollback() return nil, err } if _, err := conn.Exec("INSERT INTO edge (entity_id, name) VALUES(?,?);", "0", "/"); err != nil { rollback() return nil, err } if _, err := conn.Exec("COMMIT"); err != nil { return nil, err } } return db, nil } // Close the underlying connection to the database func (db *Database) Close() error { return db.conn.Close() } // Set the entity id for a given path func (db *Database) Set(fullPath, id string) (*Entity, error) { db.mux.Lock() defer db.mux.Unlock() rollback := func() { db.conn.Exec("ROLLBACK") } if _, err := db.conn.Exec("BEGIN EXCLUSIVE"); err != nil { return nil, err } var entityId string if err := db.conn.QueryRow("SELECT id FROM entity WHERE id = ?;", id).Scan(&entityId); err != nil { if err == sql.ErrNoRows { if _, err := db.conn.Exec("INSERT INTO entity (id) VALUES(?);", id); err != nil { rollback() return nil, err } } else { rollback() return nil, err } } e := &Entity{id} parentPath, name := splitPath(fullPath) if err := 
db.setEdge(parentPath, name, e); err != nil { rollback() return nil, err } if _, err := db.conn.Exec("COMMIT"); err != nil { return nil, err } return e, nil } // Return true if a name already exists in the database func (db *Database) Exists(name string) bool { db.mux.RLock() defer db.mux.RUnlock() e, err := db.get(name) if err != nil { return false } return e != nil } func (db *Database) setEdge(parentPath, name string, e *Entity) error { parent, err := db.get(parentPath) if err != nil { return err } if parent.id == e.id { return fmt.Errorf("Cannot set self as child") } if _, err := db.conn.Exec("INSERT INTO edge (parent_id, name, entity_id) VALUES (?,?,?);", parent.id, name, e.id); err != nil { return err } return nil } // Return the root "/" entity for the database func (db *Database) RootEntity() *Entity { return &Entity{ id: "0", } } // Return the entity for a given path func (db *Database) Get(name string) *Entity { db.mux.RLock() defer db.mux.RUnlock() e, err := db.get(name) if err != nil { return nil } return e } func (db *Database) get(name string) (*Entity, error) { e := db.RootEntity() // We always know the root name so return it if // it is requested if name == "/" { return e, nil } parts := split(name) for i := 1; i < len(parts); i++ { p := parts[i] if p == "" { continue } next := db.child(e, p) if next == nil { return nil, fmt.Errorf("Cannot find child for %s", name) } e = next } return e, nil } // List all entities by from the name // The key will be the full path of the entity func (db *Database) List(name string, depth int) Entities { db.mux.RLock() defer db.mux.RUnlock() out := Entities{} e, err := db.get(name) if err != nil { return out } children, err := db.children(e, name, depth, nil) if err != nil { return out } for _, c := range children { out[c.FullPath] = c.Entity } return out } // Walk through the child graph of an entity, calling walkFunc for each child entity. // It is safe for walkFunc to call graph functions. 
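// The depth argument follows the same convention as Children and List: 0 visits only the
// direct children of name, a positive value limits how many levels are descended, and -1
// walks the entire subtree.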
func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error { children, err := db.Children(name, depth) if err != nil { return err } // Note: the database lock must not be held while calling walkFunc for _, c := range children { if err := walkFunc(c.FullPath, c.Entity); err != nil { return err } } return nil } // Return the children of the specified entity func (db *Database) Children(name string, depth int) ([]WalkMeta, error) { db.mux.RLock() defer db.mux.RUnlock() e, err := db.get(name) if err != nil { return nil, err } return db.children(e, name, depth, nil) } // Return the refrence count for a specified id func (db *Database) Refs(id string) int { db.mux.RLock() defer db.mux.RUnlock() var count int if err := db.conn.QueryRow("SELECT COUNT(*) FROM edge WHERE entity_id = ?;", id).Scan(&count); err != nil { return 0 } return count } // Return all the id's path references func (db *Database) RefPaths(id string) Edges { db.mux.RLock() defer db.mux.RUnlock() refs := Edges{} rows, err := db.conn.Query("SELECT name, parent_id FROM edge WHERE entity_id = ?;", id) if err != nil { return refs } defer rows.Close() for rows.Next() { var name string var parentId string if err := rows.Scan(&name, &parentId); err != nil { return refs } refs = append(refs, &Edge{ EntityID: id, Name: name, ParentID: parentId, }) } return refs } // Delete the reference to an entity at a given path func (db *Database) Delete(name string) error { db.mux.Lock() defer db.mux.Unlock() if name == "/" { return fmt.Errorf("Cannot delete root entity") } parentPath, n := splitPath(name) parent, err := db.get(parentPath) if err != nil { return err } if _, err := db.conn.Exec("DELETE FROM edge WHERE parent_id = ? AND name = ?;", parent.id, n); err != nil { return err } return nil } // Remove the entity with the specified id // Walk the graph to make sure all references to the entity // are removed and return the number of references removed func (db *Database) Purge(id string) (int, error) { db.mux.Lock() defer db.mux.Unlock() rollback := func() { db.conn.Exec("ROLLBACK") } if _, err := db.conn.Exec("BEGIN"); err != nil { return -1, err } // Delete all edges rows, err := db.conn.Exec("DELETE FROM edge WHERE entity_id = ?;", id) if err != nil { rollback() return -1, err } changes, err := rows.RowsAffected() if err != nil { return -1, err } // Delete entity if _, err := db.conn.Exec("DELETE FROM entity where id = ?;", id); err != nil { rollback() return -1, err } if _, err := db.conn.Exec("COMMIT"); err != nil { return -1, err } return int(changes), nil } // Rename an edge for a given path func (db *Database) Rename(currentName, newName string) error { db.mux.Lock() defer db.mux.Unlock() parentPath, name := splitPath(currentName) newParentPath, newEdgeName := splitPath(newName) if parentPath != newParentPath { return fmt.Errorf("Cannot rename when root paths do not match %s != %s", parentPath, newParentPath) } parent, err := db.get(parentPath) if err != nil { return err } rows, err := db.conn.Exec("UPDATE edge SET name = ? WHERE parent_id = ? 
AND name = ?;", newEdgeName, parent.id, name) if err != nil { return err } i, err := rows.RowsAffected() if err != nil { return err } if i == 0 { return fmt.Errorf("Cannot locate edge for %s %s", parent.id, name) } return nil } type WalkMeta struct { Parent *Entity Entity *Entity FullPath string Edge *Edge } func (db *Database) children(e *Entity, name string, depth int, entities []WalkMeta) ([]WalkMeta, error) { if e == nil { return entities, nil } rows, err := db.conn.Query("SELECT entity_id, name FROM edge where parent_id = ?;", e.id) if err != nil { return nil, err } defer rows.Close() for rows.Next() { var entityId, entityName string if err := rows.Scan(&entityId, &entityName); err != nil { return nil, err } child := &Entity{entityId} edge := &Edge{ ParentID: e.id, Name: entityName, EntityID: child.id, } meta := WalkMeta{ Parent: e, Entity: child, FullPath: path.Join(name, edge.Name), Edge: edge, } entities = append(entities, meta) if depth != 0 { nDepth := depth if depth != -1 { nDepth -= 1 } entities, err = db.children(child, meta.FullPath, nDepth, entities) if err != nil { return nil, err } } } return entities, nil } // Return the entity based on the parent path and name func (db *Database) child(parent *Entity, name string) *Entity { var id string if err := db.conn.QueryRow("SELECT entity_id FROM edge WHERE parent_id = ? AND name = ?;", parent.id, name).Scan(&id); err != nil { return nil } return &Entity{id} } // Return the id used to reference this entity func (e *Entity) ID() string { return e.id } // Return the paths sorted by depth func (e Entities) Paths() []string { out := make([]string, len(e)) var i int for k := range e { out[i] = k i++ } sortByDepth(out) return out } docker-0.9.1/pkg/graphdb/sort_test.go0000644000175000017500000000074112314376205015652 0ustar tagtagpackage graphdb import ( "testing" ) func TestSort(t *testing.T) { paths := []string{ "/", "/myreallylongname", "/app/db", } sortByDepth(paths) if len(paths) != 3 { t.Fatalf("Expected 3 parts got %d", len(paths)) } if paths[0] != "/app/db" { t.Fatalf("Expected /app/db got %s", paths[0]) } if paths[1] != "/myreallylongname" { t.Fatalf("Expected /myreallylongname got %s", paths[1]) } if paths[2] != "/" { t.Fatalf("Expected / got %s", paths[2]) } } docker-0.9.1/pkg/graphdb/MAINTAINERS0000644000175000017500000000007412314376205014771 0ustar tagtagMichael Crosby (@crosbymichael) docker-0.9.1/pkg/graphdb/utils.go0000644000175000017500000000076512314376205014772 0ustar tagtagpackage graphdb import ( "path" "strings" ) // Split p on / func split(p string) []string { return strings.Split(p, "/") } // Returns the depth or number of / in a given path func PathDepth(p string) int { parts := split(p) if len(parts) == 2 && parts[1] == "" { return 1 } return len(parts) } func splitPath(p string) (parent, name string) { if p[0] != '/' { p = "/" + p } parent, name = path.Split(p) l := len(parent) if parent[l-1] == '/' { parent = parent[:l-1] } return } docker-0.9.1/pkg/graphdb/conn_linux.go0000644000175000017500000000070412314376205015777 0ustar tagtag// +build amd64 package graphdb import ( _ "code.google.com/p/gosqlite/sqlite3" // registers sqlite "database/sql" "os" ) func NewSqliteConn(root string) (*Database, error) { initDatabase := false if _, err := os.Stat(root); err != nil { if os.IsNotExist(err) { initDatabase = true } else { return nil, err } } conn, err := sql.Open("sqlite3", root) if err != nil { return nil, err } return NewDatabase(conn, initDatabase) } 
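To make the graphdb API above concrete, here is a small, hypothetical usage sketch. The database path and entity ids are invented, and NewSqliteConn is only compiled in on linux/amd64 (per the build tags above); everything else uses the exported functions exactly as declared in graphdb.go.

package main

import (
	"fmt"
	"log"

	"github.com/dotcloud/docker/pkg/graphdb"
)

func main() {
	// Open (and initialize, if absent) a sqlite-backed database at a made-up path.
	db, err := graphdb.NewSqliteConn("/tmp/names.db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Register a parent entity and a child entity under invented ids.
	if _, err := db.Set("/webapp", "id-1"); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Set("/webapp/db", "id-2"); err != nil {
		log.Fatal(err)
	}

	// Walk the whole tree; -1 means unlimited depth.
	if err := db.Walk("/", func(p string, e *graphdb.Entity) error {
		fmt.Println(p, "->", e.ID())
		return nil
	}, -1); err != nil {
		log.Fatal(err)
	}
}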
docker-0.9.1/pkg/graphdb/graphdb_test.go0000644000175000017500000002445112314376205016276 0ustar tagtagpackage graphdb import ( _ "code.google.com/p/gosqlite/sqlite3" "database/sql" "fmt" "os" "path" "strconv" "testing" ) func newTestDb(t *testing.T) (*Database, string) { p := path.Join(os.TempDir(), "sqlite.db") conn, err := sql.Open("sqlite3", p) db, err := NewDatabase(conn, true) if err != nil { t.Fatal(err) } return db, p } func destroyTestDb(dbPath string) { os.Remove(dbPath) } func TestNewDatabase(t *testing.T) { db, dbpath := newTestDb(t) if db == nil { t.Fatal("Database should not be nil") } db.Close() defer destroyTestDb(dbpath) } func TestCreateRootEnity(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) root := db.RootEntity() if root == nil { t.Fatal("Root entity should not be nil") } } func TestGetRootEntity(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) e := db.Get("/") if e == nil { t.Fatal("Entity should not be nil") } if e.ID() != "0" { t.Fatalf("Enity id should be 0, got %s", e.ID()) } } func TestSetEntityWithDifferentName(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/test", "1") if _, err := db.Set("/other", "1"); err != nil { t.Fatal(err) } } func TestSetDuplicateEntity(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) if _, err := db.Set("/foo", "42"); err != nil { t.Fatal(err) } if _, err := db.Set("/foo", "43"); err == nil { t.Fatalf("Creating an entry with a duplciate path did not cause an error") } } func TestCreateChild(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) child, err := db.Set("/db", "1") if err != nil { t.Fatal(err) } if child == nil { t.Fatal("Child should not be nil") } if child.ID() != "1" { t.Fail() } } func TestListAllRootChildren(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) for i := 1; i < 6; i++ { a := strconv.Itoa(i) if _, err := db.Set("/"+a, a); err != nil { t.Fatal(err) } } entries := db.List("/", -1) if len(entries) != 5 { t.Fatalf("Expect 5 entries for / got %d", len(entries)) } } func TestListAllSubChildren(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) _, err := db.Set("/webapp", "1") if err != nil { t.Fatal(err) } child2, err := db.Set("/db", "2") if err != nil { t.Fatal(err) } child4, err := db.Set("/logs", "4") if err != nil { t.Fatal(err) } if _, err := db.Set("/db/logs", child4.ID()); err != nil { t.Fatal(err) } child3, err := db.Set("/sentry", "3") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/db", child2.ID()); err != nil { t.Fatal(err) } entries := db.List("/webapp", 1) if len(entries) != 3 { t.Fatalf("Expect 3 entries for / got %d", len(entries)) } entries = db.List("/webapp", 0) if len(entries) != 2 { t.Fatalf("Expect 2 entries for / got %d", len(entries)) } } func TestAddSelfAsChild(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) child, err := db.Set("/test", "1") if err != nil { t.Fatal(err) } if _, err := db.Set("/test/other", child.ID()); err == nil { t.Fatal("Error should not be nil") } } func TestAddChildToNonExistantRoot(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) if _, err := db.Set("/myapp", "1"); err != nil { t.Fatal(err) } if _, err := db.Set("/myapp/proxy/db", "2"); err == nil { t.Fatal("Error should not be nil") } } func TestWalkAll(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) _, 
err := db.Set("/webapp", "1") if err != nil { t.Fatal(err) } child2, err := db.Set("/db", "2") if err != nil { t.Fatal(err) } child4, err := db.Set("/db/logs", "4") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/logs", child4.ID()); err != nil { t.Fatal(err) } child3, err := db.Set("/sentry", "3") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/db", child2.ID()); err != nil { t.Fatal(err) } child5, err := db.Set("/gograph", "5") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { t.Fatal(err) } if err := db.Walk("/", func(p string, e *Entity) error { t.Logf("Path: %s Entity: %s", p, e.ID()) return nil }, -1); err != nil { t.Fatal(err) } } func TestGetEntityByPath(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) _, err := db.Set("/webapp", "1") if err != nil { t.Fatal(err) } child2, err := db.Set("/db", "2") if err != nil { t.Fatal(err) } child4, err := db.Set("/logs", "4") if err != nil { t.Fatal(err) } if _, err := db.Set("/db/logs", child4.ID()); err != nil { t.Fatal(err) } child3, err := db.Set("/sentry", "3") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/db", child2.ID()); err != nil { t.Fatal(err) } child5, err := db.Set("/gograph", "5") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { t.Fatal(err) } entity := db.Get("/webapp/db/logs") if entity == nil { t.Fatal("Entity should not be nil") } if entity.ID() != "4" { t.Fatalf("Expected to get entity with id 4, got %s", entity.ID()) } } func TestEnitiesPaths(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) _, err := db.Set("/webapp", "1") if err != nil { t.Fatal(err) } child2, err := db.Set("/db", "2") if err != nil { t.Fatal(err) } child4, err := db.Set("/logs", "4") if err != nil { t.Fatal(err) } if _, err := db.Set("/db/logs", child4.ID()); err != nil { t.Fatal(err) } child3, err := db.Set("/sentry", "3") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/db", child2.ID()); err != nil { t.Fatal(err) } child5, err := db.Set("/gograph", "5") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { t.Fatal(err) } out := db.List("/", -1) for _, p := range out.Paths() { t.Log(p) } } func TestDeleteRootEntity(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) if err := db.Delete("/"); err == nil { t.Fatal("Error should not be nil") } } func TestDeleteEntity(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) _, err := db.Set("/webapp", "1") if err != nil { t.Fatal(err) } child2, err := db.Set("/db", "2") if err != nil { t.Fatal(err) } child4, err := db.Set("/logs", "4") if err != nil { t.Fatal(err) } if _, err := db.Set("/db/logs", child4.ID()); err != nil { t.Fatal(err) } child3, err := db.Set("/sentry", "3") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/db", child2.ID()); err != nil { t.Fatal(err) } child5, err := db.Set("/gograph", "5") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { t.Fatal(err) } if err := db.Delete("/webapp/sentry"); err != nil { 
t.Fatal(err) } entity := db.Get("/webapp/sentry") if entity != nil { t.Fatal("Entity /webapp/sentry should be nil") } } func TestCountRefs(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/webapp", "1") if db.Refs("1") != 1 { t.Fatal("Expect reference count to be 1") } db.Set("/db", "2") db.Set("/webapp/db", "2") if db.Refs("2") != 2 { t.Fatal("Expect reference count to be 2") } } func TestPurgeId(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/webapp", "1") if db.Refs("1") != 1 { t.Fatal("Expect reference count to be 1") } db.Set("/db", "2") db.Set("/webapp/db", "2") count, err := db.Purge("2") if err != nil { t.Fatal(err) } if count != 2 { t.Fatal("Expected 2 references to be removed") } } func TestRename(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/webapp", "1") if db.Refs("1") != 1 { t.Fatal("Expect reference count to be 1") } db.Set("/db", "2") db.Set("/webapp/db", "2") if db.Get("/webapp/db") == nil { t.Fatal("Cannot find entity at path /webapp/db") } if err := db.Rename("/webapp/db", "/webapp/newdb"); err != nil { t.Fatal(err) } if db.Get("/webapp/db") != nil { t.Fatal("Entity should not exist at /webapp/db") } if db.Get("/webapp/newdb") == nil { t.Fatal("Cannot find entity at path /webapp/newdb") } } func TestCreateMultipleNames(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/db", "1") if _, err := db.Set("/myapp", "1"); err != nil { t.Fatal(err) } db.Walk("/", func(p string, e *Entity) error { t.Logf("%s\n", p) return nil }, -1) } func TestRefPaths(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/webapp", "1") db.Set("/db", "2") db.Set("/webapp/db", "2") refs := db.RefPaths("2") if len(refs) != 2 { t.Fatalf("Expected reference count to be 2, got %d", len(refs)) } } func TestExistsTrue(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/testing", "1") if !db.Exists("/testing") { t.Fatalf("/tesing should exist") } } func TestExistsFalse(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/toerhe", "1") if db.Exists("/testing") { t.Fatalf("/tesing should not exist") } } func TestGetNameWithTrailingSlash(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/todo", "1") e := db.Get("/todo/") if e == nil { t.Fatalf("Entity should not be nil") } } func TestConcurrentWrites(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) errs := make(chan error, 2) save := func(name string, id string) { if _, err := db.Set(fmt.Sprintf("/%s", name), id); err != nil { errs <- err } errs <- nil } purge := func(id string) { if _, err := db.Purge(id); err != nil { errs <- err } errs <- nil } save("/1", "1") go purge("1") go save("/2", "2") any := false for i := 0; i < 2; i++ { if err := <-errs; err != nil { any = true t.Log(err) } } if any { t.Fatal() } } docker-0.9.1/pkg/graphdb/sort.go0000644000175000017500000000073612314376205014617 0ustar tagtagpackage graphdb import "sort" type pathSorter struct { paths []string by func(i, j string) bool } func sortByDepth(paths []string) { s := &pathSorter{paths, func(i, j string) bool { return PathDepth(i) > PathDepth(j) }} sort.Sort(s) } func (s *pathSorter) Len() int { return len(s.paths) } func (s *pathSorter) Swap(i, j int) { s.paths[i], s.paths[j] = s.paths[j], s.paths[i] } func (s *pathSorter) Less(i, j int) bool { return s.by(s.paths[i], s.paths[j]) } 
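As a quick illustration of the depth convention behind sortByDepth (and behind Entities.Paths, which returns the deepest paths first), this hypothetical snippet prints what PathDepth reports for a few paths.

package main

import (
	"fmt"

	"github.com/dotcloud/docker/pkg/graphdb"
)

func main() {
	// PathDepth counts path components: "/" is 1, "/app" is 2, "/app/db" is 3.
	for _, p := range []string{"/", "/app", "/app/db"} {
		fmt.Printf("%-8s depth=%d\n", p, graphdb.PathDepth(p))
	}
}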
docker-0.9.1/links/0000755000175000017500000000000012314376205012223 5ustar tagtagdocker-0.9.1/links/links_test.go0000644000175000017500000000361312314376205014734 0ustar tagtagpackage links import ( "github.com/dotcloud/docker/nat" "strings" "testing" ) func TestLinkNew(t *testing.T) { ports := make(nat.PortSet) ports[nat.Port("6379/tcp")] = struct{}{} link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", nil, ports, nil) if err != nil { t.Fatal(err) } if link == nil { t.FailNow() } if link.Name != "/db/docker" { t.Fail() } if link.Alias() != "docker" { t.Fail() } if link.ParentIP != "172.0.17.3" { t.Fail() } if link.ChildIP != "172.0.17.2" { t.Fail() } for _, p := range link.Ports { if p != nat.Port("6379/tcp") { t.Fail() } } } func TestLinkEnv(t *testing.T) { ports := make(nat.PortSet) ports[nat.Port("6379/tcp")] = struct{}{} link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports, nil) if err != nil { t.Fatal(err) } rawEnv := link.ToEnv() env := make(map[string]string, len(rawEnv)) for _, e := range rawEnv { parts := strings.Split(e, "=") if len(parts) != 2 { t.FailNow() } env[parts[0]] = parts[1] } if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) } if env["DOCKER_PORT_6379_TCP"] != "tcp://172.0.17.2:6379" { t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP"]) } if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) } if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) } if env["DOCKER_PORT_6379_TCP_PORT"] != "6379" { t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT"]) } if env["DOCKER_NAME"] != "/db/docker" { t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) } if env["DOCKER_ENV_PASSWORD"] != "gordon" { t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) } } docker-0.9.1/links/links.go0000644000175000017500000000645412314376205013703 0ustar tagtagpackage links import ( "fmt" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/nat" "path" "strings" ) type Link struct { ParentIP string ChildIP string Name string ChildEnvironment []string Ports []nat.Port IsEnabled bool eng *engine.Engine } func NewLink(parentIP, childIP, name string, env []string, exposedPorts map[nat.Port]struct{}, eng *engine.Engine) (*Link, error) { var ( i int ports = make([]nat.Port, len(exposedPorts)) ) for p := range exposedPorts { ports[i] = p i++ } l := &Link{ Name: name, ChildIP: childIP, ParentIP: parentIP, ChildEnvironment: env, Ports: ports, eng: eng, } return l, nil } func (l *Link) Alias() string { _, alias := path.Split(l.Name) return alias } func (l *Link) ToEnv() []string { env := []string{} alias := strings.ToUpper(l.Alias()) if p := l.getDefaultPort(); p != nil { env = append(env, fmt.Sprintf("%s_PORT=%s://%s:%s", alias, p.Proto(), l.ChildIP, p.Port())) } // Load exposed ports into the environment for _, p := range l.Ports { env = append(env, fmt.Sprintf("%s_PORT_%s_%s=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), 
strings.ToUpper(p.Proto()), p.Proto())) } // Load the linked container's name into the environment env = append(env, fmt.Sprintf("%s_NAME=%s", alias, l.Name)) if l.ChildEnvironment != nil { for _, v := range l.ChildEnvironment { parts := strings.Split(v, "=") if len(parts) != 2 { continue } // Ignore a few variables that are added during docker build (and not really relevant to linked containers) if parts[0] == "HOME" || parts[0] == "PATH" { continue } env = append(env, fmt.Sprintf("%s_ENV_%s=%s", alias, parts[0], parts[1])) } } return env } // Default port rules func (l *Link) getDefaultPort() *nat.Port { var p nat.Port i := len(l.Ports) if i == 0 { return nil } else if i > 1 { nat.Sort(l.Ports, func(ip, jp nat.Port) bool { // If the two ports have the same number, tcp takes priority // Sort in desc order return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") }) } p = l.Ports[0] return &p } func (l *Link) Enable() error { if err := l.toggle("-I", false); err != nil { return err } l.IsEnabled = true return nil } func (l *Link) Disable() { // We do not care about errors here because the link may not // exist in iptables l.toggle("-D", true) l.IsEnabled = false } func (l *Link) toggle(action string, ignoreErrors bool) error { job := l.eng.Job("link", action) job.Setenv("ParentIP", l.ParentIP) job.Setenv("ChildIP", l.ChildIP) job.SetenvBool("IgnoreErrors", ignoreErrors) out := make([]string, len(l.Ports)) for i, p := range l.Ports { out[i] = fmt.Sprintf("%s/%s", p.Port(), p.Proto()) } job.SetenvList("Ports", out) if err := job.Run(); err != nil { // TODO: get ouput from job return err } return nil } docker-0.9.1/archive/0000755000175000017500000000000012314376205012524 5ustar tagtagdocker-0.9.1/archive/changes.go0000644000175000017500000002157012314376205014470 0ustar tagtagpackage archive import ( "bytes" "fmt" "github.com/dotcloud/docker/pkg/system" "github.com/dotcloud/docker/utils" "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" "os" "path/filepath" "strings" "syscall" "time" ) type ChangeType int const ( ChangeModify = iota ChangeAdd ChangeDelete ) type Change struct { Path string Kind ChangeType } func (change *Change) String() string { var kind string switch change.Kind { case ChangeModify: kind = "C" case ChangeAdd: kind = "A" case ChangeDelete: kind = "D" } return fmt.Sprintf("%s %s", kind, change.Path) } // Gnu tar and the go tar writer don't have sub-second mtime // precision, which is problematic when we apply changes via tar // files, we handle this by comparing for exact times, *or* same // second count and either a or b having exactly 0 nanoseconds func sameFsTime(a, b time.Time) bool { return a == b || (a.Unix() == b.Unix() && (a.Nanosecond() == 0 || b.Nanosecond() == 0)) } func sameFsTimeSpec(a, b syscall.Timespec) bool { return a.Sec == b.Sec && (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) } func Changes(layers []string, rw string) ([]Change, error) { var changes []Change err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { if err != nil { return err } // Rebase path path, err = filepath.Rel(rw, path) if err != nil { return err } path = filepath.Join("/", path) // Skip root if path == "/" { return nil } // Skip AUFS metadata if matched, err := filepath.Match("/.wh..wh.*", path); err != nil || matched { return err } change := Change{ Path: path, } // Find out what kind of modification happened file := filepath.Base(path) // If there is a whiteout, then the file was removed 
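// (AUFS records a deletion in the read-write layer as a marker file named ".wh.<original name>";
// stripping that prefix recovers the path of the deleted file.)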
if strings.HasPrefix(file, ".wh.") { originalFile := file[len(".wh."):] change.Path = filepath.Join(filepath.Dir(path), originalFile) change.Kind = ChangeDelete } else { // Otherwise, the file was added change.Kind = ChangeAdd // ...Unless it already existed in a top layer, in which case, it's a modification for _, layer := range layers { stat, err := os.Stat(filepath.Join(layer, path)) if err != nil && !os.IsNotExist(err) { return err } if err == nil { // The file existed in the top layer, so that's a modification // However, if it's a directory, maybe it wasn't actually modified. // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar if stat.IsDir() && f.IsDir() { if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { // Both directories are the same, don't record the change return nil } } change.Kind = ChangeModify break } } } // Record change changes = append(changes, change) return nil }) if err != nil && !os.IsNotExist(err) { return nil, err } return changes, nil } type FileInfo struct { parent *FileInfo name string stat syscall.Stat_t children map[string]*FileInfo capability []byte } func (root *FileInfo) LookUp(path string) *FileInfo { parent := root if path == "/" { return root } pathElements := strings.Split(path, "/") for _, elem := range pathElements { if elem != "" { child := parent.children[elem] if child == nil { return nil } parent = child } } return parent } func (info *FileInfo) path() string { if info.parent == nil { return "/" } return filepath.Join(info.parent.path(), info.name) } func (info *FileInfo) isDir() bool { return info.parent == nil || info.stat.Mode&syscall.S_IFDIR == syscall.S_IFDIR } func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { if oldInfo == nil { // add change := Change{ Path: info.path(), Kind: ChangeAdd, } *changes = append(*changes, change) } // We make a copy so we can modify it to detect additions // also, we only recurse on the old dir if the new info is a directory // otherwise any previous delete/change is considered recursive oldChildren := make(map[string]*FileInfo) if oldInfo != nil && info.isDir() { for k, v := range oldInfo.children { oldChildren[k] = v } } for name, newChild := range info.children { oldChild, _ := oldChildren[name] if oldChild != nil { // change? oldStat := &oldChild.stat newStat := &newChild.stat // Note: We can't compare inode or ctime or blocksize here, because these change // when copying a file into a container. However, that is not generally a problem // because any content change will change mtime, and any status change should // be visible when actually comparing the stat fields. 
The only time this // breaks down is if some code intentionally hides a change by setting // back mtime if oldStat.Mode != newStat.Mode || oldStat.Uid != newStat.Uid || oldStat.Gid != newStat.Gid || oldStat.Rdev != newStat.Rdev || // Don't look at size for dirs, its not a good measure of change (oldStat.Size != newStat.Size && oldStat.Mode&syscall.S_IFDIR != syscall.S_IFDIR) || !sameFsTimeSpec(system.GetLastModification(oldStat), system.GetLastModification(newStat)) || bytes.Compare(oldChild.capability, newChild.capability) != 0 { change := Change{ Path: newChild.path(), Kind: ChangeModify, } *changes = append(*changes, change) } // Remove from copy so we can detect deletions delete(oldChildren, name) } newChild.addChanges(oldChild, changes) } for _, oldChild := range oldChildren { // delete change := Change{ Path: oldChild.path(), Kind: ChangeDelete, } *changes = append(*changes, change) } } func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { var changes []Change info.addChanges(oldInfo, &changes) return changes } func newRootFileInfo() *FileInfo { root := &FileInfo{ name: "/", children: make(map[string]*FileInfo), } return root } func collectFileInfo(sourceDir string) (*FileInfo, error) { root := newRootFileInfo() err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { if err != nil { return err } // Rebase path relPath, err := filepath.Rel(sourceDir, path) if err != nil { return err } relPath = filepath.Join("/", relPath) if relPath == "/" { return nil } parent := root.LookUp(filepath.Dir(relPath)) if parent == nil { return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) } info := &FileInfo{ name: filepath.Base(relPath), children: make(map[string]*FileInfo), parent: parent, } if err := syscall.Lstat(path, &info.stat); err != nil { return err } info.capability, _ = system.Lgetxattr(path, "security.capability") parent.children[info.name] = info return nil }) if err != nil { return nil, err } return root, nil } // Compare two directories and generate an array of Change objects describing the changes func ChangesDirs(newDir, oldDir string) ([]Change, error) { oldRoot, err := collectFileInfo(oldDir) if err != nil { return nil, err } newRoot, err := collectFileInfo(newDir) if err != nil { return nil, err } return newRoot.Changes(oldRoot), nil } func ChangesSize(newDir string, changes []Change) int64 { var size int64 for _, change := range changes { if change.Kind == ChangeModify || change.Kind == ChangeAdd { file := filepath.Join(newDir, change.Path) fileInfo, _ := os.Lstat(file) if fileInfo != nil && !fileInfo.IsDir() { size += fileInfo.Size() } } } return size } func major(device uint64) uint64 { return (device >> 8) & 0xfff } func minor(device uint64) uint64 { return (device & 0xff) | ((device >> 12) & 0xfff00) } func ExportChanges(dir string, changes []Change) (Archive, error) { reader, writer := io.Pipe() tw := tar.NewWriter(writer) go func() { // In general we log errors here but ignore them because // during e.g. 
a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this for _, change := range changes { if change.Kind == ChangeDelete { whiteOutDir := filepath.Dir(change.Path) whiteOutBase := filepath.Base(change.Path) whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase) hdr := &tar.Header{ Name: whiteOut[1:], Size: 0, ModTime: time.Now(), AccessTime: time.Now(), ChangeTime: time.Now(), } if err := tw.WriteHeader(hdr); err != nil { utils.Debugf("Can't write whiteout header: %s\n", err) } } else { path := filepath.Join(dir, change.Path) if err := addTarFile(path, change.Path[1:], tw); err != nil { utils.Debugf("Can't add file %s to tar: %s\n", path, err) } } } // Make sure to check the error on Close. if err := tw.Close(); err != nil { utils.Debugf("Can't close layer: %s\n", err) } writer.Close() }() return reader, nil } docker-0.9.1/archive/wrap.go0000644000175000017500000000274312314376205014032 0ustar tagtagpackage archive import ( "bytes" "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io/ioutil" ) // Generate generates a new archive from the content provided // as input. // // `files` is a sequence of path/content pairs. A new file is // added to the archive for each pair. // If the last pair is incomplete, the file is created with an // empty content. For example: // // Generate("foo.txt", "hello world", "emptyfile") // // The above call will return an archive with 2 files: // * ./foo.txt with content "hello world" // * ./empty with empty content // // FIXME: stream content instead of buffering // FIXME: specify permissions and other archive metadata func Generate(input ...string) (Archive, error) { files := parseStringPairs(input...) buf := new(bytes.Buffer) tw := tar.NewWriter(buf) for _, file := range files { name, content := file[0], file[1] hdr := &tar.Header{ Name: name, Size: int64(len(content)), } if err := tw.WriteHeader(hdr); err != nil { return nil, err } if _, err := tw.Write([]byte(content)); err != nil { return nil, err } } if err := tw.Close(); err != nil { return nil, err } return ioutil.NopCloser(buf), nil } func parseStringPairs(input ...string) (output [][2]string) { output = make([][2]string, 0, len(input)/2+1) for i := 0; i < len(input); i += 2 { var pair [2]string pair[0] = input[i] if i+1 < len(input) { pair[1] = input[i+1] } output = append(output, pair) } return } docker-0.9.1/archive/archive_test.go0000644000175000017500000000727712314376205015550 0ustar tagtagpackage archive import ( "bytes" "fmt" "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" "io/ioutil" "os" "os/exec" "path" "testing" "time" ) func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") out, err := CmdStream(cmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } errCh := make(chan error) go func() { _, err := io.Copy(ioutil.Discard, out) errCh <- err }() select { case err := <-errCh: if err != nil { t.Fatalf("Command should not have failed (err=%.100s...)", err) } case <-time.After(5 * time.Second): t.Fatalf("Command did not complete in 5 seconds; probable deadlock") } } func TestCmdStreamBad(t *testing.T) { badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") out, err := CmdStream(badCmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } if output, err := ioutil.ReadAll(out); err == 
nil { t.Fatalf("Command should have failed") } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { t.Fatalf("Wrong error value (%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestCmdStreamGood(t *testing.T) { cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0") out, err := CmdStream(cmd, nil) if err != nil { t.Fatal(err) } if output, err := ioutil.ReadAll(out); err != nil { t.Fatalf("Command should not have failed (err=%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func tarUntar(t *testing.T, origin string, compression Compression) error { archive, err := Tar(origin, compression) if err != nil { t.Fatal(err) } defer archive.Close() buf := make([]byte, 10) if _, err := archive.Read(buf); err != nil { return err } wrap := io.MultiReader(bytes.NewReader(buf), archive) detectedCompression := DetectCompression(buf) if detectedCompression.Extension() != compression.Extension() { return fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) } tmp, err := ioutil.TempDir("", "docker-test-untar") if err != nil { return err } defer os.RemoveAll(tmp) if err := Untar(wrap, tmp, nil); err != nil { return err } if _, err := os.Stat(tmp); err != nil { return err } changes, err := ChangesDirs(origin, tmp) if err != nil { return err } if len(changes) != 0 { t.Fatalf("Unexpected differences after tarUntar: %v", changes) } return nil } func TestTarUntar(t *testing.T) { origin, err := ioutil.TempDir("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } for _, c := range []Compression{ Uncompressed, Gzip, } { if err := tarUntar(t, origin, c); err != nil { t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } } } // Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz // use PAX Global Extended Headers. // Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} err := createTarFile("pax_global_header", "some_dir", &hdr, nil) if err != nil { t.Fatal(err) } } docker-0.9.1/archive/MAINTAINERS0000644000175000017500000000007412314376205014222 0ustar tagtagMichael Crosby (@crosbymichael) docker-0.9.1/archive/diff.go0000644000175000017500000001101012314376205013754 0ustar tagtagpackage archive import ( "fmt" "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" "io/ioutil" "os" "path/filepath" "strings" "syscall" "time" ) // Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. 
// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, // then the top 12 bits of the minor func mkdev(major int64, minor int64) uint32 { return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) } func timeToTimespec(time time.Time) (ts syscall.Timespec) { if time.IsZero() { // Return UTIME_OMIT special value ts.Sec = 0 ts.Nsec = ((1 << 30) - 2) return } return syscall.NsecToTimespec(time.UnixNano()) } // ApplyLayer parses a diff in the standard layer format from `layer`, and // applies it to the directory `dest`. func ApplyLayer(dest string, layer ArchiveReader) error { // We need to be able to set any perms oldmask := syscall.Umask(0) defer syscall.Umask(oldmask) layer, err := DecompressStream(layer) if err != nil { return err } tr := tar.NewReader(layer) var dirs []*tar.Header aufsTempdir := "" aufsHardlinks := make(map[string]*tar.Header) // Iterate through the files in the archive. for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return err } // Normalize name, for safety and for a simple is-root check hdr.Name = filepath.Clean(hdr.Name) if !strings.HasSuffix(hdr.Name, "/") { // Not the root directory, ensure that the parent directory exists. // This happened in some tests where an image had a tarfile without any // parent directories. parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = os.MkdirAll(parentPath, 600) if err != nil { return err } } } // Skip AUFS metadata dirs if strings.HasPrefix(hdr.Name, ".wh..wh.") { // Regular files inside /.wh..wh.plnk can be used as hardlink targets // We don't want this directory, but we need the files in them so that // such hardlinks can be resolved. if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg { basename := filepath.Base(hdr.Name) aufsHardlinks[basename] = hdr if aufsTempdir == "" { if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { return err } defer os.RemoveAll(aufsTempdir) } if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr); err != nil { return err } } continue } path := filepath.Join(dest, hdr.Name) base := filepath.Base(path) if strings.HasPrefix(base, ".wh.") { originalBase := base[len(".wh."):] originalPath := filepath.Join(filepath.Dir(path), originalBase) if err := os.RemoveAll(originalPath); err != nil { return err } } else { // If path exits we almost always just want to remove and replace it. // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). 
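// (Untar in archive.go applies the same replace-or-merge rule before calling createTarFile.)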
if fi, err := os.Lstat(path); err == nil { if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } srcData := io.Reader(tr) srcHdr := hdr // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so // we manually retarget these into the temporary files we extracted them into if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") { linkBasename := filepath.Base(hdr.Linkname) srcHdr = aufsHardlinks[linkBasename] if srcHdr == nil { return fmt.Errorf("Invalid aufs hardlink") } tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) if err != nil { return err } defer tmpFile.Close() srcData = tmpFile } if err := createTarFile(path, dest, srcHdr, srcData); err != nil { return err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} if err := syscall.UtimesNano(path, ts); err != nil { return err } } return nil } docker-0.9.1/archive/archive.go0000644000175000017500000003704012314376205014500 0ustar tagtagpackage archive import ( "bytes" "compress/bzip2" "compress/gzip" "errors" "fmt" "github.com/dotcloud/docker/pkg/system" "github.com/dotcloud/docker/utils" "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" "io/ioutil" "os" "os/exec" "path" "path/filepath" "strings" "syscall" ) type ( Archive io.ReadCloser ArchiveReader io.Reader Compression int TarOptions struct { Includes []string Compression Compression } ) var ( ErrNotImplemented = errors.New("Function not implemented") ) const ( Uncompressed Compression = iota Bzip2 Gzip Xz ) func DetectCompression(source []byte) Compression { sourceLen := len(source) for compression, m := range map[Compression][]byte{ Bzip2: {0x42, 0x5A, 0x68}, Gzip: {0x1F, 0x8B, 0x08}, Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, } { fail := false if len(m) > sourceLen { utils.Debugf("Len too short") continue } i := 0 for _, b := range m { if b != source[i] { fail = true break } i++ } if !fail { return compression } } return Uncompressed } func xzDecompress(archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} return CmdStream(exec.Command(args[0], args[1:]...), archive) } func DecompressStream(archive io.Reader) (io.ReadCloser, error) { buf := make([]byte, 10) totalN := 0 for totalN < 10 { n, err := archive.Read(buf[totalN:]) if err != nil { if err == io.EOF { return nil, fmt.Errorf("Tarball too short") } return nil, err } totalN += n utils.Debugf("[tar autodetect] n: %d", n) } compression := DetectCompression(buf) wrap := io.MultiReader(bytes.NewReader(buf), archive) switch compression { case Uncompressed: return ioutil.NopCloser(wrap), nil case Gzip: return gzip.NewReader(wrap) case Bzip2: return ioutil.NopCloser(bzip2.NewReader(wrap)), nil case Xz: return xzDecompress(wrap) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) { switch compression { case Uncompressed: return utils.NopWriteCloser(dest), nil case Gzip: return gzip.NewWriter(dest), nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a 
problem as docker only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } func (compression *Compression) Extension() string { switch *compression { case Uncompressed: return "tar" case Bzip2: return "tar.bz2" case Gzip: return "tar.gz" case Xz: return "tar.xz" } return "" } func addTarFile(path, name string, tw *tar.Writer) error { fi, err := os.Lstat(path) if err != nil { return err } link := "" if fi.Mode()&os.ModeSymlink != 0 { if link, err = os.Readlink(path); err != nil { return err } } hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return err } if fi.IsDir() && !strings.HasSuffix(name, "/") { name = name + "/" } hdr.Name = name stat, ok := fi.Sys().(*syscall.Stat_t) if ok { // Currently go does not fill in the major/minors if stat.Mode&syscall.S_IFBLK == syscall.S_IFBLK || stat.Mode&syscall.S_IFCHR == syscall.S_IFCHR { hdr.Devmajor = int64(major(uint64(stat.Rdev))) hdr.Devminor = int64(minor(uint64(stat.Rdev))) } } capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability) } if err := tw.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg { if file, err := os.Open(path); err != nil { return err } else { _, err := io.Copy(tw, file) if err != nil { return err } file.Close() } } return nil } func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. setuid bits) hdrInfo := hdr.FileInfo() switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already. // In that case we just want to merge the two if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg, tar.TypeRegA: // Source is regular file file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } if _, err := io.Copy(file, reader); err != nil { file.Close() return err } file.Close() case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: mode := uint32(hdr.Mode & 07777) switch hdr.Typeflag { case tar.TypeBlock: mode |= syscall.S_IFBLK case tar.TypeChar: mode |= syscall.S_IFCHR case tar.TypeFifo: mode |= syscall.S_IFIFO } if err := syscall.Mknod(path, mode, int(mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { return err } case tar.TypeLink: if err := os.Link(filepath.Join(extractDir, hdr.Linkname), path); err != nil { return err } case tar.TypeSymlink: if err := os.Symlink(hdr.Linkname, path); err != nil { return err } case tar.TypeXGlobalHeader: utils.Debugf("PAX Global Extended Headers found and ignored") return nil default: return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) } if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil { return err } for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { return err } } // There is no LChmod, so ignore mode for symlink. 
Also, this // must happen after chown, as that can modify the file mode if hdr.Typeflag != tar.TypeSymlink { if err := os.Chmod(path, hdrInfo.Mode()); err != nil { return err } } ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} // syscall.UtimesNano doesn't support a NOFOLLOW flag atm, and if hdr.Typeflag != tar.TypeSymlink { if err := system.UtimesNano(path, ts); err != nil { return err } } else { if err := system.LUtimesNano(path, ts); err != nil { return err } } return nil } // Tar creates an archive from the directory at `path`, and returns it as a // stream of bytes. func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarFilter(path, &TarOptions{Compression: compression}) } func escapeName(name string) string { escaped := make([]byte, 0) for i, c := range []byte(name) { if i == 0 && c == '/' { continue } // all printable chars except "-" which is 0x2d if (0x20 <= c && c <= 0x7E) && c != 0x2d { escaped = append(escaped, c) } else { escaped = append(escaped, fmt.Sprintf("\\%03o", c)...) } } return string(escaped) } // Tar creates an archive from the directory at `path`, only including files whose relative // paths are included in `filter`. If `filter` is nil, then all files are included. func TarFilter(srcPath string, options *TarOptions) (io.ReadCloser, error) { pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) if err != nil { return nil, err } tw := tar.NewWriter(compressWriter) go func() { // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this if options.Includes == nil { options.Includes = []string{"."} } for _, include := range options.Includes { filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error { if err != nil { utils.Debugf("Tar: Can't stat file %s to tar: %s\n", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) if err != nil { return nil } if err := addTarFile(filePath, relFilePath, tw); err != nil { utils.Debugf("Can't add file %s to tar: %s\n", srcPath, err) } return nil }) } // Make sure to check the error on Close. if err := tw.Close(); err != nil { utils.Debugf("Can't close tar writer: %s\n", err) } if err := compressWriter.Close(); err != nil { utils.Debugf("Can't close compress writer: %s\n", err) } if err := pipeWriter.Close(); err != nil { utils.Debugf("Can't close pipe writer: %s\n", err) } }() return pipeReader, nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `path`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. func Untar(archive io.Reader, dest string, options *TarOptions) error { if archive == nil { return fmt.Errorf("Empty archive") } decompressedArchive, err := DecompressStream(archive) if err != nil { return err } defer decompressedArchive.Close() tr := tar.NewReader(decompressedArchive) var dirs []*tar.Header // Iterate through the files in the archive. 
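// Each entry is materialized with createTarFile; directories are collected in dirs so their
// mtimes can be restored once all of their children have been written.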
for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return err } // Normalize name, for safety and for a simple is-root check hdr.Name = filepath.Clean(hdr.Name) if !strings.HasSuffix(hdr.Name, "/") { // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = os.MkdirAll(parentPath, 600) if err != nil { return err } } } path := filepath.Join(dest, hdr.Name) // If path exits we almost always just want to remove and replace it // The only exception is when it is a directory *and* the file from // the layer is also a directory. Then we want to merge them (i.e. // just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } if err := createTarFile(path, dest, hdr, tr); err != nil { return err } // Directory mtimes must be handled at the end to avoid further // file creation in them to modify the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} if err := syscall.UtimesNano(path, ts); err != nil { return err } } return nil } // TarUntar is a convenience function which calls Tar and Untar, with // the output of one piped into the other. If either Tar or Untar fails, // TarUntar aborts and returns the error. func TarUntar(src string, dst string) error { utils.Debugf("TarUntar(%s %s)", src, dst) archive, err := TarFilter(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() return Untar(archive, dst, nil) } // UntarPath is a convenience function which looks for an archive // at filesystem path `src`, and unpacks it at `dst`. func UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() if err := Untar(archive, dst, nil); err != nil { return err } return nil } // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no // intermediary disk IO. // func CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return CopyFileWithTar(src, dst) } // Create dst, copy src's content into it utils.Debugf("Creating dest directory: %s", dst) if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) { return err } utils.Debugf("Calling TarUntar(%s, %s)", src, dst) return TarUntar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. // // If `dst` ends with a trailing slash '/', the final destination path // will be `dst/base(src)`. 
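//
// Illustrative example (not part of the original source; the paths are
// hypothetical):
//
//	// dst ends with '/', so this copies to /backup/hosts
//	if err := CopyFileWithTar("/etc/hosts", "/backup/"); err != nil {
//		return err
//	}
//	// explicit destination name: copies to /backup/hosts.bak
//	if err := CopyFileWithTar("/etc/hosts", "/backup/hosts.bak"); err != nil {
//		return err
//	}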
func CopyFileWithTar(src, dst string) (err error) { utils.Debugf("CopyFileWithTar(%s, %s)", src, dst) srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return fmt.Errorf("Can't copy a directory") } // Clean up the trailing / if dst[len(dst)-1] == '/' { dst = path.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) { return err } r, w := io.Pipe() errC := utils.Go(func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() tw := tar.NewWriter(w) hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Name = filepath.Base(dst) if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } tw.Close() return nil }) defer func() { if er := <-errC; err != nil { err = er } }() return Untar(r, filepath.Dir(dst), nil) } // CmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { if input != nil { stdin, err := cmd.StdinPipe() if err != nil { return nil, err } // Write stdin if any go func() { io.Copy(stdin, input) stdin.Close() }() } stdout, err := cmd.StdoutPipe() if err != nil { return nil, err } stderr, err := cmd.StderrPipe() if err != nil { return nil, err } pipeR, pipeW := io.Pipe() errChan := make(chan []byte) // Collect stderr, we will use it in case of an error go func() { errText, e := ioutil.ReadAll(stderr) if e != nil { errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")") } errChan <- errText }() // Copy stdout to the returned pipe go func() { _, err := io.Copy(pipeW, stdout) if err != nil { pipeW.CloseWithError(err) } errText := <-errChan if err := cmd.Wait(); err != nil { pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText)) } else { pipeW.Close() } }() // Run the command and return the pipe if err := cmd.Start(); err != nil { return nil, err } return pipeR, nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. 
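//
// Illustrative example (not part of the original source; the source path is
// hypothetical, and the empty dir argument falls back to the system
// temporary directory):
//
//	archive, err := Tar("/var/lib/docker/some-root", Uncompressed)
//	if err != nil {
//		return err
//	}
//	tmp, err := NewTempArchive(archive, "")
//	if err != nil {
//		return err
//	}
//	// tmp.Size holds the total byte count; reading tmp to the end
//	// removes the backing temporary file.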
func NewTempArchive(src Archive, dir string) (*TempArchive, error) { f, err := ioutil.TempFile(dir, "") if err != nil { return nil, err } if _, err := io.Copy(f, src); err != nil { return nil, err } if _, err := f.Seek(0, 0); err != nil { return nil, err } st, err := f.Stat() if err != nil { return nil, err } size := st.Size() return &TempArchive{f, size}, nil } type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) if err != nil { os.Remove(archive.File.Name()) } return n, err } docker-0.9.1/archive/changes_test.go0000644000175000017500000001603512314376205015527 0ustar tagtagpackage archive import ( "io/ioutil" "os" "os/exec" "path" "sort" "testing" "time" ) func max(x, y int) int { if x >= y { return x } return y } func copyDir(src, dst string) error { cmd := exec.Command("cp", "-a", src, dst) if err := cmd.Run(); err != nil { return err } return nil } // Helper to sort []Change by path type byPath struct{ changes []Change } func (b byPath) Less(i, j int) bool { return b.changes[i].Path < b.changes[j].Path } func (b byPath) Len() int { return len(b.changes) } func (b byPath) Swap(i, j int) { b.changes[i], b.changes[j] = b.changes[j], b.changes[i] } type FileType uint32 const ( Regular FileType = iota Dir Symlink ) type FileData struct { filetype FileType path string contents string permissions os.FileMode } func createSampleDir(t *testing.T, root string) { files := []FileData{ {Regular, "file1", "file1\n", 0600}, {Regular, "file2", "file2\n", 0666}, {Regular, "file3", "file3\n", 0404}, {Regular, "file4", "file4\n", 0600}, {Regular, "file5", "file5\n", 0600}, {Regular, "file6", "file6\n", 0600}, {Regular, "file7", "file7\n", 0600}, {Dir, "dir1", "", 0740}, {Regular, "dir1/file1-1", "file1-1\n", 01444}, {Regular, "dir1/file1-2", "file1-2\n", 0666}, {Dir, "dir2", "", 0700}, {Regular, "dir2/file2-1", "file2-1\n", 0666}, {Regular, "dir2/file2-2", "file2-2\n", 0666}, {Dir, "dir3", "", 0700}, {Regular, "dir3/file3-1", "file3-1\n", 0666}, {Regular, "dir3/file3-2", "file3-2\n", 0666}, {Dir, "dir4", "", 0700}, {Regular, "dir4/file3-1", "file4-1\n", 0666}, {Regular, "dir4/file3-2", "file4-2\n", 0666}, {Symlink, "symlink1", "target1", 0666}, {Symlink, "symlink2", "target2", 0666}, } now := time.Now() for _, info := range files { p := path.Join(root, info.path) if info.filetype == Dir { if err := os.MkdirAll(p, info.permissions); err != nil { t.Fatal(err) } } else if info.filetype == Regular { if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil { t.Fatal(err) } } else if info.filetype == Symlink { if err := os.Symlink(info.contents, p); err != nil { t.Fatal(err) } } if info.filetype != Symlink { // Set a consistent ctime, atime for all files and dirs if err := os.Chtimes(p, now, now); err != nil { t.Fatal(err) } } } } // Create an directory, copy it, make sure we report no changes between the two func TestChangesDirsEmpty(t *testing.T) { src, err := ioutil.TempDir("", "docker-changes-test") if err != nil { t.Fatal(err) } createSampleDir(t, src) dst := src + "-copy" if err := copyDir(src, dst); err != nil { t.Fatal(err) } changes, err := ChangesDirs(dst, src) if err != nil { t.Fatal(err) } if len(changes) != 0 { t.Fatalf("Reported changes for identical dirs: %v", changes) } os.RemoveAll(src) os.RemoveAll(dst) } func mutateSampleDir(t *testing.T, root string) { // Remove a regular file if err := os.RemoveAll(path.Join(root, 
"file1")); err != nil { t.Fatal(err) } // Remove a directory if err := os.RemoveAll(path.Join(root, "dir1")); err != nil { t.Fatal(err) } // Remove a symlink if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil { t.Fatal(err) } // Rewrite a file if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileN\n"), 0777); err != nil { t.Fatal(err) } // Replace a file if err := os.RemoveAll(path.Join(root, "file3")); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileM\n"), 0404); err != nil { t.Fatal(err) } // Touch file if err := os.Chtimes(path.Join(root, "file4"), time.Now(), time.Now()); err != nil { t.Fatal(err) } // Replace file with dir if err := os.RemoveAll(path.Join(root, "file5")); err != nil { t.Fatal(err) } if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil { t.Fatal(err) } // Create new file if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil { t.Fatal(err) } // Create new dir if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil { t.Fatal(err) } // Create a new symlink if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil { t.Fatal(err) } // Change a symlink if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil { t.Fatal(err) } if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil { t.Fatal(err) } // Replace dir with file if err := os.RemoveAll(path.Join(root, "dir2")); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil { t.Fatal(err) } // Touch dir if err := os.Chtimes(path.Join(root, "dir3"), time.Now(), time.Now()); err != nil { t.Fatal(err) } } func TestChangesDirsMutated(t *testing.T) { src, err := ioutil.TempDir("", "docker-changes-test") if err != nil { t.Fatal(err) } createSampleDir(t, src) dst := src + "-copy" if err := copyDir(src, dst); err != nil { t.Fatal(err) } defer os.RemoveAll(src) defer os.RemoveAll(dst) mutateSampleDir(t, dst) changes, err := ChangesDirs(dst, src) if err != nil { t.Fatal(err) } sort.Sort(byPath{changes}) expectedChanges := []Change{ {"/dir1", ChangeDelete}, {"/dir2", ChangeModify}, {"/dir3", ChangeModify}, {"/dirnew", ChangeAdd}, {"/file1", ChangeDelete}, {"/file2", ChangeModify}, {"/file3", ChangeModify}, {"/file4", ChangeModify}, {"/file5", ChangeModify}, {"/filenew", ChangeAdd}, {"/symlink1", ChangeDelete}, {"/symlink2", ChangeModify}, {"/symlinknew", ChangeAdd}, } for i := 0; i < max(len(changes), len(expectedChanges)); i++ { if i >= len(expectedChanges) { t.Fatalf("unexpected change %s\n", changes[i].String()) } if i >= len(changes) { t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) } if changes[i].Path == expectedChanges[i].Path { if changes[i] != expectedChanges[i] { t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) } } else if changes[i].Path < expectedChanges[i].Path { t.Fatalf("unexpected change %s\n", changes[i].String()) } else { t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) } } } func TestApplyLayer(t *testing.T) { src, err := ioutil.TempDir("", "docker-changes-test") if err != nil { t.Fatal(err) } createSampleDir(t, src) defer os.RemoveAll(src) dst := src + "-copy" if err := copyDir(src, dst); err != nil { t.Fatal(err) } mutateSampleDir(t, dst) defer os.RemoveAll(dst) changes, err := ChangesDirs(dst, src) if err != nil { 
t.Fatal(err) } layer, err := ExportChanges(dst, changes) if err != nil { t.Fatal(err) } layerCopy, err := NewTempArchive(layer, "") if err != nil { t.Fatal(err) } if err := ApplyLayer(src, layerCopy); err != nil { t.Fatal(err) } changes2, err := ChangesDirs(src, dst) if err != nil { t.Fatal(err) } if len(changes2) != 0 { t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) } } docker-0.9.1/sorter.go0000644000175000017500000000105312314376205012747 0ustar tagtagpackage docker import "sort" type containerSorter struct { containers []*Container by func(i, j *Container) bool } func (s *containerSorter) Len() int { return len(s.containers) } func (s *containerSorter) Swap(i, j int) { s.containers[i], s.containers[j] = s.containers[j], s.containers[i] } func (s *containerSorter) Less(i, j int) bool { return s.by(s.containers[i], s.containers[j]) } func sortContainers(containers []*Container, predicate func(i, j *Container) bool) { s := &containerSorter{containers, predicate} sort.Sort(s) } docker-0.9.1/README.md0000644000175000017500000001742012314376205012366 0ustar tagtagDocker: the Linux container engine ================================== Docker is an open source project to pack, ship and run any application as a lightweight container Docker containers are both *hardware-agnostic* and *platform-agnostic*. This means that they can run anywhere, from your laptop to the largest EC2 compute instance and everything in between - and they don't require that you use a particular language, framework or packaging system. That makes them great building blocks for deploying and scaling web apps, databases and backend services without depending on a particular stack or provider. Docker is an open-source implementation of the deployment engine which powers [dotCloud](http://dotcloud.com), a popular Platform-as-a-Service. It benefits directly from the experience accumulated over several years of large-scale operation and support of hundreds of thousands of applications and databases. ![Docker L](docs/theme/docker/static/img/dockerlogo-h.png "Docker") ## Better than VMs A common method for distributing applications and sandboxing their execution is to use virtual machines, or VMs. Typical VM formats are VMWare's vmdk, Oracle Virtualbox's vdi, and Amazon EC2's ami. In theory these formats should allow every developer to automatically package their application into a "machine" for easy distribution and deployment. In practice, that almost never happens, for a few reasons: * *Size*: VMs are very large which makes them impractical to store and transfer. * *Performance*: running VMs consumes significant CPU and memory, which makes them impractical in many scenarios, for example local development of multi-tier applications, and large-scale deployment of cpu and memory-intensive applications on large numbers of machines. * *Portability*: competing VM environments don't play well with each other. Although conversion tools do exist, they are limited and add even more overhead. * *Hardware-centric*: VMs were designed with machine operators in mind, not software developers. As a result, they offer very limited tooling for what developers need most: building, testing and running their software. For example, VMs offer no facilities for application versioning, monitoring, configuration, logging or service discovery. By contrast, Docker relies on a different sandboxing method known as *containerization*. Unlike traditional virtualization, containerization takes place at the kernel level. 
Most modern operating system kernels now support the primitives necessary for containerization, including Linux with [openvz](http://openvz.org), [vserver](http://linux-vserver.org) and more recently [lxc](http://lxc.sourceforge.net), Solaris with [zones](http://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc) and FreeBSD with [Jails](http://www.freebsd.org/doc/handbook/jails.html). Docker builds on top of these low-level primitives to offer developers a portable format and runtime environment that solves all 4 problems. Docker containers are small (and their transfer can be optimized with layers), they have basically zero memory and cpu overhead, they are completely portable and are designed from the ground up with an application-centric design. The best part: because Docker operates at the OS level, it can still be run inside a VM! ## Plays well with others Docker does not require that you buy into a particular programming language, framework, packaging system or configuration language. Is your application a Unix process? Does it use files, tcp connections, environment variables, standard Unix streams and command-line arguments as inputs and outputs? Then Docker can run it. Can your application's build be expressed as a sequence of such commands? Then Docker can build it. ## Escape dependency hell A common problem for developers is the difficulty of managing all their application's dependencies in a simple and automated way. This is usually difficult for several reasons: * *Cross-platform dependencies*. Modern applications often depend on a combination of system libraries and binaries, language-specific packages, framework-specific modules, internal components developed for another project, etc. These dependencies live in different "worlds" and require different tools - these tools typically don't work well with each other, requiring awkward custom integrations. * Conflicting dependencies. Different applications may depend on different versions of the same dependency. Packaging tools handle these situations with various degrees of ease - but they all handle them in different and incompatible ways, which again forces the developer to do extra work. * Custom dependencies. A developer may need to prepare a custom version of their application's dependency. Some packaging systems can handle custom versions of a dependency, others can't - and all of them handle it differently. Docker solves dependency hell by giving the developer a simple way to express *all* their application's dependencies in one place, and streamline the process of assembling them. If this makes you think of [XKCD 927](http://xkcd.com/927/), don't worry. Docker doesn't *replace* your favorite packaging systems. It simply orchestrates their use in a simple and repeatable way. How does it do that? With layers. Docker defines a build as running a sequence of Unix commands, one after the other, in the same container. Build commands modify the contents of the container (usually by installing new files on the filesystem), the next command modifies it some more, etc. Since each build command inherits the result of the previous commands, the *order* in which the commands are executed expresses *dependencies*. 
Here's a typical Docker build process: ```bash FROM ubuntu:12.04 RUN apt-get update RUN apt-get install -q -y python python-pip curl RUN curl -L https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv RUN cd helloflask-master && pip install -r requirements.txt ``` Note that Docker doesn't care *how* dependencies are built - as long as they can be built by running a Unix command in a container. Getting started =============== Docker can be installed on your local machine as well as servers - both bare metal and virtualized. It is available as a binary on most modern Linux systems, or as a VM on Windows, Mac and other systems. We also offer an interactive tutorial for quickly learning the basics of using Docker. For up-to-date install instructions and online tutorials, see the [Getting Started page](http://www.docker.io/gettingstarted/). Usage examples ============== Docker can be used to run short-lived commands, long-running daemons (app servers, databases etc.), interactive shell sessions, etc. You can find a [list of real-world examples](http://docs.docker.io/en/latest/examples/) in the documentation. Under the hood -------------- Under the hood, Docker is built on the following components: * The [cgroup](http://blog.dotcloud.com/kernel-secrets-from-the-paas-garage-part-24-c) and [namespacing](http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part) capabilities of the Linux kernel; * The [Go](http://golang.org) programming language. Contributing to Docker ====================== Want to hack on Docker? Awesome! There are instructions to get you started [here](CONTRIBUTING.md). They are probably not perfect, please let us know if anything feels wrong or incomplete. ### Legal *Brought to you courtesy of our legal counsel. For more context, please see the Notice document.* Use and transfer of Docker may be subject to certain restrictions by the United States and other governments. It is your responsibility to ensure that your use and/or transfer does not violate applicable laws. 
For more information, please see http://www.bis.doc.gov docker-0.9.1/auth/0000755000175000017500000000000012314376205012044 5ustar tagtagdocker-0.9.1/auth/auth_test.go0000644000175000017500000000671712314376205014406 0ustar tagtagpackage auth import ( "io/ioutil" "os" "testing" ) func TestEncodeAuth(t *testing.T) { newAuthConfig := &AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"} authStr := encodeAuth(newAuthConfig) decAuthConfig := &AuthConfig{} var err error decAuthConfig.Username, decAuthConfig.Password, err = decodeAuth(authStr) if err != nil { t.Fatal(err) } if newAuthConfig.Username != decAuthConfig.Username { t.Fatal("Encode Username doesn't match decoded Username") } if newAuthConfig.Password != decAuthConfig.Password { t.Fatal("Encode Password doesn't match decoded Password") } if authStr != "a2VuOnRlc3Q=" { t.Fatal("AuthString encoding isn't correct.") } } func setupTempConfigFile() (*ConfigFile, error) { root, err := ioutil.TempDir("", "docker-test-auth") if err != nil { return nil, err } configFile := &ConfigFile{ rootPath: root, Configs: make(map[string]AuthConfig), } for _, registry := range []string{"testIndex", IndexServerAddress()} { configFile.Configs[registry] = AuthConfig{ Username: "docker-user", Password: "docker-pass", Email: "docker@docker.io", } } return configFile, nil } func TestSameAuthDataPostSave(t *testing.T) { configFile, err := setupTempConfigFile() if err != nil { t.Fatal(err) } defer os.RemoveAll(configFile.rootPath) err = SaveConfig(configFile) if err != nil { t.Fatal(err) } authConfig := configFile.Configs["testIndex"] if authConfig.Username != "docker-user" { t.Fail() } if authConfig.Password != "docker-pass" { t.Fail() } if authConfig.Email != "docker@docker.io" { t.Fail() } if authConfig.Auth != "" { t.Fail() } } func TestResolveAuthConfigIndexServer(t *testing.T) { configFile, err := setupTempConfigFile() if err != nil { t.Fatal(err) } defer os.RemoveAll(configFile.rootPath) for _, registry := range []string{"", IndexServerAddress()} { resolved := configFile.ResolveAuthConfig(registry) if resolved != configFile.Configs[IndexServerAddress()] { t.Fail() } } } func TestResolveAuthConfigFullURL(t *testing.T) { configFile, err := setupTempConfigFile() if err != nil { t.Fatal(err) } defer os.RemoveAll(configFile.rootPath) registryAuth := AuthConfig{ Username: "foo-user", Password: "foo-pass", Email: "foo@example.com", } localAuth := AuthConfig{ Username: "bar-user", Password: "bar-pass", Email: "bar@example.com", } configFile.Configs["https://registry.example.com/v1/"] = registryAuth configFile.Configs["http://localhost:8000/v1/"] = localAuth configFile.Configs["registry.com"] = registryAuth validRegistries := map[string][]string{ "https://registry.example.com/v1/": { "https://registry.example.com/v1/", "http://registry.example.com/v1/", "registry.example.com", "registry.example.com/v1/", }, "http://localhost:8000/v1/": { "https://localhost:8000/v1/", "http://localhost:8000/v1/", "localhost:8000", "localhost:8000/v1/", }, "registry.com": { "https://registry.com/v1/", "http://registry.com/v1/", "registry.com", "registry.com/v1/", }, } for configKey, registries := range validRegistries { for _, registry := range registries { var ( configured AuthConfig ok bool ) resolved := configFile.ResolveAuthConfig(registry) if configured, ok = configFile.Configs[configKey]; !ok { t.Fail() } if resolved.Email != configured.Email { t.Errorf("%s -> %q != %q\n", registry, resolved.Email, configured.Email) } } } } 
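// Illustrative sketch (not part of the original source): resolving stored
// credentials for a registry endpoint, the way TestResolveAuthConfigFullURL
// above exercises it. The root path and registry URL are hypothetical.
//
//	configFile, err := LoadConfig("/home/someuser")
//	if err != nil {
//		return err
//	}
//	cfg := configFile.ResolveAuthConfig("https://registry.example.com/v1/")
//	// cfg is the matching AuthConfig from .dockercfg, or an empty
//	// AuthConfig if no configured registry matches.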
docker-0.9.1/auth/MAINTAINERS0000644000175000017500000000020512314376205013536 0ustar tagtagSam Alba (@samalba) Joffrey Fuhrer (@shin-) Ken Cochrane (@kencochrane) docker-0.9.1/auth/auth.go0000644000175000017500000002075212314376205013342 0ustar tagtagpackage auth import ( "encoding/base64" "encoding/json" "errors" "fmt" "github.com/dotcloud/docker/utils" "io/ioutil" "net/http" "os" "path" "strings" ) // Where we store the config file const CONFIGFILE = ".dockercfg" // Only used for user auth + account creation const INDEXSERVER = "https://index.docker.io/v1/" //const INDEXSERVER = "https://indexstaging-docker.dotcloud.com/v1/" var ( ErrConfigFileMissing = errors.New("The Auth config file is missing") ) type AuthConfig struct { Username string `json:"username,omitempty"` Password string `json:"password,omitempty"` Auth string `json:"auth"` Email string `json:"email"` ServerAddress string `json:"serveraddress,omitempty"` } type ConfigFile struct { Configs map[string]AuthConfig `json:"configs,omitempty"` rootPath string } func IndexServerAddress() string { return INDEXSERVER } // create a base64 encoded auth string to store in config func encodeAuth(authConfig *AuthConfig) string { authStr := authConfig.Username + ":" + authConfig.Password msg := []byte(authStr) encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) base64.StdEncoding.Encode(encoded, msg) return string(encoded) } // decode the auth string func decodeAuth(authStr string) (string, string, error) { decLen := base64.StdEncoding.DecodedLen(len(authStr)) decoded := make([]byte, decLen) authByte := []byte(authStr) n, err := base64.StdEncoding.Decode(decoded, authByte) if err != nil { return "", "", err } if n > decLen { return "", "", fmt.Errorf("Something went wrong decoding auth config") } arr := strings.SplitN(string(decoded), ":", 2) if len(arr) != 2 { return "", "", fmt.Errorf("Invalid auth configuration file") } password := strings.Trim(arr[1], "\x00") return arr[0], password, nil } // load up the auth config information and return values // FIXME: use the internal golang config parser func LoadConfig(rootPath string) (*ConfigFile, error) { configFile := ConfigFile{Configs: make(map[string]AuthConfig), rootPath: rootPath} confFile := path.Join(rootPath, CONFIGFILE) if _, err := os.Stat(confFile); err != nil { return &configFile, nil //missing file is not an error } b, err := ioutil.ReadFile(confFile) if err != nil { return &configFile, err } if err := json.Unmarshal(b, &configFile.Configs); err != nil { arr := strings.Split(string(b), "\n") if len(arr) < 2 { return &configFile, fmt.Errorf("The Auth config file is empty") } authConfig := AuthConfig{} origAuth := strings.Split(arr[0], " = ") if len(origAuth) != 2 { return &configFile, fmt.Errorf("Invalid Auth config file") } authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) if err != nil { return &configFile, err } origEmail := strings.Split(arr[1], " = ") if len(origEmail) != 2 { return &configFile, fmt.Errorf("Invalid Auth config file") } authConfig.Email = origEmail[1] authConfig.ServerAddress = IndexServerAddress() configFile.Configs[IndexServerAddress()] = authConfig } else { for k, authConfig := range configFile.Configs { authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) if err != nil { return &configFile, err } authConfig.Auth = "" configFile.Configs[k] = authConfig authConfig.ServerAddress = k } } return &configFile, nil } // save the auth config func SaveConfig(configFile *ConfigFile) error { confFile := 
path.Join(configFile.rootPath, CONFIGFILE) if len(configFile.Configs) == 0 { os.Remove(confFile) return nil } configs := make(map[string]AuthConfig, len(configFile.Configs)) for k, authConfig := range configFile.Configs { authCopy := authConfig authCopy.Auth = encodeAuth(&authCopy) authCopy.Username = "" authCopy.Password = "" authCopy.ServerAddress = "" configs[k] = authCopy } b, err := json.Marshal(configs) if err != nil { return err } err = ioutil.WriteFile(confFile, b, 0600) if err != nil { return err } return nil } // try to register/login to the registry server func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, error) { var ( status string reqBody []byte err error client = &http.Client{} reqStatusCode = 0 serverAddress = authConfig.ServerAddress ) if serverAddress == "" { serverAddress = IndexServerAddress() } loginAgainstOfficialIndex := serverAddress == IndexServerAddress() // to avoid sending the server address to the server it should be removed before being marshalled authCopy := *authConfig authCopy.ServerAddress = "" jsonBody, err := json.Marshal(authCopy) if err != nil { return "", fmt.Errorf("Config Error: %s", err) } // using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status. b := strings.NewReader(string(jsonBody)) req1, err := http.Post(serverAddress+"users/", "application/json; charset=utf-8", b) if err != nil { return "", fmt.Errorf("Server Error: %s", err) } reqStatusCode = req1.StatusCode defer req1.Body.Close() reqBody, err = ioutil.ReadAll(req1.Body) if err != nil { return "", fmt.Errorf("Server Error: [%#v] %s", reqStatusCode, err) } if reqStatusCode == 201 { if loginAgainstOfficialIndex { status = "Account created. Please use the confirmation link we sent" + " to your e-mail to activate it." } else { status = "Account created. Please see the documentation of the registry " + serverAddress + " for instructions how to activate it." } } else if reqStatusCode == 400 { if string(reqBody) == "\"Username or email already exists\"" { req, err := factory.NewRequest("GET", serverAddress+"users/", nil) req.SetBasicAuth(authConfig.Username, authConfig.Password) resp, err := client.Do(req) if err != nil { return "", err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return "", err } if resp.StatusCode == 200 { status = "Login Succeeded" } else if resp.StatusCode == 401 { return "", fmt.Errorf("Wrong login/password, please try again") } else if resp.StatusCode == 403 { if loginAgainstOfficialIndex { return "", fmt.Errorf("Login: Account is not Active. Please check your e-mail for a confirmation link.") } return "", fmt.Errorf("Login: Account is not Active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress) } else { return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header) } } else { return "", fmt.Errorf("Registration: %s", reqBody) } } else if reqStatusCode == 401 { // This case would happen with private registries where /v1/users is // protected, so people can use `docker login` as an auth check. 
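// (Illustrative note, not part of the original source.) The basic-auth GET
// below mirrors the 400 "Username or email already exists" branch above:
// a 200 from GET <registry>/users/ means the supplied credentials are
// valid, a 401 means they are wrong, and any other status is returned as
// an error together with the response body.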
req, err := factory.NewRequest("GET", serverAddress+"users/", nil) req.SetBasicAuth(authConfig.Username, authConfig.Password) resp, err := client.Do(req) if err != nil { return "", err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return "", err } if resp.StatusCode == 200 { status = "Login Succeeded" } else if resp.StatusCode == 401 { return "", fmt.Errorf("Wrong login/password, please try again") } else { return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header) } } else { return "", fmt.Errorf("Unexpected status code [%d] : %s", reqStatusCode, reqBody) } return status, nil } // this method matches a auth configuration to a server address or a url func (config *ConfigFile) ResolveAuthConfig(hostname string) AuthConfig { if hostname == IndexServerAddress() || len(hostname) == 0 { // default to the index server return config.Configs[IndexServerAddress()] } // First try the happy case if c, found := config.Configs[hostname]; found { return c } convertToHostname := func(url string) string { stripped := url if strings.HasPrefix(url, "http://") { stripped = strings.Replace(url, "http://", "", 1) } else if strings.HasPrefix(url, "https://") { stripped = strings.Replace(url, "https://", "", 1) } nameParts := strings.SplitN(stripped, "/", 2) return nameParts[0] } // Maybe they have a legacy config file, we will iterate the keys converting // them to the new format and testing normalizedHostename := convertToHostname(hostname) for registry, config := range config.Configs { if registryHostname := convertToHostname(registry); registryHostname == normalizedHostename { return config } } // When all else fails, return an empty auth config return AuthConfig{} } docker-0.9.1/docker/0000755000175000017500000000000012314376205012352 5ustar tagtagdocker-0.9.1/docker/docker.go0000644000175000017500000001445512314376205014161 0ustar tagtagpackage main import ( "fmt" "log" "os" "strings" "github.com/dotcloud/docker/api" "github.com/dotcloud/docker/builtins" "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" flag "github.com/dotcloud/docker/pkg/mflag" "github.com/dotcloud/docker/pkg/opts" "github.com/dotcloud/docker/sysinit" "github.com/dotcloud/docker/utils" ) func main() { if selfPath := utils.SelfPath(); strings.Contains(selfPath, ".dockerinit") { // Running in init mode sysinit.SysInit() return } var ( flVersion = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit") flDaemon = flag.Bool([]string{"d", "-daemon"}, false, "Enable daemon mode") flDebug = flag.Bool([]string{"D", "-debug"}, false, "Enable debug mode") flAutoRestart = flag.Bool([]string{"r", "-restart"}, true, "Restart previously running containers") bridgeName = flag.String([]string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge; use 'none' to disable container networking") bridgeIp = flag.String([]string{"#bip", "-bip"}, "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b") pidfile = flag.String([]string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file") flRoot = flag.String([]string{"g", "-graph"}, "/var/lib/docker", "Path to use as the root of the docker runtime") flSocketGroup = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group") flEnableCors = flag.Bool([]string{"#api-enable-cors", 
"-api-enable-cors"}, false, "Enable CORS headers in the remote API") flDns = opts.NewListOpts(opts.ValidateIp4Address) flEnableIptables = flag.Bool([]string{"#iptables", "-iptables"}, true, "Disable docker's addition of iptables rules") flEnableIpForward = flag.Bool([]string{"#ip-forward", "-ip-forward"}, true, "Disable enabling of net.ipv4.ip_forward") flDefaultIp = flag.String([]string{"#ip", "-ip"}, "0.0.0.0", "Default IP address to use when binding container ports") flInterContainerComm = flag.Bool([]string{"#icc", "-icc"}, true, "Enable inter-container communication") flGraphDriver = flag.String([]string{"s", "-storage-driver"}, "", "Force the docker runtime to use a specific storage driver") flExecDriver = flag.String([]string{"e", "-exec-driver"}, "native", "Force the docker runtime to use a specific exec driver") flHosts = opts.NewListOpts(api.ValidateHost) flMtu = flag.Int([]string{"#mtu", "-mtu"}, 0, "Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available") ) flag.Var(&flDns, []string{"#dns", "-dns"}, "Force docker to use specific DNS servers") flag.Var(&flHosts, []string{"H", "-host"}, "tcp://host:port, unix://path/to/socket, fd://* or fd://socketfd to use in daemon mode. Multiple sockets can be specified") flag.Parse() if *flVersion { showVersion() return } if flHosts.Len() == 0 { defaultHost := os.Getenv("DOCKER_HOST") if defaultHost == "" || *flDaemon { // If we do not have a host, default to unix socket defaultHost = fmt.Sprintf("unix://%s", api.DEFAULTUNIXSOCKET) } if _, err := api.ValidateHost(defaultHost); err != nil { log.Fatal(err) } flHosts.Set(defaultHost) } if *bridgeName != "" && *bridgeIp != "" { log.Fatal("You specified -b & --bip, mutually exclusive options. 
Please specify only one.") } if *flDebug { os.Setenv("DEBUG", "1") } if *flDaemon { if flag.NArg() != 0 { flag.Usage() return } // set up the TempDir to use a canonical path tmp := os.TempDir() realTmp, err := utils.ReadSymlinkedDirectory(tmp) if err != nil { log.Fatalf("Unable to get the full path to the TempDir (%s): %s", tmp, err) } os.Setenv("TMPDIR", realTmp) // get the canonical path to the Docker root directory root := *flRoot var realRoot string if _, err := os.Stat(root); err != nil && os.IsNotExist(err) { realRoot = root } else { realRoot, err = utils.ReadSymlinkedDirectory(root) if err != nil { log.Fatalf("Unable to get the full path to root (%s): %s", root, err) } } eng, err := engine.New(realRoot) if err != nil { log.Fatal(err) } // Load builtins builtins.Register(eng) // load the daemon in the background so we can immediately start // the http api so that connections don't fail while the daemon // is booting go func() { // Load plugin: httpapi job := eng.Job("initserver") job.Setenv("Pidfile", *pidfile) job.Setenv("Root", realRoot) job.SetenvBool("AutoRestart", *flAutoRestart) job.SetenvList("Dns", flDns.GetAll()) job.SetenvBool("EnableIptables", *flEnableIptables) job.SetenvBool("EnableIpForward", *flEnableIpForward) job.Setenv("BridgeIface", *bridgeName) job.Setenv("BridgeIP", *bridgeIp) job.Setenv("DefaultIp", *flDefaultIp) job.SetenvBool("InterContainerCommunication", *flInterContainerComm) job.Setenv("GraphDriver", *flGraphDriver) job.Setenv("ExecDriver", *flExecDriver) job.SetenvInt("Mtu", *flMtu) if err := job.Run(); err != nil { log.Fatal(err) } // after the daemon is done setting up we can tell the api to start // accepting connections if err := eng.Job("acceptconnections").Run(); err != nil { log.Fatal(err) } }() // Serve api job := eng.Job("serveapi", flHosts.GetAll()...) job.SetenvBool("Logging", true) job.SetenvBool("EnableCors", *flEnableCors) job.Setenv("Version", dockerversion.VERSION) job.Setenv("SocketGroup", *flSocketGroup) if err := job.Run(); err != nil { log.Fatal(err) } } else { if flHosts.Len() > 1 { log.Fatal("Please specify only one -H") } protoAddrParts := strings.SplitN(flHosts.GetAll()[0], "://", 2) if err := api.ParseCommands(protoAddrParts[0], protoAddrParts[1], flag.Args()...); err != nil { if sterr, ok := err.(*utils.StatusError); ok { if sterr.Status != "" { log.Println(sterr.Status) } os.Exit(sterr.StatusCode) } log.Fatal(err) } } } func showVersion() { fmt.Printf("Docker version %s, build %s\n", dockerversion.VERSION, dockerversion.GITCOMMIT) } docker-0.9.1/config.go0000644000175000017500000000403512314376205012701 0ustar tagtagpackage docker import ( "net" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/networkdriver" ) const ( defaultNetworkMtu = 1500 DisableNetworkBridge = "none" ) // FIXME: separate runtime configuration from http api configuration type DaemonConfig struct { Pidfile string Root string AutoRestart bool Dns []string EnableIptables bool EnableIpForward bool DefaultIp net.IP BridgeIface string BridgeIP string InterContainerCommunication bool GraphDriver string ExecDriver string Mtu int DisableNetwork bool } // ConfigFromJob creates and returns a new DaemonConfig object // by parsing the contents of a job's environment. 
func DaemonConfigFromJob(job *engine.Job) *DaemonConfig { config := &DaemonConfig{ Pidfile: job.Getenv("Pidfile"), Root: job.Getenv("Root"), AutoRestart: job.GetenvBool("AutoRestart"), EnableIptables: job.GetenvBool("EnableIptables"), EnableIpForward: job.GetenvBool("EnableIpForward"), BridgeIP: job.Getenv("BridgeIP"), BridgeIface: job.Getenv("BridgeIface"), DefaultIp: net.ParseIP(job.Getenv("DefaultIp")), InterContainerCommunication: job.GetenvBool("InterContainerCommunication"), GraphDriver: job.Getenv("GraphDriver"), ExecDriver: job.Getenv("ExecDriver"), } if dns := job.GetenvList("Dns"); dns != nil { config.Dns = dns } if mtu := job.GetenvInt("Mtu"); mtu != 0 { config.Mtu = mtu } else { config.Mtu = GetDefaultNetworkMtu() } config.DisableNetwork = config.BridgeIface == DisableNetworkBridge return config } func GetDefaultNetworkMtu() int { if iface, err := networkdriver.GetDefaultRouteIface(); err == nil { return iface.MTU } return defaultNetworkMtu } docker-0.9.1/NOTICE0000644000175000017500000000117612314376205012014 0ustar tagtagDocker Copyright 2012-2014 Docker, Inc. This product includes software developed at Docker, Inc. (http://www.docker.com). This product contains software (https://github.com/kr/pty) developed by Keith Rarick, licensed under the MIT License. The following is courtesy of our legal counsel: Use and transfer of Docker may be subject to certain restrictions by the United States and other governments. It is your responsibility to ensure that your use and/or transfer does not violate applicable laws. For more information, please see http://www.bis.doc.gov See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. docker-0.9.1/.mailmap0000644000175000017500000000572412314376205012534 0ustar tagtag# Generate AUTHORS: git log --format='%aN <%aE>' | sort -uf Guillaume J. Charmes Thatcher Peskens dhrp Thatcher Peskens dhrp Jérôme Petazzoni jpetazzo Jérôme Petazzoni Joffrey F Tim Terhorst Andy Smith Thatcher Peskens Walter Stanish Roberto Hashioka Konstantin Pelykh David Sissitka Nolan Darilek Benoit Chesneau Jordan Arentsen Daniel Garcia Miguel Angel Fernández Bhiraj Butala Faiz Khan Victor Lyuboslavsky Jean-Baptiste Barth Matthew Mueller Shih-Yuan Lee Daniel Mizyrycki root Jean-Baptiste Dalido Sven Dowideit ¨Sven <¨SvenDowideit@home.org.au¨> unclejack docker-0.9.1/hack/0000755000175000017500000000000012314376205012011 5ustar tagtagdocker-0.9.1/hack/stats.sh0000755000175000017500000000073412314376205013512 0ustar tagtag#!/usr/bin/env bash ## Run this script from the root of the docker repository ## to query project stats useful to the maintainers. ## You will need to install `pulls` and `issues` from ## http://github.com/crosbymichael/pulls set -e echo -n "Open pulls: " PULLS=$(pulls | wc -l); let PULLS=$PULLS-1 echo $PULLS echo -n "Pulls alru: " pulls alru echo -n "Open issues: " ISSUES=$(issues list | wc -l); let ISSUES=$ISSUES-1 echo $ISSUES echo -n "Issues alru: " issues alru docker-0.9.1/hack/dind0000755000175000017500000000610112314376205012653 0ustar tagtag#!/bin/bash # DinD: a wrapper script which allows docker to be run inside a docker container. # Original version by Jerome Petazzoni # See the blog post: http://blog.docker.io/2013/09/docker-can-now-run-within-docker/ # # This script should be executed inside a docker container in privilieged mode # ('docker run -privileged', introduced in docker 0.6). # Usage: dind CMD [ARG...] # First, make sure that cgroups are mounted correctly. 
CGROUP=/sys/fs/cgroup [ -d $CGROUP ] || mkdir $CGROUP mountpoint -q $CGROUP || mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || { echo "Could not make a tmpfs mount. Did you use -privileged?" exit 1 } if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security then mount -t securityfs none /sys/kernel/security || { echo "Could not mount /sys/kernel/security." echo "AppArmor detection and -privileged mode might break." } fi # Mount the cgroup hierarchies exactly as they are in the parent system. for SUBSYS in $(cut -d: -f2 /proc/1/cgroup) do [ -d $CGROUP/$SUBSYS ] || mkdir $CGROUP/$SUBSYS mountpoint -q $CGROUP/$SUBSYS || mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS # The two following sections address a bug which manifests itself # by a cryptic "lxc-start: no ns_cgroup option specified" when # trying to start containers withina container. # The bug seems to appear when the cgroup hierarchies are not # mounted on the exact same directories in the host, and in the # container. # Named, control-less cgroups are mounted with "-o name=foo" # (and appear as such under /proc//cgroup) but are usually # mounted on a directory named "foo" (without the "name=" prefix). # Systemd and OpenRC (and possibly others) both create such a # cgroup. To avoid the aforementioned bug, we symlink "foo" to # "name=foo". This shouldn't have any adverse effect. echo $SUBSYS | grep -q ^name= && { NAME=$(echo $SUBSYS | sed s/^name=//) ln -s $SUBSYS $CGROUP/$NAME } # Likewise, on at least one system, it has been reported that # systemd would mount the CPU and CPU accounting controllers # (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu" # but on a directory called "cpu,cpuacct" (note the inversion # in the order of the groups). This tries to work around it. [ $SUBSYS = cpuacct,cpu ] && ln -s $SUBSYS $CGROUP/cpu,cpuacct done # Note: as I write those lines, the LXC userland tools cannot setup # a "sub-container" properly if the "devices" cgroup is not in its # own hierarchy. Let's detect this and issue a warning. grep -q :devices: /proc/1/cgroup || echo "WARNING: the 'devices' cgroup should be in its own hierarchy." grep -qw devices /proc/1/cgroup || echo "WARNING: it looks like the 'devices' cgroup is not mounted." # Now, close extraneous file descriptors. pushd /proc/self/fd >/dev/null for FD in * do case "$FD" in # Keep stdin/stdout/stderr [012]) ;; # Nuke everything else *) eval exec "$FD>&-" ;; esac done popd >/dev/null # Mount /tmp mount -t tmpfs none /tmp [ "$1" ] && exec "$@" echo "You probably want to run hack/make.sh, or maybe a shell?" docker-0.9.1/hack/getmaintainer.sh0000755000175000017500000000222412314376205015177 0ustar tagtag#!/bin/sh if [ $# -ne 1 ]; then echo >&2 "Usage: $0 PATH" echo >&2 "Show the primary and secondary maintainers for a given path" exit 1 fi set -e DEST=$1 DESTFILE="" if [ ! -d $DEST ]; then DESTFILE=$(basename $DEST) DEST=$(dirname $DEST) fi MAINTAINERS=() cd $DEST while true; do if [ -e ./MAINTAINERS ]; then { while read line; do re='^([^:]*): *(.*)$' file=$(echo $line | sed -E -n "s/$re/\1/p") if [ ! -z "$file" ]; then if [ "$file" = "$DESTFILE" ]; then echo "Override: $line" maintainer=$(echo $line | sed -E -n "s/$re/\2/p") MAINTAINERS=("$maintainer" "${MAINTAINERS[@]}") fi else MAINTAINERS+=("$line"); fi done; } < MAINTAINERS fi if [ -d .git ]; then break fi if [ "$(pwd)" = "/" ]; then break fi cd .. 
done PRIMARY="${MAINTAINERS[0]}" PRIMARY_FIRSTNAME=$(echo $PRIMARY | cut -d' ' -f1) firstname() { echo $1 | cut -d' ' -f1 } echo "--- $PRIMARY is the PRIMARY MAINTAINER of $1. Assign pull requests to him." echo "$(firstname $PRIMARY) may assign pull requests to the following secondary maintainers:" for SECONDARY in "${MAINTAINERS[@]:1}"; do echo "--- $SECONDARY" done docker-0.9.1/hack/PRINCIPLES.md0000644000175000017500000000213612314376205014005 0ustar tagtag# Docker principles In the design and development of Docker we try to follow these principles: (Work in progress) * Don't try to replace every tool. Instead, be an ingredient to improve them. * Less code is better. * Less components is better. Do you really need to add one more class? * 50 lines of straightforward, readable code is better than 10 lines of magic that nobody can understand. * Don't do later what you can do now. "//FIXME: refactor" is not acceptable in new code. * When hesitating between 2 options, choose the one that is easier to reverse. * No is temporary, Yes is forever. If you're not sure about a new feature, say no. You can change your mind later. * Containers must be portable to the greatest possible number of machines. Be suspicious of any change which makes machines less interchangeable. * The less moving parts in a container, the better. * Don't merge it unless you document it. * Don't document it unless you can keep it up-to-date. * Don't merge it unless you test it! * Everyone's problem is slightly different. Focus on the part that is the same for everyone, and solve that. docker-0.9.1/hack/RELEASE-CHECKLIST.md0000644000175000017500000001431712314376205014730 0ustar tagtag# Release Checklist ## A maintainer's guide to releasing Docker So you're in charge of a Docker release? Cool. Here's what to do. If your experience deviates from this document, please document the changes to keep it up-to-date. ### 1. Pull from master and create a release branch ```bash export VERSION=vX.Y.Z git checkout release git fetch git reset --hard origin/release git checkout -b bump_$VERSION git merge origin/master ``` ### 2. Update CHANGELOG.md You can run this command for reference: ```bash LAST_VERSION=$(git tag | grep -E 'v[0-9\.]+$' | sort -nr | head -n 1) git log --stat $LAST_VERSION..HEAD ``` Each change should be listed under a category heading formatted as `#### CATEGORY`. `CATEGORY` should describe which part of the project is affected. Valid categories are: * Builder * Documentation * Hack * Packaging * Remote API * Runtime * Other (please use this category sparingly) Each change should be formatted as `BULLET DESCRIPTION`, given: * BULLET: either `-`, `+` or `*`, to indicate a bugfix, new feature or upgrade, respectively. * DESCRIPTION: a concise description of the change that is relevant to the end-user, using the present tense. Changes should be described in terms of how they affect the user, for example "Add new feature X which allows Y", "Fix bug which caused X", "Increase performance of Y". EXAMPLES: ```markdown ## 0.3.6 (1995-12-25) #### Builder + 'docker build -t FOO .' applies the tag FOO to the newly built container #### Remote API - Fix a bug in the optional unix socket transport #### Runtime * Improve detection of kernel version ``` If you need a list of contributors between the last major release and the current bump branch, use something like: ```bash git log --format='%aN <%aE>' v0.7.0...bump_v0.8.0 | sort -uf ``` Obviously, you'll need to adjust version numbers as necessary. 
If you just need a count, add a simple `| wc -l`. ### 3. Change the contents of the VERSION file ```bash echo ${VERSION#v} > VERSION ``` ### 4. Run all tests ```bash make test ``` ### 5. Test the docs Make sure that your tree includes documentation for any modified or new features, syntax or semantic changes. Instructions for building the docs are in `docs/README.md`. ### 6. Commit and create a pull request to the "release" branch ```bash git add VERSION CHANGELOG.md git commit -m "Bump version to $VERSION" git push origin bump_$VERSION echo "https://github.com/dotcloud/docker/compare/release...bump_$VERSION" ``` That last command will give you the proper link to visit to ensure that you open the PR against the "release" branch instead of accidentally against "master" (like so many brave souls before you already have). ### 7. Get 2 other maintainers to validate the pull request ### 8. Publish binaries To run this you will need access to the release credentials. Get them from [the infrastructure maintainers]( https://github.com/dotcloud/docker/blob/master/hack/infrastructure/MAINTAINERS). ```bash docker build -t docker . export AWS_S3_BUCKET="test.docker.io" export AWS_ACCESS_KEY="$(cat ~/.aws/access_key)" export AWS_SECRET_KEY="$(cat ~/.aws/secret_key)" export GPG_PASSPHRASE=supersecretsesame docker run \ -e AWS_S3_BUCKET=test.docker.io \ -e AWS_ACCESS_KEY \ -e AWS_SECRET_KEY \ -e GPG_PASSPHRASE \ -i -t -privileged \ docker \ hack/release.sh ``` It will run the test suite one more time, build the binaries and packages, and upload to the specified bucket (you should use test.docker.io for general testing, and once everything is fine, switch to get.docker.io as noted below). After the binaries and packages are uploaded to test.docker.io, make sure they get tested in both Ubuntu and Debian for any obvious installation issues or runtime issues. Announcing on IRC in both `#docker` and `#docker-dev` is a great way to get help testing! An easy way to get some useful links for sharing: ```bash echo "Ubuntu/Debian install script: curl -sLS https://test.docker.io/ | sh" echo "Linux 64bit binary: https://test.docker.io/builds/Linux/x86_64/docker-${VERSION#v}" echo "Darwin/OSX 64bit client binary: https://test.docker.io/builds/Darwin/x86_64/docker-${VERSION#v}" echo "Darwin/OSX 32bit client binary: https://test.docker.io/builds/Darwin/i386/docker-${VERSION#v}" echo "Linux 64bit tgz: https://test.docker.io/builds/Linux/x86_64/docker-${VERSION#v}.tgz" ``` Once they're tested and reasonably believed to be working, run against get.docker.io: ```bash docker run \ -e AWS_S3_BUCKET=get.docker.io \ -e AWS_ACCESS_KEY \ -e AWS_SECRET_KEY \ -e GPG_PASSPHRASE \ -i -t -privileged \ docker \ hack/release.sh ``` ### 9. Apply tag ```bash git tag -a $VERSION -m $VERSION bump_$VERSION git push origin $VERSION ``` It's very important that we don't make the tag until after the official release is uploaded to get.docker.io! ### 10. Go to github to merge the `bump_$VERSION` branch into release Don't delete the leftover branch just yet, as we will need it for the next step. ### 11. Go to github to merge the `bump_$VERSION` branch into docs Merging the pull request to the docs branch will automatically update the documentation on the "latest" revision of the docs. You should see the updated docs 5-10 minutes after the merge. The docs will appear on http://docs.docker.io/. For more information about documentation releases, see `docs/README.md`. 
Don't forget to push that pretty blue button to delete the leftover branch afterwards! ### 12. Create a new pull request to merge release back into master ```bash git checkout master git fetch git reset --hard origin/master git merge origin/release git checkout -b merge_release_$VERSION echo ${VERSION#v}-dev > VERSION git add VERSION git commit -m "Change version to $(cat VERSION)" git push origin merge_release_$VERSION echo "https://github.com/dotcloud/docker/compare/master...merge_release_$VERSION" ``` Again, get two maintainers to validate, then merge, then push that pretty blue button to delete your branch. ### 13. Rejoice and Evangelize! Congratulations! You're done. Go forth and announce the glad tidings of the new release in `#docker`, `#docker-dev`, on the [mailing list](https://groups.google.com/forum/#!forum/docker-dev), and on Twitter! docker-0.9.1/hack/allmaintainers.sh0000755000175000017500000000020712314376205015352 0ustar tagtag#!/bin/sh find $1 -name MAINTAINERS -exec cat {} ';' | sed -E -e 's/^[^:]*: *(.*)$/\1/' | grep -E -v -e '^ *$' -e '^ *#.*$' | sort -u docker-0.9.1/hack/PACKAGERS.md0000644000175000017500000003020612314376205013634 0ustar tagtag# Dear Packager, If you are looking to make Docker available on your favorite software distribution, this document is for you. It summarizes the requirements for building and running the Docker client and the Docker daemon. ## Getting Started We want to help you package Docker successfully. Before doing any packaging, a good first step is to introduce yourself on the [docker-dev mailing list](https://groups.google.com/d/forum/docker-dev), explain what you're trying to achieve, and tell us how we can help. Don't worry, we don't bite! There might even be someone already working on packaging for the same distro! You can also join the IRC channel - #docker and #docker-dev on Freenode are both active and friendly. We like to refer to Tianon ("@tianon" on GitHub and "tianon" on IRC) as our "Packagers Relations", since he's always working to make sure our packagers have a good, healthy upstream to work with (both in our communication and in our build scripts). If you're having any kind of trouble, feel free to ping him directly. He also likes to keep track of what distributions we have packagers for, so feel free to reach out to him even just to say "Hi!" ## Package Name If possible, your package should be called "docker". If that name is already taken, a second choice is "lxc-docker", but with the caveat that "LXC" is now an optional dependency (as noted below). Another possible choice is "docker.io". ## Official Build vs Distro Build The Docker project maintains its own build and release toolchain. It is pretty neat and entirely based on Docker (surprise!). This toolchain is the canonical way to build Docker. We encourage you to give it a try, and if the circumstances allow you to use it, we recommend that you do. You might not be able to use the official build toolchain - usually because your distribution has a toolchain and packaging policy of its own. We get it! Your house, your rules. The rest of this document should give you the information you need to package Docker your way, without denaturing it in the process. 
## Build Dependencies To build Docker, you will need the following: * A recent version of git and mercurial * Go version 1.2 or later * A clean checkout of the source added to a valid [Go workspace](http://golang.org/doc/code.html#Workspaces) under the path *src/github.com/dotcloud/docker* (unless you plan to use `AUTO_GOPATH`, explained in more detail below). To build the Docker daemon, you will additionally need: * An amd64/x86_64 machine running Linux * SQLite version 3.7.9 or later * libdevmapper version 1.02.68-cvs (2012-01-26) or later from lvm2 version 2.02.89 or later * btrfs-progs version 3.8 or later (including commit e5cb128 from 2013-01-07) for the necessary btrfs headers Be sure to also check out Docker's Dockerfile for the most up-to-date list of these build-time dependencies. ### Go Dependencies All Go dependencies are vendored under "./vendor". They are used by the official build, so the source of truth for the current version of each dependency is whatever is in "./vendor". To use the vendored dependencies, simply make sure the path to "./vendor" is included in `GOPATH` (or use `AUTO_GOPATH`, as explained below). If you would rather (or must, due to distro policy) package these dependencies yourself, take a look at "./hack/vendor.sh" for an easy-to-parse list of the exact version for each. NOTE: if you're not able to package the exact version (to the exact commit) of a given dependency, please get in touch so we can remediate! Who knows what discrepancies can be caused by even the slightest deviation. We promise to do our best to make everybody happy. ## Stripping Binaries Please, please, please do not strip any compiled binaries. This is really important. In our own testing, stripping the resulting binaries sometimes results in a binary that appears to work, but more often causes random panics, segfaults, and other issues. Even if the binary appears to work, please don't strip. See the following quotes from Dave Cheney, which explain this position better from the upstream Golang perspective. ### [go issue #5855, comment #3](https://code.google.com/p/go/issues/detail?id=5855#c3) > Super super important: Do not strip go binaries or archives. It isn't tested, > often breaks, and doesn't work. ### [launchpad golang issue #1200255, comment #8](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/8) > To quote myself: "Please do not strip Go binaries, it is not supported, not > tested, is often broken, and doesn't do what you want" > > To unpack that a bit > > * not supported, as in, we don't support it, and recommend against it when > asked > * not tested, we don't test stripped binaries as part of the build CI process > * is often broken, stripping a go binary will produce anywhere from no, to > subtle, to outright execution failure, see above ### [launchpad golang issue #1200255, comment #13](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/13) > To clarify my previous statements. > > * I do not disagree with the debian policy, it is there for a good reason > * Having said that, it stripping Go binaries doesn't work, and nobody is > looking at making it work, so there is that. > > Thanks for patching the build formula. ## Building Docker Please use our build script ("./hack/make.sh") for all your compilation of Docker. If there's something you need that it isn't doing, or something it could be doing to make your life as a packager easier, please get in touch with Tianon and help us rectify the situation. 
Chances are good that other packagers have probably run into the same problems and a fix might already be in the works, but none of us will know for sure unless you harass Tianon about it. :) All the commands listed within this section should be run with the Docker source checkout as the current working directory. ### `AUTO_GOPATH` If you'd rather not be bothered with the hassles that setting up `GOPATH` appropriately can be, and prefer to just get a "build that works", you should add something similar to this to whatever script or process you're using to build Docker: ```bash export AUTO_GOPATH=1 ``` This will cause the build scripts to set up a reasonable `GOPATH` that automatically and properly includes both dotcloud/docker from the local directory, and the local "./vendor" directory as necessary. ### `DOCKER_BUILDTAGS` If you're building a binary that may need to be used on platforms that include AppArmor, you will need to set `DOCKER_BUILDTAGS` as follows: ```bash export DOCKER_BUILDTAGS='apparmor' ``` ### Static Daemon If it is feasible within the constraints of your distribution, you should seriously consider packaging Docker as a single static binary. A good comparison is Busybox, which is often packaged statically as a feature to enable mass portability. Because of the unique way Docker operates, being similarly static is a "feature". To build a static Docker daemon binary, run the following command (first ensuring that all the necessary libraries are available in static form for linking - see the "Build Dependencies" section above, and the relevant lines within Docker's own Dockerfile that set up our official build environment): ```bash ./hack/make.sh binary ``` This will create a static binary under "./bundles/$VERSION/binary/docker-$VERSION", where "$VERSION" is the contents of the file "./VERSION". This binary is usually installed somewhere like "/usr/bin/docker". ### Dynamic Daemon / Client-only Binary If you are only interested in a Docker client binary, set `DOCKER_CLIENTONLY` to a non-empty value using something similar to the following: (which will prevent the extra step of compiling dockerinit) ```bash export DOCKER_CLIENTONLY=1 ``` If you need to (due to distro policy, distro library availability, or for other reasons) create a dynamically compiled daemon binary, or if you are only interested in creating a client binary for Docker, use something similar to the following: ```bash ./hack/make.sh dynbinary ``` This will create "./bundles/$VERSION/dynbinary/docker-$VERSION", which for client-only builds is the important file to grab and install as appropriate. For daemon builds, you will also need to grab and install "./bundles/$VERSION/dynbinary/dockerinit-$VERSION", which is created from the minimal set of Docker's codebase that _must_ be compiled statically (and is thus a pure static binary). 
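The following is a purely hypothetical sketch of what a packaging recipe might do with that pair of artifacts. `$DESTDIR` is a common packaging staging-directory convention rather than anything Docker's scripts provide, and the destination paths anticipate the list of acceptable "dockerinit" locations given just below.

```bash
# Hypothetical install step for a dynamically linked daemon build.
VERSION="$(cat VERSION)"
install -D -m 0755 "bundles/$VERSION/dynbinary/docker-$VERSION" \
	"$DESTDIR/usr/bin/docker"
install -D -m 0755 "bundles/$VERSION/dynbinary/dockerinit-$VERSION" \
	"$DESTDIR/usr/libexec/docker/dockerinit"
```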
The acceptable locations Docker will search for this file are as follows (in order): * as "dockerinit" in the same directory as the daemon binary (ie, if docker is installed at "/usr/bin/docker", then "/usr/bin/dockerinit" will be the first place this file is searched for) * "/usr/libexec/docker/dockerinit" or "/usr/local/libexec/docker/dockerinit" ([FHS 3.0 Draft](http://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec)) * "/usr/lib/docker/dockerinit" or "/usr/local/lib/docker/dockerinit" ([FHS 2.3](http://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA)) If (and please, only if) one of the paths above is insufficient due to distro policy or similar issues, you may use the `DOCKER_INITPATH` environment variable at compile-time as follows to set a different path for Docker to search: ```bash export DOCKER_INITPATH=/usr/lib/docker.io/dockerinit ``` If you find yourself needing this, please don't hesitate to reach out to Tianon to see if it would be reasonable or helpful to add more paths to Docker's list, especially if there's a relevant standard worth referencing (such as the FHS). Also, it goes without saying, but for the purposes of the daemon please consider these two binaries ("docker" and "dockerinit") as if they were a single unit. Mixing and matching can cause undesired consequences, and will fail to run properly. ## System Dependencies ### Runtime Dependencies To function properly, the Docker daemon needs the following software to be installed and available at runtime: * iptables version 1.4 or later * XZ Utils version 4.9 or later Additionally, the Docker client needs the following software to be installed and available at runtime: * Git version 1.7 or later ### Kernel Requirements The Docker daemon has very specific kernel requirements. Most pre-packaged kernels already include the necessary options enabled. If you are building your own kernel, you will either need to discover the options necessary via trial and error, or check out the [Gentoo ebuild](https://github.com/tianon/docker-overlay/blob/master/app-emulation/docker/docker-9999.ebuild), in which a list is maintained (and if there are any issues or discrepancies in that list, please contact Tianon so they can be rectified). Note that in client mode, there are no specific kernel requirements, and that the client will even run on alternative platforms such as Mac OS X / Darwin. ### Optional Dependencies Some of Docker's features are activated by using optional command-line flags or by having support for them in the kernel or userspace. A few examples include: * LXC execution driver (requires version 0.8 or later of the LXC utility scripts) * AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at least the "auplink" utility from aufs-tools) * experimental BTRFS graph driver (requires BTRFS support enabled in the kernel) ## Daemon Init Script Docker expects to run as a daemon at machine startup. Your package will need to include a script for your distro's process supervisor of choice. Be sure to check out the "contrib/init" folder in case a suitable init script already exists (and if one does not, contact Tianon about whether it might be appropriate for your distro's init script to live there too!). 
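To make the shape of such a script concrete, here is a deliberately minimal, sysvinit-style sketch. It is illustrative only: the scripts under "contrib/init" handle pidfiles, ulimits and other details and are the better starting point, and the `/etc/default/docker` file with its `DOCKER_OPTS` hook follows the convention used by the Debian/Ubuntu scripts in that folder.

```bash
#!/bin/sh
# Minimal illustration only - start from contrib/init for a real package.
DOCKER_OPTS=""
[ -r /etc/default/docker ] && . /etc/default/docker

case "$1" in
	start) /usr/bin/docker -d $DOCKER_OPTS & ;;
	stop)  kill $(pidof docker) 2>/dev/null || true ;;
	*)     echo "Usage: $0 {start|stop}" >&2; exit 1 ;;
esac
```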
In general, Docker should be run as root, similar to the following: ```bash docker -d ``` Generally, a `DOCKER_OPTS` variable of some kind is available for adding more flags (such as changing the graph driver to use BTRFS, switching the location of "/var/lib/docker", etc). ## Communicate As a final note, please do feel free to reach out to Tianon at any time for pretty much anything. He really does love hearing from our packagers and wants to make sure we're not being a "hostile upstream". As should be a given, we appreciate the work our packagers do to make sure we have broad distribution! docker-0.9.1/hack/MAINTAINERS0000644000175000017500000000005512314376205013506 0ustar tagtagTianon Gravi (@tianon) docker-0.9.1/hack/MAINTAINERS.md0000644000175000017500000000744512314376205014117 0ustar tagtag# The Docker Maintainer manual ## Introduction Dear maintainer. Thank you for investing the time and energy to help make Docker as useful as possible. Maintaining a project is difficult, sometimes unrewarding work. Sure, you will get to contribute cool features to the project. But most of your time will be spent reviewing, cleaning up, documenting, answering questions, justifying design decisions - while everyone has all the fun! But remember - the quality of the maintainers' work is what distinguishes the good projects from the great. So please be proud of your work, even the unglamorous parts, and encourage a culture of appreciation and respect for *every* aspect of improving the project - not just the hot new features. This document is a manual for maintainers old and new. It explains what is expected of maintainers, how they should work, and what tools are available to them. This is a living document - if you see something out of date or missing, speak up! ## What are a maintainer's responsibilities? It is every maintainer's responsibility to: * 1) Expose a clear roadmap for improving their component. * 2) Deliver prompt feedback and decisions on pull requests. * 3) Be available to anyone with questions, bug reports, criticism etc. on their component. This includes IRC, GitHub requests and the mailing list. * 4) Make sure their component respects the philosophy, design and roadmap of the project. ## How are decisions made? Short answer: with pull requests to the docker repository. Docker is an open-source project with an open design philosophy. This means that the repository is the source of truth for EVERY aspect of the project, including its philosophy, design, roadmap and APIs. *If it's part of the project, it's in the repo. It's in the repo, it's part of the project.* As a result, all decisions can be expressed as changes to the repository. An implementation change is a change to the source code. An API change is a change to the API specification. A philosophy change is a change to the philosophy manifesto. And so on. All decisions affecting docker, big and small, follow the same 3 steps: * Step 1: Open a pull request. Anyone can do this. * Step 2: Discuss the pull request. Anyone can do this. * Step 3: Accept or refuse a pull request. The relevant maintainer does this (see below "Who decides what?") ## Who decides what? So all decisions are pull requests, and the relevant maintainer makes the decision by accepting or refusing the pull request. But how do we identify the relevant maintainer for a given pull request?
Docker follows the timeless, highly efficient and totally unfair system known as [Benevolent dictator for life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with yours truly, Solomon Hykes, in the role of BDFL. This means that all decisions are made by default by Solomon. Since making every decision myself would be highly un-scalable, in practice decisions are spread across multiple maintainers. The relevant maintainer for a pull request is assigned in 3 steps: * Step 1: Determine the subdirectory affected by the pull request. This might be `src/registry`, `docs/source/api`, or any other part of the repo. * Step 2: Find the `MAINTAINERS` file which affects this directory. If the directory itself does not have a `MAINTAINERS` file, work your way up the repo hierarchy until you find one. * Step 3: The first maintainer listed is the primary maintainer. The pull request is assigned to him. He may assign it to other listed maintainers, at his discretion. ### I'm a maintainer, should I make pull requests too? Yes. Nobody should ever push to master directly. All changes should be made through a pull request. ### Who assigns maintainers? Solomon. ### How is this process changed? Just like everything else: by making a pull request :) docker-0.9.1/hack/make/0000755000175000017500000000000012314376205012726 5ustar tagtagdocker-0.9.1/hack/make/ubuntu0000644000175000017500000001066312314376205014201 0ustar tagtag#!/bin/bash DEST=$1 PKGVERSION="$VERSION" if [ -n "$(git status --porcelain)" ]; then PKGVERSION="$PKGVERSION-$(date +%Y%m%d%H%M%S)-$GITCOMMIT" fi PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)" PACKAGE_URL="http://www.docker.io/" PACKAGE_MAINTAINER="docker@dotcloud.com" PACKAGE_DESCRIPTION="Linux container runtime Docker complements LXC with a high-level API which operates at the process level. It runs unix processes with strong guarantees of isolation and repeatability across servers. Docker is a great building block for automating distributed systems: large-scale web deployments, database clusters, continuous deployment systems, private PaaS, service-oriented architectures, etc." PACKAGE_LICENSE="Apache-2.0" # Build docker as an ubuntu package using FPM and REPREPRO (sue me). # bundle_binary must be called first. bundle_ubuntu() { DIR=$DEST/build # Include our udev rules mkdir -p $DIR/etc/udev/rules.d cp contrib/udev/80-docker.rules $DIR/etc/udev/rules.d/ # Include our init scripts mkdir -p $DIR/etc/init cp contrib/init/upstart/docker.conf $DIR/etc/init/ mkdir -p $DIR/etc/init.d cp contrib/init/sysvinit-debian/docker $DIR/etc/init.d/ mkdir -p $DIR/etc/default cp contrib/init/sysvinit-debian/docker.default $DIR/etc/default/docker mkdir -p $DIR/lib/systemd/system cp contrib/init/systemd/docker.service $DIR/lib/systemd/system/ # Copy the binary # This will fail if the binary bundle hasn't been built mkdir -p $DIR/usr/bin cp $DEST/../binary/docker-$VERSION $DIR/usr/bin/docker # Generate postinst/prerm/postrm scripts cat > $DEST/postinst <<'EOF' #!/bin/sh set -e set -u if [ "$1" = 'configure' ] && [ -z "$2" ]; then if ! getent group docker > /dev/null; then groupadd --system docker fi fi if ! 
{ [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; }; then # we only need to do this if upstart isn't in charge update-rc.d docker defaults > /dev/null || true fi if [ -n "$2" ]; then _dh_action=restart else _dh_action=start fi service docker $_dh_action 2>/dev/null || true #DEBHELPER# EOF cat > $DEST/prerm <<'EOF' #!/bin/sh set -e set -u service docker stop 2>/dev/null || true #DEBHELPER# EOF cat > $DEST/postrm <<'EOF' #!/bin/sh set -e set -u if [ "$1" = "purge" ] ; then update-rc.d docker remove > /dev/null || true fi # In case this system is running systemd, we make systemd reload the unit files # to pick up changes. if [ -d /run/systemd/system ] ; then systemctl --system daemon-reload > /dev/null || true fi #DEBHELPER# EOF # TODO swaths of these were borrowed from debhelper's auto-inserted stuff, because we're still using fpm - we need to use debhelper instead, and somehow reconcile Ubuntu that way chmod +x $DEST/postinst $DEST/prerm $DEST/postrm ( # switch directories so we create *.deb in the right folder cd $DEST # create lxc-docker-VERSION package fpm -s dir -C $DIR \ --name lxc-docker-$VERSION --version $PKGVERSION \ --after-install $DEST/postinst \ --before-remove $DEST/prerm \ --after-remove $DEST/postrm \ --architecture "$PACKAGE_ARCHITECTURE" \ --prefix / \ --depends iptables \ --deb-recommends aufs-tools \ --deb-recommends ca-certificates \ --deb-recommends git \ --deb-recommends xz-utils \ --deb-suggests cgroup-lite \ --description "$PACKAGE_DESCRIPTION" \ --maintainer "$PACKAGE_MAINTAINER" \ --conflicts docker \ --conflicts docker.io \ --conflicts lxc-docker-virtual-package \ --provides lxc-docker \ --provides lxc-docker-virtual-package \ --replaces lxc-docker \ --replaces lxc-docker-virtual-package \ --url "$PACKAGE_URL" \ --license "$PACKAGE_LICENSE" \ --config-files /etc/udev/rules.d/80-docker.rules \ --config-files /etc/init/docker.conf \ --config-files /etc/init.d/docker \ --config-files /etc/default/docker \ --deb-compression gz \ -t deb . 
# TODO replace "Suggests: cgroup-lite" with "Recommends: cgroupfs-mount | cgroup-lite" once cgroupfs-mount is available # create empty lxc-docker wrapper package fpm -s empty \ --name lxc-docker --version $PKGVERSION \ --architecture "$PACKAGE_ARCHITECTURE" \ --depends lxc-docker-$VERSION \ --description "$PACKAGE_DESCRIPTION" \ --maintainer "$PACKAGE_MAINTAINER" \ --url "$PACKAGE_URL" \ --license "$PACKAGE_LICENSE" \ --deb-compression gz \ -t deb ) # clean up after ourselves so we have a clean output directory rm $DEST/postinst $DEST/prerm $DEST/postrm rm -r $DIR } bundle_ubuntu docker-0.9.1/hack/make/test-integration0000644000175000017500000000074212314376205016154 0ustar tagtag#!/bin/bash DEST=$1 set -e bundle_test_integration() { LDFLAGS="$LDFLAGS $LDFLAGS_STATIC_DOCKER" go_test_dir ./integration \ "-coverpkg $(find_dirs '*.go' | sed 's,^\.,github.com/dotcloud/docker,g' | paste -d, -s)" } # this "grep" hides some really irritating warnings that "go test -coverpkg" # spews when it is given packages that aren't used bundle_test_integration 2>&1 \ | grep --line-buffered -v '^warning: no packages being tested depend on ' \ | tee $DEST/test.log docker-0.9.1/hack/make/cover0000644000175000017500000000060712314376205013772 0ustar tagtag#!/bin/bash DEST="$1" bundle_cover() { coverprofiles=( "$DEST/../"*"/coverprofiles/"* ) for p in "${coverprofiles[@]}"; do echo ( set -x go tool cover -func="$p" ) done } if [ "$HAVE_GO_TEST_COVER" ]; then bundle_cover 2>&1 | tee "$DEST/report.log" else echo >&2 'warning: the current version of go does not support -cover' echo >&2 ' skipping test coverage report' fi docker-0.9.1/hack/make/cross0000644000175000017500000000122012314376205013775 0ustar tagtag#!/bin/bash DEST=$1 # if we have our linux/amd64 version compiled, let's symlink it in if [ -x "$DEST/../binary/docker-$VERSION" ]; then mkdir -p "$DEST/linux/amd64" ( cd "$DEST/linux/amd64" ln -s ../../../binary/* ./ ) echo "Created symlinks:" "$DEST/linux/amd64/"* fi for platform in $DOCKER_CROSSPLATFORMS; do ( mkdir -p "$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION export GOOS=${platform%/*} export GOARCH=${platform##*/} export LDFLAGS_STATIC_DOCKER="" # we just need a simple client for these platforms (TODO this might change someday) source "$(dirname "$BASH_SOURCE")/binary" "$DEST/$platform" ) done docker-0.9.1/hack/make/dyntest0000644000175000017500000000055012314376205014343 0ustar tagtag#!/bin/bash DEST=$1 INIT=$DEST/../dynbinary/dockerinit-$VERSION set -e if [ ! -x "$INIT" ]; then echo >&2 'error: dynbinary must be run before dyntest' false fi ( export TEST_DOCKERINIT_PATH="$INIT" export LDFLAGS_STATIC_DOCKER=" -X github.com/dotcloud/docker/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\" " source "$(dirname "$BASH_SOURCE")/test" ) docker-0.9.1/hack/make/binary0000755000175000017500000000034512314376205014142 0ustar tagtag#!/bin/bash DEST=$1 go build \ -o "$DEST/docker-$VERSION" \ "${BUILDFLAGS[@]}" \ -ldflags " $LDFLAGS $LDFLAGS_STATIC_DOCKER " \ ./docker echo "Created binary: $DEST/docker-$VERSION" hash_files "$DEST/docker-$VERSION" docker-0.9.1/hack/make/tgz0000644000175000017500000000113712314376205013457 0ustar tagtag#!/bin/bash DEST="$1" CROSS="$DEST/../cross" set -e if [ ! 
-d "$CROSS/linux/amd64" ]; then echo >&2 'error: binary and cross must be run before tgz' false fi for d in "$CROSS/"*/*; do GOARCH="$(basename "$d")" GOOS="$(basename "$(dirname "$d")")" mkdir -p "$DEST/$GOOS/$GOARCH" TGZ="$DEST/$GOOS/$GOARCH/docker-$VERSION.tgz" mkdir -p "$DEST/build" mkdir -p "$DEST/build/usr/local/bin" cp -L "$d/docker-$VERSION" "$DEST/build/usr/local/bin/docker" tar --numeric-owner --owner 0 -C "$DEST/build" -czf "$TGZ" usr hash_files "$TGZ" rm -rf "$DEST/build" echo "Created tgz: $TGZ" done docker-0.9.1/hack/make/dyntest-integration0000644000175000017500000000060012314376205016660 0ustar tagtag#!/bin/bash DEST=$1 INIT=$DEST/../dynbinary/dockerinit-$VERSION set -e if [ ! -x "$INIT" ]; then echo >&2 'error: dynbinary must be run before dyntest-integration' false fi ( export TEST_DOCKERINIT_PATH="$INIT" export LDFLAGS_STATIC_DOCKER=" -X github.com/dotcloud/docker/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\" " source "$(dirname "$BASH_SOURCE")/test-integration" ) docker-0.9.1/hack/make/test0000644000175000017500000000217712314376205013637 0ustar tagtag#!/bin/bash DEST=$1 set -e RED=$'\033[31m' GREEN=$'\033[32m' TEXTRESET=$'\033[0m' # reset the foreground colour # Run Docker's test suite, including sub-packages, and store their output as a bundle # If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. # You can use this to select certain tests to run, eg. # # TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test # bundle_test() { { date TESTS_FAILED=() for test_dir in $(find_dirs '*_test.go'); do echo if ! LDFLAGS="$LDFLAGS $LDFLAGS_STATIC_DOCKER" go_test_dir "$test_dir"; then TESTS_FAILED+=("$test_dir") echo echo "${RED}Tests failed: $test_dir${TEXTRESET}" sleep 1 # give it a second, so observers watching can take note fi done echo echo echo # if some tests fail, we want the bundlescript to fail, but we want to # try running ALL the tests first, hence TESTS_FAILED if [ "${#TESTS_FAILED[@]}" -gt 0 ]; then echo "${RED}Test failures in: ${TESTS_FAILED[@]}${TEXTRESET}" echo false else echo "${GREEN}Test success${TEXTRESET}" echo true fi } 2>&1 | tee $DEST/test.log } bundle_test docker-0.9.1/hack/make/dynbinary0000644000175000017500000000253212314376205014652 0ustar tagtag#!/bin/bash DEST=$1 if [ -z "$DOCKER_CLIENTONLY" ]; then # dockerinit still needs to be a static binary, even if docker is dynamic go build \ -o "$DEST/dockerinit-$VERSION" \ "${BUILDFLAGS[@]}" \ -ldflags " $LDFLAGS $LDFLAGS_STATIC -extldflags \"$EXTLDFLAGS_STATIC\" " \ ./dockerinit echo "Created binary: $DEST/dockerinit-$VERSION" ln -sf "dockerinit-$VERSION" "$DEST/dockerinit" hash_files "$DEST/dockerinit-$VERSION" sha1sum= if command -v sha1sum &> /dev/null; then sha1sum=sha1sum elif command -v shasum &> /dev/null; then # Mac OS X - why couldn't they just use the same command name and be happy? 
sha1sum=shasum else echo >&2 'error: cannot find sha1sum command or equivalent' exit 1 fi # sha1 our new dockerinit to ensure separate docker and dockerinit always run in a perfect pair compiled for one another export DOCKER_INITSHA1="$($sha1sum $DEST/dockerinit-$VERSION | cut -d' ' -f1)" else # DOCKER_CLIENTONLY must be truthy, so we don't need to bother with dockerinit :) export DOCKER_INITSHA1="" fi # exported so that "dyntest" can easily access it later without recalculating it ( export LDFLAGS_STATIC_DOCKER="-X github.com/dotcloud/docker/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\" -X github.com/dotcloud/docker/dockerversion.INITPATH \"$DOCKER_INITPATH\"" source "$(dirname "$BASH_SOURCE")/binary" ) docker-0.9.1/hack/make/README.md0000644000175000017500000000063412314376205014210 0ustar tagtagThis directory holds scripts called by `make.sh` in the parent directory. Each script is named after the bundle it creates. They should not be called directly - instead, pass it as argument to make.sh, for example: ``` ./hack/make.sh test ./hack/make.sh binary ubuntu # Or to run all bundles: ./hack/make.sh ``` To add a bundle: * Create a shell-compatible file here * Add it to $DEFAULT_BUNDLES in make.sh docker-0.9.1/hack/bootcamp/0000755000175000017500000000000012314376205013615 5ustar tagtagdocker-0.9.1/hack/bootcamp/README.md0000644000175000017500000001010212314376205015066 0ustar tagtag# Docker maintainer bootcamp ## Introduction: we need more maintainers Docker is growing incredibly fast. At the time of writing, it has received over 200 contributions from 90 people, and its API is used by dozens of 3rd-party tools. Over 1,000 issues have been opened. As the first production deployments start going live, the growth will only accelerate. Also at the time of writing, Docker has 3 full-time maintainers, and 7 part-time subsystem maintainers. If docker is going to live up to the expectations, we need more than that. This document describes a *bootcamp* to guide and train volunteers interested in helping the project, either with individual contributions, maintainer work, or both. This bootcamp is an experiment. If you decide to go through it, consider yourself an alpha-tester. You should expect quirks, and report them to us as you encounter them to help us smooth out the process. ## How it works The maintainer bootcamp is a 12-step program - one step for each of the maintainer's responsibilities. The aspiring maintainer must validate all 12 steps by 1) studying it, 2) practicing it, and 3) getting endorsed for it. Steps are all equally important and can be validated in any order. Validating all 12 steps is a pre-requisite for becoming a core maintainer, but even 1 step will make you a better contributor! ### List of steps #### 1) Be a power user Use docker daily, build cool things with it, know its quirks inside and out. #### 2) Help users Answer questions on irc, twitter, email, in person. #### 3) Manage the bug tracker Help triage tickets - ask the right questions, find duplicates, reference relevant resources, know when to close a ticket when necessary, take the time to go over older tickets. #### 4) Improve the documentation Follow the documentation from scratch regularly and make sure it is still up-to-date. Find and fix inconsistencies. Remove stale information. Find a frequently asked question that is not documented. Simplify the content and the form. #### 5) Evangelize the principles of docker Understand what the underlying goals and principle of docker are. 
Explain design decisions based on what docker is, and what it is not. When someone is not using docker, find how docker can be valuable to them. If they are using docker, find how they can use it better. #### 6) Fix bugs Self-explanatory. Contribute improvements to docker which solve defects. Bugfixes should be well-tested, and prioritized by impact to the user. #### 7) Improve the testing infrastructure Automated testing is complicated and should be perpetually improved. Invest time to improve the current tooling. Refactor existing tests, create new ones, make testing more accessible to developers, add new testing capabilities (integration tests, mocking, stress test...), improve integration between tests and documentation... #### 8) Contribute features Improve docker to do more things, or get better at doing the same things. Features should be well-tested, not break existing APIs, respect the project goals. They should make the user's life measurably better. Features should be discussed ahead of time to avoid wasting time and duplicating effort. #### 9) Refactor internals Improve docker to repay technical debt. Simplify code layout, improve performance, add missing comments, reduce the number of files and functions, rename functions and variables to be more readable, go over FIXMEs, etc. #### 10) Review and merge contributions Review pull requests in a timely manner, review code in detail and offer feedback. Keep a high bar without being pedantic. Share the load of testing and merging pull requests. #### 11) Release Manage a release of docker from beginning to end. Tests, final review, tags, builds, upload to mirrors, distro packaging, etc. #### 12) Train other maintainers Contribute to training other maintainers. Give advice, delegate work, help organize the bootcamp. This also means contributing to the maintainer's manual, looking for ways to improve the project organization, etc. ### How to study a step ### How to practice a step ### How to get endorsed for a step docker-0.9.1/hack/ROADMAP.md0000644000175000017500000000575312314376205013420 0ustar tagtag# Docker: what's next? This document is a high-level overview of where we want to take Docker next. It is a curated selection of planned improvements which are either important, difficult, or both. For a more complete view of planned and requested improvements, see [the Github issues](https://github.com/dotcloud/docker/issues). To suggest changes to the roadmap, including additions, please write the change as if it were already in effect, and make a pull request. ## Container wiring and service discovery In its current version, docker doesn’t make it very easy to manipulate multiple containers as a cohesive group (ie. orchestration), and it doesn’t make it seamless for containers to connect to each other as network services (ie. wiring). To achieve wiring and orchestration with docker today, you need to write glue scripts yourself, or use one of several companion tools available, like Orchestra, Shipper, Deis, Pipeworks, etc. We want the Docker API to support orchestration and wiring natively, so that these tools can cleanly and seamlessly integrate into the Docker user experience, and remain interoperable with each other. ## Better integration with process supervisors For docker to be fully usable in production, it needs to cleanly integrate with the host machine’s process supervisor of choice. Whether it’s sysV-init, upstart, systemd, runit or supervisord, we want to make sure docker plays nice with your existing system.
This will be a major focus of the 0.7 release. ## Plugin API We want Docker to run everywhere, and to integrate with every devops tool. Those are ambitious goals, and the only way to reach them is with the Docker community. For the community to participate fully, we need an API which allows Docker to be deeply and easily customized. We are working on a plugin API which will make Docker very, very customization-friendly. We believe it will facilitate the integrations listed above – and many more we didn’t even think about. ## Broader kernel support Our goal is to make Docker run everywhere, but currently Docker requires Linux version 3.8 or higher with lxc and aufs support. If you’re deploying new machines for the purpose of running Docker, this is a fairly easy requirement to meet. However, if you’re adding Docker to an existing deployment, you may not have the flexibility to update and patch the kernel. Expanding Docker’s kernel support is a priority. This includes running on older kernel versions, but also on kernels with no AUFS support, or with incomplete lxc capabilities. ## Cross-architecture support Our goal is to make Docker run everywhere. However, currently Docker only runs on x86_64 systems. We plan on expanding architecture support, so that Docker containers can be created and used on more architectures. ## Production-ready Docker is still beta software, and not suited for production. We are working hard to get there, and we are confident that it will be possible within a few months. Stay tuned for a more detailed roadmap soon. docker-0.9.1/hack/make.sh0000755000175000017500000001334012314376205013266 0ustar tagtag#!/usr/bin/env bash set -e # This script builds various binary artifacts from a checkout of the docker # source code. # # Requirements: # - The current directory should be a checkout of the docker source code # (http://github.com/dotcloud/docker). Whatever version is checked out # will be built. # - The VERSION file, at the root of the repository, should exist, and # will be used as Docker binary version and package version. # - The hash of the git commit will also be included in the Docker binary, # with the suffix -dirty if the repository isn't clean. # - The script is intended to be run inside the docker container specified # in the Dockerfile at the root of the source. In other words: # DO NOT CALL THIS SCRIPT DIRECTLY. # - The right way to call this script is to invoke "make" from # your checkout of the Docker repository. # the Makefile will do a "docker build -t docker ." and then # "docker run hack/make.sh" in the resulting container image. # set -o pipefail # We're a nice, sexy, little shell script, and people might try to run us; # but really, they shouldn't. We want to be in a container! if [ "$(pwd)" != '/go/src/github.com/dotcloud/docker' ] || [ -z "$DOCKER_CROSSPLATFORMS" ]; then { echo "# WARNING! I don't seem to be running in the Docker container." echo "# The result of this command might be an incorrect build, and will not be" echo "# officially supported."
echo "#" echo "# Try this instead: make all" echo "#" } >&2 fi echo # List of bundles to create when no argument is passed DEFAULT_BUNDLES=( binary test test-integration dynbinary dyntest dyntest-integration cover cross tgz ubuntu ) VERSION=$(cat ./VERSION) if command -v git &> /dev/null && git rev-parse &> /dev/null; then GITCOMMIT=$(git rev-parse --short HEAD) if [ -n "$(git status --porcelain --untracked-files=no)" ]; then GITCOMMIT="$GITCOMMIT-dirty" fi elif [ "$DOCKER_GITCOMMIT" ]; then GITCOMMIT="$DOCKER_GITCOMMIT" else echo >&2 'error: .git directory missing and DOCKER_GITCOMMIT not specified' echo >&2 ' Please either build with the .git directory accessible, or specify the' echo >&2 ' exact (--short) commit hash you are building using DOCKER_GITCOMMIT for' echo >&2 ' future accountability in diagnosing build issues. Thanks!' exit 1 fi if [ "$AUTO_GOPATH" ]; then rm -rf .gopath mkdir -p .gopath/src/github.com/dotcloud ln -sf ../../../.. .gopath/src/github.com/dotcloud/docker export GOPATH="$(pwd)/.gopath:$(pwd)/vendor" fi if [ ! "$GOPATH" ]; then echo >&2 'error: missing GOPATH; please see http://golang.org/doc/code.html#GOPATH' echo >&2 ' alternatively, set AUTO_GOPATH=1' exit 1 fi # Use these flags when compiling the tests and final binary LDFLAGS=' -w -X github.com/dotcloud/docker/dockerversion.GITCOMMIT "'$GITCOMMIT'" -X github.com/dotcloud/docker/dockerversion.VERSION "'$VERSION'" ' LDFLAGS_STATIC='-linkmode external' EXTLDFLAGS_STATIC='-static' BUILDFLAGS=( -a -tags "netgo $DOCKER_BUILDTAGS" ) # A few more flags that are specific just to building a completely-static binary (see hack/make/binary) # PLEASE do not use these anywhere else. EXTLDFLAGS_STATIC_DOCKER="$EXTLDFLAGS_STATIC -lpthread -Wl,--unresolved-symbols=ignore-in-object-files" LDFLAGS_STATIC_DOCKER=" $LDFLAGS_STATIC -X github.com/dotcloud/docker/dockerversion.IAMSTATIC true -extldflags \"$EXTLDFLAGS_STATIC_DOCKER\" " HAVE_GO_TEST_COVER= if \ go help testflag | grep -- -cover > /dev/null \ && go tool -n cover > /dev/null 2>&1 \ ; then HAVE_GO_TEST_COVER=1 fi # If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. # You can use this to select certain tests to run, eg. # # TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test # go_test_dir() { dir=$1 coverpkg=$2 testcover=() if [ "$HAVE_GO_TEST_COVER" ]; then # if our current go install has -cover, we want to use it :) mkdir -p "$DEST/coverprofiles" coverprofile="docker${dir#.}" coverprofile="$DEST/coverprofiles/${coverprofile//\//-}" testcover=( -cover -coverprofile "$coverprofile" $coverpkg ) fi ( set -x cd "$dir" go test ${testcover[@]} -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS ) } # This helper function walks the current directory looking for directories # holding certain files ($1 parameter), and prints their paths on standard # output, one per line. 
find_dirs() { find -not \( \ \( -wholename './vendor' -o -wholename './integration' -o -wholename './contrib' -o -wholename './pkg/mflag/example' \) \ -prune \ \) -name "$1" -print0 | xargs -0n1 dirname | sort -u } hash_files() { while [ $# -gt 0 ]; do f="$1" shift dir="$(dirname "$f")" base="$(basename "$f")" for hashAlgo in md5 sha256; do if command -v "${hashAlgo}sum" &> /dev/null; then ( # subshell and cd so that we get output files like: # $HASH docker-$VERSION # instead of: # $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION cd "$dir" "${hashAlgo}sum" "$base" > "$base.$hashAlgo" ) fi done done } bundle() { bundlescript=$1 bundle=$(basename $bundlescript) echo "---> Making bundle: $bundle (in bundles/$VERSION/$bundle)" mkdir -p bundles/$VERSION/$bundle source $bundlescript $(pwd)/bundles/$VERSION/$bundle } main() { # We want this to fail if the bundles already exist and cannot be removed. # This is to avoid mixing bundles from different versions of the code. mkdir -p bundles if [ -e "bundles/$VERSION" ]; then echo "bundles/$VERSION already exists. Removing." rm -fr bundles/$VERSION && mkdir bundles/$VERSION || exit 1 echo fi SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" if [ $# -lt 1 ]; then bundles=(${DEFAULT_BUNDLES[@]}) else bundles=($@) fi for bundle in ${bundles[@]}; do bundle $SCRIPTDIR/make/$bundle echo done } main "$@" docker-0.9.1/hack/travis/0000755000175000017500000000000012314376205013321 5ustar tagtagdocker-0.9.1/hack/travis/gofmt.py0000755000175000017500000000135112314376205015012 0ustar tagtag#!/usr/bin/env python import subprocess from env import commit_range files = subprocess.check_output([ 'git', 'diff', '--diff-filter=ACMR', '--name-only', '...'.join(commit_range), '--', ]) exit_status = 0 for filename in files.split('\n'): if filename.startswith('vendor/'): continue # we can't be changing our upstream vendors for gofmt, so don't even check them if filename.endswith('.go'): try: out = subprocess.check_output(['gofmt', '-s', '-l', filename]) if out != '': print out, exit_status = 1 except subprocess.CalledProcessError: exit_status = 1 if exit_status != 0: print 'Reformat the files listed above with "gofmt -s -w" and try again.' exit(exit_status) print 'All files pass gofmt.' exit(0) docker-0.9.1/hack/travis/dco.py0000755000175000017500000000307012314376205014443 0ustar tagtag#!/usr/bin/env python import re import subprocess import yaml from env import commit_range commit_format = '-%n hash: "%h"%n author: %aN <%aE>%n message: |%n%w(0,2,2).%B' gitlog = subprocess.check_output([ 'git', 'log', '--reverse', '--format=format:'+commit_format, '..'.join(commit_range), '--', ]) commits = yaml.load(gitlog) if not commits: exit(0) # what? how can we have no commits? DCO = 'Docker-DCO-1.1-Signed-off-by:' p = re.compile(r'^{0} ([^<]+) <([^<>@]+@[^<>]+)> \(github: (\S+)\)$'.format(re.escape(DCO)), re.MULTILINE|re.UNICODE) failed_commits = 0 for commit in commits: commit['message'] = commit['message'][1:] # trim off our '.' 
that exists just to prevent fun YAML parsing issues # see https://github.com/dotcloud/docker/pull/3836#issuecomment-33723094 # and https://travis-ci.org/dotcloud/docker/builds/17926783 commit['stat'] = subprocess.check_output([ 'git', 'log', '--format=format:', '--max-count=1', '--name-status', commit['hash'], '--', ]) if commit['stat'] == '': print 'Commit {0} has no actual changed content, skipping.'.format(commit['hash']) continue m = p.search(commit['message']) if not m: print 'Commit {1} does not have a properly formatted "{0}" marker.'.format(DCO, commit['hash']) failed_commits += 1 continue # print ALL the commits that don't have a proper DCO (name, email, github) = m.groups() # TODO verify that "github" is the person who actually made this commit via the GitHub API if failed_commits > 0: exit(failed_commits) print 'All commits have a valid "{0}" marker.'.format(DCO) exit(0) docker-0.9.1/hack/travis/env.py0000644000175000017500000000151412314376205014464 0ustar tagtagimport os import subprocess if 'TRAVIS' not in os.environ: print 'TRAVIS is not defined; this should run in TRAVIS. Sorry.' exit(127) if os.environ['TRAVIS_PULL_REQUEST'] != 'false': commit_range = ['upstream/' + os.environ['TRAVIS_BRANCH'], 'FETCH_HEAD'] else: try: subprocess.check_call([ 'git', 'log', '-1', '--format=format:', os.environ['TRAVIS_COMMIT_RANGE'], '--', ]) commit_range = os.environ['TRAVIS_COMMIT_RANGE'].split('...') if len(commit_range) == 1: # if it didn't split, it must have been separated by '..' instead commit_range = commit_range[0].split('..') except subprocess.CalledProcessError: print 'TRAVIS_COMMIT_RANGE is invalid. This seems to be a force push. We will just assume it must be against upstream master and compare all commits in between.' commit_range = ['upstream/master', 'HEAD'] docker-0.9.1/hack/vendor.sh0000755000175000017500000000241412314376205013646 0ustar tagtag#!/usr/bin/env bash set -e cd "$(dirname "$BASH_SOURCE")/.." # Downloads dependencies into vendor/ directory mkdir -p vendor cd vendor clone() { vcs=$1 pkg=$2 rev=$3 pkg_url=https://$pkg target_dir=src/$pkg echo -n "$pkg @ $rev: " if [ -d $target_dir ]; then echo -n 'rm old, ' rm -fr $target_dir fi echo -n 'clone, ' case $vcs in git) git clone --quiet --no-checkout $pkg_url $target_dir ( cd $target_dir && git reset --quiet --hard $rev ) ;; hg) hg clone --quiet --updaterev $rev $pkg_url $target_dir ;; esac echo -n 'rm VCS, ' ( cd $target_dir && rm -rf .{git,hg} ) echo done } clone git github.com/kr/pty 3b1f6487b clone git github.com/gorilla/context 708054d61e5 clone git github.com/gorilla/mux 9b36453141c clone git github.com/syndtr/gocapability 3454319be2 clone hg code.google.com/p/go.net 84a4013f96e0 clone hg code.google.com/p/gosqlite 74691fb6f837 # get Go tip's archive/tar, for xattr support # TODO after Go 1.3 drops, bump our minimum supported version and drop this vendored dep clone hg code.google.com/p/go a15f344a9efa mv src/code.google.com/p/go/src/pkg/archive/tar tmp-tar rm -rf src/code.google.com/p/go mkdir -p src/code.google.com/p/go/src/pkg/archive mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar docker-0.9.1/hack/release.sh0000755000175000017500000002413612314376205013776 0ustar tagtag#!/usr/bin/env bash set -e # This script looks for bundles built by make.sh, and releases them on a # public S3 bucket. # # Bundles should be available for the VERSION string passed as argument. # # The correct way to call this script is inside a container built by the # official Dockerfile at the root of the Docker source code. 
The Dockerfile, # make.sh and release.sh should all be from the same source code revision. set -o pipefail # Print a usage message and exit. usage() { cat >&2 <<'EOF' To run, I need: - to be in a container generated by the Dockerfile at the top of the Docker repository; - to be provided with the name of an S3 bucket, in environment variable AWS_S3_BUCKET; - to be provided with AWS credentials for this S3 bucket, in environment variables AWS_ACCESS_KEY and AWS_SECRET_KEY; - the passphrase to unlock the GPG key which will sign the deb packages (passed as environment variable GPG_PASSPHRASE); - a generous amount of good will and nice manners. The canonical way to run me is to run the image produced by the Dockerfile: e.g.:" docker run -e AWS_S3_BUCKET=get-staging.docker.io \ -e AWS_ACCESS_KEY=AKI1234... \ -e AWS_SECRET_KEY=sEs4mE... \ -e GPG_PASSPHRASE=m0resEs4mE... \ -i -t -privileged \ docker ./hack/release.sh EOF exit 1 } [ "$AWS_S3_BUCKET" ] || usage [ "$AWS_ACCESS_KEY" ] || usage [ "$AWS_SECRET_KEY" ] || usage [ "$GPG_PASSPHRASE" ] || usage [ -d /go/src/github.com/dotcloud/docker ] || usage cd /go/src/github.com/dotcloud/docker [ -x hack/make.sh ] || usage RELEASE_BUNDLES=( binary cross tgz ubuntu ) if [ "$1" != '--release-regardless-of-test-failure' ]; then RELEASE_BUNDLES=( test "${RELEASE_BUNDLES[@]}" ) fi VERSION=$(cat VERSION) BUCKET=$AWS_S3_BUCKET # These are the 2 keys we've used to sign the deb's # release (get.docker.io) # GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9" # test (test.docker.io) # GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6" setup_s3() { # Try creating the bucket. Ignore errors (it might already exist). s3cmd mb s3://$BUCKET 2>/dev/null || true # Check access to the bucket. # s3cmd has no useful exit status, so we cannot check that. # Instead, we check if it outputs anything on standard output. # (When there are problems, it uses standard error instead.) s3cmd info s3://$BUCKET | grep -q . # Make the bucket accessible through website endpoints. s3cmd ws-create --ws-index index --ws-error error s3://$BUCKET } # write_to_s3 uploads the contents of standard input to the specified S3 url. write_to_s3() { DEST=$1 F=`mktemp` cat > $F s3cmd --acl-public --mime-type='text/plain' put $F $DEST rm -f $F } s3_url() { case "$BUCKET" in get.docker.io|test.docker.io) echo "https://$BUCKET" ;; *) s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }' ;; esac } build_all() { if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then echo >&2 echo >&2 'The build or tests appear to have failed.' echo >&2 echo >&2 'You, as the release maintainer, now have a couple options:' echo >&2 '- delay release and fix issues' echo >&2 '- delay release and fix issues' echo >&2 '- did we mention how important this is? issues need fixing :)' echo >&2 echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,' echo >&2 ' really knows all the hairy problems at hand with the current release' echo >&2 ' issues) may bypass this checking by running this script again with the' echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip' echo >&2 ' running the test suite, and will only build the binaries and packages. Please' echo >&2 ' avoid using this if at all possible.' echo >&2 echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass' echo >&2 ' should be used. If there are release issues, we should always err on the' echo >&2 ' side of caution.' 
echo >&2 exit 1 fi } upload_release_build() { src="$1" dst="$2" latest="$3" echo echo "Uploading $src" echo " to $dst" echo s3cmd --follow-symlinks --preserve --acl-public put "$src" "$dst" if [ "$latest" ]; then echo echo "Copying to $latest" echo s3cmd --acl-public cp "$dst" "$latest" fi # get hash files too (see hash_files() in hack/make.sh) for hashAlgo in md5 sha256; do if [ -e "$src.$hashAlgo" ]; then echo echo "Uploading $src.$hashAlgo" echo " to $dst.$hashAlgo" echo s3cmd --follow-symlinks --preserve --acl-public --mime-type='text/plain' put "$src.$hashAlgo" "$dst.$hashAlgo" if [ "$latest" ]; then echo echo "Copying to $latest.$hashAlgo" echo s3cmd --acl-public cp "$dst.$hashAlgo" "$latest.$hashAlgo" fi fi done } release_build() { GOOS=$1 GOARCH=$2 binDir=bundles/$VERSION/cross/$GOOS/$GOARCH tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH binary=docker-$VERSION tgz=docker-$VERSION.tgz latestBase= if [ -z "$NOLATEST" ]; then latestBase=docker-latest fi # we need to map our GOOS and GOARCH to uname values # see https://en.wikipedia.org/wiki/Uname # ie, GOOS=linux -> "uname -s"=Linux s3Os=$GOOS case "$s3Os" in darwin) s3Os=Darwin ;; freebsd) s3Os=FreeBSD ;; linux) s3Os=Linux ;; *) echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'" exit 1 ;; esac s3Arch=$GOARCH case "$s3Arch" in amd64) s3Arch=x86_64 ;; 386) s3Arch=i386 ;; arm) s3Arch=armel # someday, we might potentially support mutliple GOARM values, in which case we might get armhf here too ;; *) echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'" exit 1 ;; esac s3Dir=s3://$BUCKET/builds/$s3Os/$s3Arch latest= latestTgz= if [ "$latestBase" ]; then latest="$s3Dir/$latestBase" latestTgz="$s3Dir/$latestBase.tgz" fi if [ ! -x "$binDir/$binary" ]; then echo >&2 "error: can't find $binDir/$binary - was it compiled properly?" exit 1 fi if [ ! -f "$tgzDir/$tgz" ]; then echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?" exit 1 fi upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest" upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz" } # Upload the 'ubuntu' bundle to S3: # 1. A full APT repository is published at $BUCKET/ubuntu/ # 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/index release_ubuntu() { [ -e bundles/$VERSION/ubuntu ] || { echo >&2 './hack/make.sh must be run before release_ubuntu' exit 1 } # Sign our packages dpkg-sig -g "--passphrase $GPG_PASSPHRASE" -k releasedocker \ --sign builder bundles/$VERSION/ubuntu/*.deb # Setup the APT repo APTDIR=bundles/$VERSION/ubuntu/apt mkdir -p $APTDIR/conf $APTDIR/db s3cmd sync s3://$BUCKET/ubuntu/db/ $APTDIR/db/ || true cat > $APTDIR/conf/distributions < bundles/$VERSION/ubuntu/gpg s3cmd --acl-public put bundles/$VERSION/ubuntu/gpg s3://$BUCKET/gpg # Upload repo s3cmd --acl-public sync $APTDIR/ s3://$BUCKET/ubuntu/ cat < /etc/apt/sources.list.d/docker.list # Then import the repository key apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 # Install docker apt-get update ; apt-get install -y lxc-docker # # Alternatively, just use the curl-able install.sh script provided at $(s3_url) # EOF # Add redirect at /ubuntu/info for URL-backwards-compatibility rm -rf /tmp/emptyfile && touch /tmp/emptyfile s3cmd --acl-public --add-header='x-amz-website-redirect-location:/ubuntu/' --mime-type='text/plain' put /tmp/emptyfile s3://$BUCKET/ubuntu/info echo "APT repository uploaded. 
Instructions available at $(s3_url)/ubuntu" } # Upload binaries and tgz files to S3 release_binaries() { [ -e bundles/$VERSION/cross/linux/amd64/docker-$VERSION ] || { echo >&2 './hack/make.sh must be run before release_binaries' exit 1 } for d in bundles/$VERSION/cross/*/*; do GOARCH="$(basename "$d")" GOOS="$(basename "$(dirname "$d")")" release_build "$GOOS" "$GOARCH" done # TODO create redirect from builds/*/i686 to builds/*/i386 cat </dev/null || { gpg --gen-key --batch < (@kencochrane) Jerome Petazzoni (@jpetazzo) docker-0.9.1/hack/infrastructure/docker-ci/0000755000175000017500000000000012314376205016731 5ustar tagtagdocker-0.9.1/hack/infrastructure/docker-ci/report/0000755000175000017500000000000012314376205020244 5ustar tagtagdocker-0.9.1/hack/infrastructure/docker-ci/report/Dockerfile0000644000175000017500000000163512314376205022243 0ustar tagtag# VERSION: 0.22 # DOCKER-VERSION 0.6.3 # AUTHOR: Daniel Mizyrycki # DESCRIPTION: Generate docker-ci daily report # COMMENTS: The build process is initiated by deployment.py Report configuration is passed through ./credentials.json at # deployment time. # TO_BUILD: docker build -t report . # TO_DEPLOY: docker run report from ubuntu:12.04 maintainer Daniel Mizyrycki env PYTHONPATH /report # Add report dependencies run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > \ /etc/apt/sources.list run apt-get update; apt-get install -y python2.7 python-pip ssh rsync # Set San Francisco timezone run echo "America/Los_Angeles" >/etc/timezone run dpkg-reconfigure --frontend noninteractive tzdata # Add report code and set default container command add . /report cmd "/report/report.py" docker-0.9.1/hack/infrastructure/docker-ci/report/deployment.py0000755000175000017500000001025412314376205023003 0ustar tagtag#!/usr/bin/env python '''Deploy docker-ci report container on Digital Ocean. 
Usage: export CONFIG_JSON=' { "DROPLET_NAME": "Digital_Ocean_dropplet_name", "DO_CLIENT_ID": "Digital_Ocean_client_id", "DO_API_KEY": "Digital_Ocean_api_key", "DOCKER_KEY_ID": "Digital_Ocean_ssh_key_id", "DOCKER_CI_KEY_PATH": "docker-ci_private_key_path", "DOCKER_CI_PUB": "$(cat docker-ci_ssh_public_key.pub)", "DOCKER_CI_ADDRESS" "user@docker-ci_fqdn_server", "SMTP_USER": "SMTP_server_user", "SMTP_PWD": "SMTP_server_password", "EMAIL_SENDER": "Buildbot_mailing_sender", "EMAIL_RCP": "Buildbot_mailing_receipient" }' python deployment.py ''' import re, json, requests, base64 from fabric import api from fabric.api import cd, run, put, sudo from os import environ as env from time import sleep from datetime import datetime # Populate environment variables CONFIG = json.loads(env['CONFIG_JSON']) for key in CONFIG: env[key] = CONFIG[key] # Load DOCKER_CI_KEY env['DOCKER_CI_KEY'] = open(env['DOCKER_CI_KEY_PATH']).read() DROPLET_NAME = env.get('DROPLET_NAME','report') TIMEOUT = 120 # Seconds before timeout droplet creation IMAGE_ID = 1004145 # Docker on Ubuntu 13.04 REGION_ID = 4 # New York 2 SIZE_ID = 66 # memory 512MB DO_IMAGE_USER = 'root' # Image user on Digital Ocean API_URL = 'https://api.digitalocean.com/' class digital_ocean(): def __init__(self, key, client): '''Set default API parameters''' self.key = key self.client = client self.api_url = API_URL def api(self, cmd_path, api_arg={}): '''Make api call''' api_arg.update({'api_key':self.key, 'client_id':self.client}) resp = requests.get(self.api_url + cmd_path, params=api_arg).text resp = json.loads(resp) if resp['status'] != 'OK': raise Exception(resp['error_message']) return resp def droplet_data(self, name): '''Get droplet data''' data = self.api('droplets') data = [droplet for droplet in data['droplets'] if droplet['name'] == name] return data[0] if data else {} def json_fmt(data): '''Format json output''' return json.dumps(data, sort_keys = True, indent = 2) do = digital_ocean(env['DO_API_KEY'], env['DO_CLIENT_ID']) # Get DROPLET_NAME data data = do.droplet_data(DROPLET_NAME) # Stop processing if DROPLET_NAME exists on Digital Ocean if data: print ('Droplet: {} already deployed. Not further processing.' .format(DROPLET_NAME)) exit(1) # Create droplet do.api('droplets/new', {'name':DROPLET_NAME, 'region_id':REGION_ID, 'image_id':IMAGE_ID, 'size_id':SIZE_ID, 'ssh_key_ids':[env['DOCKER_KEY_ID']]}) # Wait for droplet to be created. 
start_time = datetime.now() while (data.get('status','') != 'active' and ( datetime.now()-start_time).seconds < TIMEOUT): data = do.droplet_data(DROPLET_NAME) print data['status'] sleep(3) # Wait for the machine to boot sleep(15) # Get droplet IP ip = str(data['ip_address']) print 'droplet: {} ip: {}'.format(DROPLET_NAME, ip) api.env.host_string = ip api.env.user = DO_IMAGE_USER api.env.key_filename = env['DOCKER_CI_KEY_PATH'] # Correct timezone sudo('echo "America/Los_Angeles" >/etc/timezone') sudo('dpkg-reconfigure --frontend noninteractive tzdata') # Load JSON_CONFIG environment for Dockerfile CONFIG_JSON= base64.b64encode( '{{"DOCKER_CI_PUB": "{DOCKER_CI_PUB}",' ' "DOCKER_CI_KEY": "{DOCKER_CI_KEY}",' ' "DOCKER_CI_ADDRESS": "{DOCKER_CI_ADDRESS}",' ' "SMTP_USER": "{SMTP_USER}",' ' "SMTP_PWD": "{SMTP_PWD}",' ' "EMAIL_SENDER": "{EMAIL_SENDER}",' ' "EMAIL_RCP": "{EMAIL_RCP}"}}'.format(**env)) run('mkdir -p /data/report') put('./', '/data/report') with cd('/data/report'): run('chmod 700 report.py') run('echo "{}" > credentials.json'.format(CONFIG_JSON)) run('docker build -t report .') run('rm credentials.json') run("echo -e '30 09 * * * /usr/bin/docker run report\n' |" " /usr/bin/crontab -") docker-0.9.1/hack/infrastructure/docker-ci/report/report.py0000755000175000017500000001257012314376205022141 0ustar tagtag#!/usr/bin/python '''CONFIG_JSON is a json encoded string base64 environment variable. It is used to clone docker-ci database, generate docker-ci report and submit it by email. CONFIG_JSON data comes from the file /report/credentials.json inserted in this container by deployment.py: { "DOCKER_CI_PUB": "$(cat docker-ci_ssh_public_key.pub)", "DOCKER_CI_KEY": "$(cat docker-ci_ssh_private_key.key)", "DOCKER_CI_ADDRESS": "user@docker-ci_fqdn_server", "SMTP_USER": "SMTP_server_user", "SMTP_PWD": "SMTP_server_password", "EMAIL_SENDER": "Buildbot_mailing_sender", "EMAIL_RCP": "Buildbot_mailing_receipient" } ''' import os, re, json, sqlite3, datetime, base64 import smtplib from datetime import timedelta from subprocess import call from os import environ as env TODAY = datetime.date.today() # Load credentials to the environment env['CONFIG_JSON'] = base64.b64decode(open('/report/credentials.json').read()) # Remove SSH private key as it needs more processing CONFIG = json.loads(re.sub(r'("DOCKER_CI_KEY".+?"(.+?)",)','', env['CONFIG_JSON'], flags=re.DOTALL)) # Populate environment variables for key in CONFIG: env[key] = CONFIG[key] # Load SSH private key env['DOCKER_CI_KEY'] = re.sub('^.+"DOCKER_CI_KEY".+?"(.+?)".+','\\1', env['CONFIG_JSON'],flags=re.DOTALL) # Prevent rsync to validate host on first connection to docker-ci os.makedirs('/root/.ssh') open('/root/.ssh/id_rsa','w').write(env['DOCKER_CI_KEY']) os.chmod('/root/.ssh/id_rsa',0600) open('/root/.ssh/config','w').write('StrictHostKeyChecking no\n') # Sync buildbot database from docker-ci call('rsync {}:/data/buildbot/master/state.sqlite .'.format( env['DOCKER_CI_ADDRESS']), shell=True) class SQL: def __init__(self, database_name): sql = sqlite3.connect(database_name) # Use column names as keys for fetchall rows sql.row_factory = sqlite3.Row sql = sql.cursor() self.sql = sql def query(self,query_statement): return self.sql.execute(query_statement).fetchall() sql = SQL("state.sqlite") class Report(): def __init__(self,period='',date=''): self.data = [] self.period = 'date' if not period else period self.date = str(TODAY) if not date else date self.compute() def compute(self): '''Compute report''' if self.period == 'week': 
self.week_report(self.date) else: self.date_report(self.date) def date_report(self,date): '''Create a date test report''' builds = [] # Get a queryset with all builds from date rows = sql.query('SELECT * FROM builds JOIN buildrequests' ' WHERE builds.brid=buildrequests.id and' ' date(start_time, "unixepoch", "localtime") = "{0}"' ' GROUP BY number'.format(date)) build_names = sorted(set([row['buildername'] for row in rows])) # Create a report build line for a given build for build_name in build_names: tried = len([row['buildername'] for row in rows if row['buildername'] == build_name]) fail_tests = [row['buildername'] for row in rows if ( row['buildername'] == build_name and row['results'] != 0)] fail = len(fail_tests) fail_details = '' fail_pct = int(100.0*fail/tried) if tried != 0 else 100 builds.append({'name': build_name, 'tried': tried, 'fail': fail, 'fail_pct': fail_pct, 'fail_details':fail_details}) if builds: self.data.append({'date': date, 'builds': builds}) def week_report(self,date): '''Add the week's date test reports to report.data''' date = datetime.datetime.strptime(date,'%Y-%m-%d').date() last_monday = date - datetime.timedelta(days=date.weekday()) week_dates = [last_monday + timedelta(days=x) for x in range(7,-1,-1)] for date in week_dates: self.date_report(str(date)) def render_text(self): '''Return rendered report in text format''' retval = '' fail_tests = {} for builds in self.data: retval += 'Test date: {0}\n'.format(builds['date'],retval) table = '' for build in builds['builds']: table += ('Build {name:15} Tried: {tried:4} ' ' Failures: {fail:4} ({fail_pct}%)\n'.format(**build)) if build['name'] in fail_tests: fail_tests[build['name']] += build['fail_details'] else: fail_tests[build['name']] = build['fail_details'] retval += '{0}\n'.format(table) retval += '\n Builds failing' for fail_name in fail_tests: retval += '\n' + fail_name + '\n' for (fail_id,fail_url,rn_tests,nr_errors,log_errors, tracelog_errors) in fail_tests[fail_name]: retval += fail_url + '\n' retval += '\n\n' return retval # Send email smtp_from = env['EMAIL_SENDER'] subject = '[docker-ci] Daily report for {}'.format(str(TODAY)) msg = "From: {}\r\nTo: {}\r\nSubject: {}\r\n\r\n".format( smtp_from, env['EMAIL_RCP'], subject) msg = msg + Report('week').render_text() server = smtplib.SMTP_SSL('smtp.mailgun.org') server.login(env['SMTP_USER'], env['SMTP_PWD']) server.sendmail(smtp_from, env['EMAIL_RCP'], msg) docker-0.9.1/hack/infrastructure/docker-ci/Dockerfile0000644000175000017500000000246512314376205020732 0ustar tagtag# DOCKER-VERSION: 0.7.6 # AUTHOR: Daniel Mizyrycki # DESCRIPTION: docker-ci continuous integration service # TO_BUILD: docker build -rm -t docker-ci/docker-ci . 
# TO_RUN: docker run -rm -i -t -p 8000:80 -p 2222:22 -v /run:/var/socket \ # -v /data/docker-ci:/data/docker-ci docker-ci/docker-ci from ubuntu:12.04 maintainer Daniel Mizyrycki ENV DEBIAN_FRONTEND noninteractive RUN echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > \ /etc/apt/sources.list; apt-get update RUN apt-get install -y --no-install-recommends python2.7 python-dev \ libevent-dev git supervisor ssh rsync less vim sudo gcc wget nginx RUN cd /tmp; wget http://python-distribute.org/distribute_setup.py RUN cd /tmp; python distribute_setup.py; easy_install pip; rm distribute_setup.py RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 RUN echo 'deb http://get.docker.io/ubuntu docker main' > \ /etc/apt/sources.list.d/docker.list; apt-get update RUN apt-get install -y lxc-docker-0.8.0 RUN pip install SQLAlchemy==0.7.10 buildbot buildbot-slave pyopenssl boto RUN ln -s /var/socket/docker.sock /run/docker.sock ADD . /docker-ci RUN /docker-ci/setup.sh ENTRYPOINT ["supervisord", "-n"] docker-0.9.1/hack/infrastructure/docker-ci/VERSION0000644000175000017500000000000612314376205017775 0ustar tagtag0.5.6 docker-0.9.1/hack/infrastructure/docker-ci/MAINTAINERS0000644000175000017500000000006312314376205020425 0ustar tagtagDaniel Mizyrycki (@mzdaniel) docker-0.9.1/hack/infrastructure/docker-ci/dcr/0000755000175000017500000000000012314376205017501 5ustar tagtagdocker-0.9.1/hack/infrastructure/docker-ci/dcr/prod/0000755000175000017500000000000012314376205020445 5ustar tagtagdocker-0.9.1/hack/infrastructure/docker-ci/dcr/prod/settings.yml0000644000175000017500000000014212314376205023025 0ustar tagtagdefault: hipaches: ['192.168.100.67:6379'] daemons: ['192.168.100.67:4243'] use_ssh: False docker-0.9.1/hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml0000644000175000017500000000105212314376205023026 0ustar tagtagdocker-ci: image: "docker-ci/docker-ci" release_name: "docker-ci-0.5.6" ports: ["80","2222:22","8011:8011"] register: "80" volumes: ["/run:/var/socket","/home/docker-ci:/data/docker-ci"] command: [] env: - "DEPLOYMENT=production" - "IRC_CHANNEL=docker-testing" - "BACKUP_BUCKET=backup-ci" - "$WEB_USER" - "$WEB_IRC_PWD" - "$BUILDBOT_PWD" - "$AWS_ACCESS_KEY" - "$AWS_SECRET_KEY" - "$GPG_PASSPHRASE" - "$BACKUP_AWS_ID" - "$BACKUP_AWS_SECRET" - "$SMTP_USER" - "$SMTP_PWD" - "$EMAIL_RCP" docker-0.9.1/hack/infrastructure/docker-ci/dcr/stage/0000755000175000017500000000000012314376205020604 5ustar tagtagdocker-0.9.1/hack/infrastructure/docker-ci/dcr/stage/settings.yml0000644000175000017500000000014212314376205023164 0ustar tagtagdefault: hipaches: ['192.168.100.65:6379'] daemons: ['192.168.100.65:4243'] use_ssh: False docker-0.9.1/hack/infrastructure/docker-ci/dcr/stage/docker-ci.yml0000644000175000017500000000106512314376205023171 0ustar tagtagdocker-ci: image: "docker-ci/docker-ci" release_name: "docker-ci-stage" ports: ["80","2222:22","8011:8011"] register: "80" volumes: ["/run:/var/socket","/home/docker-ci:/data/docker-ci"] command: [] env: - "DEPLOYMENT=staging" - "IRC_CHANNEL=docker-testing-staging" - "BACKUP_BUCKET=ci-backup-stage" - "$BACKUP_AWS_ID" - "$BACKUP_AWS_SECRET" - "$WEB_USER" - "$WEB_IRC_PWD" - "$BUILDBOT_PWD" - "$AWS_ACCESS_KEY" - "$AWS_SECRET_KEY" - "$GPG_PASSPHRASE" - "$SMTP_USER" - "$SMTP_PWD" - "$EMAIL_RCP" docker-0.9.1/hack/infrastructure/docker-ci/dockertest/0000755000175000017500000000000012314376205021100 5ustar 
tagtagdocker-0.9.1/hack/infrastructure/docker-ci/dockertest/docker-registry0000777000175000017500000000000012314376205025526 2projectustar tagtagdocker-0.9.1/hack/infrastructure/docker-ci/dockertest/project0000755000175000017500000000034612314376205022477 0ustar tagtag#!/usr/bin/env bash set -x PROJECT_NAME=$(basename $0) docker run -rm -u sysadmin -e DEPLOYMENT=$DEPLOYMENT -v /run:/var/socket \ -v /home/docker-ci/coverage/$PROJECT_NAME:/data docker-ci/testbuilder $PROJECT_NAME $1 $2 $3 docker-0.9.1/hack/infrastructure/docker-ci/dockertest/docker0000777000175000017500000000000012314376205023660 2projectustar tagtagdocker-0.9.1/hack/infrastructure/docker-ci/dockertest/nightlyrelease0000755000175000017500000000065112314376205024047 0ustar tagtag#!/usr/bin/env bash if [ "$DEPLOYMENT" == "production" ]; then AWS_S3_BUCKET='test.docker.io' else AWS_S3_BUCKET='get-staging.docker.io' fi docker run -rm -privileged -v /run:/var/socket \ -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY \ -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE \ -e DOCKER_RELEASE=1 -e DEPLOYMENT=$DEPLOYMENT docker-ci/testbuilder docker docker-0.9.1/hack/infrastructure/docker-ci/buildbot/0000755000175000017500000000000012314376205020535 5ustar tagtagdocker-0.9.1/hack/infrastructure/docker-ci/buildbot/master.cfg0000644000175000017500000001507212314376205022516 0ustar tagtagimport os, re from buildbot.buildslave import BuildSlave from buildbot.schedulers.forcesched import ForceScheduler from buildbot.schedulers.basic import SingleBranchScheduler from buildbot.schedulers.timed import Nightly from buildbot.changes import filter from buildbot.config import BuilderConfig from buildbot.process.factory import BuildFactory from buildbot.process.properties import Property from buildbot.steps.shell import ShellCommand from buildbot.status import html, words from buildbot.status.web import authz, auth from buildbot.status.mail import MailNotifier def ENV(x): '''Promote an environment variable for global use returning its value''' retval = os.environ.get(x, '') globals()[x] = retval return retval class TestCommand(ShellCommand): '''Extend ShellCommand with optional summary logs''' def __init__(self, *args, **kwargs): super(TestCommand, self).__init__(*args, **kwargs) def createSummary(self, log): exit_status = re.sub(r'.+\n\+ exit (\d+).+', r'\1', log.getText()[-100:], flags=re.DOTALL) if exit_status != '0': return # Infer coverage path from log if '+ COVERAGE_PATH' in log.getText(): path = re.sub(r'.+\+ COVERAGE_PATH=((.+?)-\d+).+', r'\2/\1', log.getText(), flags=re.DOTALL) url = '{}coverage/{}/index.html'.format(c['buildbotURL'], path) self.addURL('coverage', url) elif 'COVERAGE_FILE' in log.getText(): path = re.sub(r'.+\+ COVERAGE_FILE=((.+?)-\d+).+', r'\2/\1', log.getText(), flags=re.DOTALL) url = '{}coverage/{}/index.html'.format(c['buildbotURL'], path) self.addURL('coverage', url) PORT_WEB = 8000 # Buildbot webserver port PORT_GITHUB = 8011 # Buildbot github hook port PORT_MASTER = 9989 # Port where buildbot master listen buildworkers BUILDBOT_URL = '//localhost:{}/'.format(PORT_WEB) DOCKER_REPO = 'https://github.com/docker-test/docker' DOCKER_TEST_ARGV = 'HEAD {}'.format(DOCKER_REPO) REGISTRY_REPO = 'https://github.com/docker-test/docker-registry' REGISTRY_TEST_ARGV = 'HEAD {}'.format(REGISTRY_REPO) if ENV('DEPLOYMENT') == 'staging': BUILDBOT_URL = "//docker-ci-stage.docker.io/" if ENV('DEPLOYMENT') == 'production': BUILDBOT_URL = '//docker-ci.docker.io/' DOCKER_REPO = 
'https://github.com/dotcloud/docker' DOCKER_TEST_ARGV = '' REGISTRY_REPO = 'https://github.com/dotcloud/docker-registry' REGISTRY_TEST_ARGV = '' # Credentials set by setup.sh from deployment.py ENV('WEB_USER') ENV('WEB_IRC_PWD') ENV('BUILDBOT_PWD') ENV('SMTP_USER') ENV('SMTP_PWD') ENV('EMAIL_RCP') ENV('IRC_CHANNEL') c = BuildmasterConfig = {} c['title'] = "docker-ci" c['titleURL'] = "waterfall" c['buildbotURL'] = BUILDBOT_URL c['db'] = {'db_url':"sqlite:///state.sqlite"} c['slaves'] = [BuildSlave('buildworker', BUILDBOT_PWD)] c['slavePortnum'] = PORT_MASTER # Schedulers c['schedulers'] = [ForceScheduler(name='trigger', builderNames=[ 'docker', 'docker-registry', 'nightlyrelease', 'backup'])] c['schedulers'] += [SingleBranchScheduler(name="docker", treeStableTimer=None, change_filter=filter.ChangeFilter(branch='master', repository=DOCKER_REPO), builderNames=['docker'])] c['schedulers'] += [SingleBranchScheduler(name="registry", treeStableTimer=None, change_filter=filter.ChangeFilter(branch='master', repository=REGISTRY_REPO), builderNames=['docker-registry'])] c['schedulers'] += [SingleBranchScheduler(name='docker-pr', treeStableTimer=None, change_filter=filter.ChangeFilter(category='github_pullrequest', project='docker'), builderNames=['docker-pr'])] c['schedulers'] += [SingleBranchScheduler(name='docker-registry-pr', treeStableTimer=None, change_filter=filter.ChangeFilter(category='github_pullrequest', project='docker-registry'), builderNames=['docker-registry-pr'])] c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=[ 'nightlyrelease', 'backup'], hour=7, minute=00)] # Builders # Backup factory = BuildFactory() factory.addStep(TestCommand(description='backup', logEnviron=False, usePTY=True, command='/docker-ci/tool/backup.py')) c['builders'] = [BuilderConfig(name='backup',slavenames=['buildworker'], factory=factory)] # Docker test factory = BuildFactory() factory.addStep(TestCommand(description='docker', logEnviron=False, usePTY=True, command='/docker-ci/dockertest/docker {}'.format(DOCKER_TEST_ARGV))) c['builders'] += [BuilderConfig(name='docker',slavenames=['buildworker'], factory=factory)] # Docker pull request test factory = BuildFactory() factory.addStep(TestCommand(description='docker-pr', logEnviron=False, usePTY=True, command=['/docker-ci/dockertest/docker', Property('revision'), Property('repository'), Property('branch')])) c['builders'] += [BuilderConfig(name='docker-pr',slavenames=['buildworker'], factory=factory)] # docker-registry test factory = BuildFactory() factory.addStep(TestCommand(description='docker-registry', logEnviron=False, usePTY=True, command='/docker-ci/dockertest/docker-registry {}'.format(REGISTRY_TEST_ARGV))) c['builders'] += [BuilderConfig(name='docker-registry',slavenames=['buildworker'], factory=factory)] # Docker registry pull request test factory = BuildFactory() factory.addStep(TestCommand(description='docker-registry-pr', logEnviron=False, usePTY=True, command=['/docker-ci/dockertest/docker-registry', Property('revision'), Property('repository'), Property('branch')])) c['builders'] += [BuilderConfig(name='docker-registry-pr',slavenames=['buildworker'], factory=factory)] # Docker nightly release factory = BuildFactory() factory.addStep(ShellCommand(description='NightlyRelease',logEnviron=False, usePTY=True, command=['/docker-ci/dockertest/nightlyrelease'])) c['builders'] += [BuilderConfig(name='nightlyrelease',slavenames=['buildworker'], factory=factory)] # Status authz_cfg = authz.Authz(auth=auth.BasicAuth([(WEB_USER, WEB_IRC_PWD)]), 
forceBuild='auth') c['status'] = [html.WebStatus(http_port=PORT_WEB, authz=authz_cfg)] c['status'].append(html.WebStatus(http_port=PORT_GITHUB, allowForce=True, change_hook_dialects={ 'github': True })) c['status'].append(MailNotifier(fromaddr='docker-test@docker.io', sendToInterestedUsers=False, extraRecipients=[EMAIL_RCP], mode='failing', relayhost='smtp.mailgun.org', smtpPort=587, useTls=True, smtpUser=SMTP_USER, smtpPassword=SMTP_PWD)) c['status'].append(words.IRC("irc.freenode.net", "dockerqabot", channels=[IRC_CHANNEL], password=WEB_IRC_PWD, allowForce=True, notify_events={'exception':1, 'successToFailure':1, 'failureToSuccess':1})) docker-0.9.1/hack/infrastructure/docker-ci/buildbot/github.py0000644000175000017500000001557212314376205022403 0ustar tagtag# This file is part of Buildbot. Buildbot is free software: you can # redistribute it and/or modify it under the terms of the GNU General Public # License as published by the Free Software Foundation, version 2. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Copyright Buildbot Team Members #!/usr/bin/env python """ github_buildbot.py is based on git_buildbot.py github_buildbot.py will determine the repository information from the JSON HTTP POST it receives from github.com and build the appropriate repository. If your github repository is private, you must add a ssh key to the github repository for the user who initiated the build on the buildslave. 
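In the docker-ci deployment this module is installed by setup.sh in place of Buildbot's stock github webstatus hook and is served by the WebStatus instance listening on PORT_GITHUB (8011, see buildbot/master.cfg); pull-request events are tagged with the 'github_pullrequest' category so the matching schedulers in master.cfg can pick them up.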
""" import re import datetime from twisted.python import log import calendar try: import json assert json except ImportError: import simplejson as json # python is silly about how it handles timezones class fixedOffset(datetime.tzinfo): """ fixed offset timezone """ def __init__(self, minutes, hours, offsetSign = 1): self.minutes = int(minutes) * offsetSign self.hours = int(hours) * offsetSign self.offset = datetime.timedelta(minutes = self.minutes, hours = self.hours) def utcoffset(self, dt): return self.offset def dst(self, dt): return datetime.timedelta(0) def convertTime(myTestTimestamp): #"1970-01-01T00:00:00+00:00" # Normalize myTestTimestamp if myTestTimestamp[-1] == 'Z': myTestTimestamp = myTestTimestamp[:-1] + '-00:00' matcher = re.compile(r'(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)([-+])(\d\d):(\d\d)') result = matcher.match(myTestTimestamp) (year, month, day, hour, minute, second, offsetsign, houroffset, minoffset) = \ result.groups() if offsetsign == '+': offsetsign = 1 else: offsetsign = -1 offsetTimezone = fixedOffset( minoffset, houroffset, offsetsign ) myDatetime = datetime.datetime( int(year), int(month), int(day), int(hour), int(minute), int(second), 0, offsetTimezone) return calendar.timegm( myDatetime.utctimetuple() ) def getChanges(request, options = None): """ Reponds only to POST events and starts the build process :arguments: request the http request object """ payload = json.loads(request.args['payload'][0]) import urllib,datetime fname = str(datetime.datetime.now()).replace(' ','_').replace(':','-')[:19] # Github event debug # open('github_{0}.json'.format(fname),'w').write(json.dumps(json.loads(urllib.unquote(request.args['payload'][0])), sort_keys = True, indent = 2)) if 'pull_request' in payload: user = payload['pull_request']['user']['login'] repo = payload['pull_request']['head']['repo']['name'] repo_url = payload['pull_request']['head']['repo']['html_url'] else: user = payload['repository']['owner']['name'] repo = payload['repository']['name'] repo_url = payload['repository']['url'] project = request.args.get('project', None) if project: project = project[0] elif project is None: project = '' # This field is unused: #private = payload['repository']['private'] changes = process_change(payload, user, repo, repo_url, project) log.msg("Received %s changes from github" % len(changes)) return (changes, 'git') def process_change(payload, user, repo, repo_url, project): """ Consumes the JSON as a python object and actually starts the build. :arguments: payload Python Object that represents the JSON sent by GitHub Service Hook. """ changes = [] newrev = payload['after'] if 'after' in payload else payload['pull_request']['head']['sha'] refname = payload['ref'] if 'ref' in payload else payload['pull_request']['head']['ref'] # We only care about regular heads, i.e. 
branches match = re.match(r"^(refs\/heads\/|)([^/]+)$", refname) if not match: log.msg("Ignoring refname `%s': Not a branch" % refname) return [] branch = match.groups()[1] if re.match(r"^0*$", newrev): log.msg("Branch `%s' deleted, ignoring" % branch) return [] else: if 'pull_request' in payload: if payload['action'] == 'closed': log.msg("PR#{} closed, ignoring".format(payload['number'])) return [] changes = [{ 'category' : 'github_pullrequest', 'who' : '{0} - PR#{1}'.format(user,payload['number']), 'files' : [], 'comments' : payload['pull_request']['title'], 'revision' : newrev, 'when' : convertTime(payload['pull_request']['updated_at']), 'branch' : branch, 'revlink' : '{0}/commit/{1}'.format(repo_url,newrev), 'repository' : repo_url, 'project' : project }] return changes for commit in payload['commits']: files = [] if 'added' in commit: files.extend(commit['added']) if 'modified' in commit: files.extend(commit['modified']) if 'removed' in commit: files.extend(commit['removed']) when = convertTime( commit['timestamp']) log.msg("New revision: %s" % commit['id'][:8]) chdict = dict( who = commit['author']['name'] + " <" + commit['author']['email'] + ">", files = files, comments = commit['message'], revision = commit['id'], when = when, branch = branch, revlink = commit['url'], repository = repo_url, project = project) changes.append(chdict) return changes docker-0.9.1/hack/infrastructure/docker-ci/testbuilder/0000755000175000017500000000000012314376205021257 5ustar tagtagdocker-0.9.1/hack/infrastructure/docker-ci/testbuilder/Dockerfile0000644000175000017500000000051312314376205023250 0ustar tagtag# TO_BUILD: docker build -rm -no-cache -t docker-ci/testbuilder . # TO_RUN: docker run -rm -u sysadmin \ # -v /run:/var/socket docker-ci/testbuilder docker-registry # FROM docker-ci/docker-ci ENV HOME /home/sysadmin RUN mkdir /testbuilder ADD . /testbuilder ENTRYPOINT ["/testbuilder/testbuilder.sh"] docker-0.9.1/hack/infrastructure/docker-ci/testbuilder/docker.sh0000755000175000017500000000145512314376205023072 0ustar tagtag#!/usr/bin/env bash set -x set -e PROJECT_PATH=$1 # Build the docker project cd /data/$PROJECT_PATH sg docker -c "docker build -q -rm -t docker ." if [ "$DOCKER_RELEASE" == "1" ]; then # Do nightly release echo sg docker -c "docker run -rm -privileged -v /run:/var/socket -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY= -e AWS_SECRET_KEY= -e GPG_PASSPHRASE= docker hack/release.sh" set +x sg docker -c "docker run -rm -privileged -v /run:/var/socket -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE docker hack/release.sh" else # Run the tests sg docker -c "docker run -rm -privileged -v /home/docker-ci/coverage/docker:/data docker ./hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh" fi docker-0.9.1/hack/infrastructure/docker-ci/testbuilder/docker-registry.sh0000755000175000017500000000052212314376205024732 0ustar tagtag#!/usr/bin/env bash set -x set -e PROJECT_PATH=$1 # Build the docker project cd /data/$PROJECT_PATH sg docker -c "docker build -q -rm -t registry ." cd test; sg docker -c "docker build -q -rm -t docker-registry-test ." 
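# The test run below mounts /home/docker-ci/coverage/docker-registry from the
# host, so the coverage output survives the container and is published by
# nginx under /coverage (see docker-ci/nginx/nginx.conf).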
# Run the tests sg docker -c "docker run -rm -v /home/docker-ci/coverage/docker-registry:/data docker-registry-test" docker-0.9.1/hack/infrastructure/docker-ci/testbuilder/testbuilder.sh0000755000175000017500000000170212314376205024144 0ustar tagtag#!/usr/bin/env bash # Download, build and run a docker project tests # Environment variables: DEPLOYMENT cat $0 set -e set -x PROJECT=$1 COMMIT=${2-HEAD} REPO=${3-https://github.com/dotcloud/$PROJECT} BRANCH=${4-master} REPO_PROJ="https://github.com/docker-test/$PROJECT" if [ "$DEPLOYMENT" == "production" ]; then REPO_PROJ="https://github.com/dotcloud/$PROJECT" fi set +x # Generate a random string of $1 characters function random { cat /dev/urandom | tr -cd 'a-f0-9' | head -c $1 } PROJECT_PATH="$PROJECT-tmp-$(random 12)" # Set docker-test git user set -x git config --global user.email "docker-test@docker.io" git config --global user.name "docker-test" # Fetch project git clone -q $REPO_PROJ -b master /data/$PROJECT_PATH cd /data/$PROJECT_PATH echo "Git commit: $(git rev-parse HEAD)" git fetch -q $REPO $BRANCH git merge --no-edit $COMMIT # Build the project dockertest /testbuilder/$PROJECT.sh $PROJECT_PATH rm -rf /data/$PROJECT_PATH docker-0.9.1/hack/infrastructure/docker-ci/docker-coverage/0000755000175000017500000000000012314376205021771 5ustar tagtagdocker-0.9.1/hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh0000755000175000017500000000323212314376205024451 0ustar tagtag#!/bin/bash export PATH='/go/bin':$PATH export DOCKER_PATH='/go/src/github.com/dotcloud/docker' # Signal coverage report name, parsed by docker-ci set -x COVERAGE_PATH=$(date +"docker-%Y%m%d%H%M%S") set +x REPORTS="/data/$COVERAGE_PATH" INDEX="$REPORTS/index.html" # Test docker cd $DOCKER_PATH ./hack/make.sh test; exit_status=$? PROFILE_PATH="$(ls -d $DOCKER_PATH/bundles/* | sed -n '$ p')/test/coverprofiles" if [ "$exit_status" -eq "0" ]; then # Download coverage dependencies go get github.com/axw/gocov/gocov go get -u github.com/matm/gocov-html # Create coverage report mkdir -p $REPORTS cd $PROFILE_PATH cat > $INDEX << "EOF" Docker Coverage Report

</title><body>
<h1>Docker Coverage Report</h1>
<table border="1"><tr><th>package</th><th>pct</th></tr>
EOF
for profile in *; do
    gocov convert $profile | gocov-html >$REPORTS/$profile.html
    echo "<tr><td><a href=\"${profile}.html\">$profile</a></td><td>" >> $INDEX
    go tool cover -func=$profile | sed -En '$ s/.+\t(.+)/\1/p' >> $INDEX
    echo "</td></tr>" >> $INDEX
done
echo "</table></body></html>
" >> $INDEX fi # Signal test and coverage result, parsed by docker-ci set -x exit $exit_status docker-0.9.1/hack/infrastructure/docker-ci/nginx/0000755000175000017500000000000012314376205020054 5ustar tagtagdocker-0.9.1/hack/infrastructure/docker-ci/nginx/nginx.conf0000644000175000017500000000030212314376205022041 0ustar tagtagserver { listen 80; root /data/docker-ci; location / { proxy_pass http://localhost:8000/; } location /coverage { root /data/docker-ci; } } docker-0.9.1/hack/infrastructure/docker-ci/setup.sh0000755000175000017500000000477212314376205020442 0ustar tagtag#!/usr/bin/env bash # Set timezone echo "GMT" >/etc/timezone dpkg-reconfigure --frontend noninteractive tzdata # Set ssh superuser mkdir -p /data/buildbot /var/run/sshd /run useradd -m -d /home/sysadmin -s /bin/bash -G sudo,docker -p '*' sysadmin sed -Ei 's/(\%sudo.*) ALL/\1 NOPASSWD:ALL/' /etc/sudoers cd /home/sysadmin mkdir .ssh chmod 700 .ssh cat > .ssh/authorized_keys << 'EOF' ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7ALVhwQ68q1SjrKaAduOuOEAcWmb8kDZf5qA7T1fM8AP07EDC7nSKRJ8PXUBGTOQfxm89coJDuSJsTAZ+1PvglXhA0Mq6+knc6ZrZY+SuZlDIDAk4TOdVPoDZnmR1YW2McxHkhcGIOKeC8MMig5NeEjtgQwXzauUSPqeh8HMlLZRMooFYyyluIpn7NaCLzyWjwAQz2s3KyI7VE7hl+ncCrW86v+dciEdwqtzNoUMFb3iDpPxaiCl3rv+SB7co/5eUDTs1FZvUcYMXKQuf8R+2ZKzXOpwr0Zs8sKQXvXavCeWykwGgXLBjVkvrDcHuDD6UXCW63UKgmRECpLZaMBVIIRWLEEgTS5OSQTcxpMVe5zUW6sDvXHTcdPwWrcn1dE9F/0vLC0HJ4ADKelLX5zyTpmXGbuZuntIf1JO67D/K/P++uV1rmVIH+zgtOf23w5rX2zKb4BSTqP0sv61pmWV7MEVoEz6yXswcTjS92tb775v7XLU9vKAkt042ORFdE4/++hejhL/Lj52IRgjt1CJZHZsR9JywJZrz3kYuf8eU2J2FYh0Cpz5gmf0f+12Rt4HztnZxGPP4KuMa66e4+hpx1jynjMZ7D5QUnNYEmuvJByopn8HSluuY/kS5MMyZCZtJLEPGX4+yECX0Di/S0vCRl2NyqfCBqS+yXXT5SA1nFw== docker-test@docker.io EOF chmod 600 .ssh/authorized_keys chown -R sysadmin .ssh # Fix docker group id for use of host dockerd by sysadmin sed -Ei 's/(docker:x:)[^:]+/\1999/' /etc/group # Create buildbot configuration cd /data/buildbot; buildbot create-master master cp -a /data/buildbot/master/master.cfg.sample \ /data/buildbot/master/master.cfg cd /data/buildbot; \ buildslave create-slave slave localhost:9989 buildworker pass cp /docker-ci/buildbot/master.cfg /data/buildbot/master # Patch github webstatus to capture pull requests cp /docker-ci/buildbot/github.py /usr/local/lib/python2.7/dist-packages/buildbot/status/web/hooks chown -R sysadmin.sysadmin /data # Create nginx configuration rm /etc/nginx/sites-enabled/default cp /docker-ci/nginx/nginx.conf /etc/nginx/conf.d/buildbot.conf /bin/echo -e '\ndaemon off;\n' >> /etc/nginx/nginx.conf # Set supervisord buildbot, nginx and sshd processes /bin/echo -e "\ [program:buildmaster]\n\ command=twistd --nodaemon --no_save -y buildbot.tac\n\ directory=/data/buildbot/master\n\ user=sysadmin\n\n\ [program:buildworker]\n\ command=twistd --nodaemon --no_save -y buildbot.tac\n\ directory=/data/buildbot/slave\n\ user=sysadmin\n" > \ /etc/supervisor/conf.d/buildbot.conf /bin/echo -e "[program:nginx]\ncommand=/usr/sbin/nginx\n" > \ /etc/supervisor/conf.d/nginx.conf /bin/echo -e "[program:sshd]\ncommand=/usr/sbin/sshd -D\n" > \ /etc/supervisor/conf.d/sshd.conf docker-0.9.1/hack/infrastructure/docker-ci/functionaltests/0000755000175000017500000000000012314376205022156 5ustar tagtagdocker-0.9.1/hack/infrastructure/docker-ci/functionaltests/test_index.py0000755000175000017500000000423512314376205024705 0ustar tagtag#!/usr/bin/python import os username, password = os.environ['DOCKER_CREDS'].split(':') from selenium import webdriver from selenium.webdriver.common.by import By from 
selenium.webdriver.common.keys import Keys from selenium.webdriver.support.ui import Select from selenium.common.exceptions import NoSuchElementException import unittest, time, re class Docker(unittest.TestCase): def setUp(self): self.driver = webdriver.PhantomJS() self.driver.implicitly_wait(30) self.base_url = "http://www.docker.io/" self.verificationErrors = [] self.accept_next_alert = True def test_docker(self): driver = self.driver print "Login into {0} as login user {1} ...".format(self.base_url,username) driver.get(self.base_url + "/") driver.find_element_by_link_text("INDEX").click() driver.find_element_by_link_text("login").click() driver.find_element_by_id("id_username").send_keys(username) driver.find_element_by_id("id_password").send_keys(password) print "Checking login user ..." driver.find_element_by_css_selector("input[type=\"submit\"]").click() try: self.assertEqual("test", driver.find_element_by_css_selector("h3").text) except AssertionError as e: self.verificationErrors.append(str(e)) print "Login user {0} found".format(username) def is_element_present(self, how, what): try: self.driver.find_element(by=how, value=what) except NoSuchElementException, e: return False return True def is_alert_present(self): try: self.driver.switch_to_alert() except NoAlertPresentException, e: return False return True def close_alert_and_get_its_text(self): try: alert = self.driver.switch_to_alert() alert_text = alert.text if self.accept_next_alert: alert.accept() else: alert.dismiss() return alert_text finally: self.accept_next_alert = True def tearDown(self): self.driver.quit() self.assertEqual([], self.verificationErrors) if __name__ == "__main__": unittest.main() docker-0.9.1/hack/infrastructure/docker-ci/functionaltests/test_registry.sh0000755000175000017500000000116412314376205025426 0ustar tagtag#!/bin/sh set -x # Cleanup rm -rf docker-registry # Setup the environment export SETTINGS_FLAVOR=test export DOCKER_REGISTRY_CONFIG=config_test.yml export PYTHONPATH=$(pwd)/docker-registry/test # Get latest docker registry git clone -q https://github.com/dotcloud/docker-registry.git cd docker-registry sed -Ei "s#(boto_bucket: ).+#\1_env:S3_BUCKET#" config_test.yml # Get dependencies pip install -q -r requirements.txt pip install -q -r test-requirements.txt pip install -q tox # Run registry tests tox || exit 1 python -m unittest discover -p s3.py -s test || exit 1 python -m unittest discover -p workflow.py -s test docker-0.9.1/hack/infrastructure/docker-ci/README.rst0000644000175000017500000000521412314376205020422 0ustar tagtag========= docker-ci ========= This directory contains docker-ci continuous integration system. As expected, it is a fully dockerized and deployed using docker-container-runner. docker-ci is based on Buildbot, a continuous integration system designed to automate the build/test cycle. By automatically rebuilding and testing the tree each time something has changed, build problems are pinpointed quickly, before other developers are inconvenienced by the failure. We are running buildbot at Rackspace to verify docker and docker-registry pass tests, and check for coverage code details. 
docker-ci instance is at https://docker-ci.docker.io/waterfall Inside docker-ci container we have the following directory structure: /docker-ci source code of docker-ci /data/backup/docker-ci/ daily backup (replicated over S3) /data/docker-ci/coverage/{docker,docker-registry}/ mapped to host volumes /data/buildbot/{master,slave}/ main docker-ci buildbot config and database /var/socket/{docker.sock} host volume access to docker socket Production deployment ===================== :: # Clone docker-ci repository git clone https://github.com/dotcloud/docker cd docker/hack/infrastructure/docker-ci export DOCKER_PROD=[PRODUCTION_SERVER_IP] # Create data host volume. (only once) docker -H $DOCKER_PROD run -v /home:/data ubuntu:12.04 \ mkdir -p /data/docker-ci/coverage/docker docker -H $DOCKER_PROD run -v /home:/data ubuntu:12.04 \ mkdir -p /data/docker-ci/coverage/docker-registry docker -H $DOCKER_PROD run -v /home:/data ubuntu:12.04 \ chown -R 1000.1000 /data/docker-ci # dcr deployment. Define credentials and special environment dcr variables # ( retrieved at /hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml ) export WEB_USER=[DOCKER-CI-WEBSITE-USERNAME] export WEB_IRC_PWD=[DOCKER-CI-WEBSITE-PASSWORD] export BUILDBOT_PWD=[BUILDSLAVE_PASSWORD] export AWS_ACCESS_KEY=[DOCKER_RELEASE_S3_ACCESS] export AWS_SECRET_KEY=[DOCKER_RELEASE_S3_SECRET] export GPG_PASSPHRASE=[DOCKER_RELEASE_PASSPHRASE] export BACKUP_AWS_ID=[S3_BUCKET_CREDENTIAL_ACCESS] export BACKUP_AWS_SECRET=[S3_BUCKET_CREDENTIAL_SECRET] export SMTP_USER=[MAILGUN_SMTP_USERNAME] export SMTP_PWD=[MAILGUN_SMTP_PASSWORD] export EMAIL_RCP=[EMAIL_FOR_BUILD_ERRORS] # Build docker-ci and testbuilder docker images docker -H $DOCKER_PROD build -rm -t docker-ci/docker-ci . (cd testbuilder; docker -H $DOCKER_PROD build -rm -t docker-ci/testbuilder .) # Run docker-ci container ( assuming no previous container running ) (cd dcr/prod; dcr docker-ci.yml start) (cd dcr/prod; dcr docker-ci.yml register docker-ci.docker.io) docker-0.9.1/hack/infrastructure/docker-ci/tool/0000755000175000017500000000000012314376205017706 5ustar tagtagdocker-0.9.1/hack/infrastructure/docker-ci/tool/backup.py0000755000175000017500000000302212314376205021525 0ustar tagtag#!/usr/bin/env python import os,sys,json from datetime import datetime from filecmp import cmp from subprocess import check_call from boto.s3.key import Key from boto.s3.connection import S3Connection def ENV(x): '''Promote an environment variable for global use returning its value''' retval = os.environ.get(x, '') globals()[x] = retval return retval ROOT_PATH = '/data/backup/docker-ci' TODAY = str(datetime.today())[:10] BACKUP_FILE = '{}/docker-ci_{}.tgz'.format(ROOT_PATH, TODAY) BACKUP_LINK = '{}/docker-ci.tgz'.format(ROOT_PATH) ENV('BACKUP_BUCKET') ENV('BACKUP_AWS_ID') ENV('BACKUP_AWS_SECRET') '''Create full master buildbot backup, avoiding duplicates''' # Ensure backup path exist if not os.path.exists(ROOT_PATH): os.makedirs(ROOT_PATH) # Make actual backups check_call('/bin/tar czf {} -C /data --exclude=backup --exclude=buildbot/slave' ' . 
1>/dev/null 2>&1'.format(BACKUP_FILE),shell=True) # remove previous dump if it is the same as the latest if (os.path.exists(BACKUP_LINK) and cmp(BACKUP_FILE, BACKUP_LINK) and os.path._resolve_link(BACKUP_LINK) != BACKUP_FILE): os.unlink(os.path._resolve_link(BACKUP_LINK)) # Recreate backup link pointing to latest backup try: os.unlink(BACKUP_LINK) except: pass os.symlink(BACKUP_FILE, BACKUP_LINK) # Make backup on S3 bucket = S3Connection(BACKUP_AWS_ID,BACKUP_AWS_SECRET).get_bucket(BACKUP_BUCKET) k = Key(bucket) k.key = BACKUP_FILE k.set_contents_from_filename(BACKUP_FILE) bucket.copy_key(os.path.basename(BACKUP_LINK),BACKUP_BUCKET,BACKUP_FILE[1:]) docker-0.9.1/hack/infrastructure/README.md0000644000175000017500000000751212314376205016355 0ustar tagtag# Docker project infrastructure This is an overview of the Docker infrastructure. **Note: obviously, credentials should not be stored in this repository.** However, when there are credentials, we should list how to obtain them (e.g. who has them). ## Providers This should be the list of all the entities providing some kind of infrastructure service to the Docker project (either for free, or paid by dotCloud). Provider | Service --------------|------------------------------------------------- AWS | packages (S3 bucket), dotCloud PAAS, dev-env, ci CloudFlare | cdn Digital Ocean | ci dotCloud PAAS | website, index, registry, ssl, blog DynECT | dns (docker.io) GitHub | repository Linode | stackbrew Mailgun | outgoing e-mail ReadTheDocs | docs *Ordered-by: lexicographic* ## URLs This should be the list of all the infrastructure-related URLs and which service is handling them. URL | Service ---------------------------------------------|--------------------------------- http://blog.docker.io/ | blog *http://cdn-registry-1.docker.io/ | registry (pull) http://debug.docker.io/ | debug tool http://docs.docker.io/ | docsproxy (proxy to readthedocs) http://docker-ci.dotcloud.com/ | ci http://docker.io/ | redirect to www.docker.io (dynect) http://docker.readthedocs.org/ | docs *http://get.docker.io/ | packages https://github.com/dotcloud/docker | repository *https://index.docker.io/ | index http://registry-1.docker.io/ | registry (push) http://staging-docker-ci.dotcloud.com/ | ci *http://test.docker.io/ | packages *http://www.docker.io/ | website http://? (internal URL, not for public use) | stackbrew *Ordered-by: lexicographic* **Note:** an asterisk in front of the URL means that it is cached by CloudFlare. ## Services This should be the list of all services referenced above. 
Service | Maintainer(s) | How to update | Source --------------------|----------------------------|------------------|------- blog | [@jbarbier] | dotcloud push | https://github.com/dotcloud/blog.docker.io cdn | [@jpetazzo][] [@samalba][] | cloudflare panel | N/A ci | [@mzdaniel] | See [docker-ci] | See [docker-ci] docs | [@metalivedev] | github webhook | docker repo docsproxy | [@dhrp] | dotcloud push | https://github.com/dotcloud/docker-docs-dotcloud-proxy index | [@kencochrane] | dotcloud push | private packages | [@jpetazzo] | hack/release | docker repo registry | [@samalba] | dotcloud push | https://github.com/dotcloud/docker-registry repository (github) | N/A | N/A | N/A ssl (dotcloud) | [@jpetazzo] | dotcloud ops | N/A ssl (cloudflare) | [@jpetazzo] | cloudflare panel | N/A stackbrew | [@shin-] | manual | https://github.com/dotcloud/stackbrew/stackbrew website | [@dhrp] | dotcloud push | https://github.com/dotcloud/www.docker.io *Ordered-by: lexicographic* [docker-ci]: docker-ci.rst [@dhrp]: https://github.com/dhrp [@jbarbier]: https://github.com/jbarbier [@jpetazzo]: https://github.com/jpetazzo [@kencochrane]: https://github.com/kencochrane [@metalivedev]: https://github.com/metalivedev [@mzdaniel]: https://github.com/mzdaniel [@samalba]: https://github.com/samalba [@shin-]: https://github.com/shin- docker-0.9.1/hack/install.sh0000755000175000017500000001222412314376205014017 0ustar tagtag#!/bin/sh set -e # # This script is meant for quick & easy install via: # 'curl -sL https://get.docker.io/ | sh' # or: # 'wget -qO- https://get.docker.io/ | sh' # # # Docker Maintainers: # To update this script on https://get.docker.io, # use hack/release.sh during a normal release, # or the following one-liner for script hotfixes: # s3cmd put --acl-public -P hack/install.sh s3://get.docker.io/index # url='https://get.docker.io/' command_exists() { command -v "$@" > /dev/null 2>&1 } case "$(uname -m)" in *64) ;; *) echo >&2 'Error: you are not using a 64bit platform.' echo >&2 'Docker currently only supports 64bit platforms.' exit 1 ;; esac if command_exists docker || command_exists lxc-docker; then echo >&2 'Warning: "docker" or "lxc-docker" command appears to already exist.' echo >&2 'Please ensure that you do not already have docker installed.' echo >&2 'You may press Ctrl+C now to abort this process and rectify this situation.' ( set -x; sleep 20 ) fi user="$(id -un 2>/dev/null || true)" sh_c='sh -c' if [ "$user" != 'root' ]; then if command_exists sudo; then sh_c='sudo sh -c' elif command_exists su; then sh_c='su -c' else echo >&2 'Error: this installer needs the ability to run commands as root.' echo >&2 'We are unable to find either "sudo" or "su" available to make this happen.' exit 1 fi fi curl='' if command_exists curl; then curl='curl -sL' elif command_exists wget; then curl='wget -qO-' elif command_exists busybox && busybox --list-modules | grep -q wget; then curl='busybox wget -qO-' fi # perform some very rudimentary platform detection lsb_dist='' if command_exists lsb_release; then lsb_dist="$(lsb_release -si)" fi if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")" fi if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then lsb_dist='Debian' fi case "$lsb_dist" in Ubuntu|Debian) export DEBIAN_FRONTEND=noninteractive did_apt_get_update= apt_get_update() { if [ -z "$did_apt_get_update" ]; then ( set -x; $sh_c 'sleep 3; apt-get update' ) did_apt_get_update=1 fi } # TODO remove this section once device-mapper lands if ! 
grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then kern_extras="linux-image-extra-$(uname -r)" apt_get_update ( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)' echo >&2 ' but we still have no AUFS. Docker may not work. Proceeding anyways!' ( set -x; sleep 10 ) fi fi if [ ! -e /usr/lib/apt/methods/https ]; then apt_get_update ( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https' ) fi if [ -z "$curl" ]; then apt_get_update ( set -x; $sh_c 'sleep 3; apt-get install -y -q curl' ) curl='curl -sL' fi ( set -x if [ "https://get.docker.io/" = "$url" ]; then $sh_c "apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9" elif [ "https://test.docker.io/" = "$url" ]; then $sh_c "apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 740B314AE3941731B942C66ADF4FD13717AAD7D6" else $sh_c "$curl ${url}gpg | apt-key add -" fi $sh_c "echo deb ${url}ubuntu docker main > /etc/apt/sources.list.d/docker.list" $sh_c 'sleep 3; apt-get update; apt-get install -y -q lxc-docker' ) if command_exists docker && [ -e /var/run/docker.sock ]; then ( set -x $sh_c 'docker run busybox echo "Docker has been successfully installed!"' ) || true fi your_user=your-user [ "$user" != 'root' ] && your_user="$user" echo echo 'If you would like to use Docker as a non-root user, you should now consider' echo 'adding your user to the "docker" group with something like:' echo echo ' sudo usermod -aG docker' $your_user echo echo 'Remember that you will have to log out and back in for this to take effect!' echo exit 0 ;; Gentoo) if [ "$url" = "https://test.docker.io/" ]; then echo >&2 echo >&2 ' You appear to be trying to install the latest nightly build in Gentoo.' echo >&2 ' The portage tree should contain the latest stable release of Docker, but' echo >&2 ' if you want something more recent, you can always use the live ebuild' echo >&2 ' provided in the "docker" overlay available via layman. For more' echo >&2 ' instructions, please see the following URL:' echo >&2 ' https://github.com/tianon/docker-overlay#using-this-overlay' echo >&2 ' After adding the "docker" overlay, you should be able to:' echo >&2 ' emerge -av =app-emulation/docker-9999' echo >&2 exit 1 fi ( set -x $sh_c 'sleep 3; emerge app-emulation/docker' ) exit 0 ;; esac echo >&2 echo >&2 ' Either your platform is not easily detectable, is not supported by this' echo >&2 ' installer script (yet - PRs welcome!), or does not yet have a package for' echo >&2 ' Docker. Please visit the following URL for more detailed installation' echo >&2 ' instructions:' echo >&2 echo >&2 ' http://docs.docker.io/en/latest/installation/' echo >&2 exit 1