==> docker-1.6.2/integration/runtime_test.go <==

package docker

import (
	"bytes"
	"fmt"
	"io"
	std_log "log"
	"net"
	"net/url"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"syscall"
	"testing"
	"time"

	log "github.com/Sirupsen/logrus"
	"github.com/docker/docker/daemon"
	"github.com/docker/docker/daemon/execdriver"
	"github.com/docker/docker/engine"
	"github.com/docker/docker/image"
	"github.com/docker/docker/nat"
	"github.com/docker/docker/pkg/common"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/reexec"
	"github.com/docker/docker/runconfig"
	"github.com/docker/docker/utils"
)

const (
	unitTestImageName        = "docker-test-image"
	unitTestImageID          = "83599e29c455eb719f77d799bc7c51521b9551972f5a850d7ad265bc1b5292f6" // 1.0
	unitTestImageIDShort     = "83599e29c455"
	unitTestNetworkBridge    = "testdockbr0"
	unitTestStoreBase        = "/var/lib/docker/unit-tests"
	unitTestDockerTmpdir     = "/var/lib/docker/tmp"
	testDaemonAddr           = "127.0.0.1:4270"
	testDaemonProto          = "tcp"
	testDaemonHttpsProto     = "tcp"
	testDaemonHttpsAddr      = "localhost:4271"
	testDaemonRogueHttpsAddr = "localhost:4272"
)

var (
	// FIXME: globalDaemon is deprecated by globalEngine. All tests should be converted.
	globalDaemon           *daemon.Daemon
	globalEngine           *engine.Engine
	globalHttpsEngine      *engine.Engine
	globalRogueHttpsEngine *engine.Engine
	startFds               int
	startGoroutines        int
)

// FIXME: nuke() is deprecated by Daemon.Nuke()
func nuke(daemon *daemon.Daemon) error {
	return daemon.Nuke()
}

// FIXME: cleanup and nuke are redundant.
func cleanup(eng *engine.Engine, t *testing.T) error {
	daemon := mkDaemonFromEngine(eng, t)
	for _, container := range daemon.List() {
		container.Kill()
		daemon.Rm(container)
	}
	job := eng.Job("images")
	images, err := job.Stdout.AddTable()
	if err != nil {
		t.Fatal(err)
	}
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}
	for _, image := range images.Data {
		if image.Get("Id") != unitTestImageID {
			eng.Job("image_delete", image.Get("Id")).Run()
		}
	}
	return nil
}

func init() {
	// Always use the same driver (vfs) for all integration tests.
	// To test other drivers, we need a dedicated driver validation suite.
	os.Setenv("DOCKER_DRIVER", "vfs")
	os.Setenv("TEST", "1")
	os.Setenv("DOCKER_TMPDIR", unitTestDockerTmpdir)

	// Hack to run sys init during unit testing
	if reexec.Init() {
		return
	}

	if uid := syscall.Geteuid(); uid != 0 {
		log.Fatalf("docker tests need to be run as root")
	}

	// Copy dockerinit into our current testing directory, if provided (so we can test a separate dockerinit binary)
	if dockerinit := os.Getenv("TEST_DOCKERINIT_PATH"); dockerinit != "" {
		src, err := os.Open(dockerinit)
		if err != nil {
			log.Fatalf("Unable to open TEST_DOCKERINIT_PATH: %s", err)
		}
		defer src.Close()
		dst, err := os.OpenFile(filepath.Join(filepath.Dir(utils.SelfPath()), "dockerinit"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0555)
		if err != nil {
			log.Fatalf("Unable to create dockerinit in test directory: %s", err)
		}
		defer dst.Close()
		if _, err := io.Copy(dst, src); err != nil {
			log.Fatalf("Unable to copy dockerinit to TEST_DOCKERINIT_PATH: %s", err)
		}
		dst.Close()
		src.Close()
	}

	// Setup the base daemon, which will be duplicated for each test.
	// (no tests are run directly in the base)
	setupBaseImage()

	// Create the "global daemon", a long-running daemon for the integration tests
	spawnGlobalDaemon()
	spawnLegitHttpsDaemon()
	spawnRogueHttpsDaemon()
	startFds, startGoroutines = utils.GetTotalUsedFds(), runtime.NumGoroutine()
}

func setupBaseImage() {
	eng := newTestEngine(std_log.New(os.Stderr, "", 0), false, unitTestStoreBase)
	job := eng.Job("image_inspect", unitTestImageName)
	img, _ := job.Stdout.AddEnv()
	// If the test image is not found, try to download it.
	if err := job.Run(); err != nil || img.Get("Id") != unitTestImageID {
		// Retrieve the image
		job = eng.Job("pull", unitTestImageName)
		job.Stdout.Add(ioutils.NopWriteCloser(os.Stdout))
		if err := job.Run(); err != nil {
			log.Fatalf("Unable to pull the test image: %s", err)
		}
	}
}

func spawnGlobalDaemon() {
	if globalDaemon != nil {
		log.Debugf("Global daemon already exists. Skipping.")
		return
	}
	t := std_log.New(os.Stderr, "", 0)
	eng := NewTestEngine(t)
	globalEngine = eng
	globalDaemon = mkDaemonFromEngine(eng, t)

	// Spawn a Daemon
	go func() {
		log.Debugf("Spawning global daemon for integration tests")
		listenURL := &url.URL{
			Scheme: testDaemonProto,
			Host:   testDaemonAddr,
		}
		job := eng.Job("serveapi", listenURL.String())
		job.SetenvBool("Logging", true)
		if err := job.Run(); err != nil {
			log.Fatalf("Unable to spawn the test daemon: %s", err)
		}
	}()

	// Give ListenAndServe some time to actually start
	// FIXME: use inmem transports instead of tcp
	time.Sleep(time.Second)

	if err := eng.Job("acceptconnections").Run(); err != nil {
		log.Fatalf("Unable to accept connections for test api: %s", err)
	}
}

func spawnLegitHttpsDaemon() {
	if globalHttpsEngine != nil {
		return
	}
	globalHttpsEngine = spawnHttpsDaemon(testDaemonHttpsAddr, "fixtures/https/ca.pem",
		"fixtures/https/server-cert.pem", "fixtures/https/server-key.pem")
}

func spawnRogueHttpsDaemon() {
	if globalRogueHttpsEngine != nil {
		return
	}
	globalRogueHttpsEngine = spawnHttpsDaemon(testDaemonRogueHttpsAddr, "fixtures/https/ca.pem",
		"fixtures/https/server-rogue-cert.pem", "fixtures/https/server-rogue-key.pem")
}

func spawnHttpsDaemon(addr, cacert, cert, key string) *engine.Engine {
	t := std_log.New(os.Stderr, "", 0)
	root, err := newTestDirectory(unitTestStoreBase)
	if err != nil {
		t.Fatal(err)
	}
	// FIXME: here we don't use NewTestEngine because it configures the daemon with Autorestart=false,
	// and we want to set it to true.
	eng := newTestEngine(t, true, root)

	// Spawn a Daemon
	go func() {
		log.Debugf("Spawning https daemon for integration tests")
		listenURL := &url.URL{
			Scheme: testDaemonHttpsProto,
			Host:   addr,
		}
		job := eng.Job("serveapi", listenURL.String())
		job.SetenvBool("Logging", true)
		job.SetenvBool("Tls", true)
		job.SetenvBool("TlsVerify", true)
		job.Setenv("TlsCa", cacert)
		job.Setenv("TlsCert", cert)
		job.Setenv("TlsKey", key)
		if err := job.Run(); err != nil {
			log.Fatalf("Unable to spawn the test daemon: %s", err)
		}
	}()

	// Give ListenAndServe some time to actually start
	time.Sleep(time.Second)

	if err := eng.Job("acceptconnections").Run(); err != nil {
		log.Fatalf("Unable to accept connections for test api: %s", err)
	}
	return eng
}
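
// The daemon spawned above requires TLS with client verification
// (TlsVerify=true). The following is a minimal, hypothetical sketch of how a
// test client could connect to it using the fixture certificates under
// fixtures/https/. It is not part of this suite: the helper name
// dialTestHttpsDaemon is made up, and it assumes crypto/tls, crypto/x509,
// and io/ioutil are imported in addition to the imports at the top of this
// file.
func dialTestHttpsDaemon() (*tls.Conn, error) {
	// Present the client certificate signed by the test CA...
	clientCert, err := tls.LoadX509KeyPair("fixtures/https/client-cert.pem", "fixtures/https/client-key.pem")
	if err != nil {
		return nil, err
	}
	// ...and trust only that CA when verifying the server certificate.
	caPEM, err := ioutil.ReadFile("fixtures/https/ca.pem")
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(caPEM)
	return tls.Dial("tcp", testDaemonHttpsAddr, &tls.Config{
		Certificates: []tls.Certificate{clientCert},
		RootCAs:      pool,
	})
}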
Message was: %v", err) } // Make sure create with bad parameters returns an error if _, _, err = daemon.Create(&runconfig.Config{Image: GetTestImage(daemon).ID}, &runconfig.HostConfig{}, ""); err == nil { t.Fatal("Builder.Create should throw an error when Cmd is missing") } if _, _, err := daemon.Create( &runconfig.Config{ Image: GetTestImage(daemon).ID, Cmd: []string{}, }, &runconfig.HostConfig{}, "", ); err == nil { t.Fatal("Builder.Create should throw an error when Cmd is empty") } config := &runconfig.Config{ Image: GetTestImage(daemon).ID, Cmd: []string{"/bin/ls"}, PortSpecs: []string{"80"}, } container, _, err = daemon.Create(config, &runconfig.HostConfig{}, "") _, err = daemon.Commit(container, "testrepo", "testtag", "", "", true, config) if err != nil { t.Error(err) } // test expose 80:8000 container, warnings, err := daemon.Create(&runconfig.Config{ Image: GetTestImage(daemon).ID, Cmd: []string{"ls", "-al"}, PortSpecs: []string{"80:8000"}, }, &runconfig.HostConfig{}, "", ) if err != nil { t.Fatal(err) } if warnings == nil || len(warnings) != 1 { t.Error("Expected a warning, got none") } } func TestDestroy(t *testing.T) { daemon := mkDaemon(t) defer nuke(daemon) container, _, err := daemon.Create(&runconfig.Config{ Image: GetTestImage(daemon).ID, Cmd: []string{"ls", "-al"}, }, &runconfig.HostConfig{}, "") if err != nil { t.Fatal(err) } // Destroy if err := daemon.Rm(container); err != nil { t.Error(err) } // Make sure daemon.Exists() behaves correctly if daemon.Exists("test_destroy") { t.Errorf("Exists() returned true") } // Make sure daemon.List() doesn't list the destroyed container if len(daemon.List()) != 0 { t.Errorf("Expected 0 container, %v found", len(daemon.List())) } // Make sure daemon.Get() refuses to return the unexisting container if c, _ := daemon.Get(container.ID); c != nil { t.Errorf("Got a container that should not exist") } // Test double destroy if err := daemon.Rm(container); err == nil { // It should have failed t.Errorf("Double destroy did not fail") } } func TestGet(t *testing.T) { daemon := mkDaemon(t) defer nuke(daemon) container1, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t) defer daemon.Rm(container1) container2, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t) defer daemon.Rm(container2) container3, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t) defer daemon.Rm(container3) if c, _ := daemon.Get(container1.ID); c != container1 { t.Errorf("Get(test1) returned %v while expecting %v", c, container1) } if c, _ := daemon.Get(container2.ID); c != container2 { t.Errorf("Get(test2) returned %v while expecting %v", c, container2) } if c, _ := daemon.Get(container3.ID); c != container3 { t.Errorf("Get(test3) returned %v while expecting %v", c, container3) } } func startEchoServerContainer(t *testing.T, proto string) (*daemon.Daemon, *daemon.Container, string) { var ( err error id string outputBuffer = bytes.NewBuffer(nil) strPort string eng = NewTestEngine(t) daemon = mkDaemonFromEngine(eng, t) port = 5554 p nat.Port ) defer func() { if err != nil { daemon.Nuke() } }() for { port += 1 strPort = strconv.Itoa(port) var cmd string if proto == "tcp" { cmd = "socat TCP-LISTEN:" + strPort + ",reuseaddr,fork EXEC:/bin/cat" } else if proto == "udp" { cmd = "socat UDP-RECVFROM:" + strPort + ",fork EXEC:/bin/cat" } else { t.Fatal(fmt.Errorf("Unknown protocol %v", proto)) } ep := make(map[nat.Port]struct{}, 1) p = nat.Port(fmt.Sprintf("%s/%s", strPort, proto)) ep[p] = struct{}{} jobCreate := eng.Job("create") jobCreate.Setenv("Image", 

func startEchoServerContainer(t *testing.T, proto string) (*daemon.Daemon, *daemon.Container, string) {
	var (
		err          error
		id           string
		outputBuffer = bytes.NewBuffer(nil)
		strPort      string
		eng          = NewTestEngine(t)
		daemon       = mkDaemonFromEngine(eng, t)
		port         = 5554
		p            nat.Port
	)
	defer func() {
		if err != nil {
			daemon.Nuke()
		}
	}()

	for {
		port += 1
		strPort = strconv.Itoa(port)
		var cmd string
		if proto == "tcp" {
			cmd = "socat TCP-LISTEN:" + strPort + ",reuseaddr,fork EXEC:/bin/cat"
		} else if proto == "udp" {
			cmd = "socat UDP-RECVFROM:" + strPort + ",fork EXEC:/bin/cat"
		} else {
			t.Fatal(fmt.Errorf("Unknown protocol %v", proto))
		}
		ep := make(map[nat.Port]struct{}, 1)
		p = nat.Port(fmt.Sprintf("%s/%s", strPort, proto))
		ep[p] = struct{}{}

		jobCreate := eng.Job("create")
		jobCreate.Setenv("Image", unitTestImageID)
		jobCreate.SetenvList("Cmd", []string{"sh", "-c", cmd})
		jobCreate.SetenvList("PortSpecs", []string{fmt.Sprintf("%s/%s", strPort, proto)})
		jobCreate.SetenvJson("ExposedPorts", ep)
		jobCreate.Stdout.Add(outputBuffer)
		if err := jobCreate.Run(); err != nil {
			t.Fatal(err)
		}
		id = engine.Tail(outputBuffer, 1)
		// FIXME: this relies on the undocumented behavior of daemon.Create
		// which will return a nil error AND container if the exposed ports
		// are invalid. That behavior should be fixed!
		if id != "" {
			break
		}
		t.Logf("Port %v already in use, trying another one", strPort)
	}

	jobStart := eng.Job("start", id)
	portBindings := make(map[nat.Port][]nat.PortBinding)
	portBindings[p] = []nat.PortBinding{
		{},
	}
	if err := jobStart.SetenvJson("PortsBindings", portBindings); err != nil {
		t.Fatal(err)
	}
	if err := jobStart.Run(); err != nil {
		t.Fatal(err)
	}

	container, err := daemon.Get(id)
	if err != nil {
		t.Fatal(err)
	}

	setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
		for !container.IsRunning() {
			time.Sleep(10 * time.Millisecond)
		}
	})

	// Even if the state is running, let's give some time to lxc to spawn the process
	container.WaitStop(500 * time.Millisecond)

	strPort = container.NetworkSettings.Ports[p][0].HostPort
	return daemon, container, strPort
}

// Run a container with a TCP port allocated, and test that it can receive connections on localhost
func TestAllocateTCPPortLocalhost(t *testing.T) {
	daemon, container, port := startEchoServerContainer(t, "tcp")
	defer nuke(daemon)
	defer container.Kill()

	for i := 0; i != 10; i++ {
		conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%v", port))
		if err != nil {
			t.Fatal(err)
		}
		defer conn.Close()

		input := bytes.NewBufferString("well hello there\n")
		_, err = conn.Write(input.Bytes())
		if err != nil {
			t.Fatal(err)
		}
		buf := make([]byte, 16)
		read := 0
		conn.SetReadDeadline(time.Now().Add(3 * time.Second))
		read, err = conn.Read(buf)
		if err != nil {
			if err, ok := err.(*net.OpError); ok {
				if err.Err == syscall.ECONNRESET {
					t.Logf("Connection reset by the proxy, socat is probably not listening yet, trying again in a sec")
					conn.Close()
					time.Sleep(time.Second)
					continue
				}
				if err.Timeout() {
					t.Log("Timeout, trying again")
					conn.Close()
					continue
				}
			}
			t.Fatal(err)
		}
		output := string(buf[:read])
		if !strings.Contains(output, "well hello there") {
			t.Fatal(fmt.Errorf("[%v] doesn't contain [well hello there]", output))
		} else {
			return
		}
	}

	t.Fatal("No reply from the container")
}
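
// The test above tolerates transient failures because the port proxy may
// accept a connection before socat is listening inside the container. A
// hypothetical helper distilling that retry pattern (sketch only, not used
// by the suite; relies solely on the net and time imports already present
// in this file):
func dialWithRetry(addr string, attempts int) (net.Conn, error) {
	var lastErr error
	for i := 0; i < attempts; i++ {
		conn, err := net.Dial("tcp", addr)
		if err == nil {
			return conn, nil
		}
		lastErr = err
		// Give the container process time to bind before retrying.
		time.Sleep(time.Second)
	}
	return nil, lastErr
}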

// Run a container with a UDP port allocated, and test that it can receive connections on localhost
func TestAllocateUDPPortLocalhost(t *testing.T) {
	daemon, container, port := startEchoServerContainer(t, "udp")
	defer nuke(daemon)
	defer container.Kill()

	conn, err := net.Dial("udp", fmt.Sprintf("localhost:%v", port))
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()

	input := bytes.NewBufferString("well hello there\n")
	buf := make([]byte, 16)
	// Try for a minute, for some reason the select in socat may take ages
	// to return even though everything on the path seems fine (i.e: the
	// UDPProxy forwards the traffic correctly and you can see the packets
	// on the interface from within the container).
	for i := 0; i != 120; i++ {
		_, err := conn.Write(input.Bytes())
		if err != nil {
			t.Fatal(err)
		}
		conn.SetReadDeadline(time.Now().Add(500 * time.Millisecond))
		read, err := conn.Read(buf)
		if err == nil {
			output := string(buf[:read])
			if strings.Contains(output, "well hello there") {
				return
			}
		}
	}

	t.Fatal("No reply from the container")
}

func TestRestore(t *testing.T) {
	eng := NewTestEngine(t)
	daemon1 := mkDaemonFromEngine(eng, t)
	defer daemon1.Nuke()
	// Create a container with one instance of docker
	container1, _, _ := mkContainer(daemon1, []string{"_", "ls", "-al"}, t)
	defer daemon1.Rm(container1)

	// Create a second container meant to be killed
	container2, _, _ := mkContainer(daemon1, []string{"-i", "_", "/bin/cat"}, t)
	defer daemon1.Rm(container2)

	// Start the container non blocking
	if err := container2.Start(); err != nil {
		t.Fatal(err)
	}

	if !container2.IsRunning() {
		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
	}

	// Simulate a crash/manual quit of dockerd: process dies, state stays 'Running'
	cStdin := container2.StdinPipe()
	cStdin.Close()
	if _, err := container2.WaitStop(2 * time.Second); err != nil {
		t.Fatal(err)
	}
	container2.SetRunning(42)
	container2.ToDisk()

	if len(daemon1.List()) != 2 {
		t.Errorf("Expected 2 containers, %v found", len(daemon1.List()))
	}
	if err := container1.Run(); err != nil {
		t.Fatal(err)
	}

	if !container2.IsRunning() {
		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
	}

	// Here we are simulating a docker restart - that is, reloading all containers
	// from scratch
	eng = newTestEngine(t, false, daemon1.Config().Root)
	daemon2 := mkDaemonFromEngine(eng, t)
	if len(daemon2.List()) != 2 {
		t.Errorf("Expected 2 containers, %v found", len(daemon2.List()))
	}
	runningCount := 0
	for _, c := range daemon2.List() {
		if c.IsRunning() {
			t.Errorf("Running container found: %v (%v)", c.ID, c.Path)
			runningCount++
		}
	}
	if runningCount != 0 {
		t.Fatalf("Expected 0 containers alive, %d found", runningCount)
	}
	container3, err := daemon2.Get(container1.ID)
	if err != nil {
		t.Fatal("Unable to Get container")
	}
	if err := container3.Run(); err != nil {
		t.Fatal(err)
	}
	container2.SetStopped(&execdriver.ExitStatus{ExitCode: 0})
}

func TestDefaultContainerName(t *testing.T) {
	eng := NewTestEngine(t)
	daemon := mkDaemonFromEngine(eng, t)
	defer nuke(daemon)

	config, _, _, err := parseRun([]string{unitTestImageID, "echo test"})
	if err != nil {
		t.Fatal(err)
	}

	container, err := daemon.Get(createNamedTestContainer(eng, config, t, "some_name"))
	if err != nil {
		t.Fatal(err)
	}
	containerID := container.ID

	if container.Name != "/some_name" {
		t.Fatalf("Expect /some_name got %s", container.Name)
	}

	c, err := daemon.Get("/some_name")
	if err != nil {
		t.Fatalf("Couldn't retrieve test container as /some_name")
	}
	if c.ID != containerID {
		t.Fatalf("Container /some_name has ID %s instead of %s", c.ID, containerID)
	}
}

func TestRandomContainerName(t *testing.T) {
	eng := NewTestEngine(t)
	daemon := mkDaemonFromEngine(eng, t)
	defer nuke(daemon)

	config, _, _, err := parseRun([]string{GetTestImage(daemon).ID, "echo test"})
	if err != nil {
		t.Fatal(err)
	}

	container, err := daemon.Get(createTestContainer(eng, config, t))
	if err != nil {
		t.Fatal(err)
	}
	containerID := container.ID

	if container.Name == "" {
		t.Fatalf("Expected not empty container name")
	}

	if c, err := daemon.Get(container.Name); err != nil {
		log.Fatalf("Could not lookup container %s by its name", container.Name)
	} else if c.ID != containerID {
		log.Fatalf("Looking up container name %s returned id %s instead of %s", container.Name, c.ID, containerID)
	}
}

func TestContainerNameValidation(t *testing.T) {
	eng := NewTestEngine(t)
	daemon := mkDaemonFromEngine(eng, t)
	defer nuke(daemon)

	for _, test := range []struct {
		Name  string
		Valid bool
	}{
		{"abc-123_AAA.1", true},
		{"\000asdf", false},
	} {
		config, _, _, err := parseRun([]string{unitTestImageID, "echo test"})
		if err != nil {
			if !test.Valid {
				continue
			}
			t.Fatal(err)
		}

		var outputBuffer = bytes.NewBuffer(nil)
		job := eng.Job("create", test.Name)
		if err := job.ImportEnv(config); err != nil {
			t.Fatal(err)
		}
		job.Stdout.Add(outputBuffer)
		if err := job.Run(); err != nil {
			if !test.Valid {
				continue
			}
			t.Fatal(err)
		}

		container, err := daemon.Get(engine.Tail(outputBuffer, 1))
		if err != nil {
			t.Fatal(err)
		}

		if container.Name != "/"+test.Name {
			t.Fatalf("Expect /%s got %s", test.Name, container.Name)
		}

		if c, err := daemon.Get("/" + test.Name); err != nil {
			t.Fatalf("Couldn't retrieve test container as /%s", test.Name)
		} else if c.ID != container.ID {
			t.Fatalf("Container /%s has ID %s instead of %s", test.Name, c.ID, container.ID)
		}
	}
}

func TestLinkChildContainer(t *testing.T) {
	eng := NewTestEngine(t)
	daemon := mkDaemonFromEngine(eng, t)
	defer nuke(daemon)

	config, _, _, err := parseRun([]string{unitTestImageID, "echo test"})
	if err != nil {
		t.Fatal(err)
	}

	container, err := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp"))
	if err != nil {
		t.Fatal(err)
	}

	webapp, err := daemon.GetByName("/webapp")
	if err != nil {
		t.Fatal(err)
	}

	if webapp.ID != container.ID {
		t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID)
	}

	config, _, _, err = parseRun([]string{GetTestImage(daemon).ID, "echo test"})
	if err != nil {
		t.Fatal(err)
	}

	childContainer, err := daemon.Get(createTestContainer(eng, config, t))
	if err != nil {
		t.Fatal(err)
	}

	if err := daemon.RegisterLink(webapp, childContainer, "db"); err != nil {
		t.Fatal(err)
	}

	// Get the child by its new name
	db, err := daemon.GetByName("/webapp/db")
	if err != nil {
		t.Fatal(err)
	}
	if db.ID != childContainer.ID {
		t.Fatalf("Expect db id to match container id: %s != %s", db.ID, childContainer.ID)
	}
}

func TestGetAllChildren(t *testing.T) {
	eng := NewTestEngine(t)
	daemon := mkDaemonFromEngine(eng, t)
	defer nuke(daemon)

	config, _, _, err := parseRun([]string{unitTestImageID, "echo test"})
	if err != nil {
		t.Fatal(err)
	}

	container, err := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp"))
	if err != nil {
		t.Fatal(err)
	}

	webapp, err := daemon.GetByName("/webapp")
	if err != nil {
		t.Fatal(err)
	}

	if webapp.ID != container.ID {
		t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID)
	}

	config, _, _, err = parseRun([]string{unitTestImageID, "echo test"})
	if err != nil {
		t.Fatal(err)
	}

	childContainer, err := daemon.Get(createTestContainer(eng, config, t))
	if err != nil {
		t.Fatal(err)
	}

	if err := daemon.RegisterLink(webapp, childContainer, "db"); err != nil {
		t.Fatal(err)
	}

	children, err := daemon.Children("/webapp")
	if err != nil {
		t.Fatal(err)
	}

	if children == nil {
		t.Fatal("Children should not be nil")
	}
	if len(children) == 0 {
		t.Fatal("Children should not be empty")
	}

	for key, value := range children {
		if key != "/webapp/db" {
			t.Fatalf("Expected /webapp/db got %s", key)
		}
		if value.ID != childContainer.ID {
			t.Fatalf("Expected id %s got %s", childContainer.ID, value.ID)
		}
	}
}

func TestDestroyWithInitLayer(t *testing.T) {
	daemon := mkDaemon(t)
	defer nuke(daemon)

	container, _, err := daemon.Create(&runconfig.Config{
		Image: GetTestImage(daemon).ID,
		Cmd:   []string{"ls", "-al"},
"-al"}, }, &runconfig.HostConfig{}, "") if err != nil { t.Fatal(err) } // Destroy if err := daemon.Rm(container); err != nil { t.Fatal(err) } // Make sure daemon.Exists() behaves correctly if daemon.Exists("test_destroy") { t.Fatalf("Exists() returned true") } // Make sure daemon.List() doesn't list the destroyed container if len(daemon.List()) != 0 { t.Fatalf("Expected 0 container, %v found", len(daemon.List())) } driver := daemon.Graph().Driver() // Make sure that the container does not exist in the driver if _, err := driver.Get(container.ID, ""); err == nil { t.Fatal("Conttainer should not exist in the driver") } // Make sure that the init layer is removed from the driver if _, err := driver.Get(fmt.Sprintf("%s-init", container.ID), ""); err == nil { t.Fatal("Container's init layer should not exist in the driver") } } docker-1.6.2/integration/fixtures/0000755000175000017500000000000012524223634016530 5ustar tianontianondocker-1.6.2/integration/fixtures/https/0000755000175000017500000000000012524223634017672 5ustar tianontianondocker-1.6.2/integration/fixtures/https/server-cert.pem0000644000175000017500000001001312524223634022631 0ustar tianontianonCertificate: Data: Version: 3 (0x2) Serial Number: 4 (0x4) Signature Algorithm: sha1WithRSAEncryption Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain Validity Not Before: Dec 4 15:01:20 2013 GMT Not After : Dec 2 15:01:20 2023 GMT Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=*/name=changeme/emailAddress=mail@host.domain Subject Public Key Info: Public Key Algorithm: rsaEncryption Public-Key: (1024 bit) Modulus: 00:c1:ff:7d:30:6f:64:4a:b1:92:b1:71:d1:c1:74: e2:1d:db:2d:11:24:e1:00:d4:00:ae:6f:c8:9e:ae: 67:b3:4a:bd:f7:e6:9e:57:6d:19:4c:3c:23:94:2d: 3d:d6:63:84:d8:fa:76:2b:38:12:c1:ed:20:9d:32: e0:e8:c2:bf:9a:77:70:04:3f:7f:ca:8c:2c:82:d6: 3d:25:5c:02:1a:4f:64:93:03:dd:9c:42:97:5e:09: 49:af:f0:c2:e1:30:08:0e:21:46:95:d1:13:59:c0: c8:76:be:94:0d:8b:43:67:21:33:b2:08:60:9d:76: a8:05:32:1e:f9:95:09:14:75 Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Basic Constraints: CA:FALSE Netscape Cert Type: SSL Server Netscape Comment: Easy-RSA Generated Server Certificate X509v3 Subject Key Identifier: 14:02:FD:FD:DD:13:38:E0:71:EA:D1:BE:C0:0E:89:1A:2D:B6:19:06 X509v3 Authority Key Identifier: keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain serial:FD:AB:EC:6A:84:27:04:A7 X509v3 Extended Key Usage: TLS Web Server Authentication X509v3 Key Usage: Digital Signature, Key Encipherment Signature Algorithm: sha1WithRSAEncryption 40:0f:10:39:c4:b7:0f:0d:2f:bf:d2:16:cc:8e:d3:9a:fb:8b: ce:4b:7b:0d:48:77:ce:f1:fe:d5:8f:ea:b1:71:ed:49:1d:9f: 23:3a:16:d4:70:7c:c5:29:bf:e4:90:34:d0:f0:00:24:f4:e4: df:2c:c3:83:01:66:61:c9:a8:ab:29:e7:98:6d:27:89:4a:76: c9:2e:19:8e:fe:6e:d5:f8:99:11:0e:97:67:4b:34:e3:1e:e3: 9f:35:00:a5:32:f9:b5:2c:f2:e0:c5:2e:cc:81:bd:18:dd:5c: 12:c8:6b:fa:0c:17:74:30:55:f6:6e:20:9a:6c:1e:09:b4:0c: 15:42 -----BEGIN CERTIFICATE----- MIIEKjCCA5OgAwIBAgIBBDANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv bWFpbjAeFw0xMzEyMDQxNTAxMjBaFw0yMzEyMDIxNTAxMjBaMIGbMQswCQYDVQQG EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE 
ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEKMAgGA1UEAxQBKjER
MA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21h
aW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMH/fTBvZEqxkrFx0cF04h3b
LREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y4OjCv5p3
cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+lA2LQ2ch
M7IIYJ12qAUyHvmVCRR1AgMBAAGjggFzMIIBbzAJBgNVHRMEAjAAMBEGCWCGSAGG
+EIBAQQEAwIGQDA0BglghkgBhvhCAQ0EJxYlRWFzeS1SU0EgR2VuZXJhdGVkIFNl
cnZlciBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUFAL9/d0TOOBx6tG+wA6JGi22GQYw
gdcGA1UdIwSBzzCBzIAUZu7DFz09q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJ
BgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUw
EwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQD
EwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1h
aWxAaG9zdC5kb21haW6CCQD9q+xqhCcEpzATBgNVHSUEDDAKBggrBgEFBQcDATAL
BgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEFBQADgYEAQA8QOcS3Dw0vv9IWzI7TmvuL
zkt7DUh3zvH+1Y/qsXHtSR2fIzoW1HB8xSm/5JA00PAAJPTk3yzDgwFmYcmoqynn
mG0niUp2yS4Zjv5u1fiZEQ6XZ0s04x7jnzUApTL5tSzy4MUuzIG9GN1cEshr+gwX
dDBV9m4gmmweCbQMFUI=
-----END CERTIFICATE-----

==> docker-1.6.2/integration/fixtures/https/client-cert.pem <==

Certificate:
    Data:
        Version: 3 (0x2)
        Serial Number: 3 (0x3)
    Signature Algorithm: sha1WithRSAEncryption
        Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain
        Validity
            Not Before: Dec 4 14:17:54 2013 GMT
            Not After : Dec 2 14:17:54 2023 GMT
        Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=client/name=changeme/emailAddress=mail@host.domain
        Subject Public Key Info:
            Public Key Algorithm: rsaEncryption
                Public-Key: (1024 bit)
                Modulus:
                    00:ca:c9:05:d0:09:4e:3e:a4:fc:d5:14:f4:a5:e8:
                    34:d3:6b:51:e3:f3:62:ea:a1:f0:e8:ed:c4:2a:bc:
                    f0:4f:ca:07:df:e3:88:fa:f4:21:99:35:0e:3d:ea:
                    b0:86:e7:c4:d2:8a:83:2b:42:b8:ec:a3:99:62:70:
                    81:46:cc:fc:a5:1d:d2:63:e8:eb:07:25:9a:e2:25:
                    6d:11:56:f2:1a:51:a1:b6:3e:1c:57:32:e9:7b:2c:
                    aa:1b:cc:97:2d:89:2d:b1:c9:5e:35:28:4d:7c:fa:
                    65:31:3e:f7:70:dd:6e:0b:3c:58:af:a8:2e:24:c0:
                    7e:4e:78:7d:0a:9e:8f:42:43
                Exponent: 65537 (0x10001)
        X509v3 extensions:
            X509v3 Basic Constraints:
                CA:FALSE
            Netscape Comment:
                Easy-RSA Generated Certificate
            X509v3 Subject Key Identifier:
                DE:42:EF:2D:98:A3:6C:A8:AA:E0:8C:71:2C:9D:64:23:A9:E2:7E:81
            X509v3 Authority Key Identifier:
                keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB
                DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain
                serial:FD:AB:EC:6A:84:27:04:A7
            X509v3 Extended Key Usage:
                TLS Web Client Authentication
            X509v3 Key Usage:
                Digital Signature
    Signature Algorithm: sha1WithRSAEncryption
        1c:44:26:ea:e1:66:25:cb:e4:8e:57:1c:f6:b9:17:22:62:40:
        12:90:8f:3b:b2:61:7a:54:94:8f:b1:20:0b:bf:a3:51:e3:fa:
        1c:a1:be:92:3a:d0:76:44:c0:57:83:ab:6a:e4:1a:45:49:a4:
        af:39:0d:60:32:fc:3a:be:d7:fb:5d:99:7a:1f:87:e7:d5:ab:
        84:a2:5e:90:d8:bf:fa:89:6d:32:26:02:5e:31:35:68:7f:31:
        f5:6b:51:46:bc:af:70:ed:5a:09:7d:ec:b2:48:4f:fe:c5:2f:
        56:04:ad:f6:c1:d2:2a:e4:6a:c4:87:fe:08:35:c5:38:cb:5e:
        4a:c4
-----BEGIN CERTIFICATE-----
MIIEFTCCA36gAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx
CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv
cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l
MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv
bWFpbjAeFw0xMzEyMDQxNDE3NTRaFw0yMzEyMDIxNDE3NTRaMIGgMQswCQYDVQQG
EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE
ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEPMA0GA1UEAxMGY2xp
ZW50MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0
LmRvbWFpbjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAyskF0AlOPqT81RT0
peg002tR4/Ni6qHw6O3EKrzwT8oH3+OI+vQhmTUOPeqwhufE0oqDK0K47KOZYnCB
Rsz8pR3SY+jrByWa4iVtEVbyGlGhtj4cVzLpeyyqG8yXLYktscleNShNfPplMT73
cN1uCzxYr6guJMB+Tnh9Cp6PQkMCAwEAAaOCAVkwggFVMAkGA1UdEwQCMAAwLQYJ
YIZIAYb4QgENBCAWHkVhc3ktUlNBIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNV
HQ4EFgQU3kLvLZijbKiq4IxxLJ1kI6nifoEwgdcGA1UdIwSBzzCBzIAUZu7DFz09
q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD
QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x
ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI
Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq
hCcEpzATBgNVHSUEDDAKBggrBgEFBQcDAjALBgNVHQ8EBAMCB4AwDQYJKoZIhvcN
AQEFBQADgYEAHEQm6uFmJcvkjlcc9rkXImJAEpCPO7JhelSUj7EgC7+jUeP6HKG+
kjrQdkTAV4OrauQaRUmkrzkNYDL8Or7X+12Zeh+H59WrhKJekNi/+oltMiYCXjE1
aH8x9WtRRryvcO1aCX3sskhP/sUvVgSt9sHSKuRqxIf+CDXFOMteSsQ=
-----END CERTIFICATE-----

==> docker-1.6.2/integration/fixtures/https/server-rogue-key.pem <==

-----BEGIN PRIVATE KEY-----
MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBANEIWCRgoWllS3ZG
j4h1fEk62APMW1jF0bvl+VS5dWXffrv7VNSy6W9YoqSEQ5R3JIE4NjbwZmUm5Vsq
FByprld/dQAjFEthWOSCqhWXlL1QNQ1dGBjtEGq702Ra6zaYW1in/mdIwWw/US8C
ZZZUd5s0+afSY1RqngJcvmWYpLS1AgMBAAECgYAJXh9dGfuB1qlIFqduDR3RxlJR
8UGSu+LHUeoXkuwg8aAjWoMVuSLe+5DmYIsKx0AajmNXmPRtyg1zRXJ7SltmubJ8
6qQVDsRk6biMdkpkl6a9Gk2av40psD9/VPGxagEoop7IKYhf3AeKPvPiwVB2qFrl
1aYMZm0aMR55pgRajQJBAOk8IsJDf0beooDZXVdv/oe4hcbM9fxO8Cn3qzoGImqD
37LL+PCzDP7AEV3fk43SsZDeSk+LDX+h0o9nPyhzHasCQQDlb3aDgcQY9NaGLUWO
moOCB3148eBVcAwCocu+OSkf7sbQdvXxgThBOrZl11wwRIMQqh99c2yeUwj+tELl
3VcfAkBZTiNpCvtDIaBLge9RuZpWUXs3wec2cutWxnSTxSGMc25GQf/R+l0xdk2w
ChmvpktDUzpU9sN2aXn8WuY+EMX9AkEApbLpUbKPUELLB958RLA819TW/lkZXjrs
wZ3eSoR3ufM1rOqtVvyvBxUDE+wETWu9iHSFB5Ir2PA5J9JCGkbPmwJAFI1ndfBj
iuyU93nFX0p+JE2wVHKx4dMzKCearNKiJh/lGDtUq3REGgamTNUnG8RAITUbxFs+
Z1hrIq8xYl2LOQ==
-----END PRIVATE KEY-----

==> docker-1.6.2/integration/fixtures/https/client-rogue-cert.pem <==

Certificate:
    Data:
        Version: 3 (0x2)
        Serial Number: 2 (0x2)
    Signature Algorithm: sha1WithRSAEncryption
        Issuer: C=US, ST=CA, L=SanFrancisco, O=Evil Inc, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain
        Validity
            Not Before: Feb 24 17:54:59 2014 GMT
            Not After : Feb 22 17:54:59 2024 GMT
        Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=client/name=changeme/emailAddress=mail@host.domain
        Subject Public Key Info:
            Public Key Algorithm: rsaEncryption
                Public-Key: (1024 bit)
                Modulus:
                    00:e8:e2:2c:b8:d4:db:89:50:4f:47:1e:68:db:f7:
                    e4:cc:47:41:63:75:03:37:50:7a:a8:4d:27:36:d5:
                    15:01:08:b6:cf:56:f7:56:6d:3d:f9:e2:8d:1a:5d:
                    bf:a0:24:5e:07:55:8e:d0:dc:f1:fa:19:87:1d:d6:
                    b6:58:82:2e:ba:69:6d:e9:d9:c8:16:0d:1d:59:7f:
                    f4:8e:58:10:01:3d:21:14:16:3c:ec:cd:8c:b7:0e:
                    e6:7b:77:b4:f9:90:a5:17:01:bb:84:c6:b2:12:87:
                    70:eb:9f:6d:4f:d0:68:8b:96:c0:e7:0b:51:b4:9d:
                    1d:7b:6c:7b:be:89:6b:88:8b
                Exponent: 65537 (0x10001)
        X509v3 extensions:
            X509v3 Basic Constraints:
                CA:FALSE
            Netscape Comment:
                Easy-RSA Generated Certificate
            X509v3 Subject Key Identifier:
                9E:F8:49:D0:A2:76:30:5C:AB:2B:8A:B5:8D:C6:45:1F:A7:F8:CF:85
            X509v3 Authority Key Identifier:
                keyid:DC:A5:F1:76:DB:4E:CD:8E:EF:B1:23:56:1D:92:80:99:74:3B:EA:6F
                DirName:/C=US/ST=CA/L=SanFrancisco/O=Evil Inc/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain
                serial:E7:21:1E:18:41:1B:96:83
            X509v3 Extended Key Usage:
                TLS Web Client Authentication
            X509v3 Key Usage:
                Digital Signature
    Signature Algorithm: sha1WithRSAEncryption
        48:76:c0:18:fa:0a:ee:4e:1a:ec:02:9d:d4:83:ca:94:54:a1:
        3f:51:2f:3e:4b:95:c3:42:9b:71:a0:4b:d9:af:47:23:b9:1c:
        fb:85:ba:76:e2:09:cb:65:bb:d2:7d:44:3d:4b:67:ba:80:83:
        be:a8:ed:c4:b9:ea:1a:1b:c7:59:3b:d9:5c:0d:46:d8:c9:92:
        cb:10:c5:f2:1a:38:a4:aa:07:2c:e3:84:16:79:c7:95:09:e3:
        01:d2:15:a2:77:0b:8b:bf:94:04:e9:7f:c0:cd:e6:2e:64:cd:
        1e:a3:32:ec:11:cc:62:ce:c7:4e:cd:ad:48:5c:b1:b8:e9:76:
        b3:f9
-----BEGIN CERTIFICATE-----
MIIEDTCCA3agAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBnjELMAkGA1UEBhMCVVMx
CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2
aWwgSW5jMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAP
BgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWlu
MB4XDTE0MDIyNDE3NTQ1OVoXDTI0MDIyMjE3NTQ1OVowgaAxCzAJBgNVBAYTAlVT
MQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxG
b3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMQ8wDQYDVQQDEwZjbGllbnQx
ETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9t
YWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDo4iy41NuJUE9HHmjb9+TM
R0FjdQM3UHqoTSc21RUBCLbPVvdWbT354o0aXb+gJF4HVY7Q3PH6GYcd1rZYgi66
aW3p2cgWDR1Zf/SOWBABPSEUFjzszYy3DuZ7d7T5kKUXAbuExrISh3Drn21P0GiL
lsDnC1G0nR17bHu+iWuIiwIDAQABo4IBVTCCAVEwCQYDVR0TBAIwADAtBglghkgB
hvhCAQ0EIBYeRWFzeS1SU0EgR2VuZXJhdGVkIENlcnRpZmljYXRlMB0GA1UdDgQW
BBSe+EnQonYwXKsrirWNxkUfp/jPhTCB0wYDVR0jBIHLMIHIgBTcpfF2207Nju+x
I1YdkoCZdDvqb6GBpKSBoTCBnjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRUw
EwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2aWwgSW5jMREwDwYDVQQL
EwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAPBgNVBCkTCGNoYW5nZW1l
MR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWluggkA5yEeGEEbloMwEwYD
VR0lBAwwCgYIKwYBBQUHAwIwCwYDVR0PBAQDAgeAMA0GCSqGSIb3DQEBBQUAA4GB
AEh2wBj6Cu5OGuwCndSDypRUoT9RLz5LlcNCm3GgS9mvRyO5HPuFunbiCctlu9J9
RD1LZ7qAg76o7cS56hobx1k72VwNRtjJkssQxfIaOKSqByzjhBZ5x5UJ4wHSFaJ3
C4u/lATpf8DN5i5kzR6jMuwRzGLOx07NrUhcsbjpdrP5
-----END CERTIFICATE-----

==> docker-1.6.2/integration/fixtures/https/client-key.pem <==

-----BEGIN PRIVATE KEY-----
MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAMrJBdAJTj6k/NUU
9KXoNNNrUePzYuqh8OjtxCq88E/KB9/jiPr0IZk1Dj3qsIbnxNKKgytCuOyjmWJw
gUbM/KUd0mPo6wclmuIlbRFW8hpRobY+HFcy6XssqhvMly2JLbHJXjUoTXz6ZTE+
93Ddbgs8WK+oLiTAfk54fQqej0JDAgMBAAECgYBOFEzKp2qbMEexe9ofL2N3rDDh
xkrl8OijpzkLA6i78BxMFn4dsnZlWUpciMrjhsYAExkiRRSS+QMMJimAq1jzQqc3
FAQV2XGYwkd0cUn7iZGvfNnEPysjsfyYQM+m+sT0ATj4BZjVShC6kkSjTdm1leLN
OSvcHdcu3Xxg9ufF0QJBAPYdnNt5sIndt2WECePuRVi+uF4mlxTobFY0fjn26yhC
4RsnhhD3Vldygo9gvnkwrAZYaALGSPBewes2InxvjA8CQQDS7erKiNXpwoqz5XiU
SVEsIIVTdWzBjGbIqMOu/hUwM5FK4j6JTBks0aTGMyh0YV9L1EzM0X79J29JahCe
iQKNAkBKNMOGqTpBV0hko1sYDk96YobUXG5RL4L6uvkUIQ7mJMQam+AgXXL7Ctuy
v0iu4a38e8tgisiTMP7nHHtpaXihAkAOiN54/lzfMsykANgCP9scE1GcoqbP34Dl
qttxH4kOPT9xzY1JoLjLYdbc4YGUI3GRpBt2sajygNkmUey7P+2xAkBBsVCZFvTw
qHvOpPS2kX5ml5xoc/QAHK9N7kR+X7XFYx82RTVSqJEK4lPb+aEWn+CjiIewO4Q5
ksDFuNxAzbhl
-----END PRIVATE KEY-----

==> docker-1.6.2/integration/fixtures/https/server-rogue-cert.pem <==

Certificate:
    Data:
        Version: 3 (0x2)
        Serial Number: 3 (0x3)
    Signature Algorithm: sha1WithRSAEncryption
        Issuer: C=US, ST=CA, L=SanFrancisco, O=Evil Inc, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain
        Validity
            Not Before: Feb 28 18:49:31 2014 GMT
            Not After : Feb 26 18:49:31 2024 GMT
        Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=localhost/name=changeme/emailAddress=mail@host.domain
        Subject Public Key Info:
            Public Key Algorithm: rsaEncryption
                Public-Key: (1024 bit)
                Modulus:
                    00:d1:08:58:24:60:a1:69:65:4b:76:46:8f:88:75:
                    7c:49:3a:d8:03:cc:5b:58:c5:d1:bb:e5:f9:54:b9:
                    75:65:df:7e:bb:fb:54:d4:b2:e9:6f:58:a2:a4:84:
                    43:94:77:24:81:38:36:36:f0:66:65:26:e5:5b:2a:
                    14:1c:a9:ae:57:7f:75:00:23:14:4b:61:58:e4:82:
                    aa:15:97:94:bd:50:35:0d:5d:18:18:ed:10:6a:bb:
                    d3:64:5a:eb:36:98:5b:58:a7:fe:67:48:c1:6c:3f:
                    51:2f:02:65:96:54:77:9b:34:f9:a7:d2:63:54:6a:
                    9e:02:5c:be:65:98:a4:b4:b5
                Exponent: 65537 (0x10001)
        X509v3 extensions:
            X509v3 Basic Constraints:
                CA:FALSE
            Netscape Cert Type:
                SSL Server
            Netscape Comment:
                Easy-RSA Generated Server Certificate
            X509v3 Subject Key Identifier:
                1F:E0:57:CA:CB:76:C9:C4:86:B9:EA:69:17:C0:F3:51:CE:95:40:EC
            X509v3 Authority Key Identifier:
                keyid:DC:A5:F1:76:DB:4E:CD:8E:EF:B1:23:56:1D:92:80:99:74:3B:EA:6F
                DirName:/C=US/ST=CA/L=SanFrancisco/O=Evil Inc/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain
                serial:E7:21:1E:18:41:1B:96:83
            X509v3 Extended Key Usage:
                TLS Web Server Authentication
            X509v3 Key Usage:
                Digital Signature, Key Encipherment
    Signature Algorithm: sha1WithRSAEncryption
        04:93:0e:28:01:94:18:f0:8c:7c:d3:0c:ad:e9:b7:46:b1:30:
        65:ed:68:7c:8c:91:cd:1a:86:66:87:4a:4f:c0:97:bc:f7:85:
        4b:38:79:31:b2:65:88:b1:76:16:9e:80:93:38:f4:b9:eb:65:
        00:6d:bb:89:e0:a1:bf:95:5e:80:13:8e:01:73:d3:f1:08:73:
        85:a5:33:75:0b:42:8a:a3:07:09:35:ef:d7:c6:58:eb:60:a3:
        06:89:a0:53:99:e2:aa:41:90:e0:1a:d2:12:4b:48:7d:c3:9c:
        ad:bd:0e:5e:5f:f7:09:0c:5d:7c:86:24:dd:92:d5:b3:14:06:
        c7:9f
-----BEGIN CERTIFICATE-----
MIIEKjCCA5OgAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBnjELMAkGA1UEBhMCVVMx
CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2
aWwgSW5jMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAP
BgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWlu
MB4XDTE0MDIyODE4NDkzMVoXDTI0MDIyNjE4NDkzMVowgaMxCzAJBgNVBAYTAlVT
MQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxG
b3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMRIwEAYDVQQDEwlsb2NhbGhv
c3QxETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3Qu
ZG9tYWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDRCFgkYKFpZUt2Ro+I
dXxJOtgDzFtYxdG75flUuXVl3367+1TUsulvWKKkhEOUdySBODY28GZlJuVbKhQc
qa5Xf3UAIxRLYVjkgqoVl5S9UDUNXRgY7RBqu9NkWus2mFtYp/5nSMFsP1EvAmWW
VHebNPmn0mNUap4CXL5lmKS0tQIDAQABo4IBbzCCAWswCQYDVR0TBAIwADARBglg
hkgBhvhCAQEEBAMCBkAwNAYJYIZIAYb4QgENBCcWJUVhc3ktUlNBIEdlbmVyYXRl
ZCBTZXJ2ZXIgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFB/gV8rLdsnEhrnqaRfA81HO
lUDsMIHTBgNVHSMEgcswgciAFNyl8XbbTs2O77EjVh2SgJl0O+pvoYGkpIGhMIGe
MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNj
bzERMA8GA1UEChMIRXZpbCBJbmMxETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQD
EwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1h
aWxAaG9zdC5kb21haW6CCQDnIR4YQRuWgzATBgNVHSUEDDAKBggrBgEFBQcDATAL
BgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEFBQADgYEABJMOKAGUGPCMfNMMrem3RrEw
Ze1ofIyRzRqGZodKT8CXvPeFSzh5MbJliLF2Fp6Akzj0uetlAG27ieChv5VegBOO
AXPT8QhzhaUzdQtCiqMHCTXv18ZY62CjBomgU5niqkGQ4BrSEktIfcOcrb0OXl/3
CQxdfIYk3ZLVsxQGx58=
-----END CERTIFICATE-----

==> docker-1.6.2/integration/fixtures/https/client-rogue-key.pem <==

-----BEGIN PRIVATE KEY-----
MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAOjiLLjU24lQT0ce
aNv35MxHQWN1AzdQeqhNJzbVFQEIts9W91ZtPfnijRpdv6AkXgdVjtDc8foZhx3W
tliCLrppbenZyBYNHVl/9I5YEAE9IRQWPOzNjLcO5nt3tPmQpRcBu4TGshKHcOuf
bU/QaIuWwOcLUbSdHXtse76Ja4iLAgMBAAECgYADs+TmI2xCKKa6CL++D5jxrohZ
nnionnz0xBVFh+nHlG3jqgxQsXf0yydXLfpn/2wHTdLxezHVuiYt0UYg7iD0CglW
+IjcgMebzyjLeYqYOE5llPlMvhp2HoEMYJNb+7bRrZ1WCITbu+Su0w1cgA7Cs+Ej
VlfvGzN+qqnDThRUYQJBAPY0sMWZJKly8QhUmUvmcXdPczzSOf6Mm7gc5LR6wzxd
vW7syuqk50qjqVqFpN81vCV7GoDxRUWbTM9ftf7JGFkCQQDyJc/1RMygE2o+enU1
6UBxJyclXITEYtDn8aoEpLNc7RakP1WoPUKjZOnjkcoKcIkFNkSPeCfQujrb5f3F
MkuDAkByAI/hzzmkpK5rFxEsjfX4Mve/L/DepyjrpaVY1IdWimlO1aJX6CeY7hNa
8QsYt/74s/nfvtg+lNyKIV1aLq9xAkB+WSSNgfyTeg3x08vc+Xxajmdqoz/TiQwg
OoTQL3A3iK5LvZBgXLasszcnOycFE3srcQmNItEDpGiZ3QPxJTEpAkEA45EE9NMJ
SA7EGWSFlbz4f4u4oBeiDiJRJbGGfAyVxZlpCWUjPpg9+swsWoFEOjnGYaChAMk5
nrOdMf15T6QF7Q==
-----END PRIVATE KEY-----

==> docker-1.6.2/integration/fixtures/https/ca.pem <==

-----BEGIN CERTIFICATE-----
MIID0TCCAzqgAwIBAgIJAP2r7GqEJwSnMA0GCSqGSIb3DQEBBQUAMIGiMQswCQYD
VQQGEwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMG
A1UEChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMI
Y2hhbmdlbWUxETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWls
QGhvc3QuZG9tYWluMB4XDTEzMTIwMzE2NTYzMFoXDTIzMTIwMTE2NTYzMFowgaIx
CzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2Nv
MRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYD
VQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEW
EG1haWxAaG9zdC5kb21haW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALAn
0xDw+5y7ZptQacq66pUhRu82JP2WU6IDgo5QUtNU6/CX5PwQATe/OnYTZQFbksxp
AU9boG0FCkgxfsgPYXEuZxVEGKI2fxfKHOZZI8mrkWmj6eWU/0cvCjGVc9rTITP5
sNQvg+hORyVDdNp2IdsbMJayiB3AQYMFx3vSDOMTAgMBAAGjggELMIIBBzAdBgNV
HQ4EFgQUZu7DFz09q0QBa2+ymRm9qgK1NPswgdcGA1UdIwSBzzCBzIAUZu7DFz09
q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD
QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x
ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI
Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq
hCcEpzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAF8fJKKM+/oOdnNi
zEd0M1+PmZOyqvjYQn/2ZR8UHH6Imgc/OPQKZXf0bVE1Txc/DaUNn9Isd1SuCuaE
ic3vAIYYU7PmgeNN6vwec48V96T7jr+GAi6AVMhQEc2hHCfVtx11Xx+x6aHDZzJt
Zxtf5lL6KSO9Y+EFwM+rju6hm5hW
-----END CERTIFICATE-----

==> docker-1.6.2/integration/fixtures/https/server-key.pem <==

-----BEGIN PRIVATE KEY-----
MIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAMH/fTBvZEqxkrFx
0cF04h3bLREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y
4OjCv5p3cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+
lA2LQ2chM7IIYJ12qAUyHvmVCRR1AgMBAAECgYAmwckb9RUfSwyYgLm8IYLPHiuJ
wkllZfVg5Bo7gXJcQnFjZmJ56uTj8xvUjZlODIHM63TSO5ibv6kFXtXKCqZGd2M+
wGbhZ0f+2GvKcwMmJERnIQjuoNaYSQLT0tM0VB9Iz0rJlZC+tzPZ+5pPqEumRdsS
IzWNXfF42AhcbwAQYQJBAPVXtMYIJc9EZsz86ZcQiMPWUpCX5vnRmtwL8kKyR8D5
4KfYeiowyFffSRMMcclwNHq7TgSXN+nIXM9WyzyzwikCQQDKbNA28AgZp9aT54HP
WnbeE2pmt+uk/zl/BtxJSoK6H+69Jec+lf7EgL7HgOWYRSNot4uQWu8IhsHLTiUq
+0FtAkEAqwlRxRy4/x24bP+D+QRV0/D97j93joFJbE4Hved7jlSlAV4xDGilwlyv
HNB4Iu5OJ6Gcaibhm+FKkmD3noHSwQJBAIpu3fokLzX0bS+bDFBU6qO3HXX/47xj
+tsfQvkwZrSI8AkU6c8IX0HdVhsz0FBRQAT2ORDQz1XCarfxykNZrwUCQQCGCBIc
BBCWzhHlswlGidWJg3HqqO6hPPClEr3B5G87oCsdeYwiO23XT6rUnoJXfJHp6oCW
5nCwDu5ZTP+khltg
-----END PRIVATE KEY-----
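
// The fixtures above form two parallel chains: server-cert.pem and
// client-cert.pem are signed by the Fort-Funston CA in ca.pem, while the
// *-rogue-* files are signed by a separate "Evil Inc" CA. The following is a
// minimal, hypothetical sketch of the chain check that makes a client
// trusting ca.pem reject the rogue daemon; it is not part of the suite and
// assumes crypto/x509 and encoding/pem are imported.
func verifyAgainstTestCA(certPEM, caPEM []byte) error {
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		return fmt.Errorf("could not parse CA certificate")
	}
	block, _ := pem.Decode(certPEM) // the first PEM block holds the certificate
	if block == nil {
		return fmt.Errorf("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return err
	}
	// Succeeds for server-cert.pem (within its validity window), fails for
	// server-rogue-cert.pem, whose chain ends at the Evil Inc CA.
	_, err = cert.Verify(x509.VerifyOptions{Roots: pool})
	return err
}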
"github.com/docker/docker/builder" "github.com/docker/docker/engine" "github.com/docker/docker/runconfig" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) func TestSaveImageAndThenLoad(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() // save image r := httptest.NewRecorder() req, err := http.NewRequest("GET", "/images/"+unitTestImageID+"/get", nil) if err != nil { t.Fatal(err) } server.ServeRequest(eng, api.APIVERSION, r, req) if r.Code != http.StatusOK { t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) } tarball := r.Body // delete the image r = httptest.NewRecorder() req, err = http.NewRequest("DELETE", "/images/"+unitTestImageID, nil) if err != nil { t.Fatal(err) } server.ServeRequest(eng, api.APIVERSION, r, req) if r.Code != http.StatusOK { t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) } // make sure there is no image r = httptest.NewRecorder() req, err = http.NewRequest("GET", "/images/"+unitTestImageID+"/get", nil) if err != nil { t.Fatal(err) } server.ServeRequest(eng, api.APIVERSION, r, req) if r.Code != http.StatusNotFound { t.Fatalf("%d NotFound expected, received %d\n", http.StatusNotFound, r.Code) } // load the image r = httptest.NewRecorder() req, err = http.NewRequest("POST", "/images/load", tarball) if err != nil { t.Fatal(err) } server.ServeRequest(eng, api.APIVERSION, r, req) if r.Code != http.StatusOK { t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) } // finally make sure the image is there r = httptest.NewRecorder() req, err = http.NewRequest("GET", "/images/"+unitTestImageID+"/get", nil) if err != nil { t.Fatal(err) } server.ServeRequest(eng, api.APIVERSION, r, req) if r.Code != http.StatusOK { t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) } } func TestGetContainersTop(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/sh", "-c", "cat"}, OpenStdin: true, }, t, ) defer func() { // Make sure the process dies before destroying daemon containerKill(eng, containerID, t) containerWait(eng, containerID, t) }() startContainer(eng, containerID, t) setTimeout(t, "Waiting for the container to be started timed out", 10*time.Second, func() { for { if containerRunning(eng, containerID, t) { break } time.Sleep(10 * time.Millisecond) } }) if !containerRunning(eng, containerID, t) { t.Fatalf("Container should be running") } // Make sure sh spawn up cat setTimeout(t, "read/write assertion timed out", 2*time.Second, func() { in, out := containerAttach(eng, containerID, t) if err := assertPipe("hello\n", "hello", out, in, 150); err != nil { t.Fatal(err) } }) r := httptest.NewRecorder() req, err := http.NewRequest("GET", "/containers/"+containerID+"/top?ps_args=aux", nil) if err != nil { t.Fatal(err) } server.ServeRequest(eng, api.APIVERSION, r, req) assertHttpNotError(r, t) var procs engine.Env if err := procs.Decode(r.Body); err != nil { t.Fatal(err) } if len(procs.GetList("Titles")) != 11 { t.Fatalf("Expected 11 titles, found %d.", len(procs.GetList("Titles"))) } if procs.GetList("Titles")[0] != "USER" || procs.GetList("Titles")[10] != "COMMAND" { t.Fatalf("Expected Titles[0] to be USER and Titles[10] to be COMMAND, found %s and %s.", procs.GetList("Titles")[0], procs.GetList("Titles")[10]) } processes := [][]string{} if err := procs.GetJson("Processes", &processes); err != nil { t.Fatal(err) } if len(processes) != 2 { 
t.Fatalf("Expected 2 processes, found %d.", len(processes)) } if processes[0][10] != "/bin/sh -c cat" { t.Fatalf("Expected `/bin/sh -c cat`, found %s.", processes[0][10]) } if processes[1][10] != "/bin/sh -c cat" { t.Fatalf("Expected `/bin/sh -c cat`, found %s.", processes[1][10]) } } func TestPostCommit(t *testing.T) { eng := NewTestEngine(t) b := &builder.BuilderJob{Engine: eng} b.Install() defer mkDaemonFromEngine(eng, t).Nuke() // Create a container and remove a file containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"touch", "/test"}, }, t, ) containerRun(eng, containerID, t) req, err := http.NewRequest("POST", "/commit?repo=testrepo&testtag=tag&container="+containerID, bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } r := httptest.NewRecorder() server.ServeRequest(eng, api.APIVERSION, r, req) assertHttpNotError(r, t) if r.Code != http.StatusCreated { t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code) } var env engine.Env if err := env.Decode(r.Body); err != nil { t.Fatal(err) } if err := eng.Job("image_inspect", env.Get("Id")).Run(); err != nil { t.Fatalf("The image has not been committed") } } func TestPostContainersCreate(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() configJSON, err := json.Marshal(&runconfig.Config{ Image: unitTestImageID, Memory: 33554432, Cmd: []string{"touch", "/test"}, }) if err != nil { t.Fatal(err) } req, err := http.NewRequest("POST", "/containers/create", bytes.NewReader(configJSON)) if err != nil { t.Fatal(err) } req.Header.Set("Content-Type", "application/json") r := httptest.NewRecorder() server.ServeRequest(eng, api.APIVERSION, r, req) assertHttpNotError(r, t) if r.Code != http.StatusCreated { t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code) } var apiRun engine.Env if err := apiRun.Decode(r.Body); err != nil { t.Fatal(err) } containerID := apiRun.Get("Id") containerAssertExists(eng, containerID, t) containerRun(eng, containerID, t) if !containerFileExists(eng, containerID, "test", t) { t.Fatal("Test file was not created") } } func TestPostJsonVerify(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() configJSON, err := json.Marshal(&runconfig.Config{ Image: unitTestImageID, Memory: 33554432, Cmd: []string{"touch", "/test"}, }) if err != nil { t.Fatal(err) } req, err := http.NewRequest("POST", "/containers/create", bytes.NewReader(configJSON)) if err != nil { t.Fatal(err) } r := httptest.NewRecorder() server.ServeRequest(eng, api.APIVERSION, r, req) // Don't add Content-Type header // req.Header.Set("Content-Type", "application/json") server.ServeRequest(eng, api.APIVERSION, r, req) if r.Code != http.StatusInternalServerError || !strings.Contains(((*r.Body).String()), "application/json") { t.Fatal("Create should have failed due to no Content-Type header - got:", r) } // Now add header but with wrong type and retest req.Header.Set("Content-Type", "application/xml") server.ServeRequest(eng, api.APIVERSION, r, req) if r.Code != http.StatusInternalServerError || !strings.Contains(((*r.Body).String()), "application/json") { t.Fatal("Create should have failed due to wrong Content-Type header - got:", r) } } // Issue 7941 - test to make sure a "null" in JSON is just ignored. 

// Issue 7941 - test to make sure a "null" in JSON is just ignored.
// Without this fix a null in JSON would be parsed into a string var as "null"
func TestPostCreateNull(t *testing.T) {
	eng := NewTestEngine(t)
	daemon := mkDaemonFromEngine(eng, t)
	defer daemon.Nuke()

	configStr := fmt.Sprintf(`{
		"Hostname":"",
		"Domainname":"",
		"Memory":0,
		"MemorySwap":0,
		"CpuShares":0,
		"Cpuset":null,
		"AttachStdin":true,
		"AttachStdout":true,
		"AttachStderr":true,
		"PortSpecs":null,
		"ExposedPorts":{},
		"Tty":true,
		"OpenStdin":true,
		"StdinOnce":true,
		"Env":[],
		"Cmd":"ls",
		"Image":"%s",
		"Volumes":{},
		"WorkingDir":"",
		"Entrypoint":null,
		"NetworkDisabled":false,
		"OnBuild":null}`, unitTestImageID)

	req, err := http.NewRequest("POST", "/containers/create", strings.NewReader(configStr))
	if err != nil {
		t.Fatal(err)
	}

	req.Header.Set("Content-Type", "application/json")

	r := httptest.NewRecorder()
	server.ServeRequest(eng, api.APIVERSION, r, req)
	assertHttpNotError(r, t)
	if r.Code != http.StatusCreated {
		t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code)
	}

	var apiRun engine.Env
	if err := apiRun.Decode(r.Body); err != nil {
		t.Fatal(err)
	}
	containerID := apiRun.Get("Id")

	containerAssertExists(eng, containerID, t)

	c, _ := daemon.Get(containerID)
	if c.Config.Cpuset != "" {
		t.Fatalf("Cpuset should have been empty - instead its:" + c.Config.Cpuset)
	}
}
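
// The fix for issue 7941 relies on encoding/json's documented behavior:
// unmarshaling a JSON null into a non-pointer Go value has no effect, so a
// string field stays at its zero value rather than becoming the text
// "null". A self-contained illustration (hypothetical helper, not part of
// the suite; encoding/json and fmt are already imported above):
func illustrateNullIsIgnored() {
	var cfg struct{ Cpuset string }
	_ = json.Unmarshal([]byte(`{"Cpuset": null}`), &cfg)
	fmt.Printf("Cpuset=%q\n", cfg.Cpuset) // prints Cpuset="" — the null was ignored
}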
"/containers/"+containerID+"/start", bytes.NewReader(hostConfigJSON)) if err != nil { t.Fatal(err) } req.Header.Set("Content-Type", "application/json") r := httptest.NewRecorder() server.ServeRequest(eng, api.APIVERSION, r, req) assertHttpNotError(r, t) if r.Code != http.StatusNoContent { t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code) } containerAssertExists(eng, containerID, t) req, err = http.NewRequest("POST", "/containers/"+containerID+"/start", bytes.NewReader(hostConfigJSON)) if err != nil { t.Fatal(err) } req.Header.Set("Content-Type", "application/json") r = httptest.NewRecorder() server.ServeRequest(eng, api.APIVERSION, r, req) // Starting an already started container should return a 304 assertHttpNotError(r, t) if r.Code != http.StatusNotModified { t.Fatalf("%d NOT MODIFIER expected, received %d\n", http.StatusNotModified, r.Code) } containerAssertExists(eng, containerID, t) containerKill(eng, containerID, t) } func TestPostContainersStop(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/top"}, OpenStdin: true, }, t, ) startContainer(eng, containerID, t) // Give some time to the process to start containerWaitTimeout(eng, containerID, t) if !containerRunning(eng, containerID, t) { t.Errorf("Container should be running") } // Note: as it is a POST request, it requires a body. req, err := http.NewRequest("POST", "/containers/"+containerID+"/stop?t=1", bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } r := httptest.NewRecorder() server.ServeRequest(eng, api.APIVERSION, r, req) assertHttpNotError(r, t) if r.Code != http.StatusNoContent { t.Fatalf("%d NO CONTENT expected, received %d\n", http.StatusNoContent, r.Code) } if containerRunning(eng, containerID, t) { t.Fatalf("The container hasn't been stopped") } req, err = http.NewRequest("POST", "/containers/"+containerID+"/stop?t=1", bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } r = httptest.NewRecorder() server.ServeRequest(eng, api.APIVERSION, r, req) // Stopping an already stopper container should return a 304 assertHttpNotError(r, t) if r.Code != http.StatusNotModified { t.Fatalf("%d NOT MODIFIER expected, received %d\n", http.StatusNotModified, r.Code) } } func TestPostContainersWait(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/sleep", "1"}, OpenStdin: true, }, t, ) startContainer(eng, containerID, t) setTimeout(t, "Wait timed out", 3*time.Second, func() { r := httptest.NewRecorder() req, err := http.NewRequest("POST", "/containers/"+containerID+"/wait", bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } server.ServeRequest(eng, api.APIVERSION, r, req) assertHttpNotError(r, t) var apiWait engine.Env if err := apiWait.Decode(r.Body); err != nil { t.Fatal(err) } if apiWait.GetInt("StatusCode") != 0 { t.Fatalf("Non zero exit code for sleep: %d\n", apiWait.GetInt("StatusCode")) } }) if containerRunning(eng, containerID, t) { t.Fatalf("The container should be stopped after wait") } } func TestPostContainersAttach(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/cat"}, OpenStdin: true, }, t, ) // Start the process startContainer(eng, containerID, t) stdin, stdinPipe := io.Pipe() 
stdout, stdoutPipe := io.Pipe() // Try to avoid the timeout in destroy. Best effort, don't check error defer func() { closeWrap(stdin, stdinPipe, stdout, stdoutPipe) containerKill(eng, containerID, t) }() // Attach to it c1 := make(chan struct{}) go func() { defer close(c1) r := &hijackTester{ ResponseRecorder: httptest.NewRecorder(), in: stdin, out: stdoutPipe, } req, err := http.NewRequest("POST", "/containers/"+containerID+"/attach?stream=1&stdin=1&stdout=1&stderr=1", bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } server.ServeRequest(eng, api.APIVERSION, r, req) assertHttpNotError(r.ResponseRecorder, t) }() // Acknowledge hijack setTimeout(t, "hijack acknowledge timed out", 2*time.Second, func() { stdout.Read([]byte{}) stdout.Read(make([]byte, 4096)) }) setTimeout(t, "read/write assertion timed out", 2*time.Second, func() { if err := assertPipe("hello\n", string([]byte{1, 0, 0, 0, 0, 0, 0, 6})+"hello", stdout, stdinPipe, 150); err != nil { t.Fatal(err) } }) // Close pipes (client disconnects) if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil { t.Fatal(err) } // Wait for attach to finish; the client disconnected, so Attach has finished its job setTimeout(t, "Waiting for CmdAttach timed out", 10*time.Second, func() { <-c1 }) // We closed stdin, expect /bin/cat to still be running // Wait a little bit to make sure container.monitor() did its thing containerWaitTimeout(eng, containerID, t) // Try to avoid the timeout in destroy. Best effort, don't check error cStdin, _ := containerAttach(eng, containerID, t) cStdin.Close() containerWait(eng, containerID, t) } func TestPostContainersAttachStderr(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"/bin/sh", "-c", "/bin/cat >&2"}, OpenStdin: true, }, t, ) // Start the process startContainer(eng, containerID, t) stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() // Try to avoid the timeout in destroy. Best effort, don't check error defer func() { closeWrap(stdin, stdinPipe, stdout, stdoutPipe) containerKill(eng, containerID, t) }() // Attach to it c1 := make(chan struct{}) go func() { defer close(c1) r := &hijackTester{ ResponseRecorder: httptest.NewRecorder(), in: stdin, out: stdoutPipe, } req, err := http.NewRequest("POST", "/containers/"+containerID+"/attach?stream=1&stdin=1&stdout=1&stderr=1", bytes.NewReader([]byte{})) if err != nil { t.Fatal(err) } server.ServeRequest(eng, api.APIVERSION, r, req) assertHttpNotError(r.ResponseRecorder, t) }() // Acknowledge hijack setTimeout(t, "hijack acknowledge timed out", 2*time.Second, func() { stdout.Read([]byte{}) stdout.Read(make([]byte, 4096)) }) setTimeout(t, "read/write assertion timed out", 2*time.Second, func() { if err := assertPipe("hello\n", string([]byte{2, 0, 0, 0, 0, 0, 0, 6})+"hello", stdout, stdinPipe, 150); err != nil { t.Fatal(err) } }) // Close pipes (client disconnects) if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil { t.Fatal(err) } // Wait for attach to finish; the client disconnected, so Attach has finished its job setTimeout(t, "Waiting for CmdAttach timed out", 10*time.Second, func() { <-c1 }) // We closed stdin, expect /bin/cat to still be running // Wait a little bit to make sure container.monitor() did its thing containerWaitTimeout(eng, containerID, t) // Try to avoid the timeout in destroy.
Best effort, don't check error cStdin, _ := containerAttach(eng, containerID, t) cStdin.Close() containerWait(eng, containerID, t) } func TestOptionsRoute(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() r := httptest.NewRecorder() req, err := http.NewRequest("OPTIONS", "/", nil) if err != nil { t.Fatal(err) } server.ServeRequest(eng, api.APIVERSION, r, req) assertHttpNotError(r, t) if r.Code != http.StatusOK { t.Errorf("Expected response for OPTIONS request to be \"200\", %v found.", r.Code) } } func TestGetEnabledCors(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() r := httptest.NewRecorder() req, err := http.NewRequest("GET", "/version", nil) if err != nil { t.Fatal(err) } server.ServeRequest(eng, api.APIVERSION, r, req) assertHttpNotError(r, t) if r.Code != http.StatusOK { t.Errorf("Expected response for GET request to be \"200\", %v found.", r.Code) } allowOrigin := r.Header().Get("Access-Control-Allow-Origin") allowHeaders := r.Header().Get("Access-Control-Allow-Headers") allowMethods := r.Header().Get("Access-Control-Allow-Methods") if allowOrigin != "*" { t.Errorf("Expected header Access-Control-Allow-Origin to be \"*\", %s found.", allowOrigin) } if allowHeaders != "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth" { t.Errorf("Expected header Access-Control-Allow-Headers to be \"Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth\", %s found.", allowHeaders) } if allowMethods != "GET, POST, DELETE, PUT, OPTIONS" { t.Errorf("Expected header Access-Control-Allow-Methods to be \"GET, POST, DELETE, PUT, OPTIONS\", %s found.", allowMethods) } } func TestDeleteImages(t *testing.T) { eng := NewTestEngine(t) // we expect errors, so we disable stderr eng.Stderr = ioutil.Discard defer mkDaemonFromEngine(eng, t).Nuke() initialImages := getImages(eng, t, true, "") if err := eng.Job("tag", unitTestImageName, "test", "test").Run(); err != nil { t.Fatal(err) } images := getImages(eng, t, true, "") if len(images.Data[0].GetList("RepoTags")) != len(initialImages.Data[0].GetList("RepoTags"))+1 { t.Errorf("Expected %d images, %d found", len(initialImages.Data[0].GetList("RepoTags"))+1, len(images.Data[0].GetList("RepoTags"))) } req, err := http.NewRequest("DELETE", "/images/"+unitTestImageID, nil) if err != nil { t.Fatal(err) } r := httptest.NewRecorder() server.ServeRequest(eng, api.APIVERSION, r, req) if r.Code != http.StatusConflict { t.Fatalf("Expected http status 409-conflict, got %v", r.Code) } req2, err := http.NewRequest("DELETE", "/images/test:test", nil) if err != nil { t.Fatal(err) } r2 := httptest.NewRecorder() server.ServeRequest(eng, api.APIVERSION, r2, req2) assertHttpNotError(r2, t) if r2.Code != http.StatusOK { t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r2.Code) } outs := engine.NewTable("Created", 0) if _, err := outs.ReadListFrom(r2.Body.Bytes()); err != nil { t.Fatal(err) } if len(outs.Data) != 1 { t.Fatalf("Expected %d event (untagged), got %d", 1, len(outs.Data)) } images = getImages(eng, t, false, "") if images.Len() != initialImages.Len() { t.Errorf("Expected %d image, %d found", initialImages.Len(), images.Len()) } } func TestPostContainersCopy(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() // Create a container that creates a file we can then copy out containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"touch", "/test.txt"}, }, t, ) containerRun(eng, containerID, t) r := httptest.NewRecorder() var copyData
engine.Env copyData.Set("Resource", "/test.txt") copyData.Set("HostPath", ".") jsonData := bytes.NewBuffer(nil) if err := copyData.Encode(jsonData); err != nil { t.Fatal(err) } req, err := http.NewRequest("POST", "/containers/"+containerID+"/copy", jsonData) if err != nil { t.Fatal(err) } req.Header.Add("Content-Type", "application/json") server.ServeRequest(eng, api.APIVERSION, r, req) assertHttpNotError(r, t) if r.Code != http.StatusOK { t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) } found := false for tarReader := tar.NewReader(r.Body); ; { h, err := tarReader.Next() if err != nil { if err == io.EOF { break } t.Fatal(err) } if h.Name == "test.txt" { found = true break } } if !found { t.Fatalf("The created test file has not been found in the copied output") } } func TestPostContainersCopyWhenContainerNotFound(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() r := httptest.NewRecorder() var copyData engine.Env copyData.Set("Resource", "/test.txt") copyData.Set("HostPath", ".") jsonData := bytes.NewBuffer(nil) if err := copyData.Encode(jsonData); err != nil { t.Fatal(err) } req, err := http.NewRequest("POST", "/containers/id_not_found/copy", jsonData) if err != nil { t.Fatal(err) } req.Header.Add("Content-Type", "application/json") server.ServeRequest(eng, api.APIVERSION, r, req) if r.Code != http.StatusNotFound { t.Fatalf("404 expected for id_not_found Container, received %v", r.Code) } } // Regression test for https://github.com/docker/docker/issues/6231 func TestConstainersStartChunkedEncodingHostConfig(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() r := httptest.NewRecorder() var testData engine.Env testData.Set("Image", "docker-test-image") testData.SetAuto("Volumes", map[string]struct{}{"/foo": {}}) testData.Set("Cmd", "true") jsonData := bytes.NewBuffer(nil) if err := testData.Encode(jsonData); err != nil { t.Fatal(err) } req, err := http.NewRequest("POST", "/containers/create?name=chunk_test", jsonData) if err != nil { t.Fatal(err) } req.Header.Add("Content-Type", "application/json") server.ServeRequest(eng, api.APIVERSION, r, req) assertHttpNotError(r, t) var testData2 engine.Env testData2.SetAuto("Binds", []string{"/tmp:/foo"}) jsonData = bytes.NewBuffer(nil) if err := testData2.Encode(jsonData); err != nil { t.Fatal(err) } req, err = http.NewRequest("POST", "/containers/chunk_test/start", jsonData) if err != nil { t.Fatal(err) } req.Header.Add("Content-Type", "application/json") // This is a cheat to make the http request do chunked encoding // Otherwise (just setting the Content-Encoding to chunked) net/http will overwrite // http://golang.org/src/pkg/net/http/request.go?s=11980:12172 req.ContentLength = -1 server.ServeRequest(eng, api.APIVERSION, r, req) assertHttpNotError(r, t) type config struct { HostConfig struct { Binds []string } } req, err = http.NewRequest("GET", "/containers/chunk_test/json", nil) if err != nil { t.Fatal(err) } r2 := httptest.NewRecorder() req.Header.Add("Content-Type", "application/json") server.ServeRequest(eng, api.APIVERSION, r2, req) assertHttpNotError(r, t) c := config{} json.Unmarshal(r2.Body.Bytes(), &c) if len(c.HostConfig.Binds) == 0 { t.Fatal("Chunked Encoding not handled") } if c.HostConfig.Binds[0] != "/tmp:/foo" { t.Fatal("Chunked encoding not properly handled, execpted binds to be /tmp:/foo, got:", c.HostConfig.Binds[0]) } } // Mocked types for tests type NopConn struct { io.ReadCloser io.Writer } func (c *NopConn) LocalAddr() net.Addr { return nil } func 
(c *NopConn) RemoteAddr() net.Addr { return nil } func (c *NopConn) SetDeadline(t time.Time) error { return nil } func (c *NopConn) SetReadDeadline(t time.Time) error { return nil } func (c *NopConn) SetWriteDeadline(t time.Time) error { return nil } type hijackTester struct { *httptest.ResponseRecorder in io.ReadCloser out io.Writer } func (t *hijackTester) Hijack() (net.Conn, *bufio.ReadWriter, error) { bufrw := bufio.NewReadWriter(bufio.NewReader(t.in), bufio.NewWriter(t.out)) conn := &NopConn{ ReadCloser: t.in, Writer: t.out, } return conn, bufrw, nil } docker-1.6.2/integration/server_test.go0000644000175000017500000001601212524223634017553 0ustar tianontianonpackage docker import ( "bytes" "testing" "time" "github.com/docker/docker/builder" "github.com/docker/docker/engine" ) func TestCreateNumberHostname(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() config, _, _, err := parseRun([]string{"-h", "web.0", unitTestImageID, "echo test"}) if err != nil { t.Fatal(err) } createTestContainer(eng, config, t) } func TestCommit(t *testing.T) { eng := NewTestEngine(t) b := &builder.BuilderJob{Engine: eng} b.Install() defer mkDaemonFromEngine(eng, t).Nuke() config, _, _, err := parseRun([]string{unitTestImageID, "/bin/cat"}) if err != nil { t.Fatal(err) } id := createTestContainer(eng, config, t) job := eng.Job("commit", id) job.Setenv("repo", "testrepo") job.Setenv("tag", "testtag") job.SetenvJson("config", config) if err := job.Run(); err != nil { t.Fatal(err) } } func TestMergeConfigOnCommit(t *testing.T) { eng := NewTestEngine(t) b := &builder.BuilderJob{Engine: eng} b.Install() runtime := mkDaemonFromEngine(eng, t) defer runtime.Nuke() container1, _, _ := mkContainer(runtime, []string{"-e", "FOO=bar", unitTestImageID, "echo test > /tmp/foo"}, t) defer runtime.Rm(container1) config, _, _, err := parseRun([]string{container1.ID, "cat /tmp/foo"}) if err != nil { t.Error(err) } job := eng.Job("commit", container1.ID) job.Setenv("repo", "testrepo") job.Setenv("tag", "testtag") job.SetenvJson("config", config) var outputBuffer = bytes.NewBuffer(nil) job.Stdout.Add(outputBuffer) if err := job.Run(); err != nil { t.Error(err) } container2, _, _ := mkContainer(runtime, []string{engine.Tail(outputBuffer, 1)}, t) defer runtime.Rm(container2) job = eng.Job("container_inspect", container1.Name) baseContainer, _ := job.Stdout.AddEnv() if err := job.Run(); err != nil { t.Error(err) } job = eng.Job("container_inspect", container2.Name) commitContainer, _ := job.Stdout.AddEnv() if err := job.Run(); err != nil { t.Error(err) } baseConfig := baseContainer.GetSubEnv("Config") commitConfig := commitContainer.GetSubEnv("Config") if commitConfig.Get("Env") != baseConfig.Get("Env") { t.Fatalf("Env config in committed container should be %v, was %v", baseConfig.Get("Env"), commitConfig.Get("Env")) } if baseConfig.Get("Cmd") != "[\"echo test \\u003e /tmp/foo\"]" { t.Fatalf("Cmd in base container should be [\"echo test \\u003e /tmp/foo\"], was %s", baseConfig.Get("Cmd")) } if commitConfig.Get("Cmd") != "[\"cat /tmp/foo\"]" { t.Fatalf("Cmd in committed container should be [\"cat /tmp/foo\"], was %s", commitConfig.Get("Cmd")) } } func TestRestartKillWait(t *testing.T) { eng := NewTestEngine(t) runtime := mkDaemonFromEngine(eng, t) defer runtime.Nuke() config, hostConfig, _, err := parseRun([]string{"-i", unitTestImageID, "/bin/cat"}) if err != nil { t.Fatal(err) } id := createTestContainer(eng, config, t) job := eng.Job("containers") job.SetenvBool("all", true) outs, err := 
job.Stdout.AddListTable() if err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } if len(outs.Data) != 1 { t.Errorf("Expected 1 container, %v found", len(outs.Data)) } job = eng.Job("start", id) if err := job.ImportEnv(hostConfig); err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } job = eng.Job("kill", id) if err := job.Run(); err != nil { t.Fatal(err) } eng = newTestEngine(t, false, runtime.Config().Root) job = eng.Job("containers") job.SetenvBool("all", true) outs, err = job.Stdout.AddListTable() if err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } if len(outs.Data) != 1 { t.Errorf("Expected 1 container, %v found", len(outs.Data)) } setTimeout(t, "Waiting on stopped container timed out", 5*time.Second, func() { job = eng.Job("wait", outs.Data[0].Get("Id")) if err := job.Run(); err != nil { t.Fatal(err) } }) } func TestCreateStartRestartStopStartKillRm(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() config, hostConfig, _, err := parseRun([]string{"-i", unitTestImageID, "/bin/cat"}) if err != nil { t.Fatal(err) } id := createTestContainer(eng, config, t) job := eng.Job("containers") job.SetenvBool("all", true) outs, err := job.Stdout.AddListTable() if err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } if len(outs.Data) != 1 { t.Errorf("Expected 1 container, %v found", len(outs.Data)) } job = eng.Job("start", id) if err := job.ImportEnv(hostConfig); err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } job = eng.Job("restart", id) job.SetenvInt("t", 2) if err := job.Run(); err != nil { t.Fatal(err) } job = eng.Job("stop", id) job.SetenvInt("t", 2) if err := job.Run(); err != nil { t.Fatal(err) } job = eng.Job("start", id) if err := job.ImportEnv(hostConfig); err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } if err := eng.Job("kill", id).Run(); err != nil { t.Fatal(err) } // FIXME: this failed once with a race condition ("Unable to remove filesystem for xxx: directory not empty") job = eng.Job("rm", id) job.SetenvBool("removeVolume", true) if err := job.Run(); err != nil { t.Fatal(err) } job = eng.Job("containers") job.SetenvBool("all", true) outs, err = job.Stdout.AddListTable() if err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } if len(outs.Data) != 0 { t.Errorf("Expected 0 containers, %v found", len(outs.Data)) } } func TestRunWithTooLowMemoryLimit(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() // Try to create a container with a memory limit of 1 byte less than the minimum allowed limit. job := eng.Job("create") job.Setenv("Image", unitTestImageID) job.Setenv("Memory", "524287") job.Setenv("CpuShares", "1000") job.SetenvList("Cmd", []string{"/bin/cat"}) if err := job.Run(); err == nil { t.Errorf("Memory limit is smaller than the allowed limit.
Container creation should've failed!") } } func TestImagesFilter(t *testing.T) { eng := NewTestEngine(t) defer nuke(mkDaemonFromEngine(eng, t)) if err := eng.Job("tag", unitTestImageName, "utest", "tag1").Run(); err != nil { t.Fatal(err) } if err := eng.Job("tag", unitTestImageName, "utest/docker", "tag2").Run(); err != nil { t.Fatal(err) } if err := eng.Job("tag", unitTestImageName, "utest:5000/docker", "tag3").Run(); err != nil { t.Fatal(err) } images := getImages(eng, t, false, "utest*/*") if len(images.Data[0].GetList("RepoTags")) != 2 { t.Fatal("incorrect number of matches returned") } images = getImages(eng, t, false, "utest") if len(images.Data[0].GetList("RepoTags")) != 1 { t.Fatal("incorrect number of matches returned") } images = getImages(eng, t, false, "utest*") if len(images.Data[0].GetList("RepoTags")) != 1 { t.Fatal("incorrect number of matches returned") } images = getImages(eng, t, false, "*5000*/*") if len(images.Data[0].GetList("RepoTags")) != 1 { t.Fatal("incorrect number of matches returned") } } docker-1.6.2/integration/README.md0000644000175000017500000000145612524223634016144 0ustar tianontianon## Legacy integration tests `./integration` contains Docker's legacy integration tests. It is DEPRECATED and will eventually be removed. ### If you are a *CONTRIBUTOR* and want to add a test: * Consider mocking out side effects and contributing a *unit test* in the subsystem you're modifying. For example, the remote API has unit tests in `./api/server/server_unit_tests.go`. The events subsystem has unit tests in `./events/events_test.go`. And so on. * For end-to-end integration tests, please contribute to `./integration-cli`. ### If you are a *MAINTAINER* Please don't allow patches adding new tests to `./integration`. ### If you are *LOOKING FOR A WAY TO HELP* Please consider porting tests away from `./integration` and into either unit tests or CLI tests. Any help will be greatly appreciated! docker-1.6.2/integration/container_test.go0000644000175000017500000001163312524223634020233 0ustar tianontianonpackage docker import ( "io" "io/ioutil" "testing" "time" "github.com/docker/docker/runconfig" ) func TestRestartStdin(t *testing.T) { daemon := mkDaemon(t) defer nuke(daemon) container, _, err := daemon.Create(&runconfig.Config{ Image: GetTestImage(daemon).ID, Cmd: []string{"cat"}, OpenStdin: true, }, &runconfig.HostConfig{}, "", ) if err != nil { t.Fatal(err) } defer daemon.Rm(container) stdin := container.StdinPipe() stdout := container.StdoutPipe() if err := container.Start(); err != nil { t.Fatal(err) } if _, err := io.WriteString(stdin, "hello world"); err != nil { t.Fatal(err) } if err := stdin.Close(); err != nil { t.Fatal(err) } container.WaitStop(-1 * time.Second) output, err := ioutil.ReadAll(stdout) if err != nil { t.Fatal(err) } if err := stdout.Close(); err != nil { t.Fatal(err) } if string(output) != "hello world" { t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world", string(output)) } // Restart and try again stdin = container.StdinPipe() stdout = container.StdoutPipe() if err := container.Start(); err != nil { t.Fatal(err) } if _, err := io.WriteString(stdin, "hello world #2"); err != nil { t.Fatal(err) } if err := stdin.Close(); err != nil { t.Fatal(err) } container.WaitStop(-1 * time.Second) output, err = ioutil.ReadAll(stdout) if err != nil { t.Fatal(err) } if err := stdout.Close(); err != nil { t.Fatal(err) } if string(output) != "hello world #2" { t.Fatalf("Unexpected output. 
Expected %s, received: %s", "hello world #2", string(output)) } } func TestStdin(t *testing.T) { daemon := mkDaemon(t) defer nuke(daemon) container, _, err := daemon.Create(&runconfig.Config{ Image: GetTestImage(daemon).ID, Cmd: []string{"cat"}, OpenStdin: true, }, &runconfig.HostConfig{}, "", ) if err != nil { t.Fatal(err) } defer daemon.Rm(container) stdin := container.StdinPipe() stdout := container.StdoutPipe() if err := container.Start(); err != nil { t.Fatal(err) } defer stdin.Close() defer stdout.Close() if _, err := io.WriteString(stdin, "hello world"); err != nil { t.Fatal(err) } if err := stdin.Close(); err != nil { t.Fatal(err) } container.WaitStop(-1 * time.Second) output, err := ioutil.ReadAll(stdout) if err != nil { t.Fatal(err) } if string(output) != "hello world" { t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world", string(output)) } } func TestTty(t *testing.T) { daemon := mkDaemon(t) defer nuke(daemon) container, _, err := daemon.Create(&runconfig.Config{ Image: GetTestImage(daemon).ID, Cmd: []string{"cat"}, OpenStdin: true, }, &runconfig.HostConfig{}, "", ) if err != nil { t.Fatal(err) } defer daemon.Rm(container) stdin := container.StdinPipe() stdout := container.StdoutPipe() if err := container.Start(); err != nil { t.Fatal(err) } defer stdin.Close() defer stdout.Close() if _, err := io.WriteString(stdin, "hello world"); err != nil { t.Fatal(err) } if err := stdin.Close(); err != nil { t.Fatal(err) } container.WaitStop(-1 * time.Second) output, err := ioutil.ReadAll(stdout) if err != nil { t.Fatal(err) } if string(output) != "hello world" { t.Fatalf("Unexpected output. Expected %s, received: %s", "hello world", string(output)) } } func BenchmarkRunSequential(b *testing.B) { daemon := mkDaemon(b) defer nuke(daemon) for i := 0; i < b.N; i++ { container, _, err := daemon.Create(&runconfig.Config{ Image: GetTestImage(daemon).ID, Cmd: []string{"echo", "-n", "foo"}, }, &runconfig.HostConfig{}, "", ) if err != nil { b.Fatal(err) } defer daemon.Rm(container) output, err := container.Output() if err != nil { b.Fatal(err) } if string(output) != "foo" { b.Fatalf("Unexpected output: %s", output) } if err := daemon.Rm(container); err != nil { b.Fatal(err) } } } func BenchmarkRunParallel(b *testing.B) { daemon := mkDaemon(b) defer nuke(daemon) var tasks []chan error for i := 0; i < b.N; i++ { complete := make(chan error) tasks = append(tasks, complete) go func(i int, complete chan error) { container, _, err := daemon.Create(&runconfig.Config{ Image: GetTestImage(daemon).ID, Cmd: []string{"echo", "-n", "foo"}, }, &runconfig.HostConfig{}, "", ) if err != nil { complete <- err return } defer daemon.Rm(container) if err := container.Start(); err != nil { complete <- err return } if _, err := container.WaitStop(15 * time.Second); err != nil { complete <- err return } // if string(output) != "foo" { // complete <- fmt.Errorf("Unexpected output: %v", string(output)) // } if err := daemon.Rm(container); err != nil { complete <- err return } complete <- nil }(i, complete) } var errors []error for _, task := range tasks { err := <-task if err != nil { errors = append(errors, err) } } if len(errors) > 0 { b.Fatal(errors) } } docker-1.6.2/integration/https_test.go0000644000175000017500000000505412524223634017413 0ustar tianontianonpackage docker import ( "crypto/tls" "crypto/x509" "io/ioutil" "strings" "testing" "time" "github.com/docker/docker/api/client" ) const ( errBadCertificate = "remote error: bad certificate" errCaUnknown = "x509: certificate signed by unknown
authority" ) func getTlsConfig(certFile, keyFile string, t *testing.T) *tls.Config { certPool := x509.NewCertPool() file, err := ioutil.ReadFile("fixtures/https/ca.pem") if err != nil { t.Fatal(err) } certPool.AppendCertsFromPEM(file) cert, err := tls.LoadX509KeyPair("fixtures/https/"+certFile, "fixtures/https/"+keyFile) if err != nil { t.Fatalf("Couldn't load X509 key pair: %s", err) } tlsConfig := &tls.Config{ RootCAs: certPool, Certificates: []tls.Certificate{cert}, } return tlsConfig } // TestHttpsInfo connects via two-way authenticated HTTPS to the info endpoint func TestHttpsInfo(t *testing.T) { cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, "", testDaemonProto, testDaemonHttpsAddr, getTlsConfig("client-cert.pem", "client-key.pem", t)) setTimeout(t, "Reading command output time out", 10*time.Second, func() { if err := cli.CmdInfo(); err != nil { t.Fatal(err) } }) } // TestHttpsInfoRogueCert connects via two-way authenticated HTTPS to the info endpoint // by using a rogue client certificate and checks that it fails with the expected error. func TestHttpsInfoRogueCert(t *testing.T) { cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, "", testDaemonProto, testDaemonHttpsAddr, getTlsConfig("client-rogue-cert.pem", "client-rogue-key.pem", t)) setTimeout(t, "Reading command output time out", 10*time.Second, func() { err := cli.CmdInfo() if err == nil { t.Fatal("Expected error but got nil") } if !strings.Contains(err.Error(), errBadCertificate) { t.Fatalf("Expected error: %s, got instead: %s", errBadCertificate, err) } }) } // TestHttpsInfoRogueServerCert connects via two-way authenticated HTTPS to the info endpoint // which provides a rogue server certificate and checks that it fails with the expected error func TestHttpsInfoRogueServerCert(t *testing.T) { cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, "", testDaemonProto, testDaemonRogueHttpsAddr, getTlsConfig("client-cert.pem", "client-key.pem", t)) setTimeout(t, "Reading command output time out", 10*time.Second, func() { err := cli.CmdInfo() if err == nil { t.Fatal("Expected error but got nil") } if !strings.Contains(err.Error(), errCaUnknown) { t.Fatalf("Expected error: %s, got instead: %s", errCaUnknown, err) } }) } docker-1.6.2/integration/z_final_test.go0000644000175000017500000000055312524223634017672 0ustar tianontianonpackage docker import ( "github.com/docker/docker/utils" "runtime" "testing" ) func displayFdGoroutines(t *testing.T) { t.Logf("Fds: %d, Goroutines: %d", utils.GetTotalUsedFds(), runtime.NumGoroutine()) } func TestFinal(t *testing.T) { nuke(globalDaemon) t.Logf("Start Fds: %d, Start Goroutines: %d", startFds, startGoroutines) displayFdGoroutines(t) } docker-1.6.2/integration/commands_test.go0000644000175000017500000002505612524223634020056 0ustar tianontianonpackage docker import ( "bufio" "fmt" "io" "io/ioutil" "strings" "testing" "time" log "github.com/Sirupsen/logrus" "github.com/docker/docker/api/client" "github.com/docker/docker/daemon" "github.com/docker/docker/pkg/common" "github.com/docker/docker/pkg/term" "github.com/kr/pty" ) func closeWrap(args ...io.Closer) error { e := false ret := fmt.Errorf("Error closing elements") for _, c := range args { if err := c.Close(); err != nil { e = true ret = fmt.Errorf("%s\n%s", ret, err) } } if e { return ret } return nil } func setRaw(t *testing.T, c *daemon.Container) *term.State { pty, err := c.GetPtyMaster() if err != nil { t.Fatal(err) } state, err := term.MakeRaw(pty.Fd()) if err != nil { t.Fatal(err) } return state } 
func unsetRaw(t *testing.T, c *daemon.Container, state *term.State) { pty, err := c.GetPtyMaster() if err != nil { t.Fatal(err) } term.RestoreTerminal(pty.Fd(), state) } func waitContainerStart(t *testing.T, timeout time.Duration) *daemon.Container { var container *daemon.Container setTimeout(t, "Waiting for the container to be started timed out", timeout, func() { for { l := globalDaemon.List() if len(l) == 1 && l[0].IsRunning() { container = l[0] break } time.Sleep(10 * time.Millisecond) } }) if container == nil { t.Fatal("An error occurred while waiting for the container to start") } return container } func setTimeout(t *testing.T, msg string, d time.Duration, f func()) { c := make(chan bool) // Make sure we are not too long go func() { time.Sleep(d) c <- true }() go func() { f() c <- false }() if <-c && msg != "" { t.Fatal(msg) } } func expectPipe(expected string, r io.Reader) error { o, err := bufio.NewReader(r).ReadString('\n') if err != nil { return err } if strings.Trim(o, " \r\n") != expected { return fmt.Errorf("Unexpected output. Expected [%s], received [%s]", expected, o) } return nil } func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error { for i := 0; i < count; i++ { if _, err := w.Write([]byte(input)); err != nil { return err } if err := expectPipe(output, r); err != nil { return err } } return nil } // TestRunDetach checks attaching and detaching with the escape sequence. func TestRunDetach(t *testing.T) { stdout, stdoutPipe := io.Pipe() cpty, tty, err := pty.Open() if err != nil { t.Fatal(err) } cli := client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, "", testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) ch := make(chan struct{}) go func() { defer close(ch) cli.CmdRun("-i", "-t", unitTestImageID, "cat") }() container := waitContainerStart(t, 10*time.Second) state := setRaw(t, container) defer unsetRaw(t, container, state) setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() { if err := assertPipe("hello\n", "hello", stdout, cpty, 150); err != nil { t.Fatal(err) } }) setTimeout(t, "Escape sequence timeout", 5*time.Second, func() { // bytes 16 and 17 are Ctrl-P and Ctrl-Q, the client's default detach sequence cpty.Write([]byte{16}) time.Sleep(100 * time.Millisecond) cpty.Write([]byte{17}) }) // wait for CmdRun to return setTimeout(t, "Waiting for CmdRun timed out", 15*time.Second, func() { <-ch }) closeWrap(cpty, stdout, stdoutPipe) time.Sleep(500 * time.Millisecond) if !container.IsRunning() { t.Fatal("The detached container should still be running") } setTimeout(t, "Waiting for container to die timed out", 20*time.Second, func() { container.Kill() }) } // TestAttachDetach checks that attach in tty mode can be detached using the long container ID func TestAttachDetach(t *testing.T) { stdout, stdoutPipe := io.Pipe() cpty, tty, err := pty.Open() if err != nil { t.Fatal(err) } cli := client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, "", testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) ch := make(chan struct{}) go func() { defer close(ch) if err := cli.CmdRun("-i", "-t", "-d", unitTestImageID, "cat"); err != nil { t.Fatal(err) } }() container := waitContainerStart(t, 10*time.Second) setTimeout(t, "Reading container's id timed out", 10*time.Second, func() { buf := make([]byte, 1024) n, err := stdout.Read(buf) if err != nil { t.Fatal(err) } if strings.Trim(string(buf[:n]), " \r\n") != container.ID { t.Fatalf("Wrong ID received.
Expected %s, received %s", container.ID, buf[:n]) } }) setTimeout(t, "Starting container timed out", 10*time.Second, func() { <-ch }) state := setRaw(t, container) defer unsetRaw(t, container, state) stdout, stdoutPipe = io.Pipe() cpty, tty, err = pty.Open() if err != nil { t.Fatal(err) } cli = client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, "", testDaemonProto, testDaemonAddr, nil) ch = make(chan struct{}) go func() { defer close(ch) if err := cli.CmdAttach(container.ID); err != nil { if err != io.ErrClosedPipe { t.Fatal(err) } } }() setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() { if err := assertPipe("hello\n", "hello", stdout, cpty, 150); err != nil { if err != io.ErrClosedPipe { t.Fatal(err) } } }) setTimeout(t, "Escape sequence timeout", 5*time.Second, func() { cpty.Write([]byte{16}) time.Sleep(100 * time.Millisecond) cpty.Write([]byte{17}) }) // wait for CmdAttach to return setTimeout(t, "Waiting for CmdAttach timed out", 15*time.Second, func() { <-ch }) closeWrap(cpty, stdout, stdoutPipe) time.Sleep(500 * time.Millisecond) if !container.IsRunning() { t.Fatal("The detached container should still be running") } setTimeout(t, "Waiting for container to die timed out", 5*time.Second, func() { container.Kill() }) } // TestAttachDetachTruncatedID checks that attach in tty mode can be detached func TestAttachDetachTruncatedID(t *testing.T) { stdout, stdoutPipe := io.Pipe() cpty, tty, err := pty.Open() if err != nil { t.Fatal(err) } cli := client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, "", testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) // Discard the CmdRun output go stdout.Read(make([]byte, 1024)) setTimeout(t, "Starting container timed out", 2*time.Second, func() { if err := cli.CmdRun("-i", "-t", "-d", unitTestImageID, "cat"); err != nil { t.Fatal(err) } }) container := waitContainerStart(t, 10*time.Second) state := setRaw(t, container) defer unsetRaw(t, container, state) stdout, stdoutPipe = io.Pipe() cpty, tty, err = pty.Open() if err != nil { t.Fatal(err) } cli = client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, "", testDaemonProto, testDaemonAddr, nil) ch := make(chan struct{}) go func() { defer close(ch) if err := cli.CmdAttach(common.TruncateID(container.ID)); err != nil { if err != io.ErrClosedPipe { t.Fatal(err) } } }() setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() { if err := assertPipe("hello\n", "hello", stdout, cpty, 150); err != nil { if err != io.ErrClosedPipe { t.Fatal(err) } } }) setTimeout(t, "Escape sequence timeout", 5*time.Second, func() { cpty.Write([]byte{16}) time.Sleep(100 * time.Millisecond) cpty.Write([]byte{17}) }) // wait for CmdAttach to return setTimeout(t, "Waiting for CmdAttach timed out", 15*time.Second, func() { <-ch }) closeWrap(cpty, stdout, stdoutPipe) time.Sleep(500 * time.Millisecond) if !container.IsRunning() { t.Fatal("The detached container should still be running") } setTimeout(t, "Waiting for container to die timed out", 5*time.Second, func() { container.Kill() }) } // Expected behaviour: the process stays alive when the client disconnects func TestAttachDisconnect(t *testing.T) { stdout, stdoutPipe := io.Pipe() cpty, tty, err := pty.Open() if err != nil { t.Fatal(err) } cli := client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, "", testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) go func() { // Start a process in daemon mode if err := cli.CmdRun("-d", "-i", unitTestImageID, "/bin/cat"); err != nil { log.Debugf("Error CmdRun: %s", err) }
}() setTimeout(t, "Waiting for CmdRun timed out", 10*time.Second, func() { if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil { t.Fatal(err) } }) setTimeout(t, "Waiting for the container to be started timed out", 10*time.Second, func() { for { l := globalDaemon.List() if len(l) == 1 && l[0].IsRunning() { break } time.Sleep(10 * time.Millisecond) } }) container := globalDaemon.List()[0] // Attach to it c1 := make(chan struct{}) go func() { // We're simulating a disconnect so the return value doesn't matter. What matters is the // fact that CmdAttach returns. cli.CmdAttach(container.ID) close(c1) }() setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() { if err := assertPipe("hello\n", "hello", stdout, cpty, 150); err != nil { t.Fatal(err) } }) // Close pipes (client disconnects) if err := closeWrap(cpty, stdout, stdoutPipe); err != nil { t.Fatal(err) } // Wait for attach to finish, the client disconnected, therefore, Attach finished his job setTimeout(t, "Waiting for CmdAttach timed out", 2*time.Second, func() { <-c1 }) // We closed stdin, expect /bin/cat to still be running // Wait a little bit to make sure container.monitor() did his thing _, err = container.WaitStop(500 * time.Millisecond) if err == nil || !container.IsRunning() { t.Fatalf("/bin/cat is not running after closing stdin") } // Try to avoid the timeout in destroy. Best effort, don't check error cStdin := container.StdinPipe() cStdin.Close() container.WaitStop(-1 * time.Second) } // Expected behaviour: container gets deleted automatically after exit func TestRunAutoRemove(t *testing.T) { t.Skip("Fixme. Skipping test for now, race condition") stdout, stdoutPipe := io.Pipe() cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, "", testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) c := make(chan struct{}) go func() { defer close(c) if err := cli.CmdRun("--rm", unitTestImageID, "hostname"); err != nil { t.Fatal(err) } }() var temporaryContainerID string setTimeout(t, "Reading command output time out", 2*time.Second, func() { cmdOutput, err := bufio.NewReader(stdout).ReadString('\n') if err != nil { t.Fatal(err) } temporaryContainerID = cmdOutput if err := closeWrap(stdout, stdoutPipe); err != nil { t.Fatal(err) } }) setTimeout(t, "CmdRun timed out", 10*time.Second, func() { <-c }) time.Sleep(500 * time.Millisecond) if len(globalDaemon.List()) > 0 { t.Fatalf("failed to remove container automatically: container %s still exists", temporaryContainerID) } } docker-1.6.2/integration/graph_test.go0000644000175000017500000001674512524223634017363 0ustar tianontianonpackage docker import ( "errors" "github.com/docker/docker/autogen/dockerversion" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/graph" "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/common" "github.com/docker/docker/utils" "io" "io/ioutil" "os" "path" "testing" "time" ) func TestMount(t *testing.T) { graph, driver := tempGraph(t) defer os.RemoveAll(graph.Root) defer driver.Cleanup() archive, err := fakeTar() if err != nil { t.Fatal(err) } image, err := graph.Create(archive, "", "", "Testing", "", nil, nil) if err != nil { t.Fatal(err) } tmp, err := ioutil.TempDir("", "docker-test-graph-mount-") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) rootfs := path.Join(tmp, "rootfs") if err := os.MkdirAll(rootfs, 0700); err != nil { t.Fatal(err) } rw := path.Join(tmp, "rw") if err := os.MkdirAll(rw, 0700); err != nil { t.Fatal(err) } 
if _, err := driver.Get(image.ID, ""); err != nil { t.Fatal(err) } } func TestInit(t *testing.T) { graph, _ := tempGraph(t) defer nukeGraph(graph) // Root should exist if _, err := os.Stat(graph.Root); err != nil { t.Fatal(err) } // Map() should be empty if l, err := graph.Map(); err != nil { t.Fatal(err) } else if len(l) != 0 { t.Fatalf("len(Map()) should return %d, not %d", 0, len(l)) } } // Test that Register can be interrupted cleanly without side effects func TestInterruptedRegister(t *testing.T) { graph, _ := tempGraph(t) defer nukeGraph(graph) badArchive, w := io.Pipe() // Use a pipe reader as a fake archive which never yields data image := &image.Image{ ID: common.GenerateRandomID(), Comment: "testing", Created: time.Now(), } w.CloseWithError(errors.New("But I'm not a tarball!")) // (Nobody's perfect, darling) graph.Register(image, badArchive) if _, err := graph.Get(image.ID); err == nil { t.Fatal("Image should not exist after Register is interrupted") } // Registering the same image again should succeed if the first register was interrupted goodArchive, err := fakeTar() if err != nil { t.Fatal(err) } if err := graph.Register(image, goodArchive); err != nil { t.Fatal(err) } } // FIXME: Do more extensive tests (ex: create multiple, delete, recreate; // create multiple, check the amount of images and paths, etc..) func TestGraphCreate(t *testing.T) { graph, _ := tempGraph(t) defer nukeGraph(graph) archive, err := fakeTar() if err != nil { t.Fatal(err) } img, err := graph.Create(archive, "", "", "Testing", "", nil, nil) if err != nil { t.Fatal(err) } if err := utils.ValidateID(img.ID); err != nil { t.Fatal(err) } if img.Comment != "Testing" { t.Fatalf("Wrong comment: should be '%s', not '%s'", "Testing", img.Comment) } if img.DockerVersion != dockerversion.VERSION { t.Fatalf("Wrong docker_version: should be '%s', not '%s'", dockerversion.VERSION, img.DockerVersion) } images, err := graph.Map() if err != nil { t.Fatal(err) } else if l := len(images); l != 1 { t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l) } if images[img.ID] == nil { t.Fatalf("Could not find image with id %s", img.ID) } } func TestRegister(t *testing.T) { graph, _ := tempGraph(t) defer nukeGraph(graph) archive, err := fakeTar() if err != nil { t.Fatal(err) } image := &image.Image{ ID: common.GenerateRandomID(), Comment: "testing", Created: time.Now(), } err = graph.Register(image, archive) if err != nil { t.Fatal(err) } if images, err := graph.Map(); err != nil { t.Fatal(err) } else if l := len(images); l != 1 { t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l) } if resultImg, err := graph.Get(image.ID); err != nil { t.Fatal(err) } else { if resultImg.ID != image.ID { t.Fatalf("Wrong image ID. Should be '%s', not '%s'", image.ID, resultImg.ID) } if resultImg.Comment != image.Comment { t.Fatalf("Wrong image comment. 
Should be '%s', not '%s'", image.Comment, resultImg.Comment) } } } // Test that an image can be deleted by its shorthand prefix func TestDeletePrefix(t *testing.T) { graph, _ := tempGraph(t) defer nukeGraph(graph) img := createTestImage(graph, t) if err := graph.Delete(common.TruncateID(img.ID)); err != nil { t.Fatal(err) } assertNImages(graph, t, 0) } func createTestImage(graph *graph.Graph, t *testing.T) *image.Image { archive, err := fakeTar() if err != nil { t.Fatal(err) } img, err := graph.Create(archive, "", "", "Test image", "", nil, nil) if err != nil { t.Fatal(err) } return img } func TestDelete(t *testing.T) { graph, _ := tempGraph(t) defer nukeGraph(graph) archive, err := fakeTar() if err != nil { t.Fatal(err) } assertNImages(graph, t, 0) img, err := graph.Create(archive, "", "", "Bla bla", "", nil, nil) if err != nil { t.Fatal(err) } assertNImages(graph, t, 1) if err := graph.Delete(img.ID); err != nil { t.Fatal(err) } assertNImages(graph, t, 0) archive, err = fakeTar() if err != nil { t.Fatal(err) } // Test 2 create (same name) / 1 delete img1, err := graph.Create(archive, "", "", "Testing", "", nil, nil) if err != nil { t.Fatal(err) } archive, err = fakeTar() if err != nil { t.Fatal(err) } if _, err = graph.Create(archive, "", "", "Testing", "", nil, nil); err != nil { t.Fatal(err) } assertNImages(graph, t, 2) if err := graph.Delete(img1.ID); err != nil { t.Fatal(err) } assertNImages(graph, t, 1) // Test delete wrong name if err := graph.Delete("Not_foo"); err == nil { t.Fatalf("Deleting wrong ID should return an error") } assertNImages(graph, t, 1) archive, err = fakeTar() if err != nil { t.Fatal(err) } // Test delete twice (pull -> rm -> pull -> rm) if err := graph.Register(img1, archive); err != nil { t.Fatal(err) } if err := graph.Delete(img1.ID); err != nil { t.Fatal(err) } assertNImages(graph, t, 1) } func TestByParent(t *testing.T) { archive1, _ := fakeTar() archive2, _ := fakeTar() archive3, _ := fakeTar() graph, _ := tempGraph(t) defer nukeGraph(graph) parentImage := &image.Image{ ID: common.GenerateRandomID(), Comment: "parent", Created: time.Now(), Parent: "", } childImage1 := &image.Image{ ID: common.GenerateRandomID(), Comment: "child1", Created: time.Now(), Parent: parentImage.ID, } childImage2 := &image.Image{ ID: common.GenerateRandomID(), Comment: "child2", Created: time.Now(), Parent: parentImage.ID, } _ = graph.Register(parentImage, archive1) _ = graph.Register(childImage1, archive2) _ = graph.Register(childImage2, archive3) byParent, err := graph.ByParent() if err != nil { t.Fatal(err) } numChildren := len(byParent[parentImage.ID]) if numChildren != 2 { t.Fatalf("Expected 2 children, found %d", numChildren) } } /* * HELPER FUNCTIONS */ func assertNImages(graph *graph.Graph, t *testing.T, n int) { if images, err := graph.Map(); err != nil { t.Fatal(err) } else if actualN := len(images); actualN != n { t.Fatalf("Expected %d images, found %d", n, actualN) } } func tempGraph(t *testing.T) (*graph.Graph, graphdriver.Driver) { tmp, err := ioutil.TempDir("", "docker-graph-") if err != nil { t.Fatal(err) } driver, err := graphdriver.New(tmp, nil) if err != nil { t.Fatal(err) } graph, err := graph.NewGraph(tmp, driver) if err != nil { t.Fatal(err) } return graph, driver } func nukeGraph(graph *graph.Graph) { graph.Driver().Cleanup() os.RemoveAll(graph.Root) } func testArchive(t *testing.T) archive.Archive { archive, err := fakeTar() if err != nil { t.Fatal(err) } return archive } 
docker-1.6.2/integration/utils_test.go0000644000175000017500000002346712524223634017411 0ustar tianontianonpackage docker import ( "bytes" "fmt" "io" "io/ioutil" "net/http" "net/http/httptest" "os" "path" "path/filepath" "strings" "testing" "time" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "github.com/docker/docker/builtins" "github.com/docker/docker/daemon" "github.com/docker/docker/engine" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/registry" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) type Fataler interface { Fatal(...interface{}) } // This file contains utility functions for docker's unit test suite. // It has to be named XXX_test.go, apparently, in order to access private functions // from other XXX_test.go functions. // Create a temporary daemon suitable for unit testing. // Call t.Fatal() at the first error. func mkDaemon(f Fataler) *daemon.Daemon { eng := newTestEngine(f, false, "") return mkDaemonFromEngine(eng, f) } func createNamedTestContainer(eng *engine.Engine, config *runconfig.Config, f Fataler, name string) (shortId string) { job := eng.Job("create", name) if err := job.ImportEnv(config); err != nil { f.Fatal(err) } var outputBuffer = bytes.NewBuffer(nil) job.Stdout.Add(outputBuffer) if err := job.Run(); err != nil { f.Fatal(err) } return engine.Tail(outputBuffer, 1) } func createTestContainer(eng *engine.Engine, config *runconfig.Config, f Fataler) (shortId string) { return createNamedTestContainer(eng, config, f, "") } func startContainer(eng *engine.Engine, id string, t Fataler) { job := eng.Job("start", id) if err := job.Run(); err != nil { t.Fatal(err) } } func containerRun(eng *engine.Engine, id string, t Fataler) { startContainer(eng, id, t) containerWait(eng, id, t) } func containerFileExists(eng *engine.Engine, id, dir string, t Fataler) bool { c := getContainer(eng, id, t) if err := c.Mount(); err != nil { t.Fatal(err) } defer c.Unmount() if _, err := os.Stat(path.Join(c.RootfsPath(), dir)); err != nil { if os.IsNotExist(err) { return false } t.Fatal(err) } return true } func containerAttach(eng *engine.Engine, id string, t Fataler) (io.WriteCloser, io.ReadCloser) { c := getContainer(eng, id, t) i := c.StdinPipe() o := c.StdoutPipe() return i, o } func containerWait(eng *engine.Engine, id string, t Fataler) int { ex, _ := getContainer(eng, id, t).WaitStop(-1 * time.Second) return ex } func containerWaitTimeout(eng *engine.Engine, id string, t Fataler) error { _, err := getContainer(eng, id, t).WaitStop(500 * time.Millisecond) return err } func containerKill(eng *engine.Engine, id string, t Fataler) { if err := eng.Job("kill", id).Run(); err != nil { t.Fatal(err) } } func containerRunning(eng *engine.Engine, id string, t Fataler) bool { return getContainer(eng, id, t).IsRunning() } func containerAssertExists(eng *engine.Engine, id string, t Fataler) { getContainer(eng, id, t) } func containerAssertNotExists(eng *engine.Engine, id string, t Fataler) { daemon := mkDaemonFromEngine(eng, t) if c, _ := daemon.Get(id); c != nil { t.Fatal(fmt.Errorf("Container %s should not exist", id)) } } // assertHttpNotError expects the given response to not have an error. // Otherwise it causes the test to fail.
func assertHttpNotError(r *httptest.ResponseRecorder, t Fataler) { // Non-error http statuses are [200, 400) if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest { t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code)) } } // assertHttpError expects the given response to have an error. // Otherwise it causes the test to fail. func assertHttpError(r *httptest.ResponseRecorder, t Fataler) { // Non-error http statuses are [200, 400) if !(r.Code < http.StatusOK || r.Code >= http.StatusBadRequest) { t.Fatal(fmt.Errorf("Unexpected http success code: %v", r.Code)) } } func getContainer(eng *engine.Engine, id string, t Fataler) *daemon.Container { daemon := mkDaemonFromEngine(eng, t) c, err := daemon.Get(id) if err != nil { t.Fatal(err) } return c } func mkDaemonFromEngine(eng *engine.Engine, t Fataler) *daemon.Daemon { iDaemon := eng.Hack_GetGlobalVar("httpapi.daemon") if iDaemon == nil { panic("Legacy daemon field not set in engine") } daemon, ok := iDaemon.(*daemon.Daemon) if !ok { panic("Legacy daemon field in engine does not cast to *daemon.Daemon") } return daemon } func newTestEngine(t Fataler, autorestart bool, root string) *engine.Engine { if root == "" { if dir, err := newTestDirectory(unitTestStoreBase); err != nil { t.Fatal(err) } else { root = dir } } os.MkdirAll(root, 0700) eng := engine.New() eng.Logging = false // Load default plugins if err := builtins.Register(eng); err != nil { t.Fatal(err) } // load registry service if err := registry.NewService(nil).Install(eng); err != nil { t.Fatal(err) } // (This is manually copied and modified from main() until we have a more generic plugin system) cfg := &daemon.Config{ Root: root, AutoRestart: autorestart, ExecDriver: "native", // Either InterContainerCommunication or EnableIptables must be set, // otherwise NewDaemon will fail because of conflicting settings. InterContainerCommunication: true, TrustKeyPath: filepath.Join(root, "key.json"), LogConfig: runconfig.LogConfig{Type: "json-file"}, } d, err := daemon.NewDaemon(cfg, eng) if err != nil { t.Fatal(err) } if err := d.Install(eng); err != nil { t.Fatal(err) } return eng } func NewTestEngine(t Fataler) *engine.Engine { return newTestEngine(t, false, "") } func newTestDirectory(templateDir string) (dir string, err error) { return utils.TestDirectory(templateDir) } func getCallerName(depth int) string { return utils.GetCallerName(depth) } // Write `content` to the file at path `dst`, creating it if necessary, // as well as any missing directories. // The file is truncated if it already exists. // Call t.Fatal() at the first error. func writeFile(dst, content string, t *testing.T) { // Create subdirectories if necessary if err := os.MkdirAll(path.Dir(dst), 0700); err != nil && !os.IsExist(err) { t.Fatal(err) } f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700) if err != nil { t.Fatal(err) } defer f.Close() // Write content (truncate if it exists) if _, err := io.Copy(f, strings.NewReader(content)); err != nil { t.Fatal(err) } } // Return the contents of file at path `src`. // Call t.Fatal() at the first error (including if the file doesn't exist) func readFile(src string, t *testing.T) (content string) { f, err := os.Open(src) if err != nil { t.Fatal(err) } defer f.Close() data, err := ioutil.ReadAll(f) if err != nil { t.Fatal(err) } return string(data) } // Create a test container from the given daemon `r` and run arguments `args`. // If the image name is "_" (e.g. []string{"-i", "-t", "_", "bash"}), it is // dynamically replaced by the current test image.
// The caller is responsible for destroying the container. // Call t.Fatal() at the first error. func mkContainer(r *daemon.Daemon, args []string, t *testing.T) (*daemon.Container, *runconfig.HostConfig, error) { config, hc, _, err := parseRun(args) defer func() { if err != nil && t != nil { t.Fatal(err) } }() if err != nil { return nil, nil, err } if config.Image == "_" { config.Image = GetTestImage(r).ID } c, _, err := r.Create(config, nil, "") if err != nil { return nil, nil, err } // NOTE: hostConfig is ignored. // If `args` specify privileged mode, custom lxc conf, external mount binds, // port redirects etc. they will be ignored. // This is because the correct way to set these things is to pass environment // to the `start` job. // FIXME: this helper function should be deprecated in favor of calling // `create` and `start` jobs directly. return c, hc, nil } // Create a test container, start it, wait for it to complete, destroy it, // and return its standard output as a string. // The image name (e.g. the XXX in []string{"-i", "-t", "XXX", "bash"}) is dynamically replaced by the current test image. // If t is not nil, call t.Fatal() at the first error. Otherwise return errors normally. func runContainer(eng *engine.Engine, r *daemon.Daemon, args []string, t *testing.T) (output string, err error) { defer func() { if err != nil && t != nil { t.Fatal(err) } }() container, hc, err := mkContainer(r, args, t) if err != nil { return "", err } defer r.Rm(container) stdout := container.StdoutPipe() defer stdout.Close() job := eng.Job("start", container.ID) if err := job.ImportEnv(hc); err != nil { return "", err } if err := job.Run(); err != nil { return "", err } container.WaitStop(-1 * time.Second) data, err := ioutil.ReadAll(stdout) if err != nil { return "", err } output = string(data) return } // FIXME: this is duplicated from graph_test.go in the docker package.
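// fakeTar builds a small in-memory tar stream containing a handful of canned files; the tests use it wherever a minimal filesystem archive is needed (for instance as image content).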
func fakeTar() (io.ReadCloser, error) { content := []byte("Hello world!\n") buf := new(bytes.Buffer) tw := tar.NewWriter(buf) for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} { hdr := new(tar.Header) hdr.Size = int64(len(content)) hdr.Name = name if err := tw.WriteHeader(hdr); err != nil { return nil, err } tw.Write(content) } tw.Close() return ioutil.NopCloser(buf), nil } func getAllImages(eng *engine.Engine, t *testing.T) *engine.Table { return getImages(eng, t, true, "") } func getImages(eng *engine.Engine, t *testing.T, all bool, filter string) *engine.Table { job := eng.Job("images") job.SetenvBool("all", all) job.Setenv("filter", filter) images, err := job.Stdout.AddListTable() if err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } return images } func parseRun(args []string) (*runconfig.Config, *runconfig.HostConfig, *flag.FlagSet, error) { cmd := flag.NewFlagSet("run", flag.ContinueOnError) cmd.SetOutput(ioutil.Discard) cmd.Usage = nil return runconfig.Parse(cmd, args) } docker-1.6.2/.mailmap0000644000175000017500000001600012524223634013752 0ustar tianontianon# Generate AUTHORS: hack/generate-authors.sh # Tip for finding duplicates (besides scanning the output of AUTHORS for name # duplicates that aren't also email duplicates): scan the output of: # git log --format='%aE - %aN' | sort -uf # # For explanation on this file format: man git-shortlog Patrick Stapleton Shishir Mahajan Erwin van der Koogh Ahmed Kamal Tejesh Mehta Cristian Staretu Cristian Staretu Cristian Staretu Marcus Linke Aleksandrs Fadins Christopher Latham Hu Keping Wayne Chang Chen Chao Daehyeok Mun Guillaume J. Charmes Thatcher Peskens Thatcher Peskens Thatcher Peskens dhrp Jérôme Petazzoni jpetazzo Jérôme Petazzoni Joffrey F Joffrey F Joffrey F Tim Terhorst Andy Smith Walter Stanish Roberto Hashioka Konstantin Pelykh David Sissitka Nolan Darilek Benoit Chesneau Jordan Arentsen Daniel Garcia Miguel Angel Fernández Bhiraj Butala Faiz Khan Victor Lyuboslavsky Jean-Baptiste Barth Matthew Mueller Shih-Yuan Lee Daniel Mizyrycki root Jean-Baptiste Dalido Sven Dowideit Sven Dowideit Sven Dowideit Sven Dowideit <¨SvenDowideit@home.org.au¨> Sven Dowideit Sven Dowideit Alexandr Morozov O.S. Tezer Roberto G. Hashioka Sridhar Ratnakumar Sridhar Ratnakumar Liang-Chi Hsieh Aleksa Sarai Will Weaver Timothy Hobbs Nathan LeClaire Nathan LeClaire Matthew Heon Francisco Carriedo Brian Goff Hollie Teal Jessica Frazelle Jessie Frazelle Thomas LEVEIL Thomas LÉVEIL docker-1.6.2/builder/0000755000175000017500000000000012524223634013762 5ustar tianontianondocker-1.6.2/builder/evaluator.go0000644000175000017500000002557412524223634016324 0ustar tianontianon// builder is the evaluation step in the Dockerfile parse/evaluate pipeline. // // It incorporates a dispatch table based on the parser.Node values (see the // parser package for more information) that are yielded from the parser itself. // Calling NewBuilder with the BuildOpts struct can be used to customize the // experience for execution purposes only. Parsing is controlled in the parser // package, and this division of responsibility should be respected. // // Please see the jump table targets for the actual invocations, most of which // will call out to the functions in internals.go to deal with their tasks. // // ONBUILD is a special case, which is covered in the onbuild() func in // dispatchers.go.
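// // As a rough illustration only (this is not a verbatim caller from the tree; the field names are the exported ones defined on Builder below), driving a build looks approximately like: // // b := &Builder{Daemon: d, Engine: eng, OutStream: out, ErrStream: errw} // imageID, err := b.Run(contextTar) // contextTar is the uploaded build context // // with Run returning the ID of the final image on success.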
// // The evaluator uses the concept of "steps", which are usually each processable // line in the Dockerfile. Each step is numbered and certain actions are taken // before and after each step, such as creating an image ID and removing temporary // containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which // includes its own set of steps (usually only one of them). package builder import ( "errors" "fmt" "io" "os" "path/filepath" "strings" log "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/builder/command" "github.com/docker/docker/builder/parser" "github.com/docker/docker/daemon" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/common" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/registry" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) var ( ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty") ) // Environment variable interpolation will happen on these statements only. var replaceEnvAllowed = map[string]struct{}{ command.Env: {}, command.Label: {}, command.Add: {}, command.Copy: {}, command.Workdir: {}, command.Expose: {}, command.Volume: {}, command.User: {}, } var evaluateTable map[string]func(*Builder, []string, map[string]bool, string) error func init() { evaluateTable = map[string]func(*Builder, []string, map[string]bool, string) error{ command.Env: env, command.Label: label, command.Maintainer: maintainer, command.Add: add, command.Copy: dispatchCopy, // copy() is a go builtin command.From: from, command.Onbuild: onbuild, command.Workdir: workdir, command.Run: run, command.Cmd: cmd, command.Entrypoint: entrypoint, command.Expose: expose, command.Volume: volume, command.User: user, command.Insert: insert, } } // internal struct, used to maintain configuration of the Dockerfile's // processing as it evaluates the parsing result. type Builder struct { Daemon *daemon.Daemon Engine *engine.Engine // effectively stdio for the run. Because it is not stdio, I said // "Effectively". Do not use stdio anywhere in this package for any reason. OutStream io.Writer ErrStream io.Writer Verbose bool UtilizeCache bool cacheBusted bool // controls how images and containers are handled between steps. Remove bool ForceRemove bool Pull bool // set this to true if we want the builder to not commit between steps. // This is useful when we only want to use the evaluator table to generate // the final configs of the Dockerfile but dont want the layers disableCommit bool AuthConfig *registry.AuthConfig AuthConfigFile *registry.ConfigFile // Deprecated, original writer used for ImagePull. To be removed. OutOld io.Writer StreamFormatter *utils.StreamFormatter Config *runconfig.Config // runconfig for cmd, run, entrypoint etc. // both of these are controlled by the Remove and ForceRemove options in BuildOpts TmpContainers map[string]struct{} // a map of containers used for removes dockerfileName string // name of Dockerfile dockerfile *parser.Node // the syntax tree of the dockerfile image string // image name for commit processing maintainer string // maintainer name. could probably be removed. 
cmdSet bool // indicates if CMD was set in current Dockerfile context tarsum.TarSum // the context is a tarball that is uploaded by the client contextPath string // the path of the temporary directory the local context is unpacked to (server side) noBaseImage bool // indicates that this build does not start from any base image, but is being built from an empty file system. // Set resource restrictions for build containers cpuSetCpus string cpuShares int64 memory int64 memorySwap int64 cancelled <-chan struct{} // When closed, job was cancelled. } // Run the builder with the context. This is the lynchpin of this package. This // will (barring errors): // // * call readContext() which will set up the temporary directory and unpack // the context into it. // * read the dockerfile // * parse the dockerfile // * walk the parse tree and execute it by dispatching to handlers. If Remove // or ForceRemove is set, additional cleanup around containers happens after // processing. // * Print a happy message and return the image ID. // func (b *Builder) Run(context io.Reader) (string, error) { if err := b.readContext(context); err != nil { return "", err } defer func() { if err := os.RemoveAll(b.contextPath); err != nil { log.Debugf("[BUILDER] failed to remove temporary context: %s", err) } }() if err := b.readDockerfile(); err != nil { return "", err } // some initializations that would not have been supplied by the caller. b.Config = &runconfig.Config{} b.TmpContainers = map[string]struct{}{} for i, n := range b.dockerfile.Children { select { case <-b.cancelled: log.Debug("Builder: build cancelled!") fmt.Fprintf(b.OutStream, "Build cancelled") return "", fmt.Errorf("Build cancelled") default: // Not cancelled yet, keep going... } if err := b.dispatch(i, n); err != nil { if b.ForceRemove { b.clearTmp() } return "", err } fmt.Fprintf(b.OutStream, " ---> %s\n", common.TruncateID(b.image)) if b.Remove { b.clearTmp() } } if b.image == "" { return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?") } fmt.Fprintf(b.OutStream, "Successfully built %s\n", common.TruncateID(b.image)) return b.image, nil } // Reads a Dockerfile from the current context. It assumes that the // 'filename' is a relative path from the root of the context func (b *Builder) readDockerfile() error { // If no -f was specified then look for 'Dockerfile'. If we can't find // that then look for 'dockerfile'. If neither are found then default // back to 'Dockerfile' and use that in the error message.
if b.dockerfileName == "" { b.dockerfileName = api.DefaultDockerfileName tmpFN := filepath.Join(b.contextPath, api.DefaultDockerfileName) if _, err := os.Lstat(tmpFN); err != nil { tmpFN = filepath.Join(b.contextPath, strings.ToLower(api.DefaultDockerfileName)) if _, err := os.Lstat(tmpFN); err == nil { b.dockerfileName = strings.ToLower(api.DefaultDockerfileName) } } } origFile := b.dockerfileName filename, err := symlink.FollowSymlinkInScope(filepath.Join(b.contextPath, origFile), b.contextPath) if err != nil { return fmt.Errorf("The Dockerfile (%s) must be within the build context", origFile) } fi, err := os.Lstat(filename) if os.IsNotExist(err) { return fmt.Errorf("Cannot locate specified Dockerfile: %s", origFile) } if fi.Size() == 0 { return ErrDockerfileEmpty } f, err := os.Open(filename) if err != nil { return err } b.dockerfile, err = parser.Parse(f) f.Close() if err != nil { return err } // After the Dockerfile has been parsed, we need to check the .dockerignore // file for either "Dockerfile" or ".dockerignore", and if either are // present then erase them from the build context. These files should never // have been sent from the client but we did send them to make sure that // we had the Dockerfile to actually parse, and then we also need the // .dockerignore file to know whether either file should be removed. // Note that this assumes the Dockerfile has been read into memory and // is now safe to be removed. excludes, _ := utils.ReadDockerIgnore(filepath.Join(b.contextPath, ".dockerignore")) if rm, _ := fileutils.Matches(".dockerignore", excludes); rm == true { os.Remove(filepath.Join(b.contextPath, ".dockerignore")) b.context.(tarsum.BuilderContext).Remove(".dockerignore") } if rm, _ := fileutils.Matches(b.dockerfileName, excludes); rm == true { os.Remove(filepath.Join(b.contextPath, b.dockerfileName)) b.context.(tarsum.BuilderContext).Remove(b.dockerfileName) } return nil } // This method is the entrypoint to all statement handling routines. // // Almost all nodes will have this structure: // Child[Node, Node, Node] where Child is from parser.Node.Children and each // node comes from parser.Node.Next. This forms a "line" with a statement and // arguments and we process them in this normalized form by hitting // evaluateTable with the leaf nodes of the command and the Builder object. // // ONBUILD is a special case; in this case the parser will emit: // Child[Node, Child[Node, Node...]] where the first node is the literal // "onbuild" and the child entrypoint is the command of the ONBUILD statement, // such as `RUN` in ONBUILD RUN foo. There is special case logic in here to // deal with that, at least until it becomes more of a general concern with new // features. func (b *Builder) dispatch(stepN int, ast *parser.Node) error { cmd := ast.Value attrs := ast.Attributes original := ast.Original strs := []string{} msg := fmt.Sprintf("Step %d : %s", stepN, strings.ToUpper(cmd)) if cmd == "onbuild" { if ast.Next == nil { return fmt.Errorf("ONBUILD requires at least one argument") } ast = ast.Next.Children[0] strs = append(strs, ast.Value) msg += " " + ast.Value } // count the number of nodes that we are going to traverse first // so we can pre-create the argument and message array.
This speeds up the // allocation of those list a lot when they have a lot of arguments cursor := ast var n int for cursor.Next != nil { cursor = cursor.Next n++ } l := len(strs) strList := make([]string, n+l) copy(strList, strs) msgList := make([]string, n) var i int for ast.Next != nil { ast = ast.Next var str string str = ast.Value if _, ok := replaceEnvAllowed[cmd]; ok { var err error str, err = ProcessWord(ast.Value, b.Config.Env) if err != nil { return err } } strList[i+l] = str msgList[i] = ast.Value i++ } msg += " " + strings.Join(msgList, " ") fmt.Fprintln(b.OutStream, msg) // XXX yes, we skip any cmds that are not valid; the parser should have // picked these out already. if f, ok := evaluateTable[cmd]; ok { return f(b, strList, attrs, original) } return fmt.Errorf("Unknown instruction: %s", strings.ToUpper(cmd)) } docker-1.6.2/builder/words0000644000175000017500000000310512524223634015042 0ustar tianontianonhello | hello he'll'o | hello he'llo | hello he\'llo | he'llo he\\'llo | he\llo abc\tdef | abctdef "abc\tdef" | abc\tdef 'abc\tdef' | abc\tdef hello\ | hello hello\\ | hello\ "hello | hello "hello\" | hello" "hel'lo" | hel'lo 'hello | hello 'hello\' | hello\ "''" | '' $. | $. $1 | he$1x | hex he$.x | he$.x he$pwd. | he. he$PWD | he/home he\$PWD | he$PWD he\\$PWD | he\/home he\${} | he${} he\${}xx | he${}xx he${} | he he${}xx | hexx he${hi} | he he${hi}xx | hexx he${PWD} | he/home he${.} | error 'he${XX}' | he${XX} "he${PWD}" | he/home "he'$PWD'" | he'/home' "$PWD" | /home '$PWD' | $PWD '\$PWD' | \$PWD '"hello"' | "hello" he\$PWD | he$PWD "he\$PWD" | he$PWD 'he\$PWD' | he\$PWD he${PWD | error docker-1.6.2/builder/parser/0000755000175000017500000000000012524223634015256 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/0000755000175000017500000000000012524223634017260 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/nginx/0000755000175000017500000000000012524223634020403 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/nginx/result0000644000175000017500000000047712524223634021654 0ustar tianontianon(from "ubuntu:14.04") (maintainer "Erik Hollensbe ") (run "apt-get update && apt-get install nginx-full -y") (run "rm -rf /etc/nginx") (add "etc" "/etc/nginx") (run "chown -R root:root /etc/nginx") (run "/usr/sbin/nginx -qt") (run "mkdir /www") (cmd "/usr/sbin/nginx") (volume "/www") (expose "80") docker-1.6.2/builder/parser/testfiles/nginx/Dockerfile0000644000175000017500000000043012524223634022372 0ustar tianontianonFROM ubuntu:14.04 MAINTAINER Erik Hollensbe RUN apt-get update && apt-get install nginx-full -y RUN rm -rf /etc/nginx ADD etc /etc/nginx RUN chown -R root:root /etc/nginx RUN /usr/sbin/nginx -qt RUN mkdir /www CMD ["/usr/sbin/nginx"] VOLUME /www EXPOSE 80 docker-1.6.2/builder/parser/testfiles/mumble/0000755000175000017500000000000012524223634020541 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/mumble/result0000644000175000017500000000025412524223634022003 0ustar tianontianon(from "ubuntu:14.04") (run "apt-get update && apt-get install libcap2-bin mumble-server -y") (add "./mumble-server.ini" "/etc/mumble-server.ini") (cmd "/usr/sbin/murmurd") docker-1.6.2/builder/parser/testfiles/mumble/Dockerfile0000644000175000017500000000023512524223634022533 0ustar tianontianonFROM ubuntu:14.04 RUN apt-get update && apt-get install libcap2-bin mumble-server -y ADD ./mumble-server.ini /etc/mumble-server.ini CMD /usr/sbin/murmurd 
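// Each testfiles/<name> directory in this tree pairs a Dockerfile with a
// "result" file holding the expected sexp dump of its AST; TestTestData in
// parser_test.go walks them all. A minimal sketch of the round-trip a single
// fixture encodes (using the mumble fixture above; t is assumed to be a
// *testing.T):
//
//	df, err := os.Open("testfiles/mumble/Dockerfile")
//	if err != nil {
//		t.Fatal(err)
//	}
//	defer df.Close()
//	ast, err := parser.Parse(df)
//	if err != nil {
//		t.Fatal(err)
//	}
//	want, _ := ioutil.ReadFile("testfiles/mumble/result")
//	if ast.Dump()+"\n" != string(want) {
//		t.Fatal("AST dump does not match recorded result")
//	}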
docker-1.6.2/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/0000755000175000017500000000000012524223634026664 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/result0000644000175000017500000000007612524223634030130 0ustar tianontianon(cmd "'[\"echo\", \"Well, JSON in a string is JSON too?\"]'") docker-1.6.2/builder/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile0000644000175000017500000000006612524223634030660 0ustar tianontianonCMD '["echo", "Well, JSON in a string is JSON too?"]' docker-1.6.2/builder/parser/testfiles/lk4d4-the-edge-case-generator/0000755000175000017500000000000012524223634024557 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/lk4d4-the-edge-case-generator/result0000644000175000017500000000135412524223634026023 0ustar tianontianon(from "busybox:buildroot-2014.02") (maintainer "docker ") (onbuild (run "echo" "test")) (onbuild (run "echo test")) (onbuild (copy "." "/")) (run "ls" "-la") (run "echo" "'1234'") (run "echo \"1234\"") (run "echo 1234") (run "echo '1234' && echo \"456\" && echo 789") (run "sh -c 'echo root:testpass > /tmp/passwd'") (run "mkdir -p /test /test2 /test3/test") (env "SCUBA" "1 DUBA 3") (env "SCUBA" "\"1 DUBA 3\"") (cmd "echo" "test") (cmd "echo test") (cmd "echo \"test\"") (cmd "echo 'test'") (cmd "echo 'test' | wc -") (expose "3000") (expose "9000" "5000" "6000") (user "docker") (user "docker:root") (volume "/test") (volume "/test" "/test2") (volume "/test3") (workdir "/test") (add "." "/") (copy "." "copy") docker-1.6.2/builder/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile0000644000175000017500000000132012524223634026545 0ustar tianontianonFROM busybox:buildroot-2014.02 MAINTAINER docker ONBUILD RUN ["echo", "test"] ONBUILD RUN echo test ONBUILD COPY . / # RUN Commands \ # linebreak in comment \ RUN ["ls", "-la"] RUN ["echo", "'1234'"] RUN echo "1234" RUN echo 1234 RUN echo '1234' && \ echo "456" && \ echo 789 RUN sh -c 'echo root:testpass \ > /tmp/passwd' RUN mkdir -p /test /test2 /test3/test # ENV \ ENV SCUBA 1 DUBA 3 ENV SCUBA "1 DUBA 3" # CMD \ CMD ["echo", "test"] CMD echo test CMD echo "test" CMD echo 'test' CMD echo 'test' | wc - #EXPOSE\ EXPOSE 3000 EXPOSE 9000 5000 6000 USER docker USER docker:root VOLUME ["/test"] VOLUME ["/test", "/test2"] VOLUME /test3 WORKDIR /test ADD . / COPY . 
copy docker-1.6.2/builder/parser/testfiles/kartar-entrypoint-oddities/0000755000175000017500000000000012524223634024557 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/kartar-entrypoint-oddities/result0000644000175000017500000000035412524223634026022 0ustar tianontianon(from "ubuntu:14.04") (maintainer "James Turnbull \"james@example.com\"") (env "REFRESHED_AT" "2014-06-01") (run "apt-get update") (run "apt-get -y install redis-server redis-tools") (expose "6379") (entrypoint "/usr/bin/redis-server") docker-1.6.2/builder/parser/testfiles/kartar-entrypoint-oddities/Dockerfile0000644000175000017500000000032212524223634026546 0ustar tianontianonFROM ubuntu:14.04 MAINTAINER James Turnbull "james@example.com" ENV REFRESHED_AT 2014-06-01 RUN apt-get update RUN apt-get -y install redis-server redis-tools EXPOSE 6379 ENTRYPOINT [ "/usr/bin/redis-server" ] docker-1.6.2/builder/parser/testfiles/json/0000755000175000017500000000000012524223634020231 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/json/result0000644000175000017500000000021312524223634021466 0ustar tianontianon(cmd) (cmd "") (cmd "a") (cmd "a" "b") (cmd "a" "b") (cmd "a" "b") (cmd "a" "b") (cmd "abc 123" "♥" "☃" "\" \\ / \b \f \n \r \t \x00") docker-1.6.2/builder/parser/testfiles/json/Dockerfile0000644000175000017500000000023412524223634022222 0ustar tianontianonCMD [] CMD [""] CMD ["a"] CMD ["a","b"] CMD [ "a", "b" ] CMD [ "a", "b" ] CMD [ "a", "b" ] CMD ["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"] docker-1.6.2/builder/parser/testfiles/multiple-volumes/0000755000175000017500000000000012524223634022603 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/multiple-volumes/result0000644000175000017500000000017212524223634024044 0ustar tianontianon(from "foo") (volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") docker-1.6.2/builder/parser/testfiles/multiple-volumes/Dockerfile0000644000175000017500000000015312524223634024574 0ustar tianontianonFROM foo VOLUME /opt/nagios/var /opt/nagios/etc /opt/nagios/libexec /var/log/apache2 /usr/share/snmp/mibs docker-1.6.2/builder/parser/testfiles/tf2/0000755000175000017500000000000012524223634017753 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/tf2/result0000644000175000017500000000232212524223634021213 0ustar tianontianon(from "ubuntu:12.04") (expose "27015") (expose "27005") (expose "26901") (expose "27020") (run "apt-get update && apt-get install libc6-dev-i386 curl unzip -y") (run "mkdir -p /steam") (run "curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam") (add "./script" "/steam/script") (run "/steam/steamcmd.sh +runscript /steam/script") (run "curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf") (run "curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf") (add "./server.cfg" "/steam/tf2/tf/cfg/server.cfg") (add "./ctf_2fort.cfg" "/steam/tf2/tf/cfg/ctf_2fort.cfg") (add "./sourcemod.cfg" "/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg") (run "rm -r /steam/tf2/tf/addons/sourcemod/configs") (add "./configs" "/steam/tf2/tf/addons/sourcemod/configs") (run "mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en") (run "cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en") (cmd "cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script 
+tf_bot_quota 12 +tf_bot_quota_mode fill") docker-1.6.2/builder/parser/testfiles/tf2/Dockerfile0000644000175000017500000000217312524223634021750 0ustar tianontianonFROM ubuntu:12.04 EXPOSE 27015 EXPOSE 27005 EXPOSE 26901 EXPOSE 27020 RUN apt-get update && apt-get install libc6-dev-i386 curl unzip -y RUN mkdir -p /steam RUN curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam ADD ./script /steam/script RUN /steam/steamcmd.sh +runscript /steam/script RUN curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf RUN curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf ADD ./server.cfg /steam/tf2/tf/cfg/server.cfg ADD ./ctf_2fort.cfg /steam/tf2/tf/cfg/ctf_2fort.cfg ADD ./sourcemod.cfg /steam/tf2/tf/cfg/sourcemod/sourcemod.cfg RUN rm -r /steam/tf2/tf/addons/sourcemod/configs ADD ./configs /steam/tf2/tf/addons/sourcemod/configs RUN mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en RUN cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en CMD cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill docker-1.6.2/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/0000755000175000017500000000000012524223634030134 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result0000644000175000017500000000013012524223634031367 0ustar tianontianon(cmd "\"[\\\"echo\\\", \\\"Phew, I just managed to escaped those double quotes\\\"]\"") docker-1.6.2/builder/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile0000644000175000017500000000011212524223634032120 0ustar tianontianonCMD "[\"echo\", \"Phew, I just managed to escaped those double quotes\"]" docker-1.6.2/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/0000755000175000017500000000000012524223634027246 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result0000644000175000017500000000010312524223634030501 0ustar tianontianon(cmd "[\"echo\", \"Please, close the brackets when you're done\"") docker-1.6.2/builder/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile0000644000175000017500000000007312524223634031240 0ustar tianontianonCMD ["echo", "Please, close the brackets when you're done" docker-1.6.2/builder/parser/testfiles/brimstone-docker-consul/0000755000175000017500000000000012524223634024030 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/brimstone-docker-consul/result0000644000175000017500000000277212524223634025301 0ustar tianontianon(from "brimstone/ubuntu:14.04") (cmd) (entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui") (expose "8500" "8600" "8400" "8301" "8302") (run "apt-get update && apt-get install -y unzip wget \t&& apt-get clean \t&& rm -rf /var/lib/apt/lists") (run "cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip") (run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends unzip wget && apt-get clean && rm -rf /var/lib/apt/lists && cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm 
web_ui.zip \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.*") (env "GOPATH" "/go") (run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates build-essential && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/hashicorp/consul \t&& mv $GOPATH/bin/consul /usr/bin/consul \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") docker-1.6.2/builder/parser/testfiles/brimstone-docker-consul/Dockerfile0000644000175000017500000000302712524223634026024 0ustar tianontianonFROM brimstone/ubuntu:14.04 CMD [] ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"] EXPOSE 8500 8600 8400 8301 8302 RUN apt-get update \ && apt-get install -y unzip wget \ && apt-get clean \ && rm -rf /var/lib/apt/lists RUN cd /tmp \ && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ -O web_ui.zip \ && unzip web_ui.zip \ && mv dist /webui \ && rm web_ui.zip RUN apt-get update \ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ && apt-get install -y --no-install-recommends unzip wget \ && apt-get clean \ && rm -rf /var/lib/apt/lists \ && cd /tmp \ && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ -O web_ui.zip \ && unzip web_ui.zip \ && mv dist /webui \ && rm web_ui.zip \ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ && rm /tmp/dpkg.* ENV GOPATH /go RUN apt-get update \ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ && apt-get install -y --no-install-recommends git golang ca-certificates build-essential \ && apt-get clean \ && rm -rf /var/lib/apt/lists \ && go get -v github.com/hashicorp/consul \ && mv $GOPATH/bin/consul /usr/bin/consul \ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ && rm /tmp/dpkg.* \ && rm -rf $GOPATH docker-1.6.2/builder/parser/testfiles/ADD-COPY-with-JSON/0000755000175000017500000000000012524223634022140 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/ADD-COPY-with-JSON/result0000644000175000017500000000036012524223634023400 0ustar tianontianon(from "ubuntu:14.04") (maintainer "Seongyeol Lim ") (copy "." "/go/src/github.com/docker/docker") (add "." "/") (add "vimrc" "/tmp") (copy "bashrc" "/tmp") (copy "test file" "/tmp") (add "test file" "/tmp/test file") docker-1.6.2/builder/parser/testfiles/ADD-COPY-with-JSON/Dockerfile0000644000175000017500000000035412524223634024134 0ustar tianontianonFROM ubuntu:14.04 MAINTAINER Seongyeol Lim COPY . /go/src/github.com/docker/docker ADD . 
/ ADD [ "vimrc", "/tmp" ] COPY [ "bashrc", "/tmp" ] COPY [ "test file", "/tmp" ] ADD [ "test file", "/tmp/test file" ] docker-1.6.2/builder/parser/testfiles/weechat/0000755000175000017500000000000012524223634020700 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/weechat/result0000644000175000017500000000034112524223634022137 0ustar tianontianon(from "ubuntu:14.04") (run "apt-get update -qy && apt-get install tmux zsh weechat-curses -y") (add ".weechat" "/.weechat") (add ".tmux.conf" "/") (run "echo \"export TERM=screen-256color\" >/.zshenv") (cmd "zsh -c weechat") docker-1.6.2/builder/parser/testfiles/weechat/Dockerfile0000644000175000017500000000030612524223634022671 0ustar tianontianonFROM ubuntu:14.04 RUN apt-get update -qy && apt-get install tmux zsh weechat-curses -y ADD .weechat /.weechat ADD .tmux.conf / RUN echo "export TERM=screen-256color" >/.zshenv CMD zsh -c weechat docker-1.6.2/builder/parser/testfiles/jeztah-invalid-json-single-quotes/0000755000175000017500000000000012524223634025735 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/jeztah-invalid-json-single-quotes/result0000644000175000017500000000006212524223634027174 0ustar tianontianon(cmd "['echo','single quotes are invalid JSON']") docker-1.6.2/builder/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile0000644000175000017500000000005612524223634027730 0ustar tianontianonCMD ['echo','single quotes are invalid JSON'] docker-1.6.2/builder/parser/testfiles/continueIndent/0000755000175000017500000000000012524223634022246 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/continueIndent/result0000644000175000017500000000050412524223634023506 0ustar tianontianon(from "ubuntu:14.04") (run "echo hello world goodnight moon lightning") (run "echo hello world") (run "echo hello world") (run "echo hello goodbyefrog") (run "echo hello world") (run "echo hi world goodnight") (run "echo goodbyefrog") (run "echo goodbyefrog") (run "echo hello this is some more useful stuff") docker-1.6.2/builder/parser/testfiles/continueIndent/Dockerfile0000644000175000017500000000062312524223634024241 0ustar tianontianonFROM ubuntu:14.04 RUN echo hello\ world\ goodnight \ moon\ light\ ning RUN echo hello \ world RUN echo hello \ world RUN echo hello \ goodbye\ frog RUN echo hello \ world RUN echo hi \ \ world \ \ good\ \ night RUN echo goodbye\ frog RUN echo good\ bye\ frog RUN echo hello \ # this is a comment # this is a comment with a blank line surrounding it this is some more useful stuff docker-1.6.2/builder/parser/testfiles/brimstone-consuldock/0000755000175000017500000000000012524223634023424 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/brimstone-consuldock/result0000644000175000017500000000117712524223634024673 0ustar tianontianon(from "brimstone/ubuntu:14.04") (maintainer "brimstone@the.narro.ws") (env "GOPATH" "/go") (entrypoint "/usr/local/bin/consuldock") (run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") docker-1.6.2/builder/parser/testfiles/brimstone-consuldock/Dockerfile0000644000175000017500000000140412524223634025415 0ustar tianontianonFROM 
brimstone/ubuntu:14.04 MAINTAINER brimstone@the.narro.ws # TORUN -v /var/run/docker.sock:/var/run/docker.sock ENV GOPATH /go # Set our command ENTRYPOINT ["/usr/local/bin/consuldock"] # Install the packages we need, clean up after them and us RUN apt-get update \ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ && apt-get install -y --no-install-recommends git golang ca-certificates \ && apt-get clean \ && rm -rf /var/lib/apt/lists \ && go get -v github.com/brimstone/consuldock \ && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ && rm /tmp/dpkg.* \ && rm -rf $GOPATH docker-1.6.2/builder/parser/testfiles/znc/0000755000175000017500000000000012524223634020052 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/znc/result0000644000175000017500000000025612524223634021316 0ustar tianontianon(from "ubuntu:14.04") (maintainer "Erik Hollensbe ") (run "apt-get update && apt-get install znc -y") (add "conf" "/.znc") (cmd "/usr/bin/znc" "-f" "-r") docker-1.6.2/builder/parser/testfiles/znc/Dockerfile0000644000175000017500000000024212524223634022042 0ustar tianontianonFROM ubuntu:14.04 MAINTAINER Erik Hollensbe RUN apt-get update && apt-get install znc -y ADD conf /.znc CMD [ "/usr/bin/znc", "-f", "-r" ] docker-1.6.2/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/0000755000175000017500000000000012524223634027141 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/result0000644000175000017500000000005112524223634030376 0ustar tianontianon(cmd "[\"echo\", \"look ma, no quote!]") docker-1.6.2/builder/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile0000644000175000017500000000004212524223634031127 0ustar tianontianonCMD ["echo", "look ma, no quote!] 
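// The jeztah-invalid-json fixtures above record a deliberate fallback rather
// than an error: parseMaybeJSON (line_parsers.go, later in this tree) keeps
// any input that fails json.Unmarshal as a single shell-form string, unless
// the failure is errDockerfileNotStringArray. A rough sketch of what the
// unterminated-string fixture pins down:
//
//	node, attrs, err := parseMaybeJSON(`["echo", "look ma, no quote!]`)
//	// err is nil, attrs carries no "json" attribute, and node.Value holds
//	// the raw text, so the dump is (cmd "[\"echo\", \"look ma, no quote!]").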
docker-1.6.2/builder/parser/testfiles/escapes/0000755000175000017500000000000012524223634020703 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/escapes/result0000644000175000017500000000033612524223634022146 0ustar tianontianon(from "ubuntu:14.04") (maintainer "Erik \\\\Hollensbe \\\"") (run "apt-get \\update && apt-get \\\"install znc -y") (add "\\conf\\\\\"" "/.znc") (run "foo bar baz") (cmd "/usr\\\"/bin/znc" "-f" "-r") docker-1.6.2/builder/parser/testfiles/escapes/Dockerfile0000644000175000017500000000031512524223634022674 0ustar tianontianonFROM ubuntu:14.04 MAINTAINER Erik \\Hollensbe \" RUN apt-get \update && \ apt-get \"install znc -y ADD \conf\\" /.znc RUN foo \ bar \ baz CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ] docker-1.6.2/builder/parser/testfiles/cpuguy83-nagios/0000755000175000017500000000000012524223634022225 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/cpuguy83-nagios/result0000644000175000017500000000672512524223634023500 0ustar tianontianon(from "cpuguy83/ubuntu") (env "NAGIOS_HOME" "/opt/nagios") (env "NAGIOS_USER" "nagios") (env "NAGIOS_GROUP" "nagios") (env "NAGIOS_CMDUSER" "nagios") (env "NAGIOS_CMDGROUP" "nagios") (env "NAGIOSADMIN_USER" "nagiosadmin") (env "NAGIOSADMIN_PASS" "nagios") (env "APACHE_RUN_USER" "nagios") (env "APACHE_RUN_GROUP" "nagios") (env "NAGIOS_TIMEZONE" "UTC") (run "sed -i 's/universe/universe multiverse/' /etc/apt/sources.list") (run "apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx") (run "( egrep -i \"^${NAGIOS_GROUP}\" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i \"^${NAGIOS_CMDGROUP}\" /etc/group || groupadd $NAGIOS_CMDGROUP )") (run "( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )") (add "http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3" "/tmp/nagios.tar.gz") (run "cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf") (add "http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz" "/tmp/") (run "cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install") (run "sed -i.bak 's/.*\\=www\\-data//g' /etc/apache2/envvars") (run "export DOC_ROOT=\"DocumentRoot $(echo $NAGIOS_HOME/share)\"; sed -i \"s,DocumentRoot.*,$DOC_ROOT,\" /etc/apache2/sites-enabled/000-default") (run "ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo") (run "echo \"use_timezone=$NAGIOS_TIMEZONE\" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo \"SetEnv TZ \\\"${NAGIOS_TIMEZONE}\\\"\" >> /etc/apache2/conf.d/nagios.conf") (run "mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs") (run "echo 
\"cfg_dir=${NAGIOS_HOME}/etc/conf.d\" >> ${NAGIOS_HOME}/etc/nagios.cfg") (run "echo \"cfg_dir=${NAGIOS_HOME}/etc/monitor\" >> ${NAGIOS_HOME}/etc/nagios.cfg") (run "download-mibs && echo \"mibs +ALL\" > /etc/snmp/snmp.conf") (run "sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg") (run "cp /etc/services /var/spool/postfix/etc/") (run "mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix") (add "nagios.init" "/etc/sv/nagios/run") (add "apache.init" "/etc/sv/apache/run") (add "postfix.init" "/etc/sv/postfix/run") (add "postfix.stop" "/etc/sv/postfix/finish") (add "start.sh" "/usr/local/bin/start_nagios") (env "APACHE_LOCK_DIR" "/var/run") (env "APACHE_LOG_DIR" "/var/log/apache2") (expose "80") (volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") (cmd "/usr/local/bin/start_nagios") docker-1.6.2/builder/parser/testfiles/cpuguy83-nagios/Dockerfile0000644000175000017500000000642212524223634024223 0ustar tianontianonFROM cpuguy83/ubuntu ENV NAGIOS_HOME /opt/nagios ENV NAGIOS_USER nagios ENV NAGIOS_GROUP nagios ENV NAGIOS_CMDUSER nagios ENV NAGIOS_CMDGROUP nagios ENV NAGIOSADMIN_USER nagiosadmin ENV NAGIOSADMIN_PASS nagios ENV APACHE_RUN_USER nagios ENV APACHE_RUN_GROUP nagios ENV NAGIOS_TIMEZONE UTC RUN sed -i 's/universe/universe multiverse/' /etc/apt/sources.list RUN apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx RUN ( egrep -i "^${NAGIOS_GROUP}" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i "^${NAGIOS_CMDGROUP}" /etc/group || groupadd $NAGIOS_CMDGROUP ) RUN ( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER ) ADD http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3 /tmp/nagios.tar.gz RUN cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf ADD http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz /tmp/ RUN cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install RUN sed -i.bak 's/.*\=www\-data//g' /etc/apache2/envvars RUN export DOC_ROOT="DocumentRoot $(echo $NAGIOS_HOME/share)"; sed -i "s,DocumentRoot.*,$DOC_ROOT," /etc/apache2/sites-enabled/000-default RUN ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo RUN echo "use_timezone=$NAGIOS_TIMEZONE" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo "SetEnv TZ \"${NAGIOS_TIMEZONE}\"" >> /etc/apache2/conf.d/nagios.conf RUN mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs RUN echo 
"cfg_dir=${NAGIOS_HOME}/etc/conf.d" >> ${NAGIOS_HOME}/etc/nagios.cfg RUN echo "cfg_dir=${NAGIOS_HOME}/etc/monitor" >> ${NAGIOS_HOME}/etc/nagios.cfg RUN download-mibs && echo "mibs +ALL" > /etc/snmp/snmp.conf RUN sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && \ sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg RUN cp /etc/services /var/spool/postfix/etc/ RUN mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix ADD nagios.init /etc/sv/nagios/run ADD apache.init /etc/sv/apache/run ADD postfix.init /etc/sv/postfix/run ADD postfix.stop /etc/sv/postfix/finish ADD start.sh /usr/local/bin/start_nagios ENV APACHE_LOCK_DIR /var/run ENV APACHE_LOG_DIR /var/log/apache2 EXPOSE 80 VOLUME ["/opt/nagios/var", "/opt/nagios/etc", "/opt/nagios/libexec", "/var/log/apache2", "/usr/share/snmp/mibs"] CMD ["/usr/local/bin/start_nagios"] docker-1.6.2/builder/parser/testfiles/env/0000755000175000017500000000000012524223634020050 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/env/result0000644000175000017500000000113012524223634021304 0ustar tianontianon(from "ubuntu") (env "name" "value") (env "name" "value") (env "name" "value" "name2" "value2") (env "name" "\"value value1\"") (env "name" "value\\ value2") (env "name" "\"value'quote space'value2\"") (env "name" "'value\"double quote\"value2'") (env "name" "value\\ value2" "name2" "value2\\ value3") (env "name" "\"a\\\"b\"") (env "name" "\"a\\'b\"") (env "name" "'a\\'b'") (env "name" "'a\\'b''") (env "name" "'a\\\"b'") (env "name" "\"''\"") (env "name" "value" "name1" "value1" "name2" "\"value2a value2b\"" "name3" "\"value3a\\n\\\"value3b\\\"\"" "name4" "\"value4a\\\\nvalue4b\"") docker-1.6.2/builder/parser/testfiles/env/Dockerfile0000644000175000017500000000110712524223634022041 0ustar tianontianonFROM ubuntu ENV name value ENV name=value ENV name=value name2=value2 ENV name="value value1" ENV name=value\ value2 ENV name="value'quote space'value2" ENV name='value"double quote"value2' ENV name=value\ value2 name2=value2\ value3 ENV name="a\"b" ENV name="a\'b" ENV name='a\'b' ENV name='a\'b'' ENV name='a\"b' ENV name="''" # don't put anything after the next line - it must be the last line of the # Dockerfile and it must end with \ ENV name=value \ name1=value1 \ name2="value2a \ value2b" \ name3="value3a\n\"value3b\"" \ name4="value4a\\nvalue4b" \ docker-1.6.2/builder/parser/testfiles/influxdb/0000755000175000017500000000000012524223634021073 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/influxdb/result0000644000175000017500000000064012524223634022334 0ustar tianontianon(from "ubuntu:14.04") (run "apt-get update && apt-get install wget -y") (run "wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb") (run "dpkg -i influxdb_latest_amd64.deb") (run "rm -r /opt/influxdb/shared") (volume "/opt/influxdb/shared") (cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml") (expose "8083") (expose "8086") (expose "8090") (expose "8099") docker-1.6.2/builder/parser/testfiles/influxdb/Dockerfile0000644000175000017500000000057012524223634023067 0ustar tianontianonFROM ubuntu:14.04 RUN apt-get update && apt-get install wget -y RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb RUN dpkg -i influxdb_latest_amd64.deb RUN rm -r /opt/influxdb/shared VOLUME /opt/influxdb/shared CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml EXPOSE 8083 EXPOSE 8086 EXPOSE 
8090 EXPOSE 8099 docker-1.6.2/builder/parser/testfiles/mail/0000755000175000017500000000000012524223634020202 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/mail/result0000644000175000017500000000071212524223634021443 0ustar tianontianon(from "ubuntu:14.04") (run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y") (add ".muttrc" "/") (add ".offlineimaprc" "/") (add ".tmux.conf" "/") (add "mutt" "/.mutt") (add "vim" "/.vim") (add "vimrc" "/.vimrc") (add "crontab" "/etc/crontab") (run "chmod 644 /etc/crontab") (run "mkdir /Mail") (run "mkdir /.offlineimap") (run "echo \"export TERM=screen-256color\" >/.zshenv") (cmd "setsid cron; tmux -2") docker-1.6.2/builder/parser/testfiles/mail/Dockerfile0000644000175000017500000000060412524223634022174 0ustar tianontianonFROM ubuntu:14.04 RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y ADD .muttrc / ADD .offlineimaprc / ADD .tmux.conf / ADD mutt /.mutt ADD vim /.vim ADD vimrc /.vimrc ADD crontab /etc/crontab RUN chmod 644 /etc/crontab RUN mkdir /Mail RUN mkdir /.offlineimap RUN echo "export TERM=screen-256color" >/.zshenv CMD setsid cron; tmux -2 docker-1.6.2/builder/parser/testfiles/docker/0000755000175000017500000000000012524223634020527 5ustar tianontianondocker-1.6.2/builder/parser/testfiles/docker/result0000644000175000017500000000352512524223634021775 0ustar tianontianon(from "ubuntu:14.04") (maintainer "Tianon Gravi (@tianon)") (run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \tapt-utils \taufs-tools \tautomake \tbtrfs-tools \tbuild-essential \tcurl \tdpkg-sig \tgit \tiptables \tlibapparmor-dev \tlibcap-dev \tlibsqlite3-dev \tlxc=1.0* \tmercurial \tpandoc \tparallel \treprepro \truby1.9.1 \truby1.9.1-dev \ts3cmd=1.1.0* \t--no-install-recommends") (run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103") (run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper") (run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz") (env "PATH" "/usr/local/go/bin:$PATH") (env "GOPATH" "/go:/go/src/github.com/docker/docker/vendor") (run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1") (env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm \tdarwin/amd64 darwin/386 \tfreebsd/amd64 freebsd/386 freebsd/arm") (env "GOARM" "5") (run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'") (run "go get golang.org/x/tools/cmd/cover") (run "gem install --no-rdoc --no-ri fpm --version 1.0.2") (run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox") (run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg") (run "git config --global user.email 'docker-dummy@example.com'") (run "groupadd -r docker") (run "useradd --create-home --gid docker unprivilegeduser") (volume "/var/lib/docker") (workdir "/go/src/github.com/docker/docker") (env "DOCKER_BUILDTAGS" "apparmor selinux") (entrypoint "hack/dind") (copy "." "/go/src/github.com/docker/docker") docker-1.6.2/builder/parser/testfiles/docker/Dockerfile0000644000175000017500000000673212524223634022531 0ustar tianontianon# This file describes the standard way to build Docker, using docker # # Usage: # # # Assemble the full dev environment. 
This is slow the first time. # docker build -t docker . # # # Mount your source in an interactive container for quick testing: # docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash # # # Run the test suite: # docker run --privileged docker hack/make.sh test # # # Publish a release: # docker run --privileged \ # -e AWS_S3_BUCKET=baz \ # -e AWS_ACCESS_KEY=foo \ # -e AWS_SECRET_KEY=bar \ # -e GPG_PASSPHRASE=gloubiboulga \ # docker hack/release.sh # # Note: Apparmor used to mess with privileged mode, but this is no longer # the case. Therefore, you don't have to disable it anymore. # FROM ubuntu:14.04 MAINTAINER Tianon Gravi (@tianon) # Packaged dependencies RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ apt-utils \ aufs-tools \ automake \ btrfs-tools \ build-essential \ curl \ dpkg-sig \ git \ iptables \ libapparmor-dev \ libcap-dev \ libsqlite3-dev \ lxc=1.0* \ mercurial \ pandoc \ parallel \ reprepro \ ruby1.9.1 \ ruby1.9.1-dev \ s3cmd=1.1.0* \ --no-install-recommends # Get lvm2 source for compiling statically RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 # see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags # note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly # Compile and install lvm2 RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper # see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL # Install Go RUN curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz ENV PATH /usr/local/go/bin:$PATH ENV GOPATH /go:/go/src/github.com/docker/docker/vendor RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 # Compile Go for cross compilation ENV DOCKER_CROSSPLATFORMS \ linux/386 linux/arm \ darwin/amd64 darwin/386 \ freebsd/amd64 freebsd/386 freebsd/arm # (set an explicit GOARM of 5 for maximum compatibility) ENV GOARM 5 RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' # Grab Go's cover tool for dead-simple code coverage testing RUN go get golang.org/x/tools/cmd/cover # TODO replace FPM with some very minimal debhelper stuff RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 # Get the "busybox" image source so we can build locally instead of pulling RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox # Setup s3cmd config RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg # Set user.email so crosbymichael's in-container merge commits go smoothly RUN git config --global user.email 'docker-dummy@example.com' # Add an unprivileged user to be used for tests which need it RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser VOLUME /var/lib/docker WORKDIR /go/src/github.com/docker/docker ENV DOCKER_BUILDTAGS apparmor selinux # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] # Upload docker source COPY . 
/go/src/github.com/docker/docker docker-1.6.2/builder/parser/line_parsers.go0000644000175000017500000001550312524223634020277 0ustar tianontianonpackage parser // line parsers are dispatch calls that parse a single unit of text into a // Node object which contains the whole statement. Dockerfiles have varied // (but not usually unique, see ONBUILD for a unique example) parsing rules // per-command, and these unify the processing in a way that makes it // manageable. import ( "encoding/json" "errors" "fmt" "strings" "unicode" ) var ( errDockerfileNotStringArray = errors.New("When using JSON array syntax, arrays must be comprised of strings only.") ) // ignore the current argument. This will still leave a command parsed, but // will not incorporate the arguments into the ast. func parseIgnore(rest string) (*Node, map[string]bool, error) { return &Node{}, nil, nil } // used for onbuild. Could potentially be used for anything that represents a // statement with sub-statements. // // ONBUILD RUN foo bar -> (onbuild (run foo bar)) // func parseSubCommand(rest string) (*Node, map[string]bool, error) { if rest == "" { return nil, nil, nil } _, child, err := parseLine(rest) if err != nil { return nil, nil, err } return &Node{Children: []*Node{child}}, nil, nil } // parse environment like statements. Note that this does *not* handle // variable interpolation, which will be handled in the evaluator. func parseNameVal(rest string, key string) (*Node, map[string]bool, error) { // This is kind of tricky because we need to support the old // variant: KEY name value // as well as the new one: KEY name=value ... // The trigger to know which one is being used will be whether we hit // a space or = first. space ==> old, "=" ==> new const ( inSpaces = iota // looking for start of a word inWord inQuote ) words := []string{} phase := inSpaces word := "" quote := '\000' blankOK := false var ch rune for pos := 0; pos <= len(rest); pos++ { if pos != len(rest) { ch = rune(rest[pos]) } if phase == inSpaces { // Looking for start of word if pos == len(rest) { // end of input break } if unicode.IsSpace(ch) { // skip spaces continue } phase = inWord // found it, fall thru } if (phase == inWord || phase == inQuote) && (pos == len(rest)) { if blankOK || len(word) > 0 { words = append(words, word) } break } if phase == inWord { if unicode.IsSpace(ch) { phase = inSpaces if blankOK || len(word) > 0 { words = append(words, word) // Look for = and if not there assume // we're doing the old stuff and // just read the rest of the line if !strings.Contains(word, "=") { word = strings.TrimSpace(rest[pos:]) words = append(words, word) break } } word = "" blankOK = false continue } if ch == '\'' || ch == '"' { quote = ch blankOK = true phase = inQuote } if ch == '\\' { if pos+1 == len(rest) { continue // just skip \ at end } // If we're not quoted and we see a \, then always just // add \ plus the char to the word, even if the char // is a quote. 
word += string(ch) pos++ ch = rune(rest[pos]) } word += string(ch) continue } if phase == inQuote { if ch == quote { phase = inWord } // \ is special except for ' quotes - can't escape anything for ' if ch == '\\' && quote != '\'' { if pos+1 == len(rest) { phase = inWord continue // just skip \ at end } pos++ nextCh := rune(rest[pos]) word += string(ch) ch = nextCh } word += string(ch) } } if len(words) == 0 { return nil, nil, nil } // Old format (KEY name value) var rootnode *Node if !strings.Contains(words[0], "=") { node := &Node{} rootnode = node strs := TOKEN_WHITESPACE.Split(rest, 2) if len(strs) < 2 { return nil, nil, fmt.Errorf(key + " must have two arguments") } node.Value = strs[0] node.Next = &Node{} node.Next.Value = strs[1] } else { var prevNode *Node for i, word := range words { if !strings.Contains(word, "=") { return nil, nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word) } parts := strings.SplitN(word, "=", 2) name := &Node{} value := &Node{} name.Next = value name.Value = parts[0] value.Value = parts[1] if i == 0 { rootnode = name } else { prevNode.Next = name } prevNode = value } } return rootnode, nil, nil } func parseEnv(rest string) (*Node, map[string]bool, error) { return parseNameVal(rest, "ENV") } func parseLabel(rest string) (*Node, map[string]bool, error) { return parseNameVal(rest, "LABEL") } // parses a whitespace-delimited set of arguments. The result is effectively a // linked list of string arguments. func parseStringsWhitespaceDelimited(rest string) (*Node, map[string]bool, error) { if rest == "" { return nil, nil, nil } node := &Node{} rootnode := node prevnode := node for _, str := range TOKEN_WHITESPACE.Split(rest, -1) { // use regexp prevnode = node node.Value = str node.Next = &Node{} node = node.Next } // XXX to get around regexp.Split *always* providing an empty string at the // end due to how our loop is constructed, nil out the last node in the // chain. prevnode.Next = nil return rootnode, nil, nil } // parseString just wraps the string in quotes and returns a working node. func parseString(rest string) (*Node, map[string]bool, error) { if rest == "" { return nil, nil, nil } n := &Node{} n.Value = rest return n, nil, nil } // parseJSON converts JSON arrays to an AST. func parseJSON(rest string) (*Node, map[string]bool, error) { var myJson []interface{} if err := json.Unmarshal([]byte(rest), &myJson); err != nil { return nil, nil, err } var top, prev *Node for _, str := range myJson { if s, ok := str.(string); !ok { return nil, nil, errDockerfileNotStringArray } else { node := &Node{Value: s} if prev == nil { top = node } else { prev.Next = node } prev = node } } return top, map[string]bool{"json": true}, nil } // parseMaybeJSON determines if the argument appears to be a JSON array. If // so, passes to parseJSON; if not, quotes the result and returns a single // node. func parseMaybeJSON(rest string) (*Node, map[string]bool, error) { if rest == "" { return nil, nil, nil } node, attrs, err := parseJSON(rest) if err == nil { return node, attrs, nil } if err == errDockerfileNotStringArray { return nil, nil, err } node = &Node{} node.Value = rest return node, nil, nil } // parseMaybeJSONToList determines if the argument appears to be a JSON array. If // so, passes to parseJSON; if not, attempts to parse it as a whitespace // delimited string.
func parseMaybeJSONToList(rest string) (*Node, map[string]bool, error) { node, attrs, err := parseJSON(rest) if err == nil { return node, attrs, nil } if err == errDockerfileNotStringArray { return nil, nil, err } return parseStringsWhitespaceDelimited(rest) } docker-1.6.2/builder/parser/parser_test.go0000644000175000017500000000301012524223634020132 0ustar tianontianonpackage parser import ( "fmt" "io/ioutil" "os" "path/filepath" "testing" ) const testDir = "testfiles" const negativeTestDir = "testfiles-negative" func getDirs(t *testing.T, dir string) []string { f, err := os.Open(dir) if err != nil { t.Fatal(err) } defer f.Close() dirs, err := f.Readdirnames(0) if err != nil { t.Fatal(err) } return dirs } func TestTestNegative(t *testing.T) { for _, dir := range getDirs(t, negativeTestDir) { dockerfile := filepath.Join(negativeTestDir, dir, "Dockerfile") df, err := os.Open(dockerfile) if err != nil { t.Fatalf("Dockerfile missing for %s: %v", dir, err) } _, err = Parse(df) if err == nil { t.Fatalf("No error parsing broken dockerfile for %s", dir) } df.Close() } } func TestTestData(t *testing.T) { for _, dir := range getDirs(t, testDir) { dockerfile := filepath.Join(testDir, dir, "Dockerfile") resultfile := filepath.Join(testDir, dir, "result") df, err := os.Open(dockerfile) if err != nil { t.Fatalf("Dockerfile missing for %s: %v", dir, err) } defer df.Close() ast, err := Parse(df) if err != nil { t.Fatalf("Error parsing %s's dockerfile: %v", dir, err) } content, err := ioutil.ReadFile(resultfile) if err != nil { t.Fatalf("Error reading %s's result file: %v", dir, err) } if ast.Dump()+"\n" != string(content) { fmt.Fprintln(os.Stderr, "Result:\n"+ast.Dump()) fmt.Fprintln(os.Stderr, "Expected:\n"+string(content)) t.Fatalf("%s: AST dump of dockerfile does not match result", dir) } } } docker-1.6.2/builder/parser/testfiles-negative/0000755000175000017500000000000012524223634021060 5ustar tianontianondocker-1.6.2/builder/parser/testfiles-negative/shykes-nested-json/0000755000175000017500000000000012524223634024615 5ustar tianontianondocker-1.6.2/builder/parser/testfiles-negative/shykes-nested-json/Dockerfile0000644000175000017500000000004212524223634026603 0ustar tianontianonCMD [ "echo", [ "nested json" ] ] docker-1.6.2/builder/parser/testfiles-negative/env_no_value/0000755000175000017500000000000012524223634023540 5ustar tianontianondocker-1.6.2/builder/parser/testfiles-negative/env_no_value/Dockerfile0000644000175000017500000000002712524223634025531 0ustar tianontianonFROM busybox ENV PATH docker-1.6.2/builder/parser/parser.go0000644000175000017500000000754712524223634017116 0ustar tianontianon// This package implements a parser and parse tree dumper for Dockerfiles. package parser import ( "bufio" "io" "regexp" "strings" "unicode" "github.com/docker/docker/builder/command" ) // Node is a structure used to represent a parse tree. // // In the node there are three fields, Value, Next, and Children. Value is the // current token's string value. Next is always the next non-child token, and // children contains all the children. Here's an example: // // (value next (child child-next child-next-next) next-next) // // This data structure is frankly pretty lousy for handling complex languages, // but lucky for us the Dockerfile isn't very complicated. This structure // works a little more effectively than a "proper" parse tree for our needs. 
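//
// For instance, the line `ENV foo bar` (old, space-separated form) parses
// into:
//
//	&Node{Value: "env", Next: &Node{Value: "foo", Next: &Node{Value: "bar"}}}
//
// which Dump() renders as (env "foo" "bar").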
// type Node struct { Value string // actual content Next *Node // the next item in the current sexp Children []*Node // the children of this sexp Attributes map[string]bool // special attributes for this node Original string // original line used before parsing } var ( dispatch map[string]func(string) (*Node, map[string]bool, error) TOKEN_WHITESPACE = regexp.MustCompile(`[\t\v\f\r ]+`) TOKEN_LINE_CONTINUATION = regexp.MustCompile(`\\[ \t]*$`) TOKEN_COMMENT = regexp.MustCompile(`^#.*$`) ) func init() { // Dispatch Table. See line_parsers.go for the parse functions. // The command is parsed and mapped to the line parser. The line parser // receives the arguments but not the command, and returns an AST after // reformulating the arguments according to the rules in the parser // functions. Errors are propagated up by Parse() and the resulting AST can // be incorporated directly into the existing AST as a next. dispatch = map[string]func(string) (*Node, map[string]bool, error){ command.User: parseString, command.Onbuild: parseSubCommand, command.Workdir: parseString, command.Env: parseEnv, command.Label: parseLabel, command.Maintainer: parseString, command.From: parseString, command.Add: parseMaybeJSONToList, command.Copy: parseMaybeJSONToList, command.Run: parseMaybeJSON, command.Cmd: parseMaybeJSON, command.Entrypoint: parseMaybeJSON, command.Expose: parseStringsWhitespaceDelimited, command.Volume: parseMaybeJSONToList, command.Insert: parseIgnore, } } // parse a line and return the remainder. func parseLine(line string) (string, *Node, error) { if line = stripComments(line); line == "" { return "", nil, nil } if TOKEN_LINE_CONTINUATION.MatchString(line) { line = TOKEN_LINE_CONTINUATION.ReplaceAllString(line, "") return line, nil, nil } cmd, args, err := splitCommand(line) if err != nil { return "", nil, err } node := &Node{} node.Value = cmd sexp, attrs, err := fullDispatch(cmd, args) if err != nil { return "", nil, err } node.Next = sexp node.Attributes = attrs node.Original = line return "", node, nil } // The main parse routine. Handles an io.Reader and returns the root // of the AST.
func Parse(rwc io.Reader) (*Node, error) { root := &Node{} scanner := bufio.NewScanner(rwc) for scanner.Scan() { scannedLine := strings.TrimLeftFunc(scanner.Text(), unicode.IsSpace) line, child, err := parseLine(scannedLine) if err != nil { return nil, err } if line != "" && child == nil { for scanner.Scan() { newline := scanner.Text() if stripComments(strings.TrimSpace(newline)) == "" { continue } line, child, err = parseLine(line + newline) if err != nil { return nil, err } if child != nil { break } } if child == nil && line != "" { line, child, err = parseLine(line) if err != nil { return nil, err } } } if child != nil { root.Children = append(root.Children, child) } } return root, nil } docker-1.6.2/builder/parser/dumper/0000755000175000017500000000000012524223634016552 5ustar tianontianondocker-1.6.2/builder/parser/dumper/main.go0000644000175000017500000000063712524223634020033 0ustar tianontianonpackage main import ( "fmt" "os" "github.com/docker/docker/builder/parser" ) func main() { var f *os.File var err error if len(os.Args) < 2 { fmt.Println("please supply filename(s)") os.Exit(1) } for _, fn := range os.Args[1:] { f, err = os.Open(fn) if err != nil { panic(err) } ast, err := parser.Parse(f) if err != nil { panic(err) } else { fmt.Println(ast.Dump()) } } } docker-1.6.2/builder/parser/json_test.go0000644000175000017500000000302212524223634017612 0ustar tianontianonpackage parser import ( "testing" ) var invalidJSONArraysOfStrings = []string{ `["a",42,"b"]`, `["a",123.456,"b"]`, `["a",{},"b"]`, `["a",{"c": "d"},"b"]`, `["a",["c"],"b"]`, `["a",true,"b"]`, `["a",false,"b"]`, `["a",null,"b"]`, } var validJSONArraysOfStrings = map[string][]string{ `[]`: {}, `[""]`: {""}, `["a"]`: {"a"}, `["a","b"]`: {"a", "b"}, `[ "a", "b" ]`: {"a", "b"}, `[ "a", "b" ]`: {"a", "b"}, ` [ "a", "b" ] `: {"a", "b"}, `["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]`: {"abc 123", "♥", "☃", "\" \\ / \b \f \n \r \t \u0000"}, } func TestJSONArraysOfStrings(t *testing.T) { for json, expected := range validJSONArraysOfStrings { if node, _, err := parseJSON(json); err != nil { t.Fatalf("%q should be a valid JSON array of strings, but wasn't! (err: %q)", json, err) } else { i := 0 for node != nil { if i >= len(expected) { t.Fatalf("expected result is shorter than parsed result (%d vs %d+) in %q", len(expected), i+1, json) } if node.Value != expected[i] { t.Fatalf("expected %q (not %q) in %q at pos %d", expected[i], node.Value, json, i) } node = node.Next i++ } if i != len(expected) { t.Fatalf("expected result is longer than parsed result (%d vs %d) in %q", len(expected), i+1, json) } } } for _, json := range invalidJSONArraysOfStrings { if _, _, err := parseJSON(json); err != errDockerfileNotStringArray { t.Fatalf("%q should be an invalid JSON array of strings, but wasn't!", json) } } } docker-1.6.2/builder/parser/utils.go0000644000175000017500000000343612524223634016753 0ustar tianontianonpackage parser import ( "strconv" "strings" ) // dumps the AST defined by `node` as a list of sexps. Returns a string // suitable for printing. func (node *Node) Dump() string { str := "" str += node.Value for _, n := range node.Children { str += "(" + n.Dump() + ")\n" } if node.Next != nil { for n := node.Next; n != nil; n = n.Next { if len(n.Children) > 0 { str += " " + n.Dump() } else { str += " " + strconv.Quote(n.Value) } } } return strings.TrimSpace(str) } // performs the dispatch based on the two primal strings, cmd and args. Please // look at the dispatch table in parser.go to see how these dispatchers work. 
func fullDispatch(cmd, args string) (*Node, map[string]bool, error) { fn := dispatch[cmd] // Ignore invalid Dockerfile instructions if fn == nil { fn = parseIgnore } sexp, attrs, err := fn(args) if err != nil { return nil, nil, err } return sexp, attrs, nil } // splitCommand takes a single line of text and parses out the cmd and args, // which are used for dispatching to more exact parsing functions. func splitCommand(line string) (string, string, error) { var args string // Make sure we get the same results irrespective of leading/trailing spaces cmdline := TOKEN_WHITESPACE.Split(strings.TrimSpace(line), 2) cmd := strings.ToLower(cmdline[0]) if len(cmdline) == 2 { args = strings.TrimSpace(cmdline[1]) } // the cmd should never have whitespace, but it's possible for the args to // have trailing whitespace. return cmd, args, nil } // covers comments and empty lines. Lines should be trimmed before passing to // this function. func stripComments(line string) string { // string is already trimmed at this point if TOKEN_COMMENT.MatchString(line) { return TOKEN_COMMENT.ReplaceAllString(line, "") } return line } docker-1.6.2/builder/job.go0000644000175000017500000001233412524223634015066 0ustar tianontianonpackage builder import ( "bytes" "encoding/json" "io" "io/ioutil" "os" "os/exec" "strings" "github.com/docker/docker/api" "github.com/docker/docker/builder/parser" "github.com/docker/docker/daemon" "github.com/docker/docker/engine" "github.com/docker/docker/graph" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/urlutil" "github.com/docker/docker/registry" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) // whitelist of commands allowed for a commit/import var validCommitCommands = map[string]bool{ "entrypoint": true, "cmd": true, "user": true, "workdir": true, "env": true, "volume": true, "expose": true, "onbuild": true, } type BuilderJob struct { Engine *engine.Engine Daemon *daemon.Daemon } func (b *BuilderJob) Install() { b.Engine.Register("build", b.CmdBuild) b.Engine.Register("build_config", b.CmdBuildConfig) } func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status { if len(job.Args) != 0 { return job.Errorf("Usage: %s\n", job.Name) } var ( dockerfileName = job.Getenv("dockerfile") remoteURL = job.Getenv("remote") repoName = job.Getenv("t") suppressOutput = job.GetenvBool("q") noCache = job.GetenvBool("nocache") rm = job.GetenvBool("rm") forceRm = job.GetenvBool("forcerm") pull = job.GetenvBool("pull") memory = job.GetenvInt64("memory") memorySwap = job.GetenvInt64("memswap") cpuShares = job.GetenvInt64("cpushares") cpuSetCpus = job.Getenv("cpusetcpus") authConfig = ®istry.AuthConfig{} configFile = ®istry.ConfigFile{} tag string context io.ReadCloser ) job.GetenvJson("authConfig", authConfig) job.GetenvJson("configFile", configFile) repoName, tag = parsers.ParseRepositoryTag(repoName) if repoName != "" { if err := registry.ValidateRepositoryName(repoName); err != nil { return job.Error(err) } if len(tag) > 0 { if err := graph.ValidateTagName(tag); err != nil { return job.Error(err) } } } if remoteURL == "" { context = ioutil.NopCloser(job.Stdin) } else if urlutil.IsGitURL(remoteURL) { if !urlutil.IsGitTransport(remoteURL) { remoteURL = "https://" + remoteURL } root, err := ioutil.TempDir("", "docker-build-git") if err != nil { return job.Error(err) } defer os.RemoveAll(root) if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil { return 
job.Errorf("Error trying to use git: %s (%s)", err, output) } c, err := archive.Tar(root, archive.Uncompressed) if err != nil { return job.Error(err) } context = c } else if urlutil.IsURL(remoteURL) { f, err := utils.Download(remoteURL) if err != nil { return job.Error(err) } defer f.Body.Close() dockerFile, err := ioutil.ReadAll(f.Body) if err != nil { return job.Error(err) } // When we're downloading just a Dockerfile put it in // the default name - don't allow the client to move/specify it dockerfileName = api.DefaultDockerfileName c, err := archive.Generate(dockerfileName, string(dockerFile)) if err != nil { return job.Error(err) } context = c } defer context.Close() sf := utils.NewStreamFormatter(job.GetenvBool("json")) builder := &Builder{ Daemon: b.Daemon, Engine: b.Engine, OutStream: &utils.StdoutFormater{ Writer: job.Stdout, StreamFormatter: sf, }, ErrStream: &utils.StderrFormater{ Writer: job.Stdout, StreamFormatter: sf, }, Verbose: !suppressOutput, UtilizeCache: !noCache, Remove: rm, ForceRemove: forceRm, Pull: pull, OutOld: job.Stdout, StreamFormatter: sf, AuthConfig: authConfig, AuthConfigFile: configFile, dockerfileName: dockerfileName, cpuShares: cpuShares, cpuSetCpus: cpuSetCpus, memory: memory, memorySwap: memorySwap, cancelled: job.WaitCancelled(), } id, err := builder.Run(context) if err != nil { return job.Error(err) } if repoName != "" { b.Daemon.Repositories().Set(repoName, tag, id, true) } return engine.StatusOK } func (b *BuilderJob) CmdBuildConfig(job *engine.Job) engine.Status { if len(job.Args) != 0 { return job.Errorf("Usage: %s\n", job.Name) } var ( changes = job.GetenvList("changes") newConfig runconfig.Config ) if err := job.GetenvJson("config", &newConfig); err != nil { return job.Error(err) } ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n"))) if err != nil { return job.Error(err) } // ensure that the commands are valid for _, n := range ast.Children { if !validCommitCommands[n.Value] { return job.Errorf("%s is not a valid change command", n.Value) } } builder := &Builder{ Daemon: b.Daemon, Engine: b.Engine, Config: &newConfig, OutStream: ioutil.Discard, ErrStream: ioutil.Discard, disableCommit: true, } for i, n := range ast.Children { if err := builder.dispatch(i, n); err != nil { return job.Error(err) } } if err := json.NewEncoder(job.Stdout).Encode(builder.Config); err != nil { return job.Error(err) } return engine.StatusOK } docker-1.6.2/builder/dispatchers.go0000644000175000017500000002671712524223634016637 0ustar tianontianonpackage builder // This file contains the dispatchers for each command. Note that // `nullDispatch` is not actually a command, but support for commands we parse // but do nothing with. // // See evaluator.go for a higher level discussion of the whole evaluator // package. import ( "fmt" "io/ioutil" "path/filepath" "regexp" "sort" "strings" log "github.com/Sirupsen/logrus" "github.com/docker/docker/nat" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/runconfig" ) const ( // NoBaseImageSpecifier is the symbol used by the FROM // command to specify that no base image is to be used. NoBaseImageSpecifier string = "scratch" ) // dispatch with no layer / parsing. This is effectively not a command. func nullDispatch(b *Builder, args []string, attributes map[string]bool, original string) error { return nil } // ENV foo bar // // Sets the environment variable foo to bar, also makes interpolation // in the dockerfile available from the next statement on via ${foo}. 
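//
// For illustration: by the time env runs, `ENV foo bar` has been split by the
// parser into args = []string{"foo", "bar"}, which this dispatcher records in
// b.Config.Env as "foo=bar" (replacing any existing "foo=..." entry).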
// func env(b *Builder, args []string, attributes map[string]bool, original string) error { if len(args) == 0 { return fmt.Errorf("ENV requires at least one argument") } if len(args)%2 != 0 { // should never get here, but just in case return fmt.Errorf("Bad input to ENV, too many args") } commitStr := "ENV" for j := 0; j < len(args); j++ { // name ==> args[j] // value ==> args[j+1] newVar := args[j] + "=" + args[j+1] commitStr += " " + newVar gotOne := false for i, envVar := range b.Config.Env { envParts := strings.SplitN(envVar, "=", 2) if envParts[0] == args[j] { b.Config.Env[i] = newVar gotOne = true break } } if !gotOne { b.Config.Env = append(b.Config.Env, newVar) } j++ } return b.commit("", b.Config.Cmd, commitStr) } // MAINTAINER some text // // Sets the maintainer metadata. func maintainer(b *Builder, args []string, attributes map[string]bool, original string) error { if len(args) != 1 { return fmt.Errorf("MAINTAINER requires exactly one argument") } b.maintainer = args[0] return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer)) } // LABEL some json data describing the image // // Sets the label foo to bar. // func label(b *Builder, args []string, attributes map[string]bool, original string) error { if len(args) == 0 { return fmt.Errorf("LABEL requires at least one argument") } if len(args)%2 != 0 { // should never get here, but just in case return fmt.Errorf("Bad input to LABEL, too many args") } commitStr := "LABEL" if b.Config.Labels == nil { b.Config.Labels = map[string]string{} } for j := 0; j < len(args); j++ { // name ==> args[j] // value ==> args[j+1] newVar := args[j] + "=" + args[j+1] commitStr += " " + newVar b.Config.Labels[args[j]] = args[j+1] j++ } return b.commit("", b.Config.Cmd, commitStr) } // ADD foo /path // // Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling // exist here. If you do not wish to have this automatic handling, use COPY. // func add(b *Builder, args []string, attributes map[string]bool, original string) error { if len(args) < 2 { return fmt.Errorf("ADD requires at least two arguments") } return b.runContextCommand(args, true, true, "ADD") } // COPY foo /path // // Same as 'ADD' but without the tar and remote url handling. // func dispatchCopy(b *Builder, args []string, attributes map[string]bool, original string) error { if len(args) < 2 { return fmt.Errorf("COPY requires at least two arguments") } return b.runContextCommand(args, false, false, "COPY") } // FROM imagename // // This sets the image the dockerfile will build on top of. // func from(b *Builder, args []string, attributes map[string]bool, original string) error { if len(args) != 1 { return fmt.Errorf("FROM requires one argument") } name := args[0] if name == NoBaseImageSpecifier { b.image = "" b.noBaseImage = true return nil } image, err := b.Daemon.Repositories().LookupImage(name) if b.Pull { image, err = b.pullImage(name) if err != nil { return err } } if err != nil { if b.Daemon.Graph().IsNotExist(err) { image, err = b.pullImage(name) } // note that the top level err will still be !nil here if IsNotExist is // not the error. This approach just simplifies the logic a bit. if err != nil { return err } } return b.processImageFrom(image) } // ONBUILD RUN echo yo // // ONBUILD triggers run when the image is used in a FROM statement. // // ONBUILD handling has a lot of special-case functionality, the heading in // evaluator.go and comments around dispatch() in the same file explain the // special cases.
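//
// For example (illustrative), `ONBUILD ADD . /app/src` is stored verbatim
// (minus the ONBUILD prefix) in b.Config.OnBuild and is replayed when a
// downstream build uses the resulting image in its FROM line. Also,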
search for 'OnBuild' in internals.go for additional special // cases. // func onbuild(b *Builder, args []string, attributes map[string]bool, original string) error { if len(args) == 0 { return fmt.Errorf("ONBUILD requires at least one argument") } triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0])) switch triggerInstruction { case "ONBUILD": return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") case "MAINTAINER", "FROM": return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) } original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "") b.Config.OnBuild = append(b.Config.OnBuild, original) return b.commit("", b.Config.Cmd, fmt.Sprintf("ONBUILD %s", original)) } // WORKDIR /tmp // // Set the working directory for future RUN/CMD/etc statements. // func workdir(b *Builder, args []string, attributes map[string]bool, original string) error { if len(args) != 1 { return fmt.Errorf("WORKDIR requires exactly one argument") } workdir := args[0] if !filepath.IsAbs(workdir) { workdir = filepath.Join("/", b.Config.WorkingDir, workdir) } b.Config.WorkingDir = workdir return b.commit("", b.Config.Cmd, fmt.Sprintf("WORKDIR %v", workdir)) } // RUN some command yo // // run a command and commit the image. Args are automatically prepended with // 'sh -c' in the event there is only one argument. The difference in // processing: // // RUN echo hi # sh -c echo hi // RUN [ "echo", "hi" ] # echo hi // func run(b *Builder, args []string, attributes map[string]bool, original string) error { if b.image == "" && !b.noBaseImage { return fmt.Errorf("Please provide a source image with `from` prior to run") } args = handleJsonArgs(args, attributes) if !attributes["json"] { args = append([]string{"/bin/sh", "-c"}, args...) } runCmd := flag.NewFlagSet("run", flag.ContinueOnError) runCmd.SetOutput(ioutil.Discard) runCmd.Usage = nil config, _, _, err := runconfig.Parse(runCmd, append([]string{b.image}, args...)) if err != nil { return err } cmd := b.Config.Cmd // set Cmd manually, this is special case only for Dockerfiles b.Config.Cmd = config.Cmd runconfig.Merge(b.Config, config) defer func(cmd []string) { b.Config.Cmd = cmd }(cmd) log.Debugf("[BUILDER] Command to be executed: %v", b.Config.Cmd) hit, err := b.probeCache() if err != nil { return err } if hit { return nil } c, err := b.create() if err != nil { return err } // Ensure that we keep the container mounted until the commit // to avoid unmounting and then mounting directly again c.Mount() defer c.Unmount() err = b.run(c) if err != nil { return err } if err := b.commit(c.ID, cmd, "run"); err != nil { return err } return nil } // CMD foo // // Set the default command to run in the container (which may be empty). // Argument handling is the same as RUN. // func cmd(b *Builder, args []string, attributes map[string]bool, original string) error { b.Config.Cmd = handleJsonArgs(args, attributes) if !attributes["json"] { b.Config.Cmd = append([]string{"/bin/sh", "-c"}, b.Config.Cmd...) } if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %q", b.Config.Cmd)); err != nil { return err } if len(args) != 0 { b.cmdSet = true } return nil } // ENTRYPOINT /usr/sbin/nginx // // Set the entrypoint (which defaults to sh -c) to /usr/sbin/nginx. Will // accept the CMD as the arguments to /usr/sbin/nginx. // // Handles command processing similar to CMD and RUN, only b.Config.Entrypoint // is initialized at NewBuilder time instead of through argument parsing. 
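//
// Sketch of the two forms (illustrative only):
//
//	ENTRYPOINT ["echo", "hi"]  ->  Entrypoint = ["echo", "hi"]
//	ENTRYPOINT echo hi         ->  Entrypoint = ["/bin/sh", "-c", "echo hi"]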
// func entrypoint(b *Builder, args []string, attributes map[string]bool, original string) error { parsed := handleJsonArgs(args, attributes) switch { case attributes["json"]: // ENTRYPOINT ["echo", "hi"] b.Config.Entrypoint = parsed case len(parsed) == 0: // ENTRYPOINT [] b.Config.Entrypoint = nil default: // ENTRYPOINT echo hi b.Config.Entrypoint = []string{"/bin/sh", "-c", parsed[0]} } // when setting the entrypoint if a CMD was not explicitly set then // set the command to nil if !b.cmdSet { b.Config.Cmd = nil } if err := b.commit("", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.Config.Entrypoint)); err != nil { return err } return nil } // EXPOSE 6667/tcp 7000/tcp // // Expose ports for links and port mappings. This all ends up in // b.Config.ExposedPorts for runconfig. // func expose(b *Builder, args []string, attributes map[string]bool, original string) error { portsTab := args if len(args) == 0 { return fmt.Errorf("EXPOSE requires at least one argument") } if b.Config.ExposedPorts == nil { b.Config.ExposedPorts = make(nat.PortSet) } ports, bindingMap, err := nat.ParsePortSpecs(append(portsTab, b.Config.PortSpecs...)) if err != nil { return err } for _, bindings := range bindingMap { if bindings[0].HostIp != "" || bindings[0].HostPort != "" { fmt.Fprintf(b.ErrStream, " ---> Using Dockerfile's EXPOSE instruction"+ " to map host ports to container ports (ip:hostPort:containerPort) is deprecated.\n"+ " Please use -p to publish the ports.\n") } } // instead of using ports directly, we build a list of ports and sort it so // the order is consistent. This prevents cache burst where map ordering // changes between builds portList := make([]string, len(ports)) var i int for port := range ports { if _, exists := b.Config.ExposedPorts[port]; !exists { b.Config.ExposedPorts[port] = struct{}{} } portList[i] = string(port) i++ } sort.Strings(portList) b.Config.PortSpecs = nil return b.commit("", b.Config.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " "))) } // USER foo // // Set the user to 'foo' for future commands and when running the // ENTRYPOINT/CMD at container run time. // func user(b *Builder, args []string, attributes map[string]bool, original string) error { if len(args) != 1 { return fmt.Errorf("USER requires exactly one argument") } b.Config.User = args[0] return b.commit("", b.Config.Cmd, fmt.Sprintf("USER %v", args)) } // VOLUME /foo // // Expose the volume /foo for use. Will also accept the JSON array form. // func volume(b *Builder, args []string, attributes map[string]bool, original string) error { if len(args) == 0 { return fmt.Errorf("VOLUME requires at least one argument") } if b.Config.Volumes == nil { b.Config.Volumes = map[string]struct{}{} } for _, v := range args { v = strings.TrimSpace(v) if v == "" { return fmt.Errorf("Volume specified can not be an empty string") } b.Config.Volumes[v] = struct{}{} } if err := b.commit("", b.Config.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil { return err } return nil } // INSERT is no longer accepted, but we still parse it. func insert(b *Builder, args []string, attributes map[string]bool, original string) error { return fmt.Errorf("INSERT has been deprecated. 
Please use ADD instead") } docker-1.6.2/builder/shell_parser_test.go0000644000175000017500000000165112524223634020036 0ustar tianontianonpackage builder import ( "bufio" "os" "strings" "testing" ) func TestShellParser(t *testing.T) { file, err := os.Open("words") if err != nil { t.Fatalf("Can't open 'words': %s", err) } defer file.Close() scanner := bufio.NewScanner(file) envs := []string{"PWD=/home", "SHELL=bash"} for scanner.Scan() { line := scanner.Text() // Trim comments and blank lines i := strings.Index(line, "#") if i >= 0 { line = line[:i] } line = strings.TrimSpace(line) if line == "" { continue } words := strings.Split(line, "|") if len(words) != 2 { t.Fatalf("Error in 'words' - should be 2 words:%q", words) } words[0] = strings.TrimSpace(words[0]) words[1] = strings.TrimSpace(words[1]) newWord, err := ProcessWord(words[0], envs) if err != nil { newWord = "error" } if newWord != words[1] { t.Fatalf("Error. Src: %s Calc: %s Expected: %s", words[0], newWord, words[1]) } } } docker-1.6.2/builder/internals.go0000644000175000017500000004730412524223634016320 0ustar tianontianonpackage builder // internals for handling commands. Covers many areas and a lot of // non-contiguous functionality. Please read the comments. import ( "crypto/sha256" "encoding/hex" "fmt" "io" "io/ioutil" "net/http" "net/url" "os" "path" "path/filepath" "sort" "strings" "syscall" "time" log "github.com/Sirupsen/logrus" "github.com/docker/docker/builder/parser" "github.com/docker/docker/daemon" imagepkg "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/common" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/progressreader" "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/pkg/urlutil" "github.com/docker/docker/registry" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) func (b *Builder) readContext(context io.Reader) error { tmpdirPath, err := ioutil.TempDir("", "docker-build") if err != nil { return err } decompressedStream, err := archive.DecompressStream(context) if err != nil { return err } if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version0); err != nil { return err } if err := chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil { return err } b.contextPath = tmpdirPath return nil } func (b *Builder) commit(id string, autoCmd []string, comment string) error { if b.disableCommit { return nil } if b.image == "" && !b.noBaseImage { return fmt.Errorf("Please provide a source image with `from` prior to commit") } b.Config.Image = b.image if id == "" { cmd := b.Config.Cmd b.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment} defer func(cmd []string) { b.Config.Cmd = cmd }(cmd) hit, err := b.probeCache() if err != nil { return err } if hit { return nil } container, err := b.create() if err != nil { return err } id = container.ID if err := container.Mount(); err != nil { return err } defer container.Unmount() } container, err := b.Daemon.Get(id) if err != nil { return err } // Note: Actually copy the struct autoConfig := *b.Config autoConfig.Cmd = autoCmd // Commit the container image, err := b.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig) if err != nil { return err } b.image = image.ID return nil } type copyInfo struct { origPath string destPath string hash string decompress bool 
tmpDir string } func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error { if b.context == nil { return fmt.Errorf("No context given. Impossible to use %s", cmdName) } if len(args) < 2 { return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName) } dest := args[len(args)-1] // last one is always the dest copyInfos := []*copyInfo{} b.Config.Image = b.image defer func() { for _, ci := range copyInfos { if ci.tmpDir != "" { os.RemoveAll(ci.tmpDir) } } }() // Loop through each src file and calculate the info we need to // do the copy (e.g. hash value if cached). Don't actually do // the copy until we've looked at all src files for _, orig := range args[0 : len(args)-1] { err := calcCopyInfo(b, cmdName, ©Infos, orig, dest, allowRemote, allowDecompression) if err != nil { return err } } if len(copyInfos) == 0 { return fmt.Errorf("No source files were specified") } if len(copyInfos) > 1 && !strings.HasSuffix(dest, "/") { return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) } // For backwards compat, if there's just one CI then use it as the // cache look-up string, otherwise hash 'em all into one var srcHash string var origPaths string if len(copyInfos) == 1 { srcHash = copyInfos[0].hash origPaths = copyInfos[0].origPath } else { var hashs []string var origs []string for _, ci := range copyInfos { hashs = append(hashs, ci.hash) origs = append(origs, ci.origPath) } hasher := sha256.New() hasher.Write([]byte(strings.Join(hashs, ","))) srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil)) origPaths = strings.Join(origs, " ") } cmd := b.Config.Cmd b.Config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest)} defer func(cmd []string) { b.Config.Cmd = cmd }(cmd) hit, err := b.probeCache() if err != nil { return err } if hit { return nil } container, _, err := b.Daemon.Create(b.Config, nil, "") if err != nil { return err } b.TmpContainers[container.ID] = struct{}{} if err := container.Mount(); err != nil { return err } defer container.Unmount() for _, ci := range copyInfos { if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil { return err } } if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil { return err } return nil } func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool) error { if origPath != "" && origPath[0] == '/' && len(origPath) > 1 { origPath = origPath[1:] } origPath = strings.TrimPrefix(origPath, "./") // Twiddle the destPath when its a relative path - meaning, make it // relative to the WORKINGDIR if !filepath.IsAbs(destPath) { hasSlash := strings.HasSuffix(destPath, "/") destPath = filepath.Join("/", b.Config.WorkingDir, destPath) // Make sure we preserve any trailing slash if hasSlash { destPath += "/" } } // In the remote/URL case, download it and gen its hashcode if urlutil.IsURL(origPath) { if !allowRemote { return fmt.Errorf("Source can't be a URL for %s", cmdName) } ci := copyInfo{} ci.origPath = origPath ci.hash = origPath // default to this but can change ci.destPath = destPath ci.decompress = false *cInfos = append(*cInfos, &ci) // Initiate the download resp, err := utils.Download(ci.origPath) if err != nil { return err } // Create a tmp dir tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote") if err 
!= nil { return err } ci.tmpDir = tmpDirName // Create a tmp file within our tmp dir tmpFileName := path.Join(tmpDirName, "tmp") tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) if err != nil { return err } // Download and dump result to tmp file if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{ In: resp.Body, Out: b.OutOld, Formatter: b.StreamFormatter, Size: int(resp.ContentLength), NewLines: true, ID: "", Action: "Downloading", })); err != nil { tmpFile.Close() return err } fmt.Fprintf(b.OutStream, "\n") tmpFile.Close() // Set the mtime to the Last-Modified header value if present // Otherwise just remove atime and mtime times := make([]syscall.Timespec, 2) lastMod := resp.Header.Get("Last-Modified") if lastMod != "" { mTime, err := http.ParseTime(lastMod) // If we can't parse it then just let it default to 'zero' // otherwise use the parsed time value if err == nil { times[1] = syscall.NsecToTimespec(mTime.UnixNano()) } } if err := system.UtimesNano(tmpFileName, times); err != nil { return err } ci.origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName)) // If the destination is a directory, figure out the filename. if strings.HasSuffix(ci.destPath, "/") { u, err := url.Parse(origPath) if err != nil { return err } path := u.Path if strings.HasSuffix(path, "/") { path = path[:len(path)-1] } parts := strings.Split(path, "/") filename := parts[len(parts)-1] if filename == "" { return fmt.Errorf("cannot determine filename from url: %s", u) } ci.destPath = ci.destPath + filename } // Calc the checksum, even if we're using the cache r, err := archive.Tar(tmpFileName, archive.Uncompressed) if err != nil { return err } tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0) if err != nil { return err } if _, err := io.Copy(ioutil.Discard, tarSum); err != nil { return err } ci.hash = tarSum.Sum(nil) r.Close() return nil } // Deal with wildcards if ContainsWildcards(origPath) { for _, fileInfo := range b.context.GetSums() { if fileInfo.Name() == "" { continue } match, _ := path.Match(origPath, fileInfo.Name()) if !match { continue } calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression) } return nil } // Must be a dir or a file if err := b.checkPathForAddition(origPath); err != nil { return err } fi, _ := os.Stat(path.Join(b.contextPath, origPath)) ci := copyInfo{} ci.origPath = origPath ci.hash = origPath ci.destPath = destPath ci.decompress = allowDecompression *cInfos = append(*cInfos, &ci) // Deal with the single file case if !fi.IsDir() { // This will match first file in sums of the archive fis := b.context.GetSums().GetFile(ci.origPath) if fis != nil { ci.hash = "file:" + fis.Sum() } return nil } // Must be a dir var subfiles []string absOrigPath := path.Join(b.contextPath, ci.origPath) // Add a trailing / to make sure we only pick up nested files under // the dir and not sibling files of the dir that just happen to // start with the same chars if !strings.HasSuffix(absOrigPath, "/") { absOrigPath += "/" } // Need path w/o / too to find matching dir w/o trailing / absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1] for _, fileInfo := range b.context.GetSums() { absFile := path.Join(b.contextPath, fileInfo.Name()) // Any file in the context that starts with the given path will be // picked up and its hashcode used. However, we'll exclude the // root dir itself. 
We do this for a couple of reasons: // 1 - ADD/COPY will not copy the dir itself, just its children // so there's no reason to include it in the hash calc // 2 - the metadata on the dir will change when any child file // changes. This will lead to a miss in the cache check if that // child file is in the .dockerignore list. if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash { subfiles = append(subfiles, fileInfo.Sum()) } } sort.Strings(subfiles) hasher := sha256.New() hasher.Write([]byte(strings.Join(subfiles, ","))) ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil)) return nil } func ContainsWildcards(name string) bool { for i := 0; i < len(name); i++ { ch := name[i] if ch == '\\' { i++ } else if ch == '*' || ch == '?' || ch == '[' { return true } } return false } func (b *Builder) pullImage(name string) (*imagepkg.Image, error) { remote, tag := parsers.ParseRepositoryTag(name) if tag == "" { tag = "latest" } job := b.Engine.Job("pull", remote, tag) pullRegistryAuth := b.AuthConfig if len(b.AuthConfigFile.Configs) > 0 { // The request came with a full auth config file, we prefer to use that repoInfo, err := registry.ResolveRepositoryInfo(job, remote) if err != nil { return nil, err } resolvedAuth := b.AuthConfigFile.ResolveAuthConfig(repoInfo.Index) pullRegistryAuth = &resolvedAuth } job.SetenvBool("json", b.StreamFormatter.Json()) job.SetenvBool("parallel", true) job.SetenvJson("authConfig", pullRegistryAuth) job.Stdout.Add(ioutils.NopWriteCloser(b.OutOld)) if err := job.Run(); err != nil { return nil, err } image, err := b.Daemon.Repositories().LookupImage(name) if err != nil { return nil, err } return image, nil } func (b *Builder) processImageFrom(img *imagepkg.Image) error { b.image = img.ID if img.Config != nil { b.Config = img.Config } if len(b.Config.Env) == 0 { b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv) } // Process ONBUILD triggers if they exist if nTriggers := len(b.Config.OnBuild); nTriggers != 0 { fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers) } // Copy the ONBUILD triggers, and remove them from the config, since the config will be committed. onBuildTriggers := b.Config.OnBuild b.Config.OnBuild = []string{} // parse the ONBUILD triggers by invoking the parser for stepN, step := range onBuildTriggers { ast, err := parser.Parse(strings.NewReader(step)) if err != nil { return err } for i, n := range ast.Children { switch strings.ToUpper(n.Value) { case "ONBUILD": return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") case "MAINTAINER", "FROM": return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value) } fmt.Fprintf(b.OutStream, "Trigger %d, %s\n", stepN, step) if err := b.dispatch(i, n); err != nil { return err } } } return nil } // probeCache checks to see if image-caching is enabled (`b.UtilizeCache`) // and if so attempts to look up the current `b.image` and `b.Config` pair // in the current server `b.Daemon`. If an image is found, probeCache returns // `(true, nil)`. If no image is found, it returns `(false, nil)`. If there // is any error, it returns `(false, err)`.
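//
// Callers throughout this file follow the same pattern (sketch):
//
//	hit, err := b.probeCache()
//	if err != nil {
//		return err
//	}
//	if hit {
//		return nil // cache hit: b.image already points at the cached image
//	}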
func (b *Builder) probeCache() (bool, error) { if !b.UtilizeCache || b.cacheBusted { return false, nil } cache, err := b.Daemon.ImageGetCached(b.image, b.Config) if err != nil { return false, err } if cache == nil { log.Debugf("[BUILDER] Cache miss") b.cacheBusted = true return false, nil } fmt.Fprintf(b.OutStream, " ---> Using cache\n") log.Debugf("[BUILDER] Use cached version") b.image = cache.ID return true, nil } func (b *Builder) create() (*daemon.Container, error) { if b.image == "" && !b.noBaseImage { return nil, fmt.Errorf("Please provide a source image with `from` prior to run") } b.Config.Image = b.image hostConfig := &runconfig.HostConfig{ CpuShares: b.cpuShares, CpusetCpus: b.cpuSetCpus, Memory: b.memory, MemorySwap: b.memorySwap, } config := *b.Config // Create the container c, warnings, err := b.Daemon.Create(b.Config, hostConfig, "") if err != nil { return nil, err } for _, warning := range warnings { fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning) } b.TmpContainers[c.ID] = struct{}{} fmt.Fprintf(b.OutStream, " ---> Running in %s\n", common.TruncateID(c.ID)) if len(config.Cmd) > 0 { // override the entry point that may have been picked up from the base image c.Path = config.Cmd[0] c.Args = config.Cmd[1:] } else { config.Cmd = []string{} } return c, nil } func (b *Builder) run(c *daemon.Container) error { var errCh chan error if b.Verbose { errCh = b.Daemon.Attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, nil, b.OutStream, b.ErrStream) } //start the container if err := c.Start(); err != nil { return err } finished := make(chan struct{}) defer close(finished) go func() { select { case <-b.cancelled: log.Debugln("Build cancelled, killing container:", c.ID) c.Kill() case <-finished: } }() if b.Verbose { // Block on reading output from container, stop on err or chan closed if err := <-errCh; err != nil { return err } } // Wait for it to finish if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 { err := &utils.JSONError{ Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.Config.Cmd, ret), Code: ret, } return err } return nil } func (b *Builder) checkPathForAddition(orig string) error { origPath := path.Join(b.contextPath, orig) origPath, err := filepath.EvalSymlinks(origPath) if err != nil { if os.IsNotExist(err) { return fmt.Errorf("%s: no such file or directory", orig) } return err } if !strings.HasPrefix(origPath, b.contextPath) { return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath) } if _, err := os.Stat(origPath); err != nil { if os.IsNotExist(err) { return fmt.Errorf("%s: no such file or directory", orig) } return err } return nil } func (b *Builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error { var ( err error destExists = true origPath = path.Join(b.contextPath, orig) destPath = path.Join(container.RootfsPath(), dest) ) if destPath != container.RootfsPath() { destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath()) if err != nil { return err } } // Preserve the trailing '/' if strings.HasSuffix(dest, "/") || dest == "." 
{ destPath = destPath + "/" } destStat, err := os.Stat(destPath) if err != nil { if !os.IsNotExist(err) { return err } destExists = false } fi, err := os.Stat(origPath) if err != nil { if os.IsNotExist(err) { return fmt.Errorf("%s: no such file or directory", orig) } return err } if fi.IsDir() { return copyAsDirectory(origPath, destPath, destExists) } // If we are adding a remote file (or we've been told not to decompress), do not try to untar it if decompress { // First try to unpack the source as an archive // to support the untar feature we need to clean up the path a little bit // because tar is very forgiving. First we need to strip off the archive's // filename from the path but this is only added if it does not end in / . tarDest := destPath if strings.HasSuffix(tarDest, "/") { tarDest = filepath.Dir(destPath) } // try to successfully untar the orig if err := chrootarchive.UntarPath(origPath, tarDest); err == nil { return nil } else if err != io.EOF { log.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err) } } if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil { return err } if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil { return err } resPath := destPath if destExists && destStat.IsDir() { resPath = path.Join(destPath, path.Base(origPath)) } return fixPermissions(origPath, resPath, 0, 0, destExists) } func copyAsDirectory(source, destination string, destExisted bool) error { if err := chrootarchive.CopyWithTar(source, destination); err != nil { return err } return fixPermissions(source, destination, 0, 0, destExisted) } func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { // If the destination didn't already exist, or the destination isn't a // directory, then we should Lchown the destination. Otherwise, we shouldn't // Lchown the destination. destStat, err := os.Stat(destination) if err != nil { // This should *never* be reached, because the destination must've already // been created while untar-ing the context. return err } doChownDestination := !destExisted || !destStat.IsDir() // We Walk on the source rather than on the destination because we don't // want to change permissions on things we haven't created or modified. return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { // Do not alter the walk root iff. it existed before, as it doesn't fall under // the domain of "things we should chown". if !doChownDestination && (source == fullpath) { return nil } // Path is prefixed by source: substitute with destination instead. cleaned, err := filepath.Rel(source, fullpath) if err != nil { return err } fullpath = path.Join(destination, cleaned) return os.Lchown(fullpath, uid, gid) }) } func (b *Builder) clearTmp() { for c := range b.TmpContainers { tmp, err := b.Daemon.Get(c) if err != nil { fmt.Fprint(b.OutStream, err.Error()) } if err := b.Daemon.Rm(tmp); err != nil { fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", common.TruncateID(c), err) return } b.Daemon.DeleteVolumes(tmp.VolumePaths()) delete(b.TmpContainers, c) fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", common.TruncateID(c)) } } docker-1.6.2/builder/shell_parser.go0000644000175000017500000001000112524223634016764 0ustar tianontianonpackage builder // This will take a single word and an array of env variables and // process all quotes (" and ') as well as $xxx and ${xxx} env variable // tokens. Tries to mimic bash shell process. 
// It doesn't support all flavors of ${xx:...} formats but new ones can // be added by adding code to the "special ${} format processing" section import ( "fmt" "strings" "unicode" ) type shellWord struct { word string envs []string pos int } func ProcessWord(word string, env []string) (string, error) { sw := &shellWord{ word: word, envs: env, pos: 0, } return sw.process() } func (sw *shellWord) process() (string, error) { return sw.processStopOn('\000') } // Process the word, starting at 'pos', and stop when we get to the // end of the word or the 'stopChar' character func (sw *shellWord) processStopOn(stopChar rune) (string, error) { var result string var charFuncMapping = map[rune]func() (string, error){ '\'': sw.processSingleQuote, '"': sw.processDoubleQuote, '$': sw.processDollar, } for sw.pos < len(sw.word) { ch := sw.peek() if stopChar != '\000' && ch == stopChar { sw.next() break } if fn, ok := charFuncMapping[ch]; ok { // Call special processing func for certain chars tmp, err := fn() if err != nil { return "", err } result += tmp } else { // Not special, just add it to the result ch = sw.next() if ch == '\\' { // '\' escapes, except end of line ch = sw.next() if ch == '\000' { continue } } result += string(ch) } } return result, nil } func (sw *shellWord) peek() rune { if sw.pos == len(sw.word) { return '\000' } return rune(sw.word[sw.pos]) } func (sw *shellWord) next() rune { if sw.pos == len(sw.word) { return '\000' } ch := rune(sw.word[sw.pos]) sw.pos++ return ch } func (sw *shellWord) processSingleQuote() (string, error) { // All chars between single quotes are taken as-is // Note, you can't escape ' var result string sw.next() for { ch := sw.next() if ch == '\000' || ch == '\'' { break } result += string(ch) } return result, nil } func (sw *shellWord) processDoubleQuote() (string, error) { // All chars up to the next " are taken as-is, even ', except any $ chars // But you can escape " with a \ var result string sw.next() for sw.pos < len(sw.word) { ch := sw.peek() if ch == '"' { sw.next() break } if ch == '$' { tmp, err := sw.processDollar() if err != nil { return "", err } result += tmp } else { ch = sw.next() if ch == '\\' { chNext := sw.peek() if chNext == '\000' { // Ignore \ at end of word continue } if chNext == '"' || chNext == '$' { // \" and \$ can be escaped, all other \'s are left as-is ch = sw.next() } } result += string(ch) } } return result, nil } func (sw *shellWord) processDollar() (string, error) { sw.next() ch := sw.peek() if ch == '{' { sw.next() name := sw.processName() ch = sw.peek() if ch == '}' { // Normal ${xx} case sw.next() return sw.getEnv(name), nil } return "", fmt.Errorf("Unsupported ${} substitution: %s", sw.word) } else { // $xxx case name := sw.processName() if name == "" { return "$", nil } return sw.getEnv(name), nil } } func (sw *shellWord) processName() string { // Read in a name (alphanumeric or _) // If it starts with a numeric then just return $# var name string for sw.pos < len(sw.word) { ch := sw.peek() if len(name) == 0 && unicode.IsDigit(ch) { ch = sw.next() return string(ch) } if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' { break } ch = sw.next() name += string(ch) } return name } func (sw *shellWord) getEnv(name string) string { for _, env := range sw.envs { i := strings.Index(env, "=") if i < 0 { if name == env { // Should probably never get here, but just in case treat // it like "var" and "var=" are the same return "" } continue } if name != env[:i] { continue } return env[i+1:] } return "" } 
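To make the quoting and substitution rules above concrete, here is a minimal, hypothetical driver for ProcessWord (not part of the Docker tree; the sample words and environment are assumptions for illustration):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/builder"
)

func main() {
	envs := []string{"FOO=bar", "PWD=/home"}

	// Double quotes permit $ expansion, single quotes suppress it,
	// and a backslash escapes the $ outside of single quotes.
	for _, word := range []string{`"$FOO"`, `'$FOO'`, `${FOO}baz`, `\$FOO`} {
		result, err := builder.ProcessWord(word, envs)
		if err != nil {
			fmt.Println("error:", err)
			continue
		}
		fmt.Printf("%-10s -> %q\n", word, result)
	}
	// Expected, per the rules above: "bar", "$FOO", "barbaz", "$FOO"
}
```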
docker-1.6.2/builder/command/0000755000175000017500000000000012524223634015400 5ustar tianontianondocker-1.6.2/builder/command/command.go0000644000175000017500000000137112524223634017347 0ustar tianontianon// This package contains the set of Dockerfile commands. package command const ( Env = "env" Label = "label" Maintainer = "maintainer" Add = "add" Copy = "copy" From = "from" Onbuild = "onbuild" Workdir = "workdir" Run = "run" Cmd = "cmd" Entrypoint = "entrypoint" Expose = "expose" Volume = "volume" User = "user" Insert = "insert" ) // Commands is list of all Dockerfile commands var Commands = map[string]struct{}{ Env: {}, Label: {}, Maintainer: {}, Add: {}, Copy: {}, From: {}, Onbuild: {}, Workdir: {}, Run: {}, Cmd: {}, Entrypoint: {}, Expose: {}, Volume: {}, User: {}, Insert: {}, } docker-1.6.2/builder/support.go0000644000175000017500000000047112524223634016027 0ustar tianontianonpackage builder import ( "strings" ) func handleJsonArgs(args []string, attributes map[string]bool) []string { if len(args) == 0 { return []string{} } if attributes != nil && attributes["json"] { return args } // literal string command, not an exec array return []string{strings.Join(args, " ")} } docker-1.6.2/trust/0000755000175000017500000000000012524223634013515 5ustar tianontianondocker-1.6.2/trust/service.go0000644000175000017500000000327012524223634015506 0ustar tianontianonpackage trust import ( "fmt" "time" log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/libtrust" ) func (t *TrustStore) Install(eng *engine.Engine) error { for name, handler := range map[string]engine.Handler{ "trust_key_check": t.CmdCheckKey, "trust_update_base": t.CmdUpdateBase, } { if err := eng.Register(name, handler); err != nil { return fmt.Errorf("Could not register %q: %v", name, err) } } return nil } func (t *TrustStore) CmdCheckKey(job *engine.Job) engine.Status { if n := len(job.Args); n != 1 { return job.Errorf("Usage: %s NAMESPACE", job.Name) } var ( namespace = job.Args[0] keyBytes = job.Getenv("PublicKey") ) if keyBytes == "" { return job.Errorf("Missing PublicKey") } pk, err := libtrust.UnmarshalPublicKeyJWK([]byte(keyBytes)) if err != nil { return job.Errorf("Error unmarshalling public key: %s", err) } permission := uint16(job.GetenvInt("Permission")) if permission == 0 { permission = 0x03 } t.RLock() defer t.RUnlock() if t.graph == nil { job.Stdout.Write([]byte("no graph")) return engine.StatusOK } // Check if any expired grants verified, err := t.graph.Verify(pk, namespace, permission) if err != nil { return job.Errorf("Error verifying key to namespace: %s", namespace) } if !verified { log.Debugf("Verification failed for %s using key %s", namespace, pk.KeyID()) job.Stdout.Write([]byte("not verified")) } else if t.expiration.Before(time.Now()) { job.Stdout.Write([]byte("expired")) } else { job.Stdout.Write([]byte("verified")) } return engine.StatusOK } func (t *TrustStore) CmdUpdateBase(job *engine.Job) engine.Status { t.fetch() return engine.StatusOK } docker-1.6.2/trust/trusts.go0000644000175000017500000001031512524223634015410 0ustar tianontianonpackage trust import ( "crypto/x509" "errors" "io/ioutil" "net/http" "net/url" "os" "path" "path/filepath" "sync" "time" log "github.com/Sirupsen/logrus" "github.com/docker/libtrust/trustgraph" ) type TrustStore struct { path string caPool *x509.CertPool graph trustgraph.TrustGraph expiration time.Time fetcher *time.Timer fetchTime time.Duration autofetch bool httpClient *http.Client baseEndpoints map[string]*url.URL sync.RWMutex } // 
defaultFetchtime represents the starting duration to wait between // fetching sections of the graph. Unsuccessful fetches should // increase time between fetching. const defaultFetchtime = 45 * time.Second var baseEndpoints = map[string]string{"official": "https://dvjy3tqbc323p.cloudfront.net/trust/official.json"} func NewTrustStore(path string) (*TrustStore, error) { abspath, err := filepath.Abs(path) if err != nil { return nil, err } // Create base graph url map endpoints := map[string]*url.URL{} for name, endpoint := range baseEndpoints { u, err := url.Parse(endpoint) if err != nil { return nil, err } endpoints[name] = u } // Load grant files t := &TrustStore{ path: abspath, caPool: nil, httpClient: &http.Client{}, fetchTime: time.Millisecond, baseEndpoints: endpoints, } err = t.reload() if err != nil { return nil, err } return t, nil } func (t *TrustStore) reload() error { t.Lock() defer t.Unlock() matches, err := filepath.Glob(filepath.Join(t.path, "*.json")) if err != nil { return err } statements := make([]*trustgraph.Statement, len(matches)) for i, match := range matches { f, err := os.Open(match) if err != nil { return err } statements[i], err = trustgraph.LoadStatement(f, nil) if err != nil { f.Close() return err } f.Close() } if len(statements) == 0 { if t.autofetch { log.Debugf("No grants, fetching") t.fetcher = time.AfterFunc(t.fetchTime, t.fetch) } return nil } grants, expiration, err := trustgraph.CollapseStatements(statements, true) if err != nil { return err } t.expiration = expiration t.graph = trustgraph.NewMemoryGraph(grants) log.Debugf("Reloaded graph with %d grants expiring at %s", len(grants), expiration) if t.autofetch { nextFetch := expiration.Sub(time.Now()) if nextFetch < 0 { nextFetch = defaultFetchtime } else { nextFetch = time.Duration(0.8 * (float64)(nextFetch)) } t.fetcher = time.AfterFunc(nextFetch, t.fetch) } return nil } func (t *TrustStore) fetchBaseGraph(u *url.URL) (*trustgraph.Statement, error) { req := &http.Request{ Method: "GET", URL: u, Proto: "HTTP/1.1", ProtoMajor: 1, ProtoMinor: 1, Header: make(http.Header), Body: nil, Host: u.Host, } resp, err := t.httpClient.Do(req) if err != nil { return nil, err } if resp.StatusCode == 404 { return nil, errors.New("base graph does not exist") } defer resp.Body.Close() return trustgraph.LoadStatement(resp.Body, t.caPool) } // fetch retrieves updated base graphs. This function cannot error, it // should only log errors func (t *TrustStore) fetch() { t.Lock() defer t.Unlock() if t.autofetch && t.fetcher == nil { // Do nothing ?? 
return } fetchCount := 0 for bg, ep := range t.baseEndpoints { statement, err := t.fetchBaseGraph(ep) if err != nil { log.Infof("Trust graph fetch failed: %s", err) continue } b, err := statement.Bytes() if err != nil { log.Infof("Bad trust graph statement: %s", err) continue } // TODO check if value differs err = ioutil.WriteFile(path.Join(t.path, bg+".json"), b, 0600) if err != nil { log.Infof("Error writing trust graph statement: %s", err) } fetchCount++ } log.Debugf("Fetched %d base graphs at %s", fetchCount, time.Now()) if fetchCount > 0 { go func() { err := t.reload() if err != nil { log.Infof("Reload of trust graph failed: %s", err) } }() t.fetchTime = defaultFetchtime t.fetcher = nil } else if t.autofetch { maxTime := 10 * defaultFetchtime t.fetchTime = time.Duration(1.5 * (float64)(t.fetchTime+time.Second)) if t.fetchTime > maxTime { t.fetchTime = maxTime } t.fetcher = time.AfterFunc(t.fetchTime, t.fetch) } } docker-1.6.2/project/0000755000175000017500000000000012524223634014002 5ustar tianontianondocker-1.6.2/project/GOVERNANCE.md0000644000175000017500000000142712524223634015757 0ustar tianontianon# Docker Governance Advisory Board Meetings In the spirit of openness, Docker created a Governance Advisory Board, and committed to make all materials and notes from the meetings of this group public. All output from the meetings should be considered proposals only, and are subject to the review and approval of the community and the project leadership. The materials from the first Docker Governance Advisory Board meeting, held on October 28, 2014, are available at [Google Docs Folder](http://goo.gl/Alfj8r) These include: * First Meeting Notes * DGAB Charter * Presentation 1: Introductory Presentation, including State of The Project * Presentation 2: Overall Contribution Structure/Docker Project Core Proposal * Presentation 3: Long Term Roadmap/Statement of Direction docker-1.6.2/project/PRINCIPLES.md0000644000175000017500000000214012524223634015771 0ustar tianontianon# Docker principles In the design and development of Docker we try to follow these principles: (Work in progress) * Don't try to replace every tool. Instead, be an ingredient to improve them. * Less code is better. * Fewer components are better. Do you really need to add one more class? * 50 lines of straightforward, readable code is better than 10 lines of magic that nobody can understand. * Don't do later what you can do now. "//FIXME: refactor" is not acceptable in new code. * When hesitating between 2 options, choose the one that is easier to reverse. * No is temporary, Yes is forever. If you're not sure about a new feature, say no. You can change your mind later. * Containers must be portable to the greatest possible number of machines. Be suspicious of any change which makes machines less interchangeable. * The less moving parts in a container, the better. * Don't merge it unless you document it. * Don't document it unless you can keep it up-to-date. * Don't merge it unless you test it! * Everyone's problem is slightly different. Focus on the part that is the same for everyone, and solve that. docker-1.6.2/project/PACKAGERS.md0000644000175000017500000003303512524223634015630 0ustar tianontianon# Dear Packager, If you are looking to make Docker available on your favorite software distribution, this document is for you. It summarizes the requirements for building and running the Docker client and the Docker daemon. ## Getting Started We want to help you package Docker successfully. 
Before doing any packaging, a good first step is to introduce yourself on the [docker-dev mailing list](https://groups.google.com/d/forum/docker-dev), explain what you're trying to achieve, and tell us how we can help. Don't worry, we don't bite! There might even be someone already working on packaging for the same distro! You can also join the IRC channel - #docker and #docker-dev on Freenode are both active and friendly. We like to refer to Tianon ("@tianon" on GitHub and "tianon" on IRC) as our "Packagers Relations", since he's always working to make sure our packagers have a good, healthy upstream to work with (both in our communication and in our build scripts). If you're having any kind of trouble, feel free to ping him directly. He also likes to keep track of what distributions we have packagers for, so feel free to reach out to him even just to say "Hi!" ## Package Name If possible, your package should be called "docker". If that name is already taken, a second choice is "lxc-docker", but with the caveat that "LXC" is now an optional dependency (as noted below). Another possible choice is "docker.io". ## Official Build vs Distro Build The Docker project maintains its own build and release toolchain. It is pretty neat and entirely based on Docker (surprise!). This toolchain is the canonical way to build Docker. We encourage you to give it a try, and if the circumstances allow you to use it, we recommend that you do. You might not be able to use the official build toolchain - usually because your distribution has a toolchain and packaging policy of its own. We get it! Your house, your rules. The rest of this document should give you the information you need to package Docker your way, without denaturing it in the process. ## Build Dependencies To build Docker, you will need the following: * A recent version of Git and Mercurial * Go version 1.3 or later * A clean checkout of the source added to a valid [Go workspace](http://golang.org/doc/code.html#Workspaces) under the path *src/github.com/docker/docker* (unless you plan to use `AUTO_GOPATH`, explained in more detail below) To build the Docker daemon, you will additionally need: * An amd64/x86_64 machine running Linux * SQLite version 3.7.9 or later * libdevmapper version 1.02.68-cvs (2012-01-26) or later from lvm2 version 2.02.89 or later * btrfs-progs version 3.16.1 or later (unless using an older version is absolutely necessary, in which case 3.8 is the minimum and the note below regarding `btrfs_noversion` applies) Be sure to also check out Docker's Dockerfile for the most up-to-date list of these build-time dependencies. ### Go Dependencies All Go dependencies are vendored under "./vendor". They are used by the official build, so the source of truth for the current version of each dependency is whatever is in "./vendor". To use the vendored dependencies, simply make sure the path to "./vendor" is included in `GOPATH` (or use `AUTO_GOPATH`, as explained below). If you would rather (or must, due to distro policy) package these dependencies yourself, take a look at "./hack/vendor.sh" for an easy-to-parse list of the exact version for each. NOTE: if you're not able to package the exact version (to the exact commit) of a given dependency, please get in touch so we can remediate! Who knows what discrepancies can be caused by even the slightest deviation. We promise to do our best to make everybody happy. ## Stripping Binaries Please, please, please do not strip any compiled binaries. This is really important. 
In our own testing, stripping the resulting binaries sometimes results in a binary that appears to work, but more often causes random panics, segfaults, and other issues. Even if the binary appears to work, please don't strip. See the following quotes from Dave Cheney, which explain this position better from the upstream Golang perspective. ### [go issue #5855, comment #3](https://code.google.com/p/go/issues/detail?id=5855#c3) > Super super important: Do not strip go binaries or archives. It isn't tested, > often breaks, and doesn't work. ### [launchpad golang issue #1200255, comment #8](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/8) > To quote myself: "Please do not strip Go binaries, it is not supported, not > tested, is often broken, and doesn't do what you want" > > To unpack that a bit > > * not supported, as in, we don't support it, and recommend against it when > asked > * not tested, we don't test stripped binaries as part of the build CI process > * is often broken, stripping a go binary will produce anywhere from no, to > subtle, to outright execution failure, see above ### [launchpad golang issue #1200255, comment #13](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/13) > To clarify my previous statements. > > * I do not disagree with the debian policy, it is there for a good reason > * Having said that, it stripping Go binaries doesn't work, and nobody is > looking at making it work, so there is that. > > Thanks for patching the build formula. ## Building Docker Please use our build script ("./hack/make.sh") for all your compilation of Docker. If there's something you need that it isn't doing, or something it could be doing to make your life as a packager easier, please get in touch with Tianon and help us rectify the situation. Chances are good that other packagers have run into the same problems and a fix might already be in the works, but none of us will know for sure unless you harass Tianon about it. :) All the commands listed within this section should be run with the Docker source checkout as the current working directory. ### `AUTO_GOPATH` If you'd rather not be bothered with the hassle of setting up `GOPATH` appropriately, and prefer to just get a "build that works", you should add something similar to this to whatever script or process you're using to build Docker: ```bash export AUTO_GOPATH=1 ``` This will cause the build scripts to set up a reasonable `GOPATH` that automatically and properly includes both docker/docker from the local directory, and the local "./vendor" directory as necessary. ### `DOCKER_BUILDTAGS` If you're building a binary that may need to be used on platforms that include AppArmor, you will need to set `DOCKER_BUILDTAGS` as follows: ```bash export DOCKER_BUILDTAGS='apparmor' ``` If you're building a binary that may need to be used on platforms that include SELinux, you will need to use the `selinux` build tag: ```bash export DOCKER_BUILDTAGS='selinux' ``` If your version of btrfs-progs (also called btrfs-tools) is < 3.16.1, then you will need the following tag to not check for btrfs version headers: ```bash export DOCKER_BUILDTAGS='btrfs_noversion' ``` There are build tags for disabling graphdrivers as well. By default, support for all graphdrivers is built in.
To disable btrfs: ```bash export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs' ``` To disable devicemapper: ```bash export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper' ``` To disable aufs: ```bash export DOCKER_BUILDTAGS='exclude_graphdriver_aufs' ``` NOTE: if you need to set more than one build tag, space separate them: ```bash export DOCKER_BUILDTAGS='apparmor selinux exclude_graphdriver_aufs' ``` ### Static Daemon If it is feasible within the constraints of your distribution, you should seriously consider packaging Docker as a single static binary. A good comparison is Busybox, which is often packaged statically as a feature to enable mass portability. Because of the unique way Docker operates, being similarly static is a "feature". To build a static Docker daemon binary, run the following command (first ensuring that all the necessary libraries are available in static form for linking - see the "Build Dependencies" section above, and the relevant lines within Docker's own Dockerfile that set up our official build environment): ```bash ./hack/make.sh binary ``` This will create a static binary under "./bundles/$VERSION/binary/docker-$VERSION", where "$VERSION" is the contents of the file "./VERSION". This binary is usually installed somewhere like "/usr/bin/docker". ### Dynamic Daemon / Client-only Binary If you are only interested in a Docker client binary, set `DOCKER_CLIENTONLY` to a non-empty value using something similar to the following: (which will prevent the extra step of compiling dockerinit) ```bash export DOCKER_CLIENTONLY=1 ``` If you need to (due to distro policy, distro library availability, or for other reasons) create a dynamically compiled daemon binary, or if you are only interested in creating a client binary for Docker, use something similar to the following: ```bash ./hack/make.sh dynbinary ``` This will create "./bundles/$VERSION/dynbinary/docker-$VERSION", which for client-only builds is the important file to grab and install as appropriate. For daemon builds, you will also need to grab and install "./bundles/$VERSION/dynbinary/dockerinit-$VERSION", which is created from the minimal set of Docker's codebase that _must_ be compiled statically (and is thus a pure static binary). The acceptable locations Docker will search for this file are as follows (in order): * as "dockerinit" in the same directory as the daemon binary (ie, if docker is installed at "/usr/bin/docker", then "/usr/bin/dockerinit" will be the first place this file is searched for) * "/usr/libexec/docker/dockerinit" or "/usr/local/libexec/docker/dockerinit" ([FHS 3.0 Draft](http://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec)) * "/usr/lib/docker/dockerinit" or "/usr/local/lib/docker/dockerinit" ([FHS 2.3](http://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA)) If (and please, only if) one of the paths above is insufficient due to distro policy or similar issues, you may use the `DOCKER_INITPATH` environment variable at compile-time as follows to set a different path for Docker to search: ```bash export DOCKER_INITPATH=/usr/lib/docker.io/dockerinit ``` If you find yourself needing this, please don't hesitate to reach out to Tianon to see if it would be reasonable or helpful to add more paths to Docker's list, especially if there's a relevant standard worth referencing (such as the FHS). Also, it goes without saying, but for the purposes of the daemon please consider these two binaries ("docker" and "dockerinit") as if they were a single unit. 
Mixing and matching can cause undesired consequences, and will fail to run properly. ## System Dependencies ### Runtime Dependencies To function properly, the Docker daemon needs the following software to be installed and available at runtime: * iptables version 1.4 or later * procps (or similar provider of a "ps" executable) * e2fsprogs version 1.4.12 or later (in use: mkfs.ext4, mkfs.xfs, tune2fs) * XZ Utils version 4.9 or later * a [properly mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount) cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point [is](https://github.com/docker/docker/issues/2683) [not](https://github.com/docker/docker/issues/3485) [sufficient](https://github.com/docker/docker/issues/4568)) Additionally, the Docker client needs the following software to be installed and available at runtime: * Git version 1.7 or later ### Kernel Requirements The Docker daemon has very specific kernel requirements. Most pre-packaged kernels already include the necessary options enabled. If you are building your own kernel, you will either need to discover the options necessary via trial and error, or check out the [Gentoo ebuild](https://github.com/tianon/docker-overlay/blob/master/app-emulation/docker/docker-9999.ebuild), in which a list is maintained (and if there are any issues or discrepancies in that list, please contact Tianon so they can be rectified). Note that in client mode, there are no specific kernel requirements, and that the client will even run on alternative platforms such as Mac OS X / Darwin. ### Optional Dependencies Some of Docker's features are activated by using optional command-line flags or by having support for them in the kernel or userspace. A few examples include: * LXC execution driver (requires version 1.0.7 or later of lxc and the lxc-libs) * AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at least the "auplink" utility from aufs-tools) * BTRFS graph driver (requires BTRFS support enabled in the kernel) ## Daemon Init Script Docker expects to run as a daemon at machine startup. Your package will need to include a script for your distro's process supervisor of choice. Be sure to check out the "contrib/init" folder in case a suitable init script already exists (and if one does not, contact Tianon about whether it might be appropriate for your distro's init script to live there too!). In general, Docker should be run as root, similar to the following: ```bash docker -d ``` Generally, a `DOCKER_OPTS` variable of some kind is available for adding more flags (such as changing the graph driver to use BTRFS, switching the location of "/var/lib/docker", etc). ## Communicate As a final note, please do feel free to reach out to Tianon at any time for pretty much anything. He really does love hearing from our packagers and wants to make sure we're not being a "hostile upstream". As should be a given, we appreciate the work our packagers do to make sure we have broad distribution! 
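As a supplement to the "Daemon Init Script" section above, here is a minimal sketch of what such a wrapper can look like. It is not a canonical script - the "contrib/init" folder holds the real, maintained examples - and the `/etc/default/docker` location and the `DOCKER_OPTS` variable name are assumptions modeled on common distro conventions:

```bash
#!/bin/sh
# Minimal sysvinit-style sketch for running the Docker daemon.
# Assumptions: the daemon is installed as /usr/bin/docker, and optional
# extra flags live in /etc/default/docker as DOCKER_OPTS (hypothetical
# defaults file; adjust for your distro's conventions).

DOCKER=/usr/bin/docker
DOCKER_OPTS=""

# Pull in distro-specific flags, e.g. DOCKER_OPTS="--storage-driver=btrfs"
[ -f /etc/default/docker ] && . /etc/default/docker

case "$1" in
    start)
        # The daemon must run as root (see "Daemon Init Script" above).
        "$DOCKER" -d $DOCKER_OPTS &
        ;;
    stop)
        # The daemon writes /var/run/docker.pid by default (the -p flag).
        kill "$(cat /var/run/docker.pid)"
        ;;
    *)
        echo "Usage: $0 {start|stop}" >&2
        exit 1
        ;;
esac
```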
docker-1.6.2/project/make/0000755000175000017500000000000012524223634014717 5ustar tianontianondocker-1.6.2/project/make/validate-toml0000644000175000017500000000121612524223634017404 0ustar tianontianon#!/bin/bash
source "$(dirname "$BASH_SOURCE")/.validate"

IFS=$'\n'
files=( $(validate_diff --diff-filter=ACMR --name-only -- 'MAINTAINERS' || true) )
unset IFS

badFiles=()
for f in "${files[@]}"; do
	# we use "git show" here to validate that what's committed is formatted
	if [ "$(git show "$VALIDATE_HEAD:$f" | tomlv)" ]; then
		badFiles+=( "$f" )
	fi
done

if [ ${#badFiles[@]} -eq 0 ]; then
	echo 'Congratulations! All toml source files have valid syntax.'
else
	{
		echo "These files are not valid toml:"
		for f in "${badFiles[@]}"; do
			echo " - $f"
		done
		echo
		echo 'Please reformat the above files as valid toml'
		echo
	} >&2
	false
fi
docker-1.6.2/project/README.md0000644000175000017500000000131512524223634015261 0ustar tianontianon# Hacking on Docker

The hack/ directory holds information and tools for everyone involved in the process of creating and distributing Docker, specifically:

## Guides

If you're a *contributor* or aspiring contributor, you should read CONTRIBUTORS.md.

If you're a *maintainer* or aspiring maintainer, you should read MAINTAINERS.md.

If you're a *packager* or aspiring packager, you should read PACKAGERS.md.

If you're a maintainer in charge of a *release*, you should read RELEASE-CHECKLIST.md.

## Roadmap

A high-level roadmap is available at ROADMAP.md.

## Build tools

make.sh is the primary build tool for docker. It is used for compiling the official binary, running the test suite, and pushing releases.
docker-1.6.2/project/ISSUE-TRIAGE.md0000644000175000017500000001567212524223634016200 0ustar tianontianonTriaging of issues
------------------

Triage provides an important way to contribute to an open source project. Triage helps ensure issues resolve quickly by:

- Ensuring the issue's intent and purpose are conveyed precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took.
- Giving a contributor the information they need before they commit to resolving an issue.
- Lowering the issue count by preventing duplicate issues.
- Streamlining the development process by preventing duplicate discussions.

If you don't have time to code, consider helping with triage. The community will thank you for saving them time by spending some of yours.

### Step 1: Ensure the issue contains basic information

Before triaging an issue very far, make sure that the issue's author provided the standard issue information. This will help you make an educated recommendation on how to categorize the issue.

Standard information that *must* be included in most issues is:

- the output of `docker version`
- the output of `docker info`
- the output of `uname -a`
- a reproducible case if this is a bug, Dockerfiles FTW
- host distribution and version (ubuntu 14.04, RHEL, fedora 21)
- page URL if this is a docs issue

Depending on the issue, you might not feel all this information is needed. Use your best judgement. If you cannot triage an issue using what its author provided, explain kindly to the author that they must provide the above information to clarify the problem.

If the author provides the standard information but you are still unable to triage the issue, request additional information. Do this kindly and politely because you are asking for more of the author's time.

If the author does not provide the requested information within a week, close the issue with a kind note stating that the author can request that the issue be reopened when the necessary information is provided.

### Step 2: Apply the template

When triaging, use the standard template below. You should cut and paste the template into the issue's description. The template helps other reviewers find key information in an issue. For example, using a template saves a potential contributor from wading through hundreds of comments to find a proposed solution at the very end. When adding the template to the issue's description, also add any required labels to the issue for the classification and difficulty.

Here is a sample summary for an [issue](https://github.com/docker/docker/issues/10545).

```
**Summary**: docker rm can return a non-zero exit code if the container does
not exist and it is not easy to parse the error message.

**Proposed solution**: docker rm should have consistent exit codes for
different types of errors so that the user can easily script and know the
reason why the command failed.
```

### Step 3: Classify the Issue

Classifications help inform readers about an issue's priority and how to resolve it. This is also helpful for identifying new, critical issues. Kind labels are applied to the issue or pull request; you can apply one or more of them.

Kinds of classifications:

| Kind             | Description |
|------------------|-------------|
| kind/enhancement | Enhancements are not bugs or new features but can drastically improve usability or performance of a project component. |
| kind/cleanup     | Refactoring code or otherwise clarifying documentation. |
| kind/content     | Content that is not documentation such as help or error messages. |
| kind/graphics    | Work involving graphics skill. |
| kind/regression  | Regressions are usually easy fixes as hopefully the action worked previously and git history can be used to propose a solution. |
| kind/bug         | Bugs are bugs. The cause may or may not be known at triage time, so debugging should be taken into account in the time estimate. |
| kind/feature     | Functionality or other elements that the project does not currently support. Features are new and shiny. |
| kind/question    | Contains a user or contributor question requiring a response. |
| kind/usecase     | A description of a user or contributor situation requiring a response, perhaps in code or documentation. |
| kind/writing     | Writing documentation, man pages, articles, blogs, or other significant word-driven task. |
| kind/test        | Tests or test infrastructure needs adding or updating. |

Contributors can add labels by using `+kind/bug` in an issue or pull request comment.

### Step 4: Estimate the experience level required

Experience level is a way for a contributor to find an issue based on their skill set. Experience types are applied to the issue or pull request using labels.

| Level            | Experience level guideline |
|------------------|----------------------------|
| exp/beginner     | You have made less than 10 contributions in your lifetime to any open source project. |
| exp/novice       | You have made more than 10 contributions to an open source project or at least 5 contributions to Docker. |
| exp/proficient   | You have made more than 5 contributions to Docker which amount to at least 200 code lines or 1000 documentation lines. |
| exp/expert       | You have made less than 20 commits to Docker which amount to 500-1000 code lines or 1000-3000 documentation lines. |
| exp/master       | You have made more than 20 commits to Docker and greater than 1000 code lines or 3000 documentation lines. |

As the table states, these labels are meant as guidelines. You might have written a whole plugin for Docker in a personal project and never contributed to Docker. With that kind of experience, you could take on an exp/expert or exp/master level task.

Contributors can add labels by using the `+exp/expert` format in an issue comment.

And that's it. That should be all the information required for a new or existing contributor to come in and resolve an issue.
docker-1.6.2/project/IRC-ADMINISTRATION.md0000644000175000017500000000335112524223634017066 0ustar tianontianon# Freenode IRC Administration Guidelines and Tips

This is not meant to be a general "Here's how to IRC" document, so if you're looking for that, check Google instead. ♥

If you've been charged with helping maintain one of Docker's now many IRC channels, this might turn out to be useful. If there's information that you wish you'd known about how a particular channel is organized, you should add deets here! :)

## `ChanServ`

Most channel maintenance happens by talking to Freenode's `ChanServ` bot. For example, `/msg ChanServ ACCESS LIST` will show you a list of everyone with "access" privileges for a particular channel.

A similar command is used to give someone a particular access level. For example, to add a new maintainer to the `#docker-maintainers` access list so that they can contribute to the discussions (after they've been merged appropriately in a `MAINTAINERS` file, of course), one would use `/msg ChanServ ACCESS #docker-maintainers ADD maintainer`.

To set up a new channel with a similar `maintainer` access template, use a command like `/msg ChanServ TEMPLATE maintainer +AV` (`+A` for letting them view the `ACCESS LIST`, `+V` for auto-voice; see `/msg ChanServ HELP FLAGS` for more details).

## Troubleshooting

The most common cause of not-getting-auto-`+v` woes is people not being `IDENTIFY`ed with `NickServ` (or their current nickname not being `GROUP`ed with their main nickname) -- often manifested by `ChanServ` responding to an `ACCESS ADD` request with something like `xyz is not registered.`.

This is easily fixed by doing `/msg NickServ IDENTIFY OldNick SecretPassword` followed by `/msg NickServ GROUP` to group the two nicknames together. See `/msg NickServ HELP GROUP` for more information.
docker-1.6.2/project/RELEASE-CHECKLIST.md0000644000175000017500000002734612524223634016727 0ustar tianontianon# Release Checklist

## A maintainer's guide to releasing Docker

So you're in charge of a Docker release? Cool. Here's what to do.

If your experience deviates from this document, please document the changes to keep it up-to-date.

It is important to note that this document assumes that the git remote in your repository that corresponds to "https://github.com/docker/docker" is named "origin". If yours is not (for example, if you've chosen to name it "upstream" or something similar instead), be sure to adjust the listed snippets for your local environment accordingly. If you are not sure what your upstream remote is named, use a command like `git remote -v` to find out.
If you don't have an upstream remote, you can add one easily using something like:

```bash
export GITHUBUSER="YOUR_GITHUB_USER"
git remote add origin https://github.com/docker/docker.git
git remote add $GITHUBUSER git@github.com:$GITHUBUSER/docker.git
```

### 1. Pull from master and create a release branch

Note: Even for major releases, all of X, Y and Z in vX.Y.Z must be specified (e.g. v1.0.0).

```bash
export VERSION=vX.Y.Z
git fetch origin
git branch -D release || true
git checkout --track origin/release
git checkout -b bump_$VERSION
```

If it's a regular release, we usually merge master.

```bash
git merge origin/master
```

Otherwise, if it is a hotfix release, we cherry-pick only the commits we want.

```bash
# get the commit ids we want to cherry-pick
git log
# cherry-pick the commits starting from the oldest one, without including merge commits
git cherry-pick <commit>
git cherry-pick <commit>
...
```

### 2. Update CHANGELOG.md

You can run this command for reference with git 2.0:

```bash
git fetch --tags
LAST_VERSION=$(git tag -l --sort=-version:refname "v*" | grep -E 'v[0-9\.]+$' | head -1)
git log --stat $LAST_VERSION..bump_$VERSION
```

If you don't have git 2.0 but have a sort command that supports `-V`:

```bash
git fetch --tags
LAST_VERSION=$(git tag -l | grep -E 'v[0-9\.]+$' | sort -rV | head -1)
git log --stat $LAST_VERSION..bump_$VERSION
```

If releasing a major version (X or Y increased in vX.Y.Z), simply listing notable user-facing features is sufficient.

```markdown
#### Notable features since <last release>

* New docker command to do something useful
* Remote API change (deprecating old version)
* Performance improvements in some usecases
* ...
```

For minor releases (only Z increases in vX.Y.Z), provide a list of user-facing changes. Each change should be listed under a category heading formatted as `#### CATEGORY`.

`CATEGORY` should describe which part of the project is affected. Valid categories are:

* Builder
* Documentation
* Hack
* Packaging
* Remote API
* Runtime
* Other (please use this category sparingly)

Each change should be formatted as `BULLET DESCRIPTION`, given:

* BULLET: either `-`, `+` or `*`, to indicate a bugfix, new feature or upgrade, respectively.
* DESCRIPTION: a concise description of the change that is relevant to the end-user, using the present tense. Changes should be described in terms of how they affect the user, for example "Add new feature X which allows Y", "Fix bug which caused X", "Increase performance of Y".

EXAMPLES:

```markdown
## 0.3.6 (1995-12-25)

#### Builder

+ 'docker build -t FOO .' applies the tag FOO to the newly built image

#### Remote API

- Fix a bug in the optional unix socket transport

#### Runtime

* Improve detection of kernel version
```

If you need a list of contributors between the last major release and the current bump branch, use something like:

```bash
git log --format='%aN <%aE>' v0.7.0...bump_v0.8.0 | sort -uf
```

Obviously, you'll need to adjust version numbers as necessary. If you just need a count, add a simple `| wc -l`.

### 3. Change the contents of the VERSION file

Before the big thing, you'll want to make successive release candidates and get people to test. The release candidate number `N` should be part of the version:

```bash
export RC_VERSION=${VERSION}-rcN
echo ${RC_VERSION#v} > VERSION
```

### 4. Test the docs

Make sure that your tree includes documentation for any modified or new features, syntax or semantic changes.

To test locally:

```bash
make docs
```

To make a shared test at http://beta-docs.docker.io: (you will need the `awsconfig` file added to the `docs/` dir)

```bash
make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release
```

### 5. Commit and create a pull request to the "release" branch

```bash
git add VERSION CHANGELOG.md
git commit -m "Bump version to $VERSION"
git push $GITHUBUSER bump_$VERSION
echo "https://github.com/$GITHUBUSER/docker/compare/docker:release...$GITHUBUSER:bump_$VERSION?expand=1"
```

That last command will give you the proper link to visit to ensure that you open the PR against the "release" branch instead of accidentally against "master" (like so many brave souls before you already have).

### 6. Publish release candidate binaries

To run this you will need access to the release credentials. Get them from the Core maintainers.

Replace "..." with the respective credentials:

```bash
docker build -t docker .
docker run \
    -e AWS_S3_BUCKET=test.docker.com \
    -e AWS_ACCESS_KEY="..." \
    -e AWS_SECRET_KEY="..." \
    -e GPG_PASSPHRASE="..." \
    -i -t --privileged \
    docker \
    hack/release.sh
```

It will run the test suite, build the binaries and packages, and upload to the specified bucket, so this is a good time to verify that you're running against **test**.docker.com.

After the binaries and packages are uploaded to test.docker.com, make sure they get tested on both Ubuntu and Debian for any obvious installation or runtime issues. If everything looks good, it's time to create a git tag for this candidate:

```bash
git tag -a $RC_VERSION -m $RC_VERSION bump_$VERSION
git push origin $RC_VERSION
```

Announcing on multiple channels is the best way to get some help testing! An easy way to get some useful links for sharing:

```bash
echo "Ubuntu/Debian: https://test.docker.com/ubuntu or curl -sSL https://test.docker.com/ | sh"
echo "Linux 64bit binary: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}"
echo "Darwin/OSX 64bit client binary: https://test.docker.com/builds/Darwin/x86_64/docker-${VERSION#v}"
echo "Darwin/OSX 32bit client binary: https://test.docker.com/builds/Darwin/i386/docker-${VERSION#v}"
echo "Linux 64bit tgz: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}.tgz"
```

We recommend announcing the release candidate on:

- IRC on #docker, #docker-dev, #docker-maintainers
- In a comment on the pull request to notify subscribed people on GitHub
- The [docker-dev](https://groups.google.com/forum/#!forum/docker-dev) group
- The [docker-maintainers](https://groups.google.com/a/dockerproject.org/forum/#!forum/maintainers) group
- Any social media that can bring some attention to the release candidate

### 7. Iterate on successive release candidates

Spend several days along with the community explicitly investing time and resources to try and break Docker in every possible way, documenting any findings pertinent to the release. This time should be spent testing and finding ways in which the release might have caused various features or upgrade environments to have issues, not coding. During this time, the release is in code freeze, and any additional code changes will be pushed out to the next release.

It should include various levels of breaking Docker, beyond just using Docker by the book. Any issues found may still remain issues for this release, but they should be documented and given appropriate warnings.

During this phase, the `bump_$VERSION` branch will keep evolving as you will produce new release candidates. The frequency of new candidates is up to the release manager: use your best judgement, taking into account the severity of reported issues, testers' availability, and the time to the scheduled release date.

Each time you want to produce a new release candidate, you will start by adding commits to the branch, usually by cherry-picking from master:

```bash
git cherry-pick -x -m0 <commit>
```

You want your "bump commit" (the one that updates the CHANGELOG and VERSION files) to remain on top, so you'll have to `git rebase -i` to bring it back up.

Now that your bump commit is back on top, you will need to update the CHANGELOG file (if appropriate for this particular release candidate), and update the VERSION file to increment the RC number:

```bash
export RC_VERSION=$VERSION-rcN
echo $RC_VERSION > VERSION
```

You can now amend your last commit and update the bump branch:

```bash
git commit --amend
git push -f $GITHUBUSER bump_$VERSION
```

Repeat step 6 to tag the code, publish new binaries, announce availability, and get help testing.

### 8. Finalize the bump branch

When you're happy with the quality of a release candidate, you can move on and create the real thing.

You will first have to amend the "bump commit" to drop the release candidate suffix in the VERSION file:

```bash
echo $VERSION > VERSION
git add VERSION
git commit --amend
```

You will then repeat step 6 to publish the binaries to test.

### 9. Get 2 other maintainers to validate the pull request

### 10. Publish final binaries

Once they're tested and reasonably believed to be working, run against get.docker.com:

```bash
docker run \
    -e AWS_S3_BUCKET=get.docker.com \
    -e AWS_ACCESS_KEY="..." \
    -e AWS_SECRET_KEY="..." \
    -e GPG_PASSPHRASE="..." \
    -i -t --privileged \
    docker \
    hack/release.sh
```

### 11. Apply tag

It's very important that we don't make the tag until after the official release is uploaded to get.docker.com!

```bash
git tag -a $VERSION -m $VERSION bump_$VERSION
git push origin $VERSION
```

### 12. Go to github to merge the `bump_$VERSION` branch into release

Don't forget to push that pretty blue button to delete the leftover branch afterwards!

### 13. Update the docs branch

If this is a MAJOR.MINOR.0 release, you need to make a branch for the previous release's documentation:

```bash
git checkout -b docs-$PREVIOUS_MAJOR_MINOR
git fetch
git reset --hard origin/docs
git push -f origin docs-$PREVIOUS_MAJOR_MINOR
```

You will need the `awsconfig` file added to the `docs/` directory to contain the s3 credentials for the bucket you are deploying to.

```bash
git checkout -b docs release || git checkout docs
git fetch
git reset --hard origin/release
git push -f origin docs
make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes DISTRIBUTION_ID=C2K6......FL2F docs-release
```

The docs will appear on http://docs.docker.com/ (though there may be cached versions, so it's worth checking http://docs.docker.com.s3-website-us-east-1.amazonaws.com/). For more information about documentation releases, see `docs/README.md`.

Note that the new docs will not appear live on the site until the cache (a complex, distributed CDN system) is flushed. The `make docs-release` command will do this _if_ the `DISTRIBUTION_ID` is set correctly - this will take at least 15 minutes to run and you can check its progress with the CDN Cloudfront Chrome addin.

### 14. Create a new pull request to merge your bump commit back into master

```bash
git checkout master
git fetch
git reset --hard origin/master
git cherry-pick $VERSION
git push $GITHUBUSER merge_release_$VERSION
echo "https://github.com/$GITHUBUSER/docker/compare/docker:master...$GITHUBUSER:merge_release_$VERSION?expand=1"
```

Again, get two maintainers to validate, then merge, then push that pretty blue button to delete your branch.

### 15. Rejoice and Evangelize!

Congratulations! You're done.

Go forth and announce the glad tidings of the new release in `#docker`, `#docker-dev`, on the [dev mailing list](https://groups.google.com/forum/#!forum/docker-dev), the [announce mailing list](https://groups.google.com/forum/#!forum/docker-announce), and on Twitter!
docker-1.6.2/project/CONTRIBUTORS.md0000777000175000017500000000000012524223634020722 2../CONTRIBUTING.mdustar tianontianondocker-1.6.2/project/TOOLS.md0000644000175000017500000000552712524223634015235 0ustar tianontianon# Tools

This page describes the tools we use and infrastructure that is in place for the Docker project.

### CI

The Docker project uses [Jenkins](https://jenkins.dockerproject.com/) as our continuous integration server. Each Pull Request to Docker is tested by running the equivalent of `make all`. We chose Jenkins because we can host it ourselves and we run Docker in Docker to test.

#### Leeroy

Leeroy is a Go application which integrates Jenkins with GitHub pull requests. Leeroy uses [GitHub hooks](http://developer.github.com/v3/repos/hooks/) to listen for pull request notifications and starts jobs on your Jenkins server. Using the Jenkins notification plugin, Leeroy updates the pull request using GitHub's [status API](http://developer.github.com/v3/repos/statuses/) with pending, success, failure, or error statuses.

The leeroy repository is maintained at [github.com/jfrazelle/leeroy](https://github.com/jfrazelle/leeroy).

#### GordonTheTurtle IRC Bot

The GordonTheTurtle IRC Bot lives in the [#docker-maintainers](https://botbot.me/freenode/docker-maintainers/) channel on Freenode. He is built in Go and is based on the project at [github.com/fabioxgn/go-bot](https://github.com/fabioxgn/go-bot).

His main command is `!rebuild`, which rebuilds a given Pull Request for a repository. This command works by integrating with Leeroy. He has a few other commands too, such as `!gif` or `!godoc`, but we are always looking for more fun commands to add.

The gordon-bot repository is maintained at [github.com/jfrazelle/gordon-bot](https://github.com/jfrazelle/gordon-bot)

### NSQ

We use [NSQ](https://github.com/bitly/nsq) for various aspects of the project infrastructure.

#### Hooks

The hooks project, [github.com/crosbymichael/hooks](https://github.com/crosbymichael/hooks), is a small Go application that manages web hooks from github, hub.docker.com, or other third party services. It can be used for listening to github webhooks & pushing them to a queue, archiving hooks to rethinkdb for processing, and broadcasting hooks to various jobs. A hedged sketch of registering such a webhook appears below.

#### Docker Master Binaries

One of the things queued from the Hooks is the building of the Master Binaries. This happens on every push to the master branch of Docker. The repository for this is maintained at [github.com/jfrazelle/docker-bb](https://github.com/jfrazelle/docker-bb).

#### Docker Master Docs

The master build of the docs gets queued from the Hooks as well. They are built using [github.com/jfrazelle/nsqexec](https://github.com/jfrazelle/nsqexec).
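As referenced in the "Hooks" section above, here is a hedged sketch of pointing a repository at such a listener using GitHub's hooks API. The repository, target URL, and `$GITHUB_TOKEN` are illustrative placeholders, not the project's actual configuration:

```bash
# Sketch: ask GitHub to POST pull-request events to a hooks listener.
# YOUR_USER/YOUR_REPO, the target URL, and $GITHUB_TOKEN are placeholders.
curl -X POST \
  -H "Authorization: token $GITHUB_TOKEN" \
  -d '{
        "name": "web",
        "active": true,
        "events": ["pull_request"],
        "config": {
          "url": "http://hooks.example.com/github",
          "content_type": "json"
        }
      }' \
  https://api.github.com/repos/YOUR_USER/YOUR_REPO/hooks
```

The listener then only has to read each POST body and queue it; the archiving and broadcasting described above happen in the NSQ consumers.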
#### Patch Parser Bot

The bot that labels and comments on Pull Requests, also named GordonTheTurtle, listens on Hooks as well. He is capable of knowing if a Pull Request needs to be signed or gofmt'd, as well as rebased. The repository for this is maintained at [github.com/jfrazelle/gh-patch-parser](https://github.com/jfrazelle/gh-patch-parser).
docker-1.6.2/project/ROADMAP.md0000644000175000017500000000654012524223634015414 0ustar tianontianon# Docker: Statement of Direction

This document is a high-level overview of where we want to take Docker. It is a curated selection of planned improvements which are either important, difficult, or both. For a more complete view of planned and requested improvements, see [the Github issues](https://github.com/docker/docker/issues).

To suggest changes to the roadmap, including additions, please write the change as if it were already in effect, and make a pull request.

## Orchestration

Orchestration touches on several aspects of multi-container applications. These include provisioning hosts with the Docker daemon, organizing and maintaining multiple Docker hosts as a cluster, composing an application using multiple containers, and handling the networking between the containers across the hosts.

Today, users accomplish this using a combination of glue scripts and various tools, like Shipper, Deis, Pipeworks, etc. We want the Docker API to support all aspects of orchestration natively, so that these tools can cleanly and seamlessly integrate into the Docker user experience, and remain interoperable with each other.

## Networking

The current Docker networking model works for communication between containers all residing on the same host. Since Docker applications in production are made up of many containers deployed across multiple hosts (and sometimes multiple data centers), Docker's networking model will evolve to accommodate this. An aspect of this evolution includes providing a Networking API to enable alternative implementations.

## Storage

Currently, stateful Docker containers are pinned to specific hosts during their lifetime. To support additional resiliency, capacity management, and load balancing we want to enable live stateful containers to dynamically migrate between hosts. While the Docker Project will provide a "batteries included" implementation for a great out-of-box experience, we will also provide an API for alternative implementations.

## Microsoft Windows

The next Microsoft Windows Server will ship with primitives to support container-based process isolation and resource management. The Docker Project will guide contributors and maintainers developing native Microsoft versions of the Docker Remote API client and Docker daemon to take advantage of these primitives.

## Provenance

When assembling Docker applications we want users to be confident that images they didn't create themselves are safe to use and build upon. Provenance gives users the capability to digitally verify the inputs and processes constituting an image's origins and lifecycle events.

## Plugin API

We want Docker to run everywhere, and to integrate with every devops tool. Those are ambitious goals, and the only way to reach them is with the Docker community. For the community to participate fully, we need an API which allows Docker to be deeply and easily customized. We are working on a plugin API which will make Docker very customization-friendly. We believe it will facilitate the integrations listed above - and many more we didn't even think about.

## Multi-Architecture Support

Our goal is to make Docker run everywhere. However, currently Docker only runs on x86_64 systems. We plan on expanding architecture support, so that Docker containers can be created and used on more architectures, including ARM, Joyent SmartOS, and Microsoft.
docker-1.6.2/events/0000755000175000017500000000000012524223634013640 5ustar tianontianondocker-1.6.2/events/events_test.go0000644000175000017500000000663312524223634016542 0ustar tianontianonpackage events

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"testing"
	"time"

	"github.com/docker/docker/engine"
	"github.com/docker/docker/utils"
)

func TestEventsPublish(t *testing.T) {
	e := New()
	l1 := make(chan *utils.JSONMessage)
	l2 := make(chan *utils.JSONMessage)
	e.subscribe(l1)
	e.subscribe(l2)
	count := e.subscribersCount()
	if count != 2 {
		t.Fatalf("Must be 2 subscribers, got %d", count)
	}
	go e.log("test", "cont", "image")
	select {
	case msg := <-l1:
		if len(e.events) != 1 {
			t.Fatalf("Must be only one event, got %d", len(e.events))
		}
		if msg.Status != "test" {
			t.Fatalf("Status should be test, got %s", msg.Status)
		}
		if msg.ID != "cont" {
			t.Fatalf("ID should be cont, got %s", msg.ID)
		}
		if msg.From != "image" {
			t.Fatalf("From should be image, got %s", msg.From)
		}
	case <-time.After(1 * time.Second):
		t.Fatal("Timeout waiting for broadcasted message")
	}
	select {
	case msg := <-l2:
		if len(e.events) != 1 {
			t.Fatalf("Must be only one event, got %d", len(e.events))
		}
		if msg.Status != "test" {
			t.Fatalf("Status should be test, got %s", msg.Status)
		}
		if msg.ID != "cont" {
			t.Fatalf("ID should be cont, got %s", msg.ID)
		}
		if msg.From != "image" {
			t.Fatalf("From should be image, got %s", msg.From)
		}
	case <-time.After(1 * time.Second):
		t.Fatal("Timeout waiting for broadcasted message")
	}
}

func TestEventsPublishTimeout(t *testing.T) {
	e := New()
	l := make(chan *utils.JSONMessage)
	e.subscribe(l)
	c := make(chan struct{})
	go func() {
		e.log("test", "cont", "image")
		close(c)
	}()
	select {
	case <-c:
	case <-time.After(time.Second):
		t.Fatal("Timeout publishing message")
	}
}

func TestLogEvents(t *testing.T) {
	e := New()
	eng := engine.New()
	if err := e.Install(eng); err != nil {
		t.Fatal(err)
	}
	for i := 0; i < eventsLimit+16; i++ {
		action := fmt.Sprintf("action_%d", i)
		id := fmt.Sprintf("cont_%d", i)
		from := fmt.Sprintf("image_%d", i)
		job := eng.Job("log", action, id, from)
		if err := job.Run(); err != nil {
			t.Fatal(err)
		}
	}
	time.Sleep(50 * time.Millisecond)
	if len(e.events) != eventsLimit {
		t.Fatalf("Must be %d events, got %d", eventsLimit, len(e.events))
	}
	job := eng.Job("events")
	job.SetenvInt64("since", 1)
	job.SetenvInt64("until", time.Now().Unix())
	buf := bytes.NewBuffer(nil)
	job.Stdout.Add(buf)
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}
	buf = bytes.NewBuffer(buf.Bytes())
	dec := json.NewDecoder(buf)
	var msgs []utils.JSONMessage
	for {
		var jm utils.JSONMessage
		if err := dec.Decode(&jm); err != nil {
			if err == io.EOF {
				break
			}
			t.Fatal(err)
		}
		msgs = append(msgs, jm)
	}
	if len(msgs) != eventsLimit {
		t.Fatalf("Must be %d events, got %d", eventsLimit, len(msgs))
	}
	first := msgs[0]
	if first.Status != "action_16" {
		t.Fatalf("First action is %s, must be action_16", first.Status)
	}
	last := msgs[len(msgs)-1]
	if last.Status != "action_79" {
		t.Fatalf("Last action is %s, must be action_79", last.Status)
	}
}

func TestEventsCountJob(t *testing.T) {
	e := New()
	eng := engine.New()
	if err := e.Install(eng); err != nil {
		t.Fatal(err)
	}
	l1 := make(chan *utils.JSONMessage)
	l2 := make(chan *utils.JSONMessage)
	e.subscribe(l1)
	e.subscribe(l2)
	job := 
eng.Job("subscribers_count") env, _ := job.Stdout.AddEnv() if err := job.Run(); err != nil { t.Fatal(err) } count := env.GetInt("count") if count != 2 { t.Fatalf("There must be 2 subscribers, got %d", count) } } docker-1.6.2/events/events.go0000644000175000017500000001226512524223634015501 0ustar tianontianonpackage events import ( "bytes" "encoding/json" "io" "strings" "sync" "time" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/parsers/filters" "github.com/docker/docker/utils" ) const eventsLimit = 64 type listener chan<- *utils.JSONMessage type Events struct { mu sync.RWMutex events []*utils.JSONMessage subscribers []listener } func New() *Events { return &Events{ events: make([]*utils.JSONMessage, 0, eventsLimit), } } // Install installs events public api in docker engine func (e *Events) Install(eng *engine.Engine) error { // Here you should describe public interface jobs := map[string]engine.Handler{ "events": e.Get, "log": e.Log, "subscribers_count": e.SubscribersCount, } for name, job := range jobs { if err := eng.Register(name, job); err != nil { return err } } return nil } func (e *Events) Get(job *engine.Job) engine.Status { var ( since = job.GetenvInt64("since") until = job.GetenvInt64("until") timeout = time.NewTimer(time.Unix(until, 0).Sub(time.Now())) ) eventFilters, err := filters.FromParam(job.Getenv("filters")) if err != nil { return job.Error(err) } // If no until, disable timeout if until == 0 { timeout.Stop() } listener := make(chan *utils.JSONMessage) e.subscribe(listener) defer e.unsubscribe(listener) job.Stdout.Write(nil) // Resend every event in the [since, until] time interval. if since != 0 { if err := e.writeCurrent(job, since, until, eventFilters); err != nil { return job.Error(err) } } for { select { case event, ok := <-listener: if !ok { return engine.StatusOK } if err := writeEvent(job, event, eventFilters); err != nil { return job.Error(err) } case <-timeout.C: return engine.StatusOK } } } func (e *Events) Log(job *engine.Job) engine.Status { if len(job.Args) != 3 { return job.Errorf("usage: %s ACTION ID FROM", job.Name) } // not waiting for receivers go e.log(job.Args[0], job.Args[1], job.Args[2]) return engine.StatusOK } func (e *Events) SubscribersCount(job *engine.Job) engine.Status { ret := &engine.Env{} ret.SetInt("count", e.subscribersCount()) ret.WriteTo(job.Stdout) return engine.StatusOK } func writeEvent(job *engine.Job, event *utils.JSONMessage, eventFilters filters.Args) error { isFiltered := func(field string, filter []string) bool { if len(filter) == 0 { return false } for _, v := range filter { if v == field { return false } if strings.Contains(field, ":") { image := strings.Split(field, ":") if image[0] == v { return false } } } return true } //incoming container filter can be name,id or partial id, convert and replace as a full container id for i, cn := range eventFilters["container"] { eventFilters["container"][i] = GetContainerId(job.Eng, cn) } if isFiltered(event.Status, eventFilters["event"]) || isFiltered(event.From, eventFilters["image"]) || isFiltered(event.ID, eventFilters["container"]) { return nil } // When sending an event JSON serialization errors are ignored, but all // other errors lead to the eviction of the listener. 
	if b, err := json.Marshal(event); err == nil {
		if _, err = job.Stdout.Write(b); err != nil {
			return err
		}
	}
	return nil
}

func (e *Events) writeCurrent(job *engine.Job, since, until int64, eventFilters filters.Args) error {
	e.mu.RLock()
	for _, event := range e.events {
		if event.Time >= since && (event.Time <= until || until == 0) {
			if err := writeEvent(job, event, eventFilters); err != nil {
				e.mu.RUnlock()
				return err
			}
		}
	}
	e.mu.RUnlock()
	return nil
}

func (e *Events) subscribersCount() int {
	e.mu.RLock()
	c := len(e.subscribers)
	e.mu.RUnlock()
	return c
}

func (e *Events) log(action, id, from string) {
	e.mu.Lock()
	now := time.Now().UTC().Unix()
	jm := &utils.JSONMessage{Status: action, ID: id, From: from, Time: now}
	if len(e.events) == cap(e.events) {
		// discard oldest event
		copy(e.events, e.events[1:])
		e.events[len(e.events)-1] = jm
	} else {
		e.events = append(e.events, jm)
	}
	for _, s := range e.subscribers {
		// We give each subscriber a 100ms time window to receive the event,
		// after which we move to the next.
		select {
		case s <- jm:
		case <-time.After(100 * time.Millisecond):
		}
	}
	e.mu.Unlock()
}

func (e *Events) subscribe(l listener) {
	e.mu.Lock()
	e.subscribers = append(e.subscribers, l)
	e.mu.Unlock()
}

// unsubscribe closes and removes the specified listener from the list of
// previously registered ones.
// It returns a boolean value indicating if the listener was successfully
// found, closed and unregistered.
func (e *Events) unsubscribe(l listener) bool {
	e.mu.Lock()
	for i, subscriber := range e.subscribers {
		if subscriber == l {
			close(l)
			e.subscribers = append(e.subscribers[:i], e.subscribers[i+1:]...)
			e.mu.Unlock()
			return true
		}
	}
	e.mu.Unlock()
	return false
}

// GetContainerId resolves a container name, ID, or partial ID to a full
// container ID by running the "container_inspect" job, returning the empty
// string when the lookup fails.
func GetContainerId(eng *engine.Engine, name string) string {
	var buf bytes.Buffer
	job := eng.Job("container_inspect", name)

	var outStream io.Writer

	outStream = &buf
	job.Stdout.Set(outStream)

	if err := job.Run(); err != nil {
		return ""
	}
	var out struct{ ID string }
	json.NewDecoder(&buf).Decode(&out)
	return out.ID
}
docker-1.6.2/.dockerignore0000644000175000017500000000002012524223634015000 0ustar tianontianonbundles
.gopath
docker-1.6.2/CHANGELOG.md0000644000175000017500000017330512524223634014156 0ustar tianontianon# Changelog

## 1.6.2 (2015-05-13)

#### Runtime
- Revert change prohibiting mounting into /sys

## 1.6.1 (2015-05-07)

#### Security
- Fix read/write /proc paths (CVE-2015-3630)
- Prohibit VOLUME /proc and VOLUME / (CVE-2015-3631)
- Fix opening of file-descriptor 1 (CVE-2015-3627)
- Fix symlink traversal on container respawn allowing local privilege escalation (CVE-2015-3629)
- Prohibit mount of /sys

#### Runtime
- Update Apparmor policy to not allow mounts

## 1.6.0 (2015-04-07)

#### Builder
+ Building images from an image ID
+ build containers with resource constraints, ie `docker build --cpu-shares=100 --memory=1024m...`
+ `commit --change` to apply specified Dockerfile instructions while committing the image
+ `import --change` to apply specified Dockerfile instructions while importing the image
+ basic build cancellation

#### Client
+ Windows Support

#### Runtime
+ Container and image Labels
+ `--cgroup-parent` for specifying a parent cgroup to place container cgroup within
+ Logging drivers, `json-file`, `syslog`, or `none`
+ Pulling images by ID
+ `--ulimit` to set the ulimit on a container
+ `--default-ulimit` option on the daemon which applies to all created containers (and overwritten by `--ulimit` on run)

## 1.5.0 (2015-02-10)

#### Builder
+ Dockerfile to use for a given `docker build` can be specified with the `-f` flag
* Dockerfile and .dockerignore files can be themselves excluded as part of the .dockerignore file, thus preventing modifications to these files invalidating ADD or COPY instructions cache
* ADD and COPY instructions accept relative paths
* Dockerfile `FROM scratch` instruction is now interpreted as a no-base specifier
* Improve performance when exposing a large number of ports

#### Hack
+ Allow client-side only integration tests for Windows
* Include docker-py integration tests against Docker daemon as part of our test suites

#### Packaging
+ Support for the new version of the registry HTTP API
* Speed up `docker push` for images with a majority of already existing layers
- Fixed contacting a private registry through a proxy

#### Remote API
+ A new endpoint will stream live container resource metrics and can be accessed with the `docker stats` command
+ Containers can be renamed using the new `rename` endpoint and the associated `docker rename` command
* Container `inspect` endpoint shows the ID of `exec` commands running in this container
* Container `inspect` endpoint shows the number of times Docker auto-restarted the container
* New types of event can be streamed by the `events` endpoint: 'OOM' (container died with out of memory), 'exec_create', and 'exec_start'
- Fixed returned string fields which hold numeric characters incorrectly omitting surrounding double quotes

#### Runtime
+ Docker daemon has full IPv6 support
+ The `docker run` command can take the `--pid=host` flag to use the host PID namespace, which makes it possible for example to debug host processes using containerized debugging tools
+ The `docker run` command can take the `--read-only` flag to make the container's root filesystem mounted as readonly, which can be used in combination with volumes to force a container's processes to only write to locations that will be persisted
+ Container total memory usage can be limited for `docker run` using the `--memory-swap` flag
* Major stability improvements for devicemapper storage driver
* Better integration with host system: containers will reflect changes to the host's `/etc/resolv.conf` file when restarted
* Better integration with host system: per-container iptable rules are moved to the DOCKER chain
- Fixed container exiting on out of memory returning an invalid exit code

#### Other
* The HTTP_PROXY, HTTPS_PROXY, and NO_PROXY environment variables are properly taken into account by the client when connecting to the Docker daemon

## 1.4.1 (2014-12-15)

#### Runtime
- Fix issue with volumes-from and bind mounts not being honored after create

## 1.4.0 (2014-12-11)

#### Notable Features since 1.3.0
+ Set key=value labels to the daemon (displayed in `docker info`), applied with new `-label` daemon flag
+ Add support for `ENV` in Dockerfile of the form: `ENV name=value name2=value2...`
+ New Overlayfs Storage Driver
+ `docker info` now returns an `ID` and `Name` field
+ Filter events by event name, container, or image
+ `docker cp` now supports copying from container volumes
- Fixed `docker tag`, so it honors `--force` when overriding a tag for existing image.

## 1.3.3 (2014-12-11)

#### Security
- Fix path traversal vulnerability in processing of absolute symbolic links (CVE-2014-9356)
- Fix decompression of xz image archives, preventing privilege escalation (CVE-2014-9357)
- Validate image IDs (CVE-2014-9358)

#### Runtime
- Fix an issue when image archives are being read slowly

#### Client
- Fix a regression related to stdin redirection
- Fix a regression with `docker cp` when destination is the current directory

## 1.3.2 (2014-11-20)

#### Security
- Fix tar breakout vulnerability
* Extractions are now sandboxed in a chroot
- Security options are no longer committed to images

#### Runtime
- Fix deadlock in `docker ps -f exited=1`
- Fix a bug when `--volumes-from` references a container that failed to start

#### Registry
+ `--insecure-registry` now accepts CIDR notation such as 10.1.0.0/16
* Private registries whose IPs fall in the 127.0.0.0/8 range do not need the `--insecure-registry` flag
- Skip the experimental registry v2 API when mirroring is enabled

## 1.3.1 (2014-10-28)

#### Security
* Prevent fallback to SSL protocols < TLS 1.0 for client, daemon and registry
+ Secure HTTPS connection to registries with certificate verification and without HTTP fallback unless `--insecure-registry` is specified

#### Runtime
- Fix issue where volumes would not be shared

#### Client
- Fix issue with `--iptables=false` not automatically setting `--ip-masq=false`
- Fix docker run output to non-TTY stdout

#### Builder
- Fix escaping `$` for environment variables
- Fix issue with lowercase `onbuild` Dockerfile instruction
- Restrict environment variable expansion to `ENV`, `ADD`, `COPY`, `WORKDIR`, `EXPOSE`, `VOLUME` and `USER`

## 1.3.0 (2014-10-14)

#### Notable features since 1.2.0
+ Docker `exec` allows you to run additional processes inside existing containers
+ Docker `create` gives you the ability to create a container via the CLI without executing a process
+ `--security-opts` options to allow users to customize container labels and apparmor profiles
+ Docker `ps` filters
- Wildcard support to COPY/ADD
+ Move production URLs to get.docker.com from get.docker.io
+ Allocate IP address on the bridge inside a valid CIDR
+ Use drone.io for PR and CI testing
+ Ability to set up an official registry mirror
+ Ability to save multiple images with docker `save`

## 1.2.0 (2014-08-20)

#### Runtime
+ Make /etc/hosts /etc/resolv.conf and /etc/hostname editable at runtime
+ Auto-restart containers using policies
+ Use /var/lib/docker/tmp for large temporary files
+ `--cap-add` and `--cap-drop` to tweak what linux capability you want
+ `--device` to use devices in containers

#### Client
+ `docker search` on private registries
+ Add `exited` filter to `docker ps --filter`
* `docker rm -f` now kills instead of stopping
+ Support for IPv6 addresses in `--dns` flag

#### Proxy
+ Proxy instances in separate processes
* Small bug fix on UDP proxy

## 1.1.2 (2014-07-23)

#### Runtime
+ Fix port allocation for existing containers
+ Fix containers restart on daemon restart

#### Packaging
+ Fix /etc/init.d/docker issue on Debian

## 1.1.1 (2014-07-09)

#### Builder
* Fix issue with ADD

## 1.1.0 (2014-07-03)

#### Notable features since 1.0.1
+ Add `.dockerignore` support
+ Pause containers during `docker commit`
+ Add `--tail` to `docker logs`

#### Builder
+ Allow a tar file as context for `docker build`
* Fix issue with white-spaces and multi-lines in `Dockerfiles`

#### Runtime
* Overall performance improvements
* Allow `/` as source of `docker run -v`
* Fix port allocation
* Fix bug in `docker
save` * Add links information to `docker inspect` #### Client * Improve command line parsing for `docker commit` #### Remote API * Improve status code for the `start` and `stop` endpoints ## 1.0.1 (2014-06-19) #### Notable features since 1.0.0 * Enhance security for the LXC driver #### Builder * Fix `ONBUILD` instruction passed to grandchildren #### Runtime * Fix events subscription * Fix /etc/hostname file with host networking * Allow `-h` and `--net=none` * Fix issue with hotplug devices in `--privileged` #### Client * Fix artifacts with events * Fix a panic with empty flags * Fix `docker cp` on Mac OS X #### Miscellaneous * Fix compilation on Mac OS X * Fix several races ## 1.0.0 (2014-06-09) #### Notable features since 0.12.0 * Production support ## 0.12.0 (2014-06-05) #### Notable features since 0.11.0 * 40+ various improvements to stability, performance and usability * New `COPY` Dockerfile instruction to allow copying a local file from the context into the container without ever extracting if the file is a tar file * Inherit file permissions from the host on `ADD` * New `pause` and `unpause` commands to allow pausing and unpausing of containers using cgroup freezer * The `images` command has a `-f`/`--filter` option to filter the list of images * Add `--force-rm` to clean up after a failed build * Standardize JSON keys in Remote API to CamelCase * Pull from a docker run now assumes `latest` tag if not specified * Enhance security on Linux capabilities and device nodes ## 0.11.1 (2014-05-07) #### Registry - Fix push and pull to private registry ## 0.11.0 (2014-05-07) #### Notable features since 0.10.0 * SELinux support for mount and process labels * Linked containers can be accessed by hostname * Use the net `--net` flag to allow advanced network configuration such as host networking so that containers can use the host's network interfaces * Add a ping endpoint to the Remote API to do healthchecks of your docker daemon * Logs can now be returned with an optional timestamp * Docker now works with registries that support SHA-512 * Multiple registry endpoints are supported to allow registry mirrors ## 0.10.0 (2014-04-08) #### Builder - Fix printing multiple messages on a single line. Fixes broken output during builds. - Follow symlinks inside container's root for ADD build instructions. - Fix EXPOSE caching. #### Documentation - Add the new options of `docker ps` to the documentation. - Add the options of `docker restart` to the documentation. - Update daemon docs and help messages for --iptables and --ip-forward. - Updated apt-cacher-ng docs example. - Remove duplicate description of --mtu from docs. - Add missing -t and -v for `docker images` to the docs. - Add fixes to the cli docs. - Update libcontainer docs. - Update images in docs to remove references to AUFS and LXC. - Update the nodejs_web_app in the docs to use the new epel RPM address. - Fix external link on security of containers. - Update remote API docs. - Add image size to history docs. - Be explicit about binding to all interfaces in redis example. - Document DisableNetwork flag in the 1.10 remote api. - Document that `--lxc-conf` is lxc only. - Add chef usage documentation. - Add example for an image with multiple for `docker load`. - Explain what `docker run -a` does in the docs. #### Contrib - Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile. - Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly. 
- Remove inotifywait hack from the upstart host-integration example because it's not necessary any more.
- Add check-config script to contrib.
- Fix fish shell completion.

#### Hack
* Clean up "go test" output from "make test" to be much more readable/scannable.
* Exclude more "definitely not unit tested Go source code" directories from hack/make/test.
+ Generate md5 and sha256 hashes when building, and upload them via hack/release.sh.
- Include contributed completions in Ubuntu PPA.
+ Add cli integration tests.
* Add tweaks to the hack scripts to make them simpler.

#### Remote API
+ Add TLS auth support for API.
* Move git clone from daemon to client.
- Fix content-type detection in docker cp.
* Split API into 2 go packages.

#### Runtime
* Support hairpin NAT without going through Docker server.
- devicemapper: succeed immediately when removing non-existing devices.
- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time and unlock while sleeping).
- devicemapper: increase timeout in waitClose to 10 seconds.
- devicemapper: ensure we shut down thin pool cleanly.
- devicemapper: pass info, rather than hash to activateDeviceIfNeeded, deactivateDevice, setInitialized, deleteDevice.
- devicemapper: avoid AB-BA deadlock.
- devicemapper: make shutdown better/faster.
- improve alpha sorting in mflag.
- Remove manual http cookie management because the cookiejar is being used.
- Use BSD raw mode on Darwin. Fixes nano, tmux and others.
- Add FreeBSD support for the client.
- Merge auth package into registry.
- Add deprecation warning for -t on `docker pull`.
- Remove goroutine leak on error.
- Update parseLxcInfo to comply with new lxc1.0 format.
- Fix attach exit on darwin.
- Improve deprecation message.
- Retry to retrieve the layer metadata up to 5 times for `docker pull`.
- Only unshare the mount namespace for execin.
- Merge existing config when committing.
- Disable daemon startup timeout.
- Fix issue #4681: add loopback interface when networking is disabled.
- Add failing test case for issue #4681.
- Send SIGTERM to child, instead of SIGKILL.
- Show the driver and the kernel version in `docker info` even when not in debug mode.
- Always symlink /dev/ptmx for libcontainer. This fixes console related problems.
- Fix issue caused by the absence of /etc/apparmor.d.
- Don't leave empty cidFile behind when failing to create the container.
- Mount cgroups automatically if they're not mounted already.
- Use mock for search tests.
- Update to double-dash everywhere.
- Move .dockerenv parsing to lxc driver.
- Move all bind-mounts in the container inside the namespace.
- Don't use separate bind mount for container.
- Always symlink /dev/ptmx for libcontainer.
- Don't kill by pid for other drivers.
- Add initial logging to libcontainer.
* Sort by port in `docker ps`.
- Move networking drivers into runtime top level package.
+ Add --no-prune to `docker rmi`.
+ Add time since exit in `docker ps`.
- graphdriver: add build tags.
- Prevent allocation of previously allocated ports & improve port allocation.
* Add support for --since/--before in `docker ps`.
- Clean up container stop.
+ Add support for configurable dns search domains.
- Add support for relative WORKDIR instructions.
- Add --output flag for docker save.
- Remove duplication of DNS entries in config merging.
- Add cpuset.cpus to cgroups and native driver options.
- Remove docker-ci.
- Promote btrfs. btrfs is no longer considered experimental.
- Add --input flag to `docker load`.
- Return error when existing bridge doesn't match IP address. - Strip comments before parsing line continuations to avoid interpreting instructions as comments. - Fix TestOnlyLoopbackExistsWhenUsingDisableNetworkOption to ignore "DOWN" interfaces. - Add systemd implementation of cgroups and make containers show up as systemd units. - Fix commit and import when no repository is specified. - Remount /var/lib/docker as --private to fix scaling issue. - Use the environment's proxy when pinging the remote registry. - Reduce error level from harmless errors. * Allow --volumes-from to be individual files. - Fix expanding buffer in StdCopy. - Set error regardless of attach or stdin. This fixes #3364. - Add support for --env-file to load environment variables from files. - Symlink /etc/mtab and /proc/mounts. - Allow pushing a single tag. - Shut down containers cleanly at shutdown and wait forever for the containers to shut down. This makes container shutdown on daemon shutdown work properly via SIGTERM. - Don't throw error when starting an already running container. - Fix dynamic port allocation limit. - remove setupDev from libcontainer. - Add API version to `docker version`. - Return correct exit code when receiving signal and make SIGQUIT quit without cleanup. - Fix --volumes-from mount failure. - Allow non-privileged containers to create device nodes. - Skip login tests because of external dependency on a hosted service. - Deprecate `docker images --tree` and `docker images --viz`. - Deprecate `docker insert`. - Include base abstraction for apparmor. This fixes some apparmor related problems on Ubuntu 14.04. - Add specific error message when hitting 401 over HTTP on push. - Fix absolute volume check. - Remove volumes-from from the config. - Move DNS options to hostconfig. - Update the apparmor profile for libcontainer. - Add deprecation notice for `docker commit -run`. ## 0.9.1 (2014-03-24) #### Builder - Fix printing multiple messages on a single line. Fixes broken output during builds. #### Documentation - Fix external link on security of containers. #### Contrib - Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly. - Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile. #### Hack - Generate md5 and sha256 hashes when building, and upload them via hack/release.sh. #### Remote API - Fix content-type detection in `docker cp`. #### Runtime - Use BSD raw mode on Darwin. Fixes nano, tmux and others. - Only unshare the mount namespace for execin. - Retry to retrieve the layer metadata up to 5 times for `docker pull`. - Merge existing config when committing. - Fix panic in monitor. - Disable daemon startup timeout. - Fix issue #4681: add loopback interface when networking is disabled. - Add failing test case for issue #4681. - Send SIGTERM to child, instead of SIGKILL. - Show the driver and the kernel version in `docker info` even when not in debug mode. - Always symlink /dev/ptmx for libcontainer. This fixes console related problems. - Fix issue caused by the absence of /etc/apparmor.d. - Don't leave empty cidFile behind when failing to create the container. - Improve deprecation message. - Fix attach exit on darwin. - devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time, unlock while sleeping). - devicemapper: succeed immediately when removing non-existing devices. - devicemapper: increase timeout in waitClose to 10 seconds. 
- Remove goroutine leak on error. - Update parseLxcInfo to comply with new lxc1.0 format. ## 0.9.0 (2014-03-10) #### Builder - Avoid extra mount/unmount during build. This fixes mount/unmount related errors during build. - Add error to docker build --rm. This adds missing error handling. - Forbid chained onbuild, `onbuild from` and `onbuild maintainer` triggers. - Make `--rm` the default for `docker build`. #### Documentation - Download the docker client binary for Mac over https. - Update the titles of the install instructions & descriptions. * Add instructions for upgrading boot2docker. * Add port forwarding example in OS X install docs. - Attempt to disentangle repository and registry. - Update docs to explain more about `docker ps`. - Update sshd example to use a Dockerfile. - Rework some examples, including the Python examples. - Update docs to include instructions for a container's lifecycle. - Update docs documentation to discuss the docs branch. - Don't skip cert check for an example & use HTTPS. - Bring back the memory and swap accounting section which was lost when the kernel page was removed. - Explain DNS warnings and how to fix them on systems running and using a local nameserver. #### Contrib - Add Tanglu support for mkimage-debootstrap. - Add SteamOS support for mkimage-debootstrap. #### Hack - Get package coverage when running integration tests. - Remove the Vagrantfile. This is being replaced with boot2docker. - Fix tests on systems where aufs isn't available. - Update packaging instructions and remove the dependency on lxc. #### Remote API * Move code specific to the API to the api package. - Fix header content type for the API. Makes all endpoints use proper content type. - Fix registry auth & remove ping calls from CmdPush and CmdPull. - Add newlines to the JSON stream functions. #### Runtime * Do not ping the registry from the CLI. All requests to registries flow through the daemon. - Check for nil information return in the lxc driver. This fixes panics with older lxc versions. - Devicemapper: cleanups and fix for unmount. Fixes two problems which were causing unmount to fail intermittently. - Devicemapper: remove directory when removing device. Directories don't get left behind when removing the device. * Devicemapper: enable skip_block_zeroing. Improves performance by not zeroing blocks. - Devicemapper: fix shutdown warnings. Fixes shutdown warnings concerning pool device removal. - Ensure docker cp stream is closed properly. Fixes problems with files not being copied by `docker cp`. - Stop making `tcp://` default to `127.0.0.1:4243` and remove the default port for tcp. - Fix `--run` in `docker commit`. This makes `docker commit --run` work again. - Fix custom bridge related options. This makes custom bridges work again. + Mount-bind the PTY as container console. This allows tmux/screen to run. + Add the pure Go libcontainer library to make it possible to run containers using only features of the Linux kernel. + Add native exec driver which uses libcontainer and make it the default exec driver. - Add support for handling extended attributes in archives. * Set the container MTU to be the same as the host MTU. + Add simple sha256 checksums for layers to speed up `docker push`. * Improve kernel version parsing. * Allow flag grouping (`docker run -it`). - Remove chroot exec driver. - Fix divide by zero to fix panic. - Rewrite `docker rmi`. - Fix docker info with lxc 1.0.0. - Fix fedora tty with apparmor. 
* Don't always append env vars, replace defaults with vars from config. * Fix a goroutine leak. * Switch to Go 1.2.1. - Fix unique constraint error checks. * Handle symlinks for Docker's data directory and for TMPDIR. - Add deprecation warnings for flags (-flag is deprecated in favor of --flag) - Add apparmor profile for the native execution driver. * Move system specific code from archive to pkg/system. - Fix duplicate signal for `docker run -i -t` (issue #3336). - Return correct process pid for lxc. - Add a -G option to specify the group which unix sockets belong to. + Add `-f` flag to `docker rm` to force removal of running containers. + Kill ghost containers and restart all ghost containers when the docker daemon restarts. + Add `DOCKER_RAMDISK` environment variable to make Docker work when the root is on a ramdisk. ## 0.8.1 (2014-02-18) #### Builder - Avoid extra mount/unmount during build. This removes an unneeded mount/unmount operation which was causing problems with devicemapper - Fix regression with ADD of tar files. This stops Docker from decompressing tarballs added via ADD from the local file system - Add error to `docker build --rm`. This adds a missing error check to ensure failures to remove containers are detected and reported #### Documentation * Update issue filing instructions * Warn against the use of symlinks for Docker's storage folder * Replace the Firefox example with an IceWeasel example * Rewrite the PostgreSQL example using a Dockerfile and add more details to it * Improve the OS X documentation #### Remote API - Fix broken images API for version less than 1.7 - Use the right encoding for all API endpoints which return JSON - Move remote api client to api/ - Queue calls to the API using generic socket wait #### Runtime - Fix the use of custom settings for bridges and custom bridges - Refactor the devicemapper code to avoid many mount/unmount race conditions and failures - Remove two panics which could make Docker crash in some situations - Don't ping registry from the CLI client - Enable skip_block_zeroing for devicemapper. This stops devicemapper from always zeroing entire blocks - Fix --run in `docker commit`. This makes docker commit store `--run` in the image configuration - Remove directory when removing devicemapper device. This cleans up leftover mount directories - Drop NET_ADMIN capability for non-privileged containers. Unprivileged containers can't change their network configuration - Ensure `docker cp` stream is closed properly - Avoid extra mount/unmount during container registration. This removes an unneeded mount/unmount operation which was causing problems with devicemapper - Stop allowing tcp:// as a default tcp bind address which binds to 127.0.0.1:4243 and remove the default port + Mount-bind the PTY as container console. This allows tmux and screen to run in a container - Clean up archive closing. This fixes and improves archive handling - Fix engine tests on systems where temp directories are symlinked - Add test methods for save and load - Avoid temporarily unmounting the container when restarting it. This fixes a race for devicemapper during restart - Support submodules when building from a GitHub repository - Quote volume path to allow spaces - Fix remote tar ADD behavior. This fixes a regression which was causing Docker to extract tarballs ## 0.8.0 (2014-02-04) #### Notable features since 0.7.0 * Images and containers can be removed much faster * Building an image from source with docker build is now much faster * The Docker daemon starts and stops much faster * The memory footprint of many common operations has been reduced, by streaming files instead of buffering them in memory, fixing memory leaks, and fixing various suboptimal memory allocations * Several race conditions were fixed, making Docker more stable under very high concurrency load. This makes Docker more stable and less likely to crash and reduces the memory footprint of many common operations * All packaging operations are now built on the Go language’s standard tar implementation, which is bundled with Docker itself. This makes packaging more portable across host distributions, and solves several issues caused by quirks and incompatibilities between different distributions of tar * Docker can now create, remove and modify larger numbers of containers and images gracefully thanks to more aggressive releasing of system resources. For example the storage driver API now allows Docker to do reference counting on mounts created by the drivers. With the ongoing changes to the networking and execution subsystems of docker, testing these areas has been a focus of the refactoring. By moving these subsystems into separate packages we can test, analyze, and monitor coverage and quality of these packages * Many components have been separated into smaller sub-packages, each with a dedicated test suite. As a result the code is better-tested, more readable and easier to change * The ADD instruction now supports caching, which avoids unnecessarily re-uploading the same source content again and again when it hasn’t changed * The new ONBUILD instruction adds to your image a “trigger” instruction to be executed at a later time, when the image is used as the base for another build * Docker now ships with an experimental storage driver which uses the BTRFS filesystem for copy-on-write * Docker is officially supported on Mac OS X * The Docker daemon supports systemd socket activation ## 0.7.6 (2014-01-14) #### Builder * Do not follow symlink outside of build context #### Runtime - Remount bind mounts when ro is specified * Use https for fetching docker version #### Other * Inline the test.docker.io fingerprint * Add ca-certificates to packaging documentation ## 0.7.5 (2014-01-09) #### Builder * Disable compression for build. More space usage but a much faster upload - Fix ADD caching for certain paths - Do not compress archive from git build #### Documentation - Fix error in GROUP add example * Make sure the GPG fingerprint is inline in the documentation * Give more specific advice on setting up signing of commits for DCO #### Runtime - Fix misspelled container names - Do not add hostname when networking is disabled * Return most recent image from the cache by date - Return all errors from docker wait * Add Content-Type Header "application/json" to GET /version and /info responses #### Other * Update DCO to version 1.1 + Update Makefile to use "docker:GIT_BRANCH" as the generated image name * Update Travis to check for new 1.1 DCO version ## 0.7.4 (2014-01-07) #### Builder - Fix ADD caching issue with `.`-
prefixed path - Fix docker build on devicemapper by reverting sparse file tar option - Fix issue with file caching and prevent wrong cache hit * Use same error handling while unmarshalling CMD and ENTRYPOINT #### Documentation * Simplify and streamline Amazon Quickstart * Install instructions use unprefixed fedora image * Update instructions for mtu flag for Docker on GCE + Add Ubuntu Saucy to installation - Fix for wrong version warning on master instead of latest #### Runtime - Only get the image's rootfs when we need to calculate the image size - Correctly handle unmapping UDP ports * Make CopyFileWithTar use a pipe instead of a buffer to save memory on docker build - Fix login message to say pull instead of push - Fix "docker load" help by removing "SOURCE" prompt and mentioning STDIN * Make blank -H option default to the same as no -H was sent * Extract cgroups utilities to own submodule #### Other + Add Travis CI configuration to validate DCO and gofmt requirements + Add Developer Certificate of Origin Text * Upgrade VBox Guest Additions * Check standalone header when pinging a registry server ## 0.7.3 (2014-01-02) #### Builder + Update ADD to use the image cache, based on a hash of the added content * Add error message for empty Dockerfile #### Documentation - Fix outdated link to the "Introduction" on www.docker.io + Update the docs to get wider when the screen does - Add information about needing to install LXC when using raw binaries * Update Fedora documentation to disentangle the docker and docker.io conflict * Add a note about using the new `-mtu` flag in several GCE zones + Add FrugalWare installation instructions + Add a more complete example of `docker run` - Fix API documentation for creating and starting Privileged containers - Add missing "name" parameter documentation on "/containers/create" * Add a mention of `lxc-checkconfig` as a way to check for some of the necessary kernel configuration - Update the 1.8 API documentation with some additions that were added to the docs for 1.7 #### Hack - Add missing libdevmapper dependency to the packagers documentation * Update minimum Go requirement to a hard line at Go 1.2+ * Many minor improvements to the Vagrantfile + Add ability to customize dockerinit search locations when compiling (to be used very sparingly only by packagers of platforms who require a nonstandard location) + Add coverprofile generation reporting - Add `-a` to our Go build flags, removing the need for recompiling the stdlib manually * Update Dockerfile to be more canonical and have less spurious warnings during build - Fix some miscellaneous `docker pull` progress bar display issues * Migrate more miscellaneous packages under the "pkg" folder * Update TextMate highlighting to automatically be enabled for files named "Dockerfile" * Reorganize syntax highlighting files under a common "contrib/syntax" directory * Update install.sh script (https://get.docker.io/) to not fail if busybox fails to download or run at the end of the Ubuntu/Debian installation * Add support for container names in bash completion #### Packaging + Add an official Docker client binary for Darwin (Mac OS X) * Remove empty "Vendor" string and added "License" on deb package + Add a stubbed version of "/etc/default/docker" in the deb package #### Runtime * Update layer application to extract tars in place, avoiding file churn while handling whiteouts - Fix permissiveness of mtime comparisons in tar handling (since GNU tar and Go tar do not yet support sub-second mtime precision) * 
Reimplement `docker top` in pure Go to work more consistently, and even inside Docker-in-Docker (thus removing the shell injection vulnerability present in some versions of `lxc-ps`) + Update `-H unix://` to work similarly to `-H tcp://` by inserting the default values for missing portions - Fix more edge cases regarding dockerinit and deleted or replaced docker or dockerinit files * Update container name validation to include '.' - Fix use of a symlink or non-absolute path as the argument to `-g` to work as expected * Update to handle external mounts outside of LXC, fixing many small mounting quirks and making future execution backends and other features simpler * Update to use proper box-drawing characters everywhere in `docker images -tree` * Move MTU setting from LXC configuration to directly use netlink * Add `-S` option to external tar invocation for more efficient sparse file handling + Add arch/os info to User-Agent string, especially for registry requests + Add `-mtu` option to Docker daemon for configuring MTU - Fix `docker build` to exit with a non-zero exit code on error + Add `DOCKER_HOST` environment variable to configure the client `-H` flag without specifying it manually for every invocation ## 0.7.2 (2013-12-16) #### Runtime + Validate container names on creation with standard regex * Increase maximum image depth to 127 from 42 * Continue to move api endpoints to the job api + Add -bip flag to allow specification of dynamic bridge IP via CIDR - Allow bridge creation when ipv6 is not enabled on certain systems * Set hostname and IP address from within dockerinit * Drop capabilities from within dockerinit - Fix volumes on host when symlink is present in the image - Prevent deletion of image if ANY container is depending on it even if the container is not running * Update docker push to use new progress display * Use os.Lstat to allow mounting unix sockets when inspecting volumes - Adjust handling of inactive user login - Add missing defines in devicemapper for older kernels - Allow untag operations with no container validation - Add auth config to docker build #### Documentation * Add more information about Docker logging + Add RHEL documentation * Add a direct example for changing the CMD that is run in a container * Update Arch installation documentation + Add section on Trusted Builds + Add Network documentation page #### Other + Add new cover bundle for providing code coverage reporting * Separate integration tests in bundles * Make Tianon the hack maintainer * Update mkimage-debootstrap with more tweaks for keeping images small * Use https to get the install script * Remove vendored dotcloud/tar now that Go 1.2 has been released ## 0.7.1 (2013-12-05) #### Documentation + Add @SvenDowideit as documentation maintainer + Add links example + Add documentation regarding ambassador pattern + Add Google Cloud Platform docs + Add dockerfile best practices * Update doc for RHEL * Update doc for registry * Update Postgres examples * Update doc for Ubuntu install * Improve remote api doc #### Runtime + Add hostconfig to docker inspect + Implement `docker log -f` to stream logs + Add env variable to disable kernel version warning + Add -format to `docker inspect` + Support bind-mount for files - Fix bridge creation on RHEL - Fix image size calculation - Make sure iptables are called even if the bridge already exists - Fix issue with stderr only attach - Remove init layer when destroying a container - Fix same port binding on different interfaces - `docker build` now returns the correct exit code - Fix `docker port` to display correct port - `docker build` now checks that the dockerfile exists client side - `docker attach` now returns the correct exit code - Remove the name entry when the container does not exist #### Registry * Improve progress bars, add ETA for downloads * Simultaneous pulls now wait for the first to finish instead of failing - Tag only the top-layer image when pushing to registry - Fix issue with offline image transfer - Fix issue preventing using ':' in password for registry #### Other + Add pprof handler for debug + Create a Makefile * Use stdlib tar that now includes fix * Improve make.sh test script * Handle SIGQUIT on the daemon * Disable verbose during tests * Upgrade to go1.2 for official build * Improve unit tests * The test suite now runs all tests even if one fails * Refactor C in Go (Devmapper) - Fix OSX compilation ## 0.7.0 (2013-11-25) #### Notable features since 0.6.0 * Storage drivers: choose from aufs, device-mapper, or vfs. * Standard Linux support: docker now runs on unmodified Linux kernels and all major distributions. * Links: compose complex software stacks by connecting containers to each other. * Container naming: organize your containers by giving them memorable names. * Advanced port redirects: specify port redirects per interface, or keep sensitive ports private. * Offline transfer: push and pull images to the filesystem without losing information. * Quality: numerous bugfixes and small usability improvements. Significant increase in test coverage. ## 0.6.7 (2013-11-21) #### Runtime * Improve stability, fix some race conditions * Skip the volumes mounted when deleting the volumes of a container. * Fix layer size computation: handle hard links correctly * Use the working path for docker cp CONTAINER:PATH * Fix tmp dir never being cleaned up * Speedup docker ps * More informative error message on name collisions * Fix nameserver regex * Always return long IDs * Fix container restart race condition * Keep published ports on docker stop; docker start * Fix container networking on Fedora * Correctly express "any address" to iptables * Fix network setup when reconnecting to ghost container * Prevent deletion if image is used by a running container * Lock around read operations in graph #### RemoteAPI * Return full ID on docker rmi #### Client + Add -tree option to images + Offline image transfer * Exit with status 2 on usage error and display usage on stderr * Do not forward SIGCHLD to container * Use string timestamp for docker events -since #### Other * Update to go 1.2rc5 + Add /etc/default/docker support to upstart ## 0.6.6 (2013-11-06) #### Runtime * Ensure container name on register * Fix regression in /etc/hosts + Add lock around write operations in graph * Check if port is valid * Fix restart runtime error with ghost container networking + Add some more colors and animals to increase the pool of generated names * Fix issues in docker inspect + Escape apparmor confinement + Set environment variables using a file. 
* Prevent docker insert to erase something + Prevent DNS server conflicts in CreateBridgeIface + Validate bind mounts on the server side + Use parent image config in docker build * Fix regression in /etc/hosts #### Client + Add -P flag to publish all exposed ports + Add -notrunc and -q flags to docker history * Fix docker commit, tag and import usage + Add stars, trusted builds and library flags in docker search * Fix docker logs with tty #### RemoteAPI * Make /events API send headers immediately * Do not split last column docker top + Add size to history #### Other + Contrib: Desktop integration. Firefox usecase. + Dockerfile: bump to go1.2rc3 ## 0.6.5 (2013-10-29) #### Runtime + Containers can now be named + Containers can now be linked together for service discovery + 'run -a', 'start -a' and 'attach' can forward signals to the container for better integration with process supervisors + Automatically start crashed containers after a reboot + Expose IP, port, and proto as separate environment vars for container links * Allow ports to be published to specific ips * Prohibit inter-container communication by default - Ignore ErrClosedPipe for stdin in Container.Attach - Remove unused field kernelVersion * Fix issue when mounting subdirectories of /mnt in container - Fix untag during removal of images * Check return value of syscall.Chdir when changing working directory inside dockerinit #### Client - Only pass stdin to hijack when needed to avoid closed pipe errors * Use less reflection in command-line method invocation - Monitor the tty size after starting the container, not prior - Remove useless os.Exit() calls after log.Fatal #### Hack + Add initial init scripts library and a safer Ubuntu packaging script that works for Debian * Add -p option to invoke debootstrap with http_proxy - Update install.sh with $sh_c to get sudo/su for modprobe * Update all the mkimage scripts to use --numeric-owner as a tar argument * Update hack/release.sh process to automatically invoke hack/make.sh and bail on build and test issues #### Other * Documentation: Fix the flags for nc in example * Testing: Remove warnings and prevent mount issues - Testing: Change logic for tty resize to avoid warning in tests - Builder: Fix race condition in docker build with verbose output - Registry: Fix content-type for PushImageJSONIndex method * Contrib: Improve helper tools to generate debian and Arch linux server images ## 0.6.4 (2013-10-16) #### Runtime - Add cleanup of container when Start() fails * Add better comments to utils/stdcopy.go * Add utils.Errorf for error logging + Add -rm to docker run for removing a container on exit - Remove error messages which are not actually errors - Fix `docker rm` with volumes - Fix some error cases where a HTTP body might not be closed - Fix panic with wrong dockercfg file - Fix the attach behavior with -i * Record termination time in state. 
- Use empty string so TempDir uses the OS's temp dir automatically - Make sure to close the network allocators + Autorestart containers by default * Bump vendor kr/pty to commit 3b1f6487b `(syscall.O_NOCTTY)` * lxc: Allow set_file_cap capability in container - Move run -rm to the cli only * Split stdout stderr * Always create a new session for the container #### Testing - Add aggregated docker-ci email report - Add cleanup to remove leftover containers * Add nightly release to docker-ci * Add more tests around auth.ResolveAuthConfig - Remove a few errors in tests - Catch errClosing error when TCP and UDP proxies are terminated * Only run certain tests with TESTFLAGS='-run TestName' make.sh * Prevent docker-ci to test closing PRs * Replace panic by log.Fatal in tests - Increase TestRunDetach timeout #### Documentation * Add initial draft of the Docker infrastructure doc * Add devenvironment link to CONTRIBUTING.md * Add `apt-get install curl` to Ubuntu docs * Add explanation for export restrictions * Add .dockercfg doc * Remove Gentoo install notes about #1422 workaround * Fix help text for -v option * Fix Ping endpoint documentation - Fix parameter names in docs for ADD command - Fix ironic typo in changelog * Various command fixes in postgres example * Document how to edit and release docs - Minor updates to `postgresql_service.rst` * Clarify LGTM process to contributors - Corrected error in the package name * Document what `vagrant up` is actually doing + improve doc search results * Cleanup whitespace in API 1.5 docs * use angle brackets in MAINTAINER example email * Update archlinux.rst + Changes to a new style for the docs. Includes version switcher. * Formatting, add information about multiline json * Improve registry and index REST API documentation - Replace deprecated upgrading reference to docker-latest.tgz, which hasn't been updated since 0.5.3 * Update Gentoo installation documentation now that we're in the portage tree proper * Cleanup and reorganize docs and tooling for contributors and maintainers - Minor spelling correction of protocoll -> protocol #### Contrib * Add vim syntax highlighting for Dockerfiles from @honza * Add mkimage-arch.sh * Reorganize contributed completion scripts to add zsh completion #### Hack * Add vagrant user to the docker group * Add proper bash completion for "docker push" * Add xz utils as a runtime dep * Add cleanup/refactor portion of #2010 for hack and Dockerfile updates + Add contrib/mkimage-centos.sh back (from #1621), and associated documentation link * Add several of the small make.sh fixes from #1920, and make the output more consistent and contributor-friendly + Add @tianon to hack/MAINTAINERS * Improve network performance for VirtualBox * Revamp install.sh to be usable by more people, and to use official install methods whenever possible (apt repo, portage tree, etc.) - Fix contrib/mkimage-debian.sh apt caching prevention + Add Dockerfile.tmLanguage to contrib * Configured FPM to make /etc/init/docker.conf a config file * Enable SSH Agent forwarding in Vagrant VM * Several small tweaks/fixes for contrib/mkimage-debian.sh #### Other - Builder: Abort build if mergeConfig returns an error and fix duplicate error message - Packaging: Remove deprecated packaging directory - Registry: Use correct auth config when logging in. 
- Registry: Fix the error message so it is the same as the regex ## 0.6.3 (2013-09-23) #### Packaging * Add 'docker' group on install for ubuntu package * Update tar vendor dependency * Download apt key over HTTPS #### Runtime - Only copy and change permissions on non-bindmount volumes * Allow multiple volumes-from - Fix HTTP imports from STDIN #### Documentation * Update section on extracting the docker binary after build * Update development environment docs for new build process * Remove 'base' image from documentation #### Other - Client: Fix detach issue - Registry: Update regular expression to match index ## 0.6.2 (2013-09-17) #### Runtime + Add domainname support + Implement image filtering with path.Match * Remove unnecessary warnings * Remove os/user dependency * Only mount the hostname file when the config exists * Handle signals within the `docker login` command - UID and GID are now also applied to volumes - `docker start` sets error code upon error - `docker run` sets the same error code as the process started #### Builder + Add -rm option in order to remove intermediate containers * Allow multiline for the RUN instruction #### Registry * Implement login with private registry - Fix push issues #### Other + Hack: Vendor all dependencies * Remote API: Bump to v1.5 * Packaging: Break down hack/make.sh into small scripts, one per 'bundle': test, binary, ubuntu etc. * Documentation: General improvements ## 0.6.1 (2013-08-23) #### Registry * Pass "meta" headers in API calls to the registry #### Packaging - Use correct upstart script with new build tool - Use libffi-dev, don't build it from sources - Remove duplicate mercurial install command ## 0.6.0 (2013-08-22) #### Runtime + Add lxc-conf flag to allow custom lxc options + Add an option to set the working directory * Add Image name to LogEvent tests + Add -privileged flag and relevant tests, docs, and examples (see the Go sketch below) * Add websocket support to /container/<id>/attach/ws * Add warning when net.ipv4.ip_forwarding = 0 * Add hostname to environment * Add last stable version in `docker version` - Fix race conditions in parallel pull - Fix Graph ByParent() to generate list of child images per parent image. - Fix typo: fmt.Sprint -> fmt.Sprintf - Fix small \n error in docker build * Fix to "Inject dockerinit at /.dockerinit" * Fix #910: print user name in docker info output * Use Go 1.1.2 for dockerbuilder * Use ranged for loop on channels - Use utils.ParseRepositoryTag instead of strings.Split(name, ":") in server.ImageDelete - Improve CMD, ENTRYPOINT, and attach docs. - Improve connect message with socket error - Load authConfig only when needed and fix useless WARNING - Show tag used when image is missing * Apply volumes-from before creating volumes - Make docker run handle SIGINT/SIGTERM - Prevent crash when .dockercfg not readable - Install script should be fetched over https, not http. * API, issue 1471: Use groups for socket permissions - Correctly detect IPv4 forwarding * Mount /dev/shm as a tmpfs - Switch from http to https for get.docker.io * Let userland proxy handle container-bound traffic * Update the Docker CLI to specify a value for the "Host" header. - Change network range to avoid conflict with EC2 DNS - Reduce connect and read timeout when pinging the registry * Parallel pull - Handle ip route showing mask-less IP addresses * Allow ENTRYPOINT without CMD - Always consider localhost as a domain name when parsing the FQN repos name * Refactor checksum #### Documentation * Add MongoDB image example * Add instructions for creating and using the docker group * Add sudo to examples and installation to documentation * Add ufw doc * Add a reference to ps -a * Add information about Docker's high level tools over LXC. * Fix typo in docs for docker run -dns * Fix a typo in the ubuntu installation guide * Fix to docs regarding adding docker groups * Update default -H docs * Update readme with dependencies for building * Update amazon.rst to explain that Vagrant is not necessary for running Docker on ec2 * PostgreSQL service example in documentation * Suggest installing linux-headers by default. * Change the twitter handle * Clarify Amazon EC2 installation * 'Base' image is deprecated and should no longer be referenced in the docs. * Move note about officially supported kernel - Solved the logo being squished in Safari #### Builder + Add USER instruction to Dockerfile + Add workdir support for the Buildfile * Add no cache for docker build - Fix docker build and docker events output - Only count known instructions as build steps - Make sure ENV instruction within build performs a commit each time - Forbid certain paths within docker build ADD - Repository name (and optionally a tag) in build usage - Make sure ADD will create everything in 0755 #### Remote API * Sort Images by most recent creation date. * Reworking opaque requests in registry module * Add image name in /events * Use mime pkg to parse Content-Type * 650 http utils and user agent field #### Hack + Bash Completion: Limit commands to containers of a relevant state * Add docker dependencies coverage testing into docker-ci #### Packaging + Docker-brew 0.5.2 support and memory footprint reduction * Add new docker dependencies into docker-ci - Revert "docker.upstart: avoid spawning a `sh` process" + Docker-brew and Docker standard library + Release docker with docker * Fix the upstart script generated by get.docker.io * Enabled the docs to generate manpages. * Revert Bind daemon to 0.0.0.0 in Vagrant. 
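The 0.6.0 `-privileged` flag noted above is the kind of change this tree's integration-cli tests exercise by shelling out to the CLI. Below is a minimal, hypothetical sketch in that style; it assumes a running daemon, a local `busybox` image, and the modern double-dash spelling (`--privileged`), none of which are part of the changelog itself:

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Privileged containers keep their capabilities, so an in-container
	// mount should succeed; without --privileged the same command fails,
	// which is what makes this a useful smoke test for the flag.
	cmd := exec.Command("docker", "run", "--rm", "--privileged", "busybox",
		"sh", "-c", "mount -t tmpfs none /tmp && echo ok")
	out, err := cmd.CombinedOutput()
	fmt.Printf("output: %s (err: %v)\n", out, err)
}
```
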
#### Registry * Improve auth push * Registry unit tests + mock registry #### Tests * Improve TestKillDifferentUser to prevent timeout on buildbot - Fix typo in TestBindMounts (runContainer called without image) * Improve TestGetContainersTop so it does not rely on sleep * Relax the lo interface test to allow iface index != 1 * Add registry functional test to docker-ci * Add some tests in server and utils #### Other * Contrib: bash completion script * Client: Add docker cp command and copy api endpoint to copy container files/folders to the host * Don't read from stdout when only attached to stdin ## 0.5.3 (2013-08-13) #### Runtime * Use docker group for socket permissions - Spawn shell within upstart script - Handle ip route showing mask-less IP addresses - Add hostname to environment #### Builder - Make sure ENV instruction within build performs a commit each time ## 0.5.2 (2013-08-08) * Builder: Forbid certain paths within docker build ADD - Runtime: Change network range to avoid conflict with EC2 DNS * API: Change daemon to listen on unix socket by default ## 0.5.1 (2013-07-30) #### Runtime + Add `ps` args to `docker top` + Add support for container ID files (pidfile like) + Add container=lxc in default env + Support networkless containers with `docker run -n` and `docker -d -b=none` * Stdout/stderr logs are now stored in the same file as JSON * Allocate a /16 IP range by default, with fallback to /24. Try 12 ranges instead of 3. * Change .dockercfg format to json and support multiple auth remote - Do not override volumes from config - Fix issue with EXPOSE override #### API + Docker client now sets useragent (RFC 2616) + Add /events endpoint #### Builder + ADD command now understands URLs + CmdAdd and CmdEnv now respect Dockerfile-set ENV variables - Create directories with 755 instead of 700 within ADD instruction #### Hack * Simplify unit tests with helpers * Improve docker.upstart event * Add coverage testing into docker-ci ## 0.5.0 (2013-07-17) #### Runtime + List all processes running inside a container with 'docker top' + Host directories can be mounted as volumes with 'docker run -v' + Containers can expose public UDP ports (eg, '-p 123/udp') + Optionally specify an exact public port (eg. '-p 80:4500') * 'docker login' supports additional options - Don't save a container's hostname when committing an image. #### Registry + New image naming scheme inspired by Go packaging convention allows arbitrary combinations of registries - Fix issues when uploading images to a private registry #### Builder + ENTRYPOINT instruction sets a default binary entry point to a container + VOLUME instruction marks a part of the container as persistent data * 'docker build' displays the full output of a build by default ## 0.4.8 (2013-07-01) + Builder: New build operation ENTRYPOINT adds an executable entry point to the container. - Runtime: Fix a bug which caused 'docker run -d' to no longer print the container ID. 
- Tests: Fix issues in the test suite ## 0.4.7 (2013-06-28) #### Remote API * The progress bar updates faster when downloading and uploading large files - Fix a bug in the optional unix socket transport #### Runtime * Improve detection of kernel version + Host directories can be mounted as volumes with 'docker run -b' - fix an issue when only attaching to stdin * Use 'tar --numeric-owner' to avoid uid mismatch across multiple hosts #### Hack * Improve test suite and dev environment * Remove dependency on unit tests on 'os/user' #### Other * Registry: easier push/pull to a custom registry + Documentation: add terminology section ## 0.4.6 (2013-06-22) - Runtime: fix a bug which caused creation of empty images (and volumes) to crash. ## 0.4.5 (2013-06-21) + Builder: 'docker build git://URL' fetches and builds a remote git repository * Runtime: 'docker ps -s' optionally prints container size * Tests: improved and simplified - Runtime: fix a regression introduced in 0.4.3 which caused the logs command to fail. - Builder: fix a regression when using ADD with single regular file. ## 0.4.4 (2013-06-19) - Builder: fix a regression introduced in 0.4.3 which caused builds to fail on new clients. ## 0.4.3 (2013-06-19) #### Builder + ADD of a local file will detect tar archives and unpack them * ADD improvements: use tar for copy + automatically unpack local archives * ADD uses tar/untar for copies instead of calling 'cp -ar' * Fix the behavior of ADD to be (mostly) reverse-compatible, predictable and well-documented. - Fix a bug which caused builds to fail if ADD was the first command * Nicer output for 'docker build' #### Runtime * Remove bsdtar dependency * Add unix socket and multiple -H support * Prevent rm of running containers * Use go1.1 cookiejar - Fix issue detaching from running TTY container - Forbid parallel push/pull for a single image/repo. Fixes #311 - Fix race condition within Run command when attaching. #### Client * HumanReadable ProgressBar sizes in pull * Fix docker version`s git commit output #### API * Send all tags on History API call * Add tag lookup to history command. 
Fixes #882 #### Documentation - Fix missing command in irc bouncer example ## 0.4.2 (2013-06-17) - Packaging: Bumped version to work around an Ubuntu bug ## 0.4.1 (2013-06-17) #### Remote API + Add flag to enable cross domain requests + Add image and container sizes in docker ps and docker images #### Runtime + Configure dns configuration host-wide with 'docker -d -dns' + Detect faulty DNS configuration and replace it with a public default + Allow docker run <name>:<id> + You can now specify public port (ex: -p 80:4500) * Improve image removal to garbage-collect unreferenced parents #### Client * Allow multiple params in inspect * Print the container id before the hijack in `docker run` #### Registry * Add regexp check on repo's name * Move auth to the client - Remove login check on pull #### Other * Vagrantfile: Add the rest api port to Vagrantfile's port_forward * Upgrade to Go 1.1 - Builder: don't ignore last line in Dockerfile when it doesn't end with \n ## 0.4.0 (2013-06-03) #### Builder + Introducing Builder + 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile #### Remote API + Introducing Remote API + control Docker programmatically using a simple HTTP/json API (see the Go sketch below) #### Runtime * Various reliability and usability improvements ## 0.3.4 (2013-05-30) #### Builder + 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile + 'docker build -t FOO' applies the tag FOO to the newly built container. #### Runtime + Interactive TTYs correctly handle window resize * Fix how configuration is merged between layers #### Remote API + Split stdout and stderr on 'docker run' + Optionally listen on a different IP and port (use at your own risk) #### Documentation * Improve install instructions. ## 0.3.3 (2013-05-23) - Registry: Fix push regression - Various bugfixes ## 0.3.2 (2013-05-09) #### Registry * Improve the checksum process * Use the size to have a good progress bar while pushing * Use the actual archive if it exists in order to speed up the push - Fix error 400 on push #### Runtime * Store the actual archive on commit ## 0.3.1 (2013-05-08) #### Builder + Implement the autorun capability within docker builder + Add caching to docker builder + Add support for docker builder with native API as top level command + Implement ENV within docker builder - Check the command existence prior to create and add unit tests for the case * use any whitespace instead of tabs #### Runtime + Add go version to debug infos * Kernel version - don't show the dash if flavor is empty #### Registry + Add docker search top level command in order to search a repository - Fix pull for official images with specific tag - Fix issue when logging in with a different user and trying to push * Improve checksum - async calculation #### Images + Output graph of images to dot (graphviz) - Fix ByParent function #### Documentation + New introduction and high-level overview + Add the documentation for docker builder - CSS fix for docker documentation to make REST API docs look better. - Fix CouchDB example page header mistake - Fix README formatting * Update www.docker.io website. 
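The 0.4.0 Remote API note above ("control Docker programmatically using a simple HTTP/json API") is easy to demonstrate. A minimal sketch, assuming the daemon is listening on its default unix socket at `/var/run/docker.sock`; the hostname in the URL is only a placeholder, since the dialer below pins the connection to the socket:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
)

func main() {
	// Route all HTTP traffic over the daemon's unix socket.
	client := &http.Client{
		Transport: &http.Transport{
			Dial: func(network, addr string) (net.Conn, error) {
				return net.Dial("unix", "/var/run/docker.sock")
			},
		},
	}
	resp, err := client.Get("http://docker/version")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body)) // JSON version info from the daemon
}
```

The same pattern works for any other endpoint, for example `/containers/json`.
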
#### Other + Website: new high-level overview - Makefile: Swap "go get" for "go get -d", especially to compile on go1.1rc * Packaging: packaging ubuntu; issue #510: Use golang-stable PPA package to build docker ## 0.3.0 (2013-05-06) #### Runtime - Fix the command existence check - strings.Split may return an empty string on no match - Fix an index out of range crash if cgroup memory is not #### Documentation * Various improvements + New example: sharing data between 2 couchdb databases #### Other * Vagrant: Use only one deb line in /etc/apt + Registry: Implement the new registry ## 0.2.2 (2013-05-03) + Support for data volumes ('docker run -v=PATH') + Share data volumes between containers ('docker run -volumes-from') + Improve documentation * Upgrade to Go 1.0.3 * Various upgrades to the dev environment for contributors ## 0.2.1 (2013-05-01) + 'docker commit -run' bundles a layer with default runtime options: command, ports etc. * Improve install process on Vagrant + New Dockerfile operation: "maintainer" + New Dockerfile operation: "expose" + New Dockerfile operation: "cmd" + Contrib script to build a Debian base layer + 'docker -d -r': restart crashed containers at daemon startup * Runtime: improve test coverage ## 0.2.0 (2013-04-23) - Runtime: ghost containers can be killed and waited for * Documentation: update install instructions - Packaging: fix Vagrantfile - Development: automate releasing binaries and ubuntu packages + Add a changelog - Various bugfixes ## 0.1.8 (2013-04-22) - Dynamically detect cgroup capabilities - Issue stability warning on kernels <3.8 - 'docker push' buffers on disk instead of memory - Fix 'docker diff' for removed files - Fix 'docker stop' for ghost containers - Fix handling of pidfile - Various bugfixes and stability improvements ## 0.1.7 (2013-04-18) - Container ports are available on localhost - 'docker ps' shows allocated TCP ports - Contributors can run 'make hack' to start a continuous integration VM - Streamline ubuntu packaging & uploading - Various bugfixes and stability improvements ## 0.1.6 (2013-04-17) - Record the author of an image with 'docker commit -author' ## 0.1.5 (2013-04-17) - Disable standalone mode - Use a custom DNS resolver with 'docker -d -dns' - Detect ghost containers - Improve diagnosis of missing system capabilities - Allow disabling memory limits at compile time - Add debian packaging - Documentation: installing on Arch Linux - Documentation: running Redis on docker - Fix lxc 0.9 compatibility - Automatically load aufs module - Various bugfixes and stability improvements ## 0.1.4 (2013-04-09) - Full support for TTY emulation - Detach from a TTY session with the escape sequence `C-p C-q` - Various bugfixes and stability improvements - Minor UI improvements - Automatically create our own bridge interface 'docker0' ## 0.1.3 (2013-04-04) - Choose TCP frontend port with '-p :PORT' - Layer format is versioned - Major reliability improvements to the process manager - Various bugfixes and stability improvements ## 0.1.2 (2013-04-03) - Set container hostname with 'docker run -h' - Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]' - Various bugfixes and stability improvements - UI polish - Progress bar on push/pull - Use XZ compression by default - Make IP allocator lazy ## 0.1.1 (2013-03-31) - Display shorthand IDs for convenience - Stabilize process management - Layers can include a commit message - Simplified 'docker attach' - Fix support for re-attaching - Various bugfixes and stability improvements - Auto-download 
at run - Auto-login on push - Beefed up documentation ## 0.1.0 (2013-03-23) Initial public release - Implement registry in order to push/pull images - TCP port allocation - Fix termcaps on Linux - Add documentation - Add Vagrant support with Vagrantfile - Add unit tests - Add repository/tags to ease image management - Improve the layer implementation docker-1.6.2/integration-cli/0000755000175000017500000000000012524223634015424 5ustar tianontianondocker-1.6.2/integration-cli/docker_cli_links_test.go0000644000175000017500000002571612524223634022313 0ustar tianontianonpackage main import ( "fmt" "io/ioutil" "os" "os/exec" "reflect" "regexp" "strings" "testing" "time" "github.com/docker/docker/pkg/iptables" ) func TestLinksEtcHostsRegularFile(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "--net=host", "busybox", "ls", "-la", "/etc/hosts") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatal(out, err) } if !strings.HasPrefix(out, "-") { t.Errorf("/etc/hosts should be a regular file") } logDone("link - /etc/hosts is a regular file") } func TestLinksEtcHostsContentMatch(t *testing.T) { testRequires(t, SameHostDaemon) defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "--net=host", "busybox", "cat", "/etc/hosts") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatal(out, err) } hosts, err := ioutil.ReadFile("/etc/hosts") if os.IsNotExist(err) { t.Skip("/etc/hosts does not exist, skip this test") } if out != string(hosts) { t.Errorf("container /etc/hosts content does not match the host's /etc/hosts") } logDone("link - /etc/hosts matches hosts copy") } func TestLinksPingUnlinkedContainers(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "--rm", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") exitCode, err := runCommand(runCmd) if exitCode == 0 { t.Fatal("run ping did not fail") } else if exitCode != 1 { t.Fatalf("run ping failed with errors: %v", err) } logDone("links - ping unlinked container") } // Test for appropriate error when calling --link with an invalid target container func TestLinksInvalidContainerTarget(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "--link", "bogus:alias", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err == nil { t.Fatal("an invalid container target should produce an error") } if !strings.Contains(out, "Could not get container") { t.Fatalf("error output expected 'Could not get container', but got %q instead; err: %v", out, err) } logDone("links - linking to non-existent container should not work") } func TestLinksPingLinkedContainers(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "container1", "--hostname", "fred", "busybox", "top") if _, err := runCommand(runCmd); err != nil { t.Fatal(err) } runCmd = exec.Command(dockerBinary, "run", "-d", "--name", "container2", "--hostname", "wilma", "busybox", "top") if _, err := runCommand(runCmd); err != nil { t.Fatal(err) } runArgs := []string{"run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sh", "-c"} pingCmd := "ping -c 1 %s -W 1 && ping -c 1 %s -W 1" // test ping by alias, ping by name, and ping by hostname // 1. Ping by alias dockerCmd(t, append(runArgs, fmt.Sprintf(pingCmd, "alias1", "alias2"))...) // 2. Ping by container name dockerCmd(t, append(runArgs, fmt.Sprintf(pingCmd, "container1", "container2"))...) // 3. 
Ping by hostname dockerCmd(t, append(runArgs, fmt.Sprintf(pingCmd, "fred", "wilma"))...) logDone("links - ping linked container") } func TestLinksPingLinkedContainersAfterRename(t *testing.T) { defer deleteAllContainers() out, _, _ := dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") idA := stripTrailingCharacters(out) out, _, _ = dockerCmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") idB := stripTrailingCharacters(out) dockerCmd(t, "rename", "container1", "container_new") dockerCmd(t, "run", "--rm", "--link", "container_new:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") dockerCmd(t, "kill", idA) dockerCmd(t, "kill", idB) logDone("links - ping linked container after rename") } func TestLinksIpTablesRulesWhenLinkAndUnlink(t *testing.T) { testRequires(t, SameHostDaemon) defer deleteAllContainers() dockerCmd(t, "run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "sleep", "10") dockerCmd(t, "run", "-d", "--name", "parent", "--link", "child:http", "busybox", "sleep", "10") childIP := findContainerIP(t, "child") parentIP := findContainerIP(t, "parent") sourceRule := []string{"-i", "docker0", "-o", "docker0", "-p", "tcp", "-s", childIP, "--sport", "80", "-d", parentIP, "-j", "ACCEPT"} destinationRule := []string{"-i", "docker0", "-o", "docker0", "-p", "tcp", "-s", parentIP, "--dport", "80", "-d", childIP, "-j", "ACCEPT"} if !iptables.Exists("filter", "DOCKER", sourceRule...) || !iptables.Exists("filter", "DOCKER", destinationRule...) { t.Fatal("Iptables rules not found") } dockerCmd(t, "rm", "--link", "parent/http") if iptables.Exists("filter", "DOCKER", sourceRule...) || iptables.Exists("filter", "DOCKER", destinationRule...) { t.Fatal("Iptables rules should be removed when unlink") } dockerCmd(t, "kill", "child") dockerCmd(t, "kill", "parent") logDone("link - verify iptables when link and unlink") } func TestLinksInspectLinksStarted(t *testing.T) { var ( expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} result []string ) defer deleteAllContainers() dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") dockerCmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") dockerCmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sleep", "10") links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links") if err != nil { t.Fatal(err) } err = unmarshalJSON([]byte(links), &result) if err != nil { t.Fatal(err) } output := convertSliceOfStringsToMap(result) equal := reflect.DeepEqual(output, expected) if !equal { t.Fatalf("Links %s, expected %s", result, expected) } logDone("link - links in started container inspect") } func TestLinksInspectLinksStopped(t *testing.T) { var ( expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} result []string ) defer deleteAllContainers() dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") dockerCmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10") dockerCmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true") links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links") if err != nil { t.Fatal(err) } err = unmarshalJSON([]byte(links), &result) if err != nil { t.Fatal(err) } output := 
convertSliceOfStringsToMap(result) equal := reflect.DeepEqual(output, expected) if !equal { t.Fatalf("Links %s, but expected %s", result, expected) } logDone("link - links in stopped container inspect") } func TestLinksNotStartedParentNotFail(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "create", "--name=first", "busybox", "top") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatal(out, err) } runCmd = exec.Command(dockerBinary, "create", "--name=second", "--link=first:first", "busybox", "top") out, _, _, err = runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatal(out, err) } runCmd = exec.Command(dockerBinary, "start", "first") out, _, _, err = runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatal(out, err) } logDone("link - container start successfully updating stopped parent links") } func TestLinksHostsFilesInject(t *testing.T) { testRequires(t, SameHostDaemon, ExecSupport) defer deleteAllContainers() out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-itd", "--name", "one", "busybox", "top")) if err != nil { t.Fatal(err, out) } idOne := strings.TrimSpace(out) out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "-itd", "--name", "two", "--link", "one:onetwo", "busybox", "top")) if err != nil { t.Fatal(err, out) } idTwo := strings.TrimSpace(out) time.Sleep(1 * time.Second) contentOne, err := readContainerFileWithExec(idOne, "/etc/hosts") if err != nil { t.Fatal(err, string(contentOne)) } contentTwo, err := readContainerFileWithExec(idTwo, "/etc/hosts") if err != nil { t.Fatal(err, string(contentTwo)) } if !strings.Contains(string(contentTwo), "onetwo") { t.Fatal("Host is not present in updated hosts file", string(contentTwo)) } logDone("link - ensure containers hosts files are updated with the link alias.") } func TestLinksNetworkHostContainer(t *testing.T) { defer deleteAllContainers() out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--net", "host", "--name", "host_container", "busybox", "top")) if err != nil { t.Fatal(err, out) } out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", "should_fail", "--link", "host_container:tester", "busybox", "true")) if err == nil || !strings.Contains(out, "--net=host can't be used with links. 
This would result in undefined behavior.") { t.Fatalf("Running container linking to a container with --net host should have failed: %s", out) } logDone("link - error thrown when linking to container with --net host") } func TestLinksUpdateOnRestart(t *testing.T) { testRequires(t, SameHostDaemon, ExecSupport) defer deleteAllContainers() if out, err := exec.Command(dockerBinary, "run", "-d", "--name", "one", "busybox", "top").CombinedOutput(); err != nil { t.Fatal(err, string(out)) } out, err := exec.Command(dockerBinary, "run", "-d", "--name", "two", "--link", "one:onetwo", "--link", "one:one", "busybox", "top").CombinedOutput() if err != nil { t.Fatal(err, string(out)) } id := strings.TrimSpace(string(out)) realIP, err := inspectField("one", "NetworkSettings.IPAddress") if err != nil { t.Fatal(err) } content, err := readContainerFileWithExec(id, "/etc/hosts") if err != nil { t.Fatal(err, string(content)) } getIP := func(hosts []byte, hostname string) string { re := regexp.MustCompile(fmt.Sprintf(`(\S*)\t%s`, regexp.QuoteMeta(hostname))) matches := re.FindSubmatch(hosts) if matches == nil { t.Fatalf("Hostname %s have no matches in hosts", hostname) } return string(matches[1]) } if ip := getIP(content, "one"); ip != realIP { t.Fatalf("For 'one' alias expected IP: %s, got: %s", realIP, ip) } if ip := getIP(content, "onetwo"); ip != realIP { t.Fatalf("For 'onetwo' alias expected IP: %s, got: %s", realIP, ip) } if out, err := exec.Command(dockerBinary, "restart", "one").CombinedOutput(); err != nil { t.Fatal(err, string(out)) } realIP, err = inspectField("one", "NetworkSettings.IPAddress") if err != nil { t.Fatal(err) } content, err = readContainerFileWithExec(id, "/etc/hosts") if err != nil { t.Fatal(err, string(content)) } if ip := getIP(content, "one"); ip != realIP { t.Fatalf("For 'one' alias expected IP: %s, got: %s", realIP, ip) } if ip := getIP(content, "onetwo"); ip != realIP { t.Fatalf("For 'onetwo' alias expected IP: %s, got: %s", realIP, ip) } logDone("link - ensure containers hosts files are updated on restart") } docker-1.6.2/integration-cli/docker_cli_version_test.go0000644000175000017500000000151312524223634022655 0ustar tianontianonpackage main import ( "os/exec" "strings" "testing" ) // ensure docker version works func TestVersionEnsureSucceeds(t *testing.T) { versionCmd := exec.Command(dockerBinary, "version") out, _, err := runCommandWithOutput(versionCmd) if err != nil { t.Fatalf("failed to execute docker version: %s, %v", out, err) } stringsToCheck := []string{ "Client version:", "Client API version:", "Go version (client):", "Git commit (client):", "OS/Arch (client):", "Server version:", "Server API version:", "Go version (server):", "Git commit (server):", "OS/Arch (server):", } for _, linePrefix := range stringsToCheck { if !strings.Contains(out, linePrefix) { t.Errorf("couldn't find string %v in output", linePrefix) } } logDone("version - verify that it works and that the output is properly formatted") } docker-1.6.2/integration-cli/docker_cli_attach_test.go0000644000175000017500000000526212524223634022441 0ustar tianontianonpackage main import ( "io" "os/exec" "strings" "sync" "testing" "time" ) const attachWait = 5 * time.Second func TestAttachMultipleAndRestart(t *testing.T) { defer deleteAllContainers() endGroup := &sync.WaitGroup{} startGroup := &sync.WaitGroup{} endGroup.Add(3) startGroup.Add(3) if err := waitForContainer("attacher", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 1; echo hello; done"); err != nil { t.Fatal(err) } startDone := make(chan 
struct{}) endDone := make(chan struct{}) go func() { endGroup.Wait() close(endDone) }() go func() { startGroup.Wait() close(startDone) }() for i := 0; i < 3; i++ { go func() { c := exec.Command(dockerBinary, "attach", "attacher") defer func() { c.Wait() endGroup.Done() }() out, err := c.StdoutPipe() if err != nil { t.Fatal(err) } if err := c.Start(); err != nil { t.Fatal(err) } buf := make([]byte, 1024) if _, err := out.Read(buf); err != nil && err != io.EOF { t.Fatal(err) } startGroup.Done() if !strings.Contains(string(buf), "hello") { t.Fatalf("unexpected output %s expected hello\n", string(buf)) } }() } select { case <-startDone: case <-time.After(attachWait): t.Fatalf("Attaches did not initialize properly") } cmd := exec.Command(dockerBinary, "kill", "attacher") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } select { case <-endDone: case <-time.After(attachWait): t.Fatalf("Attaches did not finish properly") } logDone("attach - multiple attach") } func TestAttachTtyWithoutStdin(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-d", "-ti", "busybox") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatalf("failed to start container: %v (%v)", out, err) } id := strings.TrimSpace(out) if err := waitRun(id); err != nil { t.Fatal(err) } defer func() { cmd := exec.Command(dockerBinary, "kill", id) if out, _, err := runCommandWithOutput(cmd); err != nil { t.Fatalf("failed to kill container: %v (%v)", out, err) } }() done := make(chan struct{}) go func() { defer close(done) cmd := exec.Command(dockerBinary, "attach", id) if _, err := cmd.StdinPipe(); err != nil { t.Fatal(err) } expected := "cannot enable tty mode" if out, _, err := runCommandWithOutput(cmd); err == nil { t.Fatal("attach should have failed") } else if !strings.Contains(out, expected) { t.Fatalf("attach failed with error %q: expected %q", out, expected) } }() select { case <-done: case <-time.After(attachWait): t.Fatal("attach is running but should have failed") } logDone("attach - forbid piped stdin to tty enabled container") } docker-1.6.2/integration-cli/docker_cli_save_load_unix_test.go0000644000175000017500000000536112524223634024175 0ustar tianontianon// +build !windows package main import ( "bytes" "fmt" "os" "os/exec" "testing" "github.com/docker/docker/vendor/src/github.com/kr/pty" ) // save a repo and try to load it using stdout func TestSaveAndLoadRepoStdout(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf("failed to create a container: %s, %v", out, err) } cleanedContainerID := stripTrailingCharacters(out) repoName := "foobar-save-load-test" inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) if out, _, err = runCommandWithOutput(inspectCmd); err != nil { t.Fatalf("output should've been a container id: %s, %v", out, err) } commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName) if out, _, err = runCommandWithOutput(commitCmd); err != nil { t.Fatalf("failed to commit container: %s, %v", out, err) } inspectCmd = exec.Command(dockerBinary, "inspect", repoName) before, _, err := runCommandWithOutput(inspectCmd) if err != nil { t.Fatalf("the repo should exist before saving it: %s, %v", before, err) } saveCmdTemplate := `%v save %v > /tmp/foobar-save-load-test.tar` saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName) saveCmd := exec.Command("bash", "-c", saveCmdFinal) if out, _, err = 
runCommandWithOutput(saveCmd); err != nil { t.Fatalf("failed to save repo: %s, %v", out, err) } deleteImages(repoName) loadCmdFinal := `cat /tmp/foobar-save-load-test.tar | docker load` loadCmd := exec.Command("bash", "-c", loadCmdFinal) if out, _, err = runCommandWithOutput(loadCmd); err != nil { t.Fatalf("failed to load repo: %s, %v", out, err) } inspectCmd = exec.Command(dockerBinary, "inspect", repoName) after, _, err := runCommandWithOutput(inspectCmd) if err != nil { t.Fatalf("the repo should exist after loading it: %s %v", after, err) } if before != after { t.Fatalf("inspect is not the same after a save / load") } deleteContainer(cleanedContainerID) deleteImages(repoName) os.Remove("/tmp/foobar-save-load-test.tar") logDone("save - save/load a repo using stdout") pty, tty, err := pty.Open() if err != nil { t.Fatalf("Could not open pty: %v", err) } cmd := exec.Command(dockerBinary, "save", repoName) cmd.Stdin = tty cmd.Stdout = tty cmd.Stderr = tty if err := cmd.Start(); err != nil { t.Fatalf("start err: %v", err) } if err := cmd.Wait(); err == nil { t.Fatal("did not break writing to a TTY") } buf := make([]byte, 1024) n, err := pty.Read(buf) if err != nil { t.Fatal("could not read tty output") } if !bytes.Contains(buf[:n], []byte("Cowardly refusing")) { t.Fatal("help output is not being yielded", string(buf[:n])) } logDone("save - do not save to a tty") } docker-1.6.2/integration-cli/docker_test_vars_daemon.go0000644000175000017500000000017412524223634022641 0ustar tianontianon// +build daemon package main const ( // tests can assume daemon runs on the same machine as CLI isLocalDaemon = true ) docker-1.6.2/integration-cli/docker_cli_pause_test.go0000644000175000017500000000665412524223634022310 0ustar tianontianonpackage main import ( "fmt" "os/exec" "strings" "testing" ) func TestPause(t *testing.T) { defer deleteAllContainers() defer unpauseAllContainers() name := "testeventpause" out, _, _ := dockerCmd(t, "images", "-q") image := strings.Split(out, "\n")[0] dockerCmd(t, "run", "-d", "--name", name, image, "sleep", "2") dockerCmd(t, "pause", name) pausedContainers, err := getSliceOfPausedContainers() if err != nil { t.Fatalf("error thrown while checking if containers were paused: %v", err) } if len(pausedContainers) != 1 { t.Fatalf("there should be one paused container and not %d", len(pausedContainers)) } dockerCmd(t, "unpause", name) eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(t).Unix())) out, _, _ = runCommandWithOutput(eventsCmd) events := strings.Split(out, "\n") if len(events) <= 1 { t.Fatalf("Missing expected event") } pauseEvent := strings.Fields(events[len(events)-3]) unpauseEvent := strings.Fields(events[len(events)-2]) if pauseEvent[len(pauseEvent)-1] != "pause" { t.Fatalf("event should be pause, not %#v", pauseEvent) } if unpauseEvent[len(unpauseEvent)-1] != "unpause" { t.Fatalf("event should be unpause, not %#v", unpauseEvent) } waitCmd := exec.Command(dockerBinary, "wait", name) if waitOut, _, err := runCommandWithOutput(waitCmd); err != nil { t.Fatalf("error thrown while waiting for container: %s, %v", waitOut, err) } logDone("pause - pause/unpause is logged") } func TestPauseMultipleContainers(t *testing.T) { defer deleteAllContainers() defer unpauseAllContainers() containers := []string{ "testpausewithmorecontainers1", "testpausewithmorecontainers2", } out, _, _ := dockerCmd(t, "images", "-q") image := strings.Split(out, "\n")[0] for _, name := range containers { dockerCmd(t, "run", "-d", "--name", name, image, "sleep", 
"2") } dockerCmd(t, append([]string{"pause"}, containers...)...) pausedContainers, err := getSliceOfPausedContainers() if err != nil { t.Fatalf("error thrown while checking if containers were paused: %v", err) } if len(pausedContainers) != len(containers) { t.Fatalf("there should be %d paused container and not %d", len(containers), len(pausedContainers)) } dockerCmd(t, append([]string{"unpause"}, containers...)...) eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(t).Unix())) out, _, _ = runCommandWithOutput(eventsCmd) events := strings.Split(out, "\n") if len(events) <= len(containers)*3-2 { t.Fatalf("Missing expected event") } pauseEvents := make([][]string, len(containers)) unpauseEvents := make([][]string, len(containers)) for i := range containers { pauseEvents[i] = strings.Fields(events[len(events)-len(containers)*2-1+i]) unpauseEvents[i] = strings.Fields(events[len(events)-len(containers)-1+i]) } for _, pauseEvent := range pauseEvents { if pauseEvent[len(pauseEvent)-1] != "pause" { t.Fatalf("event should be pause, not %#v", pauseEvent) } } for _, unpauseEvent := range unpauseEvents { if unpauseEvent[len(unpauseEvent)-1] != "unpause" { t.Fatalf("event should be unpause, not %#v", unpauseEvent) } } for _, name := range containers { waitCmd := exec.Command(dockerBinary, "wait", name) if waitOut, _, err := runCommandWithOutput(waitCmd); err != nil { t.Fatalf("error thrown while waiting for container: %s, %v", waitOut, err) } } logDone("pause - multi pause/unpause is logged") } docker-1.6.2/integration-cli/docker_test_vars.go0000644000175000017500000000217012524223634021314 0ustar tianontianonpackage main import ( "fmt" "os" "os/exec" ) var ( // the docker binary to use dockerBinary = "docker" // the private registry image to use for tests involving the registry registryImageName = "registry" // the private registry to use for tests privateRegistryURL = "127.0.0.1:5000" dockerBasePath = "/var/lib/docker" volumesConfigPath = dockerBasePath + "/volumes" volumesStoragePath = dockerBasePath + "/vfs/dir" containerStoragePath = dockerBasePath + "/containers" runtimePath = "/var/run/docker" execDriverPath = runtimePath + "/execdriver/native" workingDirectory string ) func init() { if dockerBin := os.Getenv("DOCKER_BINARY"); dockerBin != "" { dockerBinary = dockerBin } var err error dockerBinary, err = exec.LookPath(dockerBinary) if err != nil { fmt.Printf("ERROR: couldn't resolve full path to the Docker binary (%v)", err) os.Exit(1) } if registryImage := os.Getenv("REGISTRY_IMAGE"); registryImage != "" { registryImageName = registryImage } if registry := os.Getenv("REGISTRY_URL"); registry != "" { privateRegistryURL = registry } workingDirectory, _ = os.Getwd() } docker-1.6.2/integration-cli/registry.go0000644000175000017500000000250712524223634017627 0ustar tianontianonpackage main import ( "fmt" "io/ioutil" "net/http" "os" "os/exec" "path/filepath" "testing" ) const v2binary = "registry-v2" type testRegistryV2 struct { cmd *exec.Cmd dir string } func newTestRegistryV2(t *testing.T) (*testRegistryV2, error) { template := `version: 0.1 loglevel: debug storage: filesystem: rootdirectory: %s http: addr: %s` tmp, err := ioutil.TempDir("", "registry-test-") if err != nil { return nil, err } confPath := filepath.Join(tmp, "config.yaml") config, err := os.Create(confPath) if err != nil { return nil, err } if _, err := fmt.Fprintf(config, template, tmp, privateRegistryURL); err != nil { os.RemoveAll(tmp) return nil, err } cmd := exec.Command(v2binary, 
confPath) if err := cmd.Start(); err != nil { os.RemoveAll(tmp) if os.IsNotExist(err) { t.Skip() } return nil, err } return &testRegistryV2{ cmd: cmd, dir: tmp, }, nil } func (t *testRegistryV2) Ping() error { // We always ping through HTTP for our test registry. resp, err := http.Get(fmt.Sprintf("http://%s/v2/", privateRegistryURL)) if err != nil { return err } if resp.StatusCode != 200 { return fmt.Errorf("registry ping replied with an unexpected status code %d", resp.StatusCode) } return nil } func (r *testRegistryV2) Close() { r.cmd.Process.Kill() os.RemoveAll(r.dir) } docker-1.6.2/integration-cli/docker_api_attach_test.go0000644000175000017500000000241512524223634022440 0ustar tianontianonpackage main import ( "bytes" "os/exec" "testing" "time" "code.google.com/p/go.net/websocket" ) func TestGetContainersAttachWebsocket(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-dit", "busybox", "cat") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf(out, err) } defer deleteAllContainers() rwc, err := sockConn(time.Duration(10 * time.Second)) if err != nil { t.Fatal(err) } cleanedContainerID := stripTrailingCharacters(out) config, err := websocket.NewConfig( "/containers/"+cleanedContainerID+"/attach/ws?stream=1&stdin=1&stdout=1&stderr=1", "http://localhost", ) if err != nil { t.Fatal(err) } ws, err := websocket.NewClient(config, rwc) if err != nil { t.Fatal(err) } defer ws.Close() expected := []byte("hello") actual := make([]byte, len(expected)) outChan := make(chan string) go func() { if _, err := ws.Read(actual); err != nil { t.Fatal(err) } outChan <- "done" }() inChan := make(chan string) go func() { if _, err := ws.Write(expected); err != nil { t.Fatal(err) } inChan <- "done" }() <-inChan <-outChan if !bytes.Equal(expected, actual) { t.Fatal("Expected output on websocket to match input") } logDone("container attach websocket - can echo input via cat") } docker-1.6.2/integration-cli/docker_cli_run_test.go0000644000175000017500000031760612524223634022011 0ustar tianontianonpackage main import ( "bufio" "bytes" "fmt" "io/ioutil" "net" "os" "os/exec" "path" "path/filepath" "reflect" "regexp" "sort" "strconv" "strings" "sync" "testing" "time" "github.com/docker/docker/nat" "github.com/docker/docker/pkg/networkfs/resolvconf" ) // "test123" should be printed by docker run func TestRunEchoStdout(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "busybox", "echo", "test123") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) } if out != "test123\n" { t.Errorf("container should've printed 'test123'") } logDone("run - echo test123") } // "test" should be printed func TestRunEchoStdoutWithMemoryLimit(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "-m", "16m", "busybox", "echo", "test") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) } out = strings.Trim(out, "\r\n") if expected := "test"; out != expected { t.Errorf("container should've printed %q but printed %q", expected, out) } logDone("run - echo with memory limit") } // should run without memory swap func TestRunWithoutMemoryswapLimit(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "-m", "16m", "--memory-swap", "-1", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf("failed to run container, output: 
%q", out) } logDone("run - without memory swap limit") } // "test" should be printed func TestRunEchoStdoutWitCPULimit(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "-c", "1000", "busybox", "echo", "test") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) } if out != "test\n" { t.Errorf("container should've printed 'test'") } logDone("run - echo with CPU limit") } // "test" should be printed func TestRunEchoStdoutWithCPUAndMemoryLimit(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "-c", "1000", "-m", "16m", "busybox", "echo", "test") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) } if out != "test\n" { t.Errorf("container should've printed 'test', got %q instead", out) } logDone("run - echo with CPU and memory limit") } // "test" should be printed func TestRunEchoNamedContainer(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "--name", "testfoonamedcontainer", "busybox", "echo", "test") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) } if out != "test\n" { t.Errorf("container should've printed 'test'") } if err := deleteContainer("testfoonamedcontainer"); err != nil { t.Errorf("failed to remove the named container: %v", err) } logDone("run - echo with named container") } // docker run should not leak file descriptors func TestRunLeakyFileDescriptors(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "busybox", "ls", "-C", "/proc/self/fd") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) } // normally, we should only get 0, 1, and 2, but 3 gets created by "ls" when it does "opendir" on the "fd" directory if out != "0 1 2 3\n" { t.Errorf("container should've printed '0 1 2 3', not: %s", out) } logDone("run - check file descriptor leakage") } // it should be possible to lookup Google DNS // this will fail when Internet access is unavailable func TestRunLookupGoogleDns(t *testing.T) { defer deleteAllContainers() out, _, _, err := runCommandWithStdoutStderr(exec.Command(dockerBinary, "run", "busybox", "nslookup", "google.com")) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) } logDone("run - nslookup google.com") } // the exit code should be 0 // some versions of lxc might make this test fail func TestRunExitCodeZero(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "busybox", "true") if out, _, err := runCommandWithOutput(runCmd); err != nil { t.Errorf("container should've exited with exit code 0: %s, %v", out, err) } logDone("run - exit with 0") } // the exit code should be 1 // some versions of lxc might make this test fail func TestRunExitCodeOne(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "busybox", "false") exitCode, err := runCommand(runCmd) if err != nil && !strings.Contains("exit status 1", fmt.Sprintf("%s", err)) { t.Fatal(err) } if exitCode != 1 { t.Errorf("container should've exited with exit code 1") } logDone("run - exit with 1") } // it should be possible to pipe in data via stdin to a process running in a container // some versions of lxc might make this test fail func TestRunStdinPipe(t 
*testing.T) { defer deleteAllContainers() runCmd := exec.Command("bash", "-c", `echo "blahblah" | docker run -i -a stdin busybox cat`) out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) } out = stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", out) if out, _, err := runCommandWithOutput(inspectCmd); err != nil { t.Fatalf("out should've been a container id: %s %v", out, err) } waitCmd := exec.Command(dockerBinary, "wait", out) if waitOut, _, err := runCommandWithOutput(waitCmd); err != nil { t.Fatalf("error thrown while waiting for container: %s, %v", waitOut, err) } logsCmd := exec.Command(dockerBinary, "logs", out) logsOut, _, err := runCommandWithOutput(logsCmd) if err != nil { t.Fatalf("error thrown while trying to get container logs: %s, %v", logsOut, err) } containerLogs := stripTrailingCharacters(logsOut) if containerLogs != "blahblah" { t.Errorf("logs didn't print the container's logs %s", containerLogs) } rmCmd := exec.Command(dockerBinary, "rm", out) if out, _, err = runCommandWithOutput(rmCmd); err != nil { t.Fatalf("rm failed to remove container: %s, %v", out, err) } logDone("run - pipe in with -i -a stdin") } // the container's ID should be printed when starting a container in detached mode func TestRunDetachedContainerIDPrinting(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) } out = stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", out) if inspectOut, _, err := runCommandWithOutput(inspectCmd); err != nil { t.Fatalf("out should've been a container id: %s %v", inspectOut, err) } waitCmd := exec.Command(dockerBinary, "wait", out) if waitOut, _, err := runCommandWithOutput(waitCmd); err != nil { t.Fatalf("error thrown while waiting for container: %s, %v", waitOut, err) } rmCmd := exec.Command(dockerBinary, "rm", out) rmOut, _, err := runCommandWithOutput(rmCmd) if err != nil { t.Fatalf("rm failed to remove container: %s, %v", rmOut, err) } rmOut = stripTrailingCharacters(rmOut) if rmOut != out { t.Errorf("rm didn't print the container ID %s %s", out, rmOut) } logDone("run - print container ID in detached mode") } // the working directory should be set correctly func TestRunWorkingDirectory(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "-w", "/root", "busybox", "pwd") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) } out = stripTrailingCharacters(out) if out != "/root" { t.Errorf("-w failed to set working directory") } runCmd = exec.Command(dockerBinary, "run", "--workdir", "/root", "busybox", "pwd") out, _, _, err = runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatal(out, err) } out = stripTrailingCharacters(out) if out != "/root" { t.Errorf("--workdir failed to set working directory") } logDone("run - run with working directory set by -w") logDone("run - run with working directory set by --workdir") } // pinging Google's DNS resolver should fail when we disable the networking func TestRunWithoutNetworking(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "--net=none", "busybox", "ping", "-c", "1", "8.8.8.8") out, _, exitCode, err := runCommandWithStdoutStderr(runCmd) if err != 
nil && exitCode != 1 { t.Fatal(out, err) } if exitCode != 1 { t.Errorf("--net=none should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") } runCmd = exec.Command(dockerBinary, "run", "-n=false", "busybox", "ping", "-c", "1", "8.8.8.8") out, _, exitCode, err = runCommandWithStdoutStderr(runCmd) if err != nil && exitCode != 1 { t.Fatal(out, err) } if exitCode != 1 { t.Errorf("-n=false should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") } logDone("run - disable networking with --net=none") logDone("run - disable networking with -n=false") } // test that --link can use a container name as the link target func TestRunLinksContainerWithContainerName(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-i", "-t", "-d", "--name", "parent", "busybox") out, _, _, err := runCommandWithStdoutStderr(cmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) } cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.NetworkSettings.IPAddress}}", "parent") ip, _, _, err := runCommandWithStdoutStderr(cmd) if err != nil { t.Fatalf("failed to inspect container: %v, output: %q", err, ip) } ip = strings.TrimSpace(ip) cmd = exec.Command(dockerBinary, "run", "--link", "parent:test", "busybox", "/bin/cat", "/etc/hosts") out, _, err = runCommandWithOutput(cmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) } if !strings.Contains(out, ip+" test") { t.Fatalf("using a container name as the link target failed") } logDone("run - use a container name to link target works") } // test that --link can use a container ID as the link target func TestRunLinksContainerWithContainerId(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-i", "-t", "-d", "busybox") cID, _, _, err := runCommandWithStdoutStderr(cmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, cID) } cID = strings.TrimSpace(cID) cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.NetworkSettings.IPAddress}}", cID) ip, _, _, err := runCommandWithStdoutStderr(cmd) if err != nil { t.Fatalf("failed to inspect container: %v, output: %q", err, ip) } ip = strings.TrimSpace(ip) cmd = exec.Command(dockerBinary, "run", "--link", cID+":test", "busybox", "/bin/cat", "/etc/hosts") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) } if !strings.Contains(out, ip+" test") { t.Fatalf("using a container id as the link target failed") } logDone("run - use a container id to link target works") } func TestRunLinkToContainerNetMode(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--name", "test", "-d", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) } cmd = exec.Command(dockerBinary, "run", "--name", "parent", "-d", "--net=container:test", "busybox", "top") out, _, err = runCommandWithOutput(cmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) } cmd = exec.Command(dockerBinary, "run", "-d", "--link=parent:parent", "busybox", "top") out, _, err = runCommandWithOutput(cmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) } cmd = exec.Command(dockerBinary, "run", "--name", "child", "-d", "--net=container:parent", "busybox", "top") out, _, err = runCommandWithOutput(cmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) } cmd = 
exec.Command(dockerBinary, "run", "-d", "--link=child:child", "busybox", "top") out, _, err = runCommandWithOutput(cmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) } logDone("run - link to a container which net mode is container success") } func TestRunModeNetContainerHostname(t *testing.T) { testRequires(t, ExecSupport) defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-i", "-d", "--name", "parent", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) } cmd = exec.Command(dockerBinary, "exec", "parent", "cat", "/etc/hostname") out, _, err = runCommandWithOutput(cmd) if err != nil { t.Fatalf("failed to exec command: %v, output: %q", err, out) } cmd = exec.Command(dockerBinary, "run", "--net=container:parent", "busybox", "cat", "/etc/hostname") out1, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out1) } if out1 != out { t.Fatal("containers with shared net namespace should have same hostname") } logDone("run - containers with shared net namespace have same hostname") } // Regression test for #4741 func TestRunWithVolumesAsFiles(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", "/etc/hosts:/target-file", "busybox", "true") out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd) if err != nil && exitCode != 0 { t.Fatal("1", out, stderr, err) } runCmd = exec.Command(dockerBinary, "run", "--volumes-from", "test-data", "busybox", "cat", "/target-file") out, stderr, exitCode, err = runCommandWithStdoutStderr(runCmd) if err != nil && exitCode != 0 { t.Fatal("2", out, stderr, err) } logDone("run - regression test for #4741 - volumes from as files") } // Regression test for #4979 func TestRunWithVolumesFromExited(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", "/some/dir", "busybox", "touch", "/some/dir/file") out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd) if err != nil && exitCode != 0 { t.Fatal("1", out, stderr, err) } runCmd = exec.Command(dockerBinary, "run", "--volumes-from", "test-data", "busybox", "cat", "/some/dir/file") out, stderr, exitCode, err = runCommandWithStdoutStderr(runCmd) if err != nil && exitCode != 0 { t.Fatal("2", out, stderr, err) } logDone("run - regression test for #4979 - volumes-from on exited container") } // Volume path is a symlink which also exists on the host, and the host side is a file not a dir // But the volume call is just a normal volume, not a bind mount func TestRunCreateVolumesInSymlinkDir(t *testing.T) { testRequires(t, SameHostDaemon) testRequires(t, NativeExecDriver) defer deleteAllContainers() name := "test-volume-symlink" dir, err := ioutil.TempDir("", name) if err != nil { t.Fatal(err) } defer os.RemoveAll(dir) f, err := os.OpenFile(filepath.Join(dir, "test"), os.O_CREATE, 0700) if err != nil { t.Fatal(err) } f.Close() dockerFile := fmt.Sprintf("FROM busybox\nRUN mkdir -p %s\nRUN ln -s %s /test", dir, dir) if _, err := buildImage(name, dockerFile, false); err != nil { t.Fatal(err) } defer deleteImages(name) if out, _, err := dockerCmd(t, "run", "-v", "/test/test", name); err != nil { t.Fatal(err, out) } logDone("run - create volume in symlink directory") } // Regression test for #4830 func TestRunWithRelativePath(t *testing.T) { defer deleteAllContainers() runCmd := 
exec.Command(dockerBinary, "run", "-v", "tmp:/other-tmp", "busybox", "true") if _, _, _, err := runCommandWithStdoutStderr(runCmd); err == nil { t.Fatalf("relative path should result in an error") } logDone("run - volume with relative path") } func TestRunVolumesMountedAsReadonly(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-v", "/test:/test:ro", "busybox", "touch", "/test/somefile") if code, err := runCommand(cmd); err == nil || code == 0 { t.Fatalf("run should fail because volume is ro: exit code %d", code) } logDone("run - volumes as readonly mount") } func TestRunVolumesFromInReadonlyMode(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test", "busybox", "true") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent:ro", "busybox", "touch", "/test/file") if code, err := runCommand(cmd); err == nil || code == 0 { t.Fatalf("run should fail because volume is ro: exit code %d", code) } logDone("run - volumes from as readonly mount") } // Regression test for #1201 func TestRunVolumesFromInReadWriteMode(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test", "busybox", "true") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent:rw", "busybox", "touch", "/test/file") if out, _, err := runCommandWithOutput(cmd); err != nil { t.Fatalf("running --volumes-from parent:rw failed with output: %q\nerror: %v", out, err) } cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent:bar", "busybox", "touch", "/test/file") if out, _, err := runCommandWithOutput(cmd); err == nil || !strings.Contains(out, "invalid mode for volumes-from: bar") { t.Fatalf("running --volumes-from foo:bar should have failed with invalid mount mode: %q", out) } cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent", "busybox", "touch", "/test/file") if out, _, err := runCommandWithOutput(cmd); err != nil { t.Fatalf("running --volumes-from parent failed with output: %q\nerror: %v", out, err) } logDone("run - volumes from as read write mount") } func TestVolumesFromGetsProperMode(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test:/test:ro", "busybox", "true") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } // Expect this "rw" mode to be ignored since the inherited volume is "ro" cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent:rw", "busybox", "touch", "/test/file") if _, err := runCommand(cmd); err == nil { t.Fatal("Expected volumes-from to inherit read-only volume even when passing in `rw`") } cmd = exec.Command(dockerBinary, "run", "--name", "parent2", "-v", "/test:/test:ro", "busybox", "true") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } // Expect this to be read-only since both are "ro" cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent2:ro", "busybox", "touch", "/test/file") if _, err := runCommand(cmd); err == nil { t.Fatal("Expected volumes-from to inherit read-only volume even when passing in `ro`") } logDone("run - volumes from ignores `rw` if inherited volume is `ro`") } // Test for GH#10618 func TestRunNoDupVolumes(t *testing.T) { defer deleteAllContainers() mountstr1 := randomUnixTmpDirPath("test1") + ":/someplace" mountstr2 := randomUnixTmpDirPath("test2") + 
":/someplace" cmd := exec.Command(dockerBinary, "run", "-v", mountstr1, "-v", mountstr2, "busybox", "true") if out, _, err := runCommandWithOutput(cmd); err == nil { t.Fatal("Expected error about duplicate volume definitions") } else { if !strings.Contains(out, "Duplicate volume") { t.Fatalf("Expected 'duplicate volume' error, got %v", err) } } logDone("run - don't allow multiple (bind) volumes on the same container target") } // Test for #1351 func TestRunApplyVolumesFromBeforeVolumes(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test", "busybox", "touch", "/test/foo") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent", "-v", "/test", "busybox", "cat", "/test/foo") if out, _, err := runCommandWithOutput(cmd); err != nil { t.Fatal(out, err) } logDone("run - volumes from mounted first") } func TestRunMultipleVolumesFrom(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--name", "parent1", "-v", "/test", "busybox", "touch", "/test/foo") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } cmd = exec.Command(dockerBinary, "run", "--name", "parent2", "-v", "/other", "busybox", "touch", "/other/bar") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent1", "--volumes-from", "parent2", "busybox", "sh", "-c", "cat /test/foo && cat /other/bar") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } logDone("run - multiple volumes from") } // this tests verifies the ID format for the container func TestRunVerifyContainerID(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, exit, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err) } if exit != 0 { t.Fatalf("expected exit code 0 received %d", exit) } match, err := regexp.MatchString("^[0-9a-f]{64}$", strings.TrimSuffix(out, "\n")) if err != nil { t.Fatal(err) } if !match { t.Fatalf("Invalid container ID: %s", out) } logDone("run - verify container ID") } // Test that creating a container with a volume doesn't crash. Regression test for #995. func TestRunCreateVolume(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-v", "/var/lib/data", "busybox", "true") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } logDone("run - create docker managed volume") } // Test that creating a volume with a symlink in its path works correctly. Test for #5152. // Note that this bug happens only with symlinks with a target that starts with '/'. 
func TestRunCreateVolumeWithSymlink(t *testing.T) { defer deleteAllContainers() image := "docker-test-createvolumewithsymlink" defer deleteImages(image) buildCmd := exec.Command(dockerBinary, "build", "-t", image, "-") buildCmd.Stdin = strings.NewReader(`FROM busybox RUN ln -s home /bar`) buildCmd.Dir = workingDirectory err := buildCmd.Run() if err != nil { t.Fatalf("could not build '%s': %v", image, err) } cmd := exec.Command(dockerBinary, "run", "-v", "/bar/foo", "--name", "test-createvolumewithsymlink", image, "sh", "-c", "mount | grep -q /home/foo") exitCode, err := runCommand(cmd) if err != nil || exitCode != 0 { t.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) } var volPath string cmd = exec.Command(dockerBinary, "inspect", "-f", "{{range .Volumes}}{{.}}{{end}}", "test-createvolumewithsymlink") volPath, exitCode, err = runCommandWithOutput(cmd) if err != nil || exitCode != 0 { t.Fatalf("[inspect] err: %v, exitcode: %d", err, exitCode) } cmd = exec.Command(dockerBinary, "rm", "-v", "test-createvolumewithsymlink") exitCode, err = runCommand(cmd) if err != nil || exitCode != 0 { t.Fatalf("[rm] err: %v, exitcode: %d", err, exitCode) } f, err := os.Open(volPath) if err == nil { f.Close() } if !os.IsNotExist(err) { t.Fatalf("[open] (expecting 'file does not exist' error) err: %v, volPath: %s", err, volPath) } logDone("run - create volume with symlink") } // Tests that a volume path that has a symlink exists in a container mounting it with `--volumes-from`. func TestRunVolumesFromSymlinkPath(t *testing.T) { defer deleteAllContainers() name := "docker-test-volumesfromsymlinkpath" defer deleteImages(name) buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-") buildCmd.Stdin = strings.NewReader(`FROM busybox RUN ln -s home /foo VOLUME ["/foo/bar"]`) buildCmd.Dir = workingDirectory err := buildCmd.Run() if err != nil { t.Fatalf("could not build 'docker-test-volumesfromsymlinkpath': %v", err) } cmd := exec.Command(dockerBinary, "run", "--name", "test-volumesfromsymlinkpath", name) exitCode, err := runCommand(cmd) if err != nil || exitCode != 0 { t.Fatalf("[run] (volume) err: %v, exitcode: %d", err, exitCode) } cmd = exec.Command(dockerBinary, "run", "--volumes-from", "test-volumesfromsymlinkpath", "busybox", "sh", "-c", "ls /foo | grep -q bar") exitCode, err = runCommand(cmd) if err != nil || exitCode != 0 { t.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) } logDone("run - volumes-from symlink path") } func TestRunExitCode(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "busybox", "/bin/sh", "-c", "exit 72") exit, err := runCommand(cmd) if err == nil { t.Fatal("expected a non-nil error") } if exit != 72 { t.Fatalf("expected exit code 72 received %d", exit) } logDone("run - correct exit code") } func TestRunUserDefaultsToRoot(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "busybox", "id") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } if !strings.Contains(out, "uid=0(root) gid=0(root)") { t.Fatalf("expected root user got %s", out) } logDone("run - default user") } func TestRunUserByName(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-u", "root", "busybox", "id") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } if !strings.Contains(out, "uid=0(root) gid=0(root)") { t.Fatalf("expected root user got %s", out) } logDone("run - user by name") } func TestRunUserByID(t *testing.T) { defer deleteAllContainers() cmd := 
exec.Command(dockerBinary, "run", "-u", "1", "busybox", "id") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } if !strings.Contains(out, "uid=1(daemon) gid=1(daemon)") { t.Fatalf("expected daemon user got %s", out) } logDone("run - user by id") } func TestRunUserByIDBig(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-u", "2147483648", "busybox", "id") out, _, err := runCommandWithOutput(cmd) if err == nil { t.Fatal("No error, but must be.", out) } if !strings.Contains(out, "Uids and gids must be in range") { t.Fatalf("expected error about uids range, got %s", out) } logDone("run - user by id, id too big") } func TestRunUserByIDNegative(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-u", "-1", "busybox", "id") out, _, err := runCommandWithOutput(cmd) if err == nil { t.Fatal("No error, but must be.", out) } if !strings.Contains(out, "Uids and gids must be in range") { t.Fatalf("expected error about uids range, got %s", out) } logDone("run - user by id, id negative") } func TestRunUserByIDZero(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-u", "0", "busybox", "id") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } if !strings.Contains(out, "uid=0(root) gid=0(root) groups=10(wheel)") { t.Fatalf("expected daemon user got %s", out) } logDone("run - user by id, zero uid") } func TestRunUserNotFound(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-u", "notme", "busybox", "id") _, err := runCommand(cmd) if err == nil { t.Fatal("unknown user should cause container to fail") } logDone("run - user not found") } func TestRunTwoConcurrentContainers(t *testing.T) { defer deleteAllContainers() group := sync.WaitGroup{} group.Add(2) for i := 0; i < 2; i++ { go func() { defer group.Done() cmd := exec.Command(dockerBinary, "run", "busybox", "sleep", "2") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } }() } group.Wait() logDone("run - two concurrent containers") } func TestRunEnvironment(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-h", "testing", "-e=FALSE=true", "-e=TRUE", "-e=TRICKY", "-e=HOME=", "busybox", "env") cmd.Env = append(os.Environ(), "TRUE=false", "TRICKY=tri\ncky\n", ) out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } actualEnvLxc := strings.Split(strings.TrimSpace(out), "\n") actualEnv := []string{} for i := range actualEnvLxc { if actualEnvLxc[i] != "container=lxc" { actualEnv = append(actualEnv, actualEnvLxc[i]) } } sort.Strings(actualEnv) goodEnv := []string{ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "HOSTNAME=testing", "FALSE=true", "TRUE=false", "TRICKY=tri", "cky", "", "HOME=/root", } sort.Strings(goodEnv) if len(goodEnv) != len(actualEnv) { t.Fatalf("Wrong environment: should be %d variables, not: %q\n", len(goodEnv), strings.Join(actualEnv, ", ")) } for i := range goodEnv { if actualEnv[i] != goodEnv[i] { t.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) } } logDone("run - verify environment") } func TestRunEnvironmentErase(t *testing.T) { // Test to make sure that when we use -e on env vars that are // not set in our local env that they're removed (if present) in // the container defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-e", "FOO", "-e", "HOSTNAME", "busybox", "env") cmd.Env = appendBaseEnv([]string{}) out, 
_, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } actualEnvLxc := strings.Split(strings.TrimSpace(out), "\n") actualEnv := []string{} for i := range actualEnvLxc { if actualEnvLxc[i] != "container=lxc" { actualEnv = append(actualEnv, actualEnvLxc[i]) } } sort.Strings(actualEnv) goodEnv := []string{ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "HOME=/root", } sort.Strings(goodEnv) if len(goodEnv) != len(actualEnv) { t.Fatalf("Wrong environment: should be %d variables, not: %q\n", len(goodEnv), strings.Join(actualEnv, ", ")) } for i := range goodEnv { if actualEnv[i] != goodEnv[i] { t.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) } } logDone("run - verify environment erase") } func TestRunEnvironmentOverride(t *testing.T) { // Test to make sure that when we use -e on env vars that are // already in the env that we're overriding them defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-e", "HOSTNAME", "-e", "HOME=/root2", "busybox", "env") cmd.Env = appendBaseEnv([]string{"HOSTNAME=bar"}) out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } actualEnvLxc := strings.Split(strings.TrimSpace(out), "\n") actualEnv := []string{} for i := range actualEnvLxc { if actualEnvLxc[i] != "container=lxc" { actualEnv = append(actualEnv, actualEnvLxc[i]) } } sort.Strings(actualEnv) goodEnv := []string{ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "HOME=/root2", "HOSTNAME=bar", } sort.Strings(goodEnv) if len(goodEnv) != len(actualEnv) { t.Fatalf("Wrong environment: should be %d variables, not: %q\n", len(goodEnv), strings.Join(actualEnv, ", ")) } for i := range goodEnv { if actualEnv[i] != goodEnv[i] { t.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) } } logDone("run - verify environment override") } func TestRunContainerNetwork(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "busybox", "ping", "-c", "1", "127.0.0.1") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } logDone("run - test container network via ping") } // Issue #4681 func TestRunLoopbackWhenNetworkDisabled(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--net=none", "busybox", "ping", "-c", "1", "127.0.0.1") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } logDone("run - test container loopback when networking disabled") } func TestRunNetHostNotAllowedWithLinks(t *testing.T) { defer deleteAllContainers() _, _, err := dockerCmd(t, "run", "--name", "linked", "busybox", "true") cmd := exec.Command(dockerBinary, "run", "--net=host", "--link", "linked:linked", "busybox", "true") _, _, err = runCommandWithOutput(cmd) if err == nil { t.Fatal("Expected error") } logDone("run - don't allow --net=host to be used with links") } func TestRunLoopbackOnlyExistsWhenNetworkingDisabled(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--net=none", "busybox", "ip", "-o", "-4", "a", "show", "up") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } var ( count = 0 parts = strings.Split(out, "\n") ) for _, l := range parts { if l != "" { count++ } } if count != 1 { t.Fatalf("Wrong interface count in container %d", count) } if !strings.HasPrefix(out, "1: lo") { t.Fatalf("Wrong interface in test container: expected [1: lo], got %s", out) } logDone("run - test loopback only exists when networking disabled") } // #7851 hostname 
outside container shows FQDN, inside only shortname // For testing purposes it is not required to set host's hostname directly // and use "--net=host" (as the original issue submitter did), as the same // codepath is executed with "docker run -h <hostname>". Both were manually // tested, but this testcase takes the simpler path of using "run -h .." func TestRunFullHostnameSet(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-h", "foo.bar.baz", "busybox", "hostname") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual != "foo.bar.baz" { t.Fatalf("expected hostname 'foo.bar.baz', received %s", actual) } logDone("run - test fully qualified hostname set with -h") } func TestRunPrivilegedCanMknod(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err) } if actual := strings.Trim(out, "\r\n"); actual != "ok" { t.Fatalf("expected output ok received %s", actual) } logDone("run - test privileged can mknod") } func TestRunUnPrivilegedCanMknod(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err) } if actual := strings.Trim(out, "\r\n"); actual != "ok" { t.Fatalf("expected output ok received %s", actual) } logDone("run - test un-privileged can mknod") } func TestRunCapDropInvalid(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--cap-drop=CHPASS", "busybox", "ls") out, _, err := runCommandWithOutput(cmd) if err == nil { t.Fatal(err, out) } logDone("run - test --cap-drop=CHPASS invalid") } func TestRunCapDropCannotMknod(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--cap-drop=MKNOD", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") out, _, err := runCommandWithOutput(cmd) if err == nil { t.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual == "ok" { t.Fatalf("expected output not ok received %s", actual) } logDone("run - test --cap-drop=MKNOD cannot mknod") } func TestRunCapDropCannotMknodLowerCase(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--cap-drop=mknod", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") out, _, err := runCommandWithOutput(cmd) if err == nil { t.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual == "ok" { t.Fatalf("expected output not ok received %s", actual) } logDone("run - test --cap-drop=mknod cannot mknod lowercase") } func TestRunCapDropALLCannotMknod(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--cap-drop=ALL", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") out, _, err := runCommandWithOutput(cmd) if err == nil { t.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual == "ok" { t.Fatalf("expected output not ok received %s", actual) } logDone("run - test --cap-drop=ALL cannot mknod") } func TestRunCapDropALLAddMknodCanMknod(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--cap-drop=ALL", "--cap-add=MKNOD", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } if actual := 
strings.Trim(out, "\r\n"); actual != "ok" { t.Fatalf("expected output ok received %s", actual) } logDone("run - test --cap-drop=ALL --cap-add=MKNOD can mknod") } func TestRunCapAddInvalid(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--cap-add=CHPASS", "busybox", "ls") out, _, err := runCommandWithOutput(cmd) if err == nil { t.Fatal(err, out) } logDone("run - test --cap-add=CHPASS invalid") } func TestRunCapAddCanDownInterface(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--cap-add=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual != "ok" { t.Fatalf("expected output ok received %s", actual) } logDone("run - test --cap-add=NET_ADMIN can set eth0 down") } func TestRunCapAddALLCanDownInterface(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--cap-add=ALL", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual != "ok" { t.Fatalf("expected output ok received %s", actual) } logDone("run - test --cap-add=ALL can set eth0 down") } func TestRunCapAddALLDropNetAdminCanDownInterface(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--cap-add=ALL", "--cap-drop=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") out, _, err := runCommandWithOutput(cmd) if err == nil { t.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual == "ok" { t.Fatalf("expected output not ok received %s", actual) } logDone("run - test --cap-add=ALL --cap-drop=NET_ADMIN cannot set eth0 down") } func TestRunPrivilegedCanMount(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err) } if actual := strings.Trim(out, "\r\n"); actual != "ok" { t.Fatalf("expected output ok received %s", actual) } logDone("run - test privileged can mount") } func TestRunUnPrivilegedCannotMount(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") out, _, err := runCommandWithOutput(cmd) if err == nil { t.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual == "ok" { t.Fatalf("expected output not ok received %s", actual) } logDone("run - test un-privileged cannot mount") } func TestRunSysNotWritableInNonPrivilegedContainers(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "busybox", "touch", "/sys/kernel/profiling") if code, err := runCommand(cmd); err == nil || code == 0 { t.Fatal("sys should not be writable in a non privileged container") } logDone("run - sys not writable in non privileged container") } func TestRunSysWritableInPrivilegedContainers(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "touch", "/sys/kernel/profiling") if code, err := runCommand(cmd); err != nil || code != 0 { t.Fatalf("sys should be writable in privileged container") } logDone("run - sys writable in privileged container") } func TestRunProcNotWritableInNonPrivilegedContainers(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, 
"run", "busybox", "touch", "/proc/sysrq-trigger") if code, err := runCommand(cmd); err == nil || code == 0 { t.Fatal("proc should not be writable in a non privileged container") } logDone("run - proc not writable in non privileged container") } func TestRunProcWritableInPrivilegedContainers(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "touch", "/proc/sysrq-trigger") if code, err := runCommand(cmd); err != nil || code != 0 { t.Fatalf("proc should be writable in privileged container") } logDone("run - proc writable in privileged container") } func TestRunWithCpuset(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--cpuset", "0", "busybox", "true") if code, err := runCommand(cmd); err != nil || code != 0 { t.Fatalf("container should run successfuly with cpuset of 0: %s", err) } logDone("run - cpuset 0") } func TestRunWithCpusetCpus(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--cpuset-cpus", "0", "busybox", "true") if code, err := runCommand(cmd); err != nil || code != 0 { t.Fatalf("container should run successfuly with cpuset-cpus of 0: %s", err) } logDone("run - cpuset-cpus 0") } func TestRunDeviceNumbers(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "ls -l /dev/null") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } deviceLineFields := strings.Fields(out) deviceLineFields[6] = "" deviceLineFields[7] = "" deviceLineFields[8] = "" expected := []string{"crw-rw-rw-", "1", "root", "root", "1,", "3", "", "", "", "/dev/null"} if !(reflect.DeepEqual(deviceLineFields, expected)) { t.Fatalf("expected output\ncrw-rw-rw- 1 root root 1, 3 May 24 13:29 /dev/null\n received\n %s\n", out) } logDone("run - test device numbers") } func TestRunThatCharacterDevicesActLikeCharacterDevices(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "dd if=/dev/zero of=/zero bs=1k count=5 2> /dev/null ; du -h /zero") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual[0] == '0' { t.Fatalf("expected a new file called /zero to be create that is greater than 0 bytes long, but du says: %s", actual) } logDone("run - test that character devices work.") } func TestRunUnprivilegedWithChroot(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "busybox", "chroot", "/", "true") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } logDone("run - unprivileged with chroot") } func TestRunAddingOptionalDevices(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--device", "/dev/zero:/dev/nulo", "busybox", "sh", "-c", "ls /dev/nulo") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual != "/dev/nulo" { t.Fatalf("expected output /dev/nulo, received %s", actual) } logDone("run - test --device argument") } func TestRunModeHostname(t *testing.T) { testRequires(t, SameHostDaemon) defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-h=testhostname", "busybox", "cat", "/etc/hostname") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual != "testhostname" { t.Fatalf("expected 'testhostname', but says: %q", actual) } cmd = exec.Command(dockerBinary, "run", 
"--net=host", "busybox", "cat", "/etc/hostname") out, _, err = runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } hostname, err := os.Hostname() if err != nil { t.Fatal(err) } if actual := strings.Trim(out, "\r\n"); actual != hostname { t.Fatalf("expected %q, but says: %q", hostname, actual) } logDone("run - hostname and several network modes") } func TestRunRootWorkdir(t *testing.T) { defer deleteAllContainers() s, _, err := dockerCmd(t, "run", "--workdir", "/", "busybox", "pwd") if err != nil { t.Fatal(s, err) } if s != "/\n" { t.Fatalf("pwd returned %q (expected /\\n)", s) } logDone("run - workdir /") } func TestRunAllowBindMountingRoot(t *testing.T) { defer deleteAllContainers() s, _, err := dockerCmd(t, "run", "-v", "/:/host", "busybox", "ls", "/host") if err != nil { t.Fatal(s, err) } logDone("run - bind mount / as volume") } func TestRunDisallowBindMountingRootToRoot(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-v", "/:/", "busybox", "ls", "/host") out, _, err := runCommandWithOutput(cmd) if err == nil { t.Fatal(out, err) } logDone("run - bind mount /:/ as volume should not work") } // Verify that a container gets default DNS when only localhost resolvers exist func TestRunDnsDefaultOptions(t *testing.T) { defer deleteAllContainers() testRequires(t, SameHostDaemon) // preserve original resolv.conf for restoring after test origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf") if os.IsNotExist(err) { t.Fatalf("/etc/resolv.conf does not exist") } // defer restored original conf defer func() { if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil { t.Fatal(err) } }() // test 3 cases: standard IPv4 localhost, commented out localhost, and IPv6 localhost // 2 are removed from the file at container start, and the 3rd (commented out) one is ignored by // GetNameservers(), leading to a replacement of nameservers with the default set tmpResolvConf := []byte("nameserver 127.0.0.1\n#nameserver 127.0.2.1\nnameserver ::1") if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil { t.Fatal(err) } cmd := exec.Command(dockerBinary, "run", "busybox", "cat", "/etc/resolv.conf") actual, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, actual) } // check that the actual defaults are appended to the commented out // localhost resolver (which should be preserved) // NOTE: if we ever change the defaults from google dns, this will break expected := "#nameserver 127.0.2.1\n\nnameserver 8.8.8.8\nnameserver 8.8.4.4" if actual != expected { t.Fatalf("expected resolv.conf be: %q, but was: %q", expected, actual) } logDone("run - dns default options") } func TestRunDnsOptions(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf") out, stderr, _, err := runCommandWithStdoutStderr(cmd) if err != nil { t.Fatal(err, out) } // The client will get a warning on stderr when setting DNS to a localhost address; verify this: if !strings.Contains(stderr, "Localhost DNS setting") { t.Fatalf("Expected warning on stderr about localhost resolver, but got %q", stderr) } actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1) if actual != "nameserver 127.0.0.1 search mydomain" { t.Fatalf("expected 'nameserver 127.0.0.1 search mydomain', but says: %q", actual) } cmd = exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "--dns-search=.", "busybox", "cat", "/etc/resolv.conf") out, _, _, err = 
func TestRunDnsOptions(t *testing.T) {
	defer deleteAllContainers()

	cmd := exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf")
	out, stderr, _, err := runCommandWithStdoutStderr(cmd)
	if err != nil {
		t.Fatal(err, out)
	}

	// The client will get a warning on stderr when setting DNS to a localhost address; verify this:
	if !strings.Contains(stderr, "Localhost DNS setting") {
		t.Fatalf("Expected warning on stderr about localhost resolver, but got %q", stderr)
	}

	actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1)
	if actual != "nameserver 127.0.0.1 search mydomain" {
		t.Fatalf("expected 'nameserver 127.0.0.1 search mydomain', but says: %q", actual)
	}

	cmd = exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "--dns-search=.", "busybox", "cat", "/etc/resolv.conf")
	out, _, _, err = runCommandWithStdoutStderr(cmd)
	if err != nil {
		t.Fatal(err, out)
	}

	actual = strings.Replace(strings.Trim(strings.Trim(out, "\r\n"), " "), "\n", " ", -1)
	if actual != "nameserver 127.0.0.1" {
		t.Fatalf("expected 'nameserver 127.0.0.1', but says: %q", actual)
	}

	logDone("run - dns options")
}

func TestRunDnsOptionsBasedOnHostResolvConf(t *testing.T) {
	defer deleteAllContainers()
	testRequires(t, SameHostDaemon)

	origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf")
	if os.IsNotExist(err) {
		t.Fatalf("/etc/resolv.conf does not exist")
	}

	hostNameservers := resolvconf.GetNameservers(origResolvConf)
	hostSearch := resolvconf.GetSearchDomains(origResolvConf)

	var out string
	cmd := exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "busybox", "cat", "/etc/resolv.conf")
	if out, _, _, err = runCommandWithStdoutStderr(cmd); err != nil {
		t.Fatal(err, out)
	}

	if actualNameservers := resolvconf.GetNameservers([]byte(out)); string(actualNameservers[0]) != "127.0.0.1" {
		t.Fatalf("expected '127.0.0.1', but says: %q", string(actualNameservers[0]))
	}

	actualSearch := resolvconf.GetSearchDomains([]byte(out))
	if len(actualSearch) != len(hostSearch) {
		t.Fatalf("expected %d search domain(s), but it has: %d", len(hostSearch), len(actualSearch))
	}
	for i := range actualSearch {
		if actualSearch[i] != hostSearch[i] {
			t.Fatalf("expected %q domain, but says: %q", hostSearch[i], actualSearch[i])
		}
	}

	cmd = exec.Command(dockerBinary, "run", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf")
	if out, _, err = runCommandWithOutput(cmd); err != nil {
		t.Fatal(err, out)
	}

	actualNameservers := resolvconf.GetNameservers([]byte(out))
	if len(actualNameservers) != len(hostNameservers) {
		t.Fatalf("expected %d nameserver(s), but it has: %d", len(hostNameservers), len(actualNameservers))
	}
	for i := range actualNameservers {
		if actualNameservers[i] != hostNameservers[i] {
			t.Fatalf("expected %q nameserver, but says: %q", hostNameservers[i], actualNameservers[i])
		}
	}

	if actualSearch = resolvconf.GetSearchDomains([]byte(out)); string(actualSearch[0]) != "mydomain" {
		t.Fatalf("expected 'mydomain', but says: %q", string(actualSearch[0]))
	}

	// test with file
	tmpResolvConf := []byte("search example.com\nnameserver 12.34.56.78\nnameserver 127.0.0.1")
	if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil {
		t.Fatal(err)
	}
	// put the old resolvconf back
	defer func() {
		if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil {
			t.Fatal(err)
		}
	}()

	resolvConf, err := ioutil.ReadFile("/etc/resolv.conf")
	if os.IsNotExist(err) {
		t.Fatalf("/etc/resolv.conf does not exist")
	}

	hostNameservers = resolvconf.GetNameservers(resolvConf)
	hostSearch = resolvconf.GetSearchDomains(resolvConf)

	cmd = exec.Command(dockerBinary, "run", "busybox", "cat", "/etc/resolv.conf")
	if out, _, err = runCommandWithOutput(cmd); err != nil {
		t.Fatal(err, out)
	}

	if actualNameservers = resolvconf.GetNameservers([]byte(out)); string(actualNameservers[0]) != "12.34.56.78" || len(actualNameservers) != 1 {
		t.Fatalf("expected '12.34.56.78', but has: %v", actualNameservers)
	}

	actualSearch = resolvconf.GetSearchDomains([]byte(out))
	if len(actualSearch) != len(hostSearch) {
		t.Fatalf("expected %d search domain(s), but it has: %d", len(hostSearch), len(actualSearch))
	}
	for i := range actualSearch {
		if actualSearch[i] != hostSearch[i] {
			t.Fatalf("expected %q domain, but says: %q", hostSearch[i], actualSearch[i])
		}
	}

	logDone("run - dns options based on host resolv.conf")
}

// Test the file watch notifier on docker
host's /etc/resolv.conf // A go-routine is responsible for auto-updating containers which are // stopped and have an unmodified copy of resolv.conf, as well as // marking running containers as requiring an update on next restart func TestRunResolvconfUpdater(t *testing.T) { // Because overlay doesn't support inotify properly, we need to skip // this test if the docker daemon has Storage Driver == overlay testRequires(t, SameHostDaemon, NotOverlay) tmpResolvConf := []byte("search pommesfrites.fr\nnameserver 12.34.56.78") tmpLocalhostResolvConf := []byte("nameserver 127.0.0.1") //take a copy of resolv.conf for restoring after test completes resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf") if err != nil { t.Fatal(err) } // This test case is meant to test monitoring resolv.conf when it is // a regular file not a bind mount. So we unmount resolv.conf and replace // it with a file containing the original settings. cmd := exec.Command("umount", "/etc/resolv.conf") if _, err = runCommand(cmd); err != nil { t.Fatal(err) } //cleanup defer func() { deleteAllContainers() if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { t.Fatal(err) } }() //1. test that a non-running container gets an updated resolv.conf cmd = exec.Command(dockerBinary, "run", "--name='first'", "busybox", "true") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } containerID1, err := getIDByName("first") if err != nil { t.Fatal(err) } // replace resolv.conf with our temporary copy bytesResolvConf := []byte(tmpResolvConf) if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil { t.Fatal(err) } time.Sleep(time.Second / 2) // check for update in container containerResolv, err := readContainerFile(containerID1, "resolv.conf") if err != nil { t.Fatal(err) } if !bytes.Equal(containerResolv, bytesResolvConf) { t.Fatalf("Stopped container does not have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv)) } //2. test that a non-running container does not receive resolv.conf updates // if it modified the container copy of the starting point resolv.conf cmd = exec.Command(dockerBinary, "run", "--name='second'", "busybox", "sh", "-c", "echo 'search mylittlepony.com' >>/etc/resolv.conf") if _, err = runCommand(cmd); err != nil { t.Fatal(err) } containerID2, err := getIDByName("second") if err != nil { t.Fatal(err) } containerResolvHashBefore, err := readContainerFile(containerID2, "resolv.conf.hash") if err != nil { t.Fatal(err) } //make a change to resolv.conf (in this case replacing our tmp copy with orig copy) if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { t.Fatal(err) } time.Sleep(time.Second / 2) containerResolvHashAfter, err := readContainerFile(containerID2, "resolv.conf.hash") if err != nil { t.Fatal(err) } if !bytes.Equal(containerResolvHashBefore, containerResolvHashAfter) { t.Fatalf("Stopped container with modified resolv.conf should not have been updated; expected hash: %v, new hash: %v", containerResolvHashBefore, containerResolvHashAfter) } //3. 
test that a running container's resolv.conf is not modified while running cmd = exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err) } runningContainerID := strings.TrimSpace(out) containerResolvHashBefore, err = readContainerFile(runningContainerID, "resolv.conf.hash") if err != nil { t.Fatal(err) } // replace resolv.conf if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil { t.Fatal(err) } // make sure the updater has time to run to validate we really aren't // getting updated time.Sleep(time.Second / 2) containerResolvHashAfter, err = readContainerFile(runningContainerID, "resolv.conf.hash") if err != nil { t.Fatal(err) } if !bytes.Equal(containerResolvHashBefore, containerResolvHashAfter) { t.Fatalf("Running container's resolv.conf should not be updated; expected hash: %v, new hash: %v", containerResolvHashBefore, containerResolvHashAfter) } //4. test that a running container's resolv.conf is updated upon restart // (the above container is still running..) cmd = exec.Command(dockerBinary, "restart", runningContainerID) if _, err = runCommand(cmd); err != nil { t.Fatal(err) } // check for update in container containerResolv, err = readContainerFile(runningContainerID, "resolv.conf") if err != nil { t.Fatal(err) } if !bytes.Equal(containerResolv, bytesResolvConf) { t.Fatalf("Restarted container should have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv)) } //5. test that additions of a localhost resolver are cleaned from // host resolv.conf before updating container's resolv.conf copies // replace resolv.conf with a localhost-only nameserver copy bytesResolvConf = []byte(tmpLocalhostResolvConf) if err = ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil { t.Fatal(err) } time.Sleep(time.Second / 2) // our first exited container ID should have been updated, but with default DNS // after the cleanup of resolv.conf found only a localhost nameserver: containerResolv, err = readContainerFile(containerID1, "resolv.conf") if err != nil { t.Fatal(err) } expected := "\nnameserver 8.8.8.8\nnameserver 8.8.4.4" if !bytes.Equal(containerResolv, []byte(expected)) { t.Fatalf("Container does not have cleaned/replaced DNS in resolv.conf; expected %q, got %q", expected, string(containerResolv)) } //6. Test that replacing (as opposed to modifying) resolv.conf triggers an update // of containers' resolv.conf. 
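	// Aside on this step: an in-place write keeps the file's inode, while a
	// rename(2)-based replacement (as os.Rename does below) produces a new
	// inode, so a watcher must track file identity rather than only content
	// or mtime. A minimal illustration of the distinction, using os.SameFile
	// (this snippet is an explanatory sketch, not part of the original test):
	//
	//	before, _ := os.Stat("/etc/resolv.conf")
	//	// ... an in-place write or a rename happens here ...
	//	after, _ := os.Stat("/etc/resolv.conf")
	//	replaced := !os.SameFile(before, after) // true after a rename, false after an in-place write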
// Restore the original resolv.conf
	if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil {
		t.Fatal(err)
	}
	// Run the container so it picks up the old settings
	cmd = exec.Command(dockerBinary, "run", "--name='third'", "busybox", "true")
	if _, err := runCommand(cmd); err != nil {
		t.Fatal(err)
	}
	containerID3, err := getIDByName("third")
	if err != nil {
		t.Fatal(err)
	}

	// Create a modified resolv.conf.aside and override resolv.conf with it
	bytesResolvConf = []byte(tmpResolvConf)
	if err := ioutil.WriteFile("/etc/resolv.conf.aside", bytesResolvConf, 0644); err != nil {
		t.Fatal(err)
	}
	err = os.Rename("/etc/resolv.conf.aside", "/etc/resolv.conf")
	if err != nil {
		t.Fatal(err)
	}
	time.Sleep(time.Second / 2)

	// check for update in container
	containerResolv, err = readContainerFile(containerID3, "resolv.conf")
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(containerResolv, bytesResolvConf) {
		t.Fatalf("Stopped container does not have updated resolv.conf; expected\n%q\n got\n%q", tmpResolvConf, string(containerResolv))
	}

	// cleanup: restore of the original resolv.conf happens in the deferred func
	logDone("run - resolv.conf updater")
}

func TestRunAddHost(t *testing.T) {
	defer deleteAllContainers()
	cmd := exec.Command(dockerBinary, "run", "--add-host=extra:86.75.30.9", "busybox", "grep", "extra", "/etc/hosts")
	out, _, err := runCommandWithOutput(cmd)
	if err != nil {
		t.Fatal(err, out)
	}

	actual := strings.Trim(out, "\r\n")
	if actual != "86.75.30.9\textra" {
		t.Fatalf("expected '86.75.30.9\textra', but says: %q", actual)
	}

	logDone("run - add-host option")
}

// Regression test for #6983
func TestRunAttachStdErrOnlyTTYMode(t *testing.T) {
	defer deleteAllContainers()

	cmd := exec.Command(dockerBinary, "run", "-t", "-a", "stderr", "busybox", "true")
	exitCode, err := runCommand(cmd)
	if err != nil {
		t.Fatal(err)
	} else if exitCode != 0 {
		t.Fatalf("Container should have exited with error code 0")
	}

	logDone("run - Attach stderr only with -t")
}

// Regression test for #6983
func TestRunAttachStdOutOnlyTTYMode(t *testing.T) {
	defer deleteAllContainers()

	cmd := exec.Command(dockerBinary, "run", "-t", "-a", "stdout", "busybox", "true")
	exitCode, err := runCommand(cmd)
	if err != nil {
		t.Fatal(err)
	} else if exitCode != 0 {
		t.Fatalf("Container should have exited with error code 0")
	}

	logDone("run - Attach stdout only with -t")
}

// Regression test for #6983
func TestRunAttachStdOutAndErrTTYMode(t *testing.T) {
	defer deleteAllContainers()

	cmd := exec.Command(dockerBinary, "run", "-t", "-a", "stdout", "-a", "stderr", "busybox", "true")
	exitCode, err := runCommand(cmd)
	if err != nil {
		t.Fatal(err)
	} else if exitCode != 0 {
		t.Fatalf("Container should have exited with error code 0")
	}

	logDone("run - Attach stderr and stdout with -t")
}

// Test for #10388 - this will run the same test as TestRunAttachStdOutAndErrTTYMode
// but using --attach instead of -a to make sure we read the flag correctly
func TestRunAttachWithDetach(t *testing.T) {
	defer deleteAllContainers()

	cmd := exec.Command(dockerBinary, "run", "-d", "--attach", "stdout", "busybox", "true")
	_, stderr, _, err := runCommandWithStdoutStderr(cmd)
	if err == nil {
		t.Fatal("Container should have exited with an error code different from 0")
	} else if !strings.Contains(stderr, "Conflicting options: -a and -d") {
		t.Fatalf("Should have returned an error about the conflicting options -a and -d")
	}

	logDone("run - Attach stdout with -d")
}
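// The conflict that the test above exercises is plain client-side flag
// validation. A minimal sketch of the idea (validateAttachDetach is a
// hypothetical helper written for illustration here, not the docker client's
// actual code):
func validateAttachDetach(detach bool, attachFlags []string) error {
	// -d asks the client to return immediately, while -a/--attach asks it to
	// stream a container stdio stream; the two cannot be honored together.
	if detach && len(attachFlags) > 0 {
		return fmt.Errorf("Conflicting options: -a and -d")
	}
	return nil
}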
"top") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } id := strings.TrimSpace(out) state, err := inspectField(id, "State.Running") if err != nil { t.Fatal(err) } if state != "true" { t.Fatal("Container state is 'not running'") } pid1, err := inspectField(id, "State.Pid") if err != nil { t.Fatal(err) } if pid1 == "0" { t.Fatal("Container state Pid 0") } cmd = exec.Command(dockerBinary, "stop", id) out, _, err = runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } state, err = inspectField(id, "State.Running") if err != nil { t.Fatal(err) } if state != "false" { t.Fatal("Container state is 'running'") } pid2, err := inspectField(id, "State.Pid") if err != nil { t.Fatal(err) } if pid2 == pid1 { t.Fatalf("Container state Pid %s, but expected %s", pid2, pid1) } cmd = exec.Command(dockerBinary, "start", id) out, _, err = runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } state, err = inspectField(id, "State.Running") if err != nil { t.Fatal(err) } if state != "true" { t.Fatal("Container state is 'not running'") } pid3, err := inspectField(id, "State.Pid") if err != nil { t.Fatal(err) } if pid3 == pid1 { t.Fatalf("Container state Pid %s, but expected %s", pid2, pid1) } logDone("run - test container state.") } // Test for #1737 func TestRunCopyVolumeUidGid(t *testing.T) { name := "testrunvolumesuidgid" defer deleteImages(name) defer deleteAllContainers() _, err := buildImage(name, `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir -p /hello && touch /hello/test && chown dockerio.dockerio /hello`, true) if err != nil { t.Fatal(err) } // Test that the uid and gid is copied from the image to the volume cmd := exec.Command(dockerBinary, "run", "--rm", "-v", "/hello", name, "sh", "-c", "ls -l / | grep hello | awk '{print $3\":\"$4}'") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } out = strings.TrimSpace(out) if out != "dockerio:dockerio" { t.Fatalf("Wrong /hello ownership: %s, expected dockerio:dockerio", out) } logDone("run - copy uid/gid for volume") } // Test for #1582 func TestRunCopyVolumeContent(t *testing.T) { name := "testruncopyvolumecontent" defer deleteImages(name) defer deleteAllContainers() _, err := buildImage(name, `FROM busybox RUN mkdir -p /hello/local && echo hello > /hello/local/world`, true) if err != nil { t.Fatal(err) } // Test that the content is copied from the image to the volume cmd := exec.Command(dockerBinary, "run", "--rm", "-v", "/hello", name, "find", "/hello") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } if !(strings.Contains(out, "/hello/local/world") && strings.Contains(out, "/hello/local")) { t.Fatal("Container failed to transfer content to volume") } logDone("run - copy volume content") } func TestRunCleanupCmdOnEntrypoint(t *testing.T) { name := "testrunmdcleanuponentrypoint" defer deleteImages(name) defer deleteAllContainers() if _, err := buildImage(name, `FROM busybox ENTRYPOINT ["echo"] CMD ["testingpoint"]`, true); err != nil { t.Fatal(err) } runCmd := exec.Command(dockerBinary, "run", "--entrypoint", "whoami", name) out, exit, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf("Error: %v, out: %q", err, out) } if exit != 0 { t.Fatalf("expected exit code 0 received %d, out: %q", exit, out) } out = strings.TrimSpace(out) if out != "root" { t.Fatalf("Expected output root, got %q", out) } logDone("run - cleanup cmd on --entrypoint") } // 
// TestRunWorkdirExistsAndIsFile checks that 'docker run -w' with an existing file as the working directory errors out
func TestRunWorkdirExistsAndIsFile(t *testing.T) {
	defer deleteAllContainers()

	runCmd := exec.Command(dockerBinary, "run", "-w", "/bin/cat", "busybox")
	out, exit, err := runCommandWithOutput(runCmd)
	if !(err != nil && exit == 1 && strings.Contains(out, "Cannot mkdir: /bin/cat is not a directory")) {
		t.Fatalf("Docker must complain about the workdir not being a directory, but we got out: %s, exit: %d, err: %s", out, exit, err)
	}
	logDone("run - error on existing file for workdir")
}

func TestRunExitOnStdinClose(t *testing.T) {
	name := "testrunexitonstdinclose"
	defer deleteAllContainers()
	runCmd := exec.Command(dockerBinary, "run", "--name", name, "-i", "busybox", "/bin/cat")

	stdin, err := runCmd.StdinPipe()
	if err != nil {
		t.Fatal(err)
	}
	stdout, err := runCmd.StdoutPipe()
	if err != nil {
		t.Fatal(err)
	}

	if err := runCmd.Start(); err != nil {
		t.Fatal(err)
	}
	if _, err := stdin.Write([]byte("hello\n")); err != nil {
		t.Fatal(err)
	}

	r := bufio.NewReader(stdout)
	line, err := r.ReadString('\n')
	if err != nil {
		t.Fatal(err)
	}
	line = strings.TrimSpace(line)
	if line != "hello" {
		t.Fatalf("Output should be 'hello', got %q", line)
	}
	if err := stdin.Close(); err != nil {
		t.Fatal(err)
	}
	finish := make(chan struct{})
	go func() {
		if err := runCmd.Wait(); err != nil {
			t.Fatal(err)
		}
		close(finish)
	}()
	select {
	case <-finish:
	case <-time.After(1 * time.Second):
		t.Fatal("docker run failed to exit on stdin close")
	}
	state, err := inspectField(name, "State.Running")
	if err != nil {
		t.Fatal(err)
	}
	if state != "false" {
		t.Fatal("Container must be stopped after stdin closing")
	}
	logDone("run - exit on stdin closing")
}

// Test for #2267
func TestRunWriteHostsFileAndNotCommit(t *testing.T) {
	defer deleteAllContainers()

	name := "writehosts"
	cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hosts && cat /etc/hosts")
	out, _, err := runCommandWithOutput(cmd)
	if err != nil {
		t.Fatal(err, out)
	}
	if !strings.Contains(out, "test2267") {
		t.Fatal("/etc/hosts should contain 'test2267'")
	}

	cmd = exec.Command(dockerBinary, "diff", name)
	out, _, err = runCommandWithOutput(cmd)
	if err != nil {
		t.Fatal(err, out)
	}
	if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, t) {
		t.Fatal("diff should be empty")
	}

	logDone("run - write to /etc/hosts and not committed")
}

func eqToBaseDiff(out string, t *testing.T) bool {
	cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "hello")
	out1, _, err := runCommandWithOutput(cmd)
	if err != nil {
		t.Fatal(err, out1)
	}
	cID := stripTrailingCharacters(out1)
	cmd = exec.Command(dockerBinary, "diff", cID)
	baseDiff, _, err := runCommandWithOutput(cmd)
	if err != nil {
		t.Fatal(err, baseDiff)
	}
	baseArr := strings.Split(baseDiff, "\n")
	sort.Strings(baseArr)
	outArr := strings.Split(out, "\n")
	sort.Strings(outArr)
	return sliceEq(baseArr, outArr)
}

func sliceEq(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}

	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}

	return true
}
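// eqToBaseDiff and sliceEq above normalize both `docker diff` outputs by
// sorting their lines before comparing, so ordering differences between runs
// cannot cause false negatives. The explicit loop in sliceEq could be replaced
// with the standard library (sketch for comparison only; reflect is not
// currently imported by this file, which is why the loop is used instead):
//
//	sort.Strings(a)
//	sort.Strings(b)
//	equal := reflect.DeepEqual(a, b)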
// Test for #2267
func TestRunWriteHostnameFileAndNotCommit(t *testing.T) {
	defer deleteAllContainers()

	name := "writehostname"
	cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hostname && cat /etc/hostname")
	out, _, err := runCommandWithOutput(cmd)
	if err != nil {
		t.Fatal(err, out)
	}
	if !strings.Contains(out, "test2267") {
		t.Fatal("/etc/hostname should contain 'test2267'")
	}

	cmd = exec.Command(dockerBinary, "diff", name)
	out, _, err = runCommandWithOutput(cmd)
	if err != nil {
		t.Fatal(err, out)
	}
	if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, t) {
		t.Fatal("diff should be empty")
	}

	logDone("run - write to /etc/hostname and not committed")
}

// Test for #2267
func TestRunWriteResolvFileAndNotCommit(t *testing.T) {
	defer deleteAllContainers()

	name := "writeresolv"
	cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/resolv.conf && cat /etc/resolv.conf")
	out, _, err := runCommandWithOutput(cmd)
	if err != nil {
		t.Fatal(err, out)
	}
	if !strings.Contains(out, "test2267") {
		t.Fatal("/etc/resolv.conf should contain 'test2267'")
	}

	cmd = exec.Command(dockerBinary, "diff", name)
	out, _, err = runCommandWithOutput(cmd)
	if err != nil {
		t.Fatal(err, out)
	}
	if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, t) {
		t.Fatal("diff should be empty")
	}

	logDone("run - write to /etc/resolv.conf and not committed")
}

func TestRunWithBadDevice(t *testing.T) {
	defer deleteAllContainers()

	name := "baddevice"
	cmd := exec.Command(dockerBinary, "run", "--name", name, "--device", "/etc", "busybox", "true")
	out, _, err := runCommandWithOutput(cmd)
	if err == nil {
		t.Fatal("Run should fail with bad device")
	}
	expected := `\"/etc\": not a device node`
	if !strings.Contains(out, expected) {
		t.Fatalf("Output should contain %q, actual out: %q", expected, out)
	}
	logDone("run - error with bad device")
}

func TestRunEntrypoint(t *testing.T) {
	defer deleteAllContainers()

	name := "entrypoint"
	cmd := exec.Command(dockerBinary, "run", "--name", name, "--entrypoint", "/bin/echo", "busybox", "-n", "foobar")
	out, _, err := runCommandWithOutput(cmd)
	if err != nil {
		t.Fatal(err, out)
	}
	expected := "foobar"
	if out != expected {
		t.Fatalf("Output should be %q, actual out: %q", expected, out)
	}
	logDone("run - entrypoint")
}

func TestRunBindMounts(t *testing.T) {
	testRequires(t, SameHostDaemon)
	defer deleteAllContainers()

	tmpDir, err := ioutil.TempDir("", "docker-test-container")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	writeFile(path.Join(tmpDir, "touch-me"), "", t)

	// Test reading from a read-only bind mount
	cmd := exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp:ro", tmpDir), "busybox", "ls", "/tmp")
	out, _, err := runCommandWithOutput(cmd)
	if err != nil {
		t.Fatal(err, out)
	}
	if !strings.Contains(out, "touch-me") {
		t.Fatal("Container failed to read from bind mount")
	}

	// test writing to bind mount
	cmd = exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "busybox", "touch", "/tmp/holla")
	out, _, err = runCommandWithOutput(cmd)
	if err != nil {
		t.Fatal(err, out)
	}
	readFile(path.Join(tmpDir, "holla"), t) // Will fail if the file doesn't exist

	// test mounting to an illegal destination directory
	cmd = exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:.", tmpDir), "busybox", "ls", ".")
	_, err = runCommand(cmd)
	if err == nil {
		t.Fatal("Container bind mounted illegal directory")
	}

	// test mounting a file
	cmd = exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s/holla:/tmp/holla:rw", tmpDir), "busybox", "sh", "-c", "echo -n 'yotta' > /tmp/holla")
	_, err = runCommand(cmd)
	if err != nil {
		t.Fatal(err, out)
	}
	content := readFile(path.Join(tmpDir, "holla"), t) // Will fail if the file doesn't exist
	expected := "yotta"
	if content != expected {
		t.Fatalf("Output should be %q, actual out: %q", expected, content)
	}

	logDone("run - bind mounts")
}

// Ensure that CIDFile gets deleted if
it's empty // Perform this test by making `docker run` fail func TestRunCidFileCleanupIfEmpty(t *testing.T) { defer deleteAllContainers() tmpDir, err := ioutil.TempDir("", "TestRunCidFile") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) tmpCidFile := path.Join(tmpDir, "cid") cmd := exec.Command(dockerBinary, "run", "--cidfile", tmpCidFile, "emptyfs") out, _, err := runCommandWithOutput(cmd) if err == nil { t.Fatalf("Run without command must fail. out=%s", out) } else if !strings.Contains(out, "No command specified") { t.Fatalf("Run without command failed with wrong output. out=%s\nerr=%v", out, err) } if _, err := os.Stat(tmpCidFile); err == nil { t.Fatalf("empty CIDFile %q should've been deleted", tmpCidFile) } logDone("run - cleanup empty cidfile on error") } // #2098 - Docker cidFiles only contain short version of the containerId //sudo docker run --cidfile /tmp/docker_test.cid ubuntu echo "test" // TestRunCidFile tests that run --cidfile returns the longid func TestRunCidFileCheckIDLength(t *testing.T) { defer deleteAllContainers() tmpDir, err := ioutil.TempDir("", "TestRunCidFile") if err != nil { t.Fatal(err) } tmpCidFile := path.Join(tmpDir, "cid") defer os.RemoveAll(tmpDir) cmd := exec.Command(dockerBinary, "run", "-d", "--cidfile", tmpCidFile, "busybox", "true") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err) } id := strings.TrimSpace(out) buffer, err := ioutil.ReadFile(tmpCidFile) if err != nil { t.Fatal(err) } cid := string(buffer) if len(cid) != 64 { t.Fatalf("--cidfile should be a long id, not %q", id) } if cid != id { t.Fatalf("cid must be equal to %s, got %s", id, cid) } logDone("run - cidfile contains long id") } func TestRunNetworkNotInitializedNoneMode(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-d", "--net=none", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err) } id := strings.TrimSpace(out) res, err := inspectField(id, "NetworkSettings.IPAddress") if err != nil { t.Fatal(err) } if res != "" { t.Fatalf("For 'none' mode network must not be initialized, but container got IP: %s", res) } logDone("run - network must not be initialized in 'none' mode") } func TestRunSetMacAddress(t *testing.T) { mac := "12:34:56:78:9a:bc" defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "/bin/sh", "-c", "ip link show eth0 | tail -1 | awk '{print $2}'") out, ec, err := runCommandWithOutput(cmd) if err != nil { t.Fatalf("exec failed:\nexit code=%v\noutput=%s", ec, out) } actualMac := strings.TrimSpace(out) if actualMac != mac { t.Fatalf("Set MAC address with --mac-address failed. 
The container has an incorrect MAC address: %q, expected: %q", actualMac, mac)
	}

	logDone("run - setting MAC address with --mac-address")
}

func TestRunInspectMacAddress(t *testing.T) {
	defer deleteAllContainers()

	mac := "12:34:56:78:9a:bc"
	cmd := exec.Command(dockerBinary, "run", "-d", "--mac-address="+mac, "busybox", "top")
	out, _, err := runCommandWithOutput(cmd)
	if err != nil {
		t.Fatal(err)
	}
	id := strings.TrimSpace(out)
	inspectedMac, err := inspectField(id, "NetworkSettings.MacAddress")
	if err != nil {
		t.Fatal(err)
	}
	if inspectedMac != mac {
		t.Fatalf("docker inspect outputs wrong MAC address: %q, should be: %q", inspectedMac, mac)
	}
	logDone("run - inspecting MAC address")
}

// test that docker run with an invalid MAC address errors out
func TestRunWithInvalidMacAddress(t *testing.T) {
	defer deleteAllContainers()

	runCmd := exec.Command(dockerBinary, "run", "--mac-address", "92:d0:c6:0a:29", "busybox")
	out, _, err := runCommandWithOutput(runCmd)
	// using an invalid MAC address should produce an error
	if err == nil || !strings.Contains(out, "is not a valid mac address") {
		t.Fatalf("run with an invalid --mac-address should error out")
	}

	logDone("run - can't use an invalid mac address")
}

func TestRunDeallocatePortOnMissingIptablesRule(t *testing.T) {
	defer deleteAllContainers()
	testRequires(t, SameHostDaemon)

	cmd := exec.Command(dockerBinary, "run", "-d", "-p", "23:23", "busybox", "top")
	out, _, err := runCommandWithOutput(cmd)
	if err != nil {
		t.Fatal(err)
	}
	id := strings.TrimSpace(out)
	ip, err := inspectField(id, "NetworkSettings.IPAddress")
	if err != nil {
		t.Fatal(err)
	}
	iptCmd := exec.Command("iptables", "-D", "DOCKER", "-d", fmt.Sprintf("%s/32", ip),
		"!", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-m", "tcp", "--dport", "23", "-j", "ACCEPT")
	out, _, err = runCommandWithOutput(iptCmd)
	if err != nil {
		t.Fatal(err, out)
	}
	if err := deleteContainer(id); err != nil {
		t.Fatal(err)
	}
	cmd = exec.Command(dockerBinary, "run", "-d", "-p", "23:23", "busybox", "top")
	out, _, err = runCommandWithOutput(cmd)
	if err != nil {
		t.Fatal(err, out)
	}

	logDone("run - port should be deallocated even on iptables error")
}

func TestRunPortInUse(t *testing.T) {
	defer deleteAllContainers()
	testRequires(t, SameHostDaemon)

	port := "1234"
	l, err := net.Listen("tcp", ":"+port)
	if err != nil {
		t.Fatal(err)
	}
	defer l.Close()
	cmd := exec.Command(dockerBinary, "run", "-d", "-p", port+":80", "busybox", "top")
	out, _, err := runCommandWithOutput(cmd)
	if err == nil {
		t.Fatalf("Binding on used port must fail")
	}
	if !strings.Contains(out, "address already in use") {
		t.Fatalf("Out must be about \"address already in use\", got %s", out)
	}

	logDone("run - error out if port already in use")
}

// https://github.com/docker/docker/issues/8428
func TestRunPortProxy(t *testing.T) {
	testRequires(t, SameHostDaemon)

	defer deleteAllContainers()

	port := "12345"
	cmd := exec.Command(dockerBinary, "run", "-d", "-p", port+":80", "busybox", "top")

	out, _, err := runCommandWithOutput(cmd)
	if err != nil {
		t.Fatalf("Failed to run and bind port %s, output: %s, error: %s", port, out, err)
	}

	// connect for 10 times here.
This will trigger 10 EPIPES in the child // process and kill it when it writes to a closed stdout/stderr for i := 0; i < 10; i++ { net.Dial("tcp", fmt.Sprintf("0.0.0.0:%s", port)) } listPs := exec.Command("sh", "-c", "ps ax | grep docker") out, _, err = runCommandWithOutput(listPs) if err != nil { t.Errorf("list docker process failed with output %s, error %s", out, err) } if strings.Contains(out, "docker ") { t.Errorf("Unexpected defunct docker process") } if !strings.Contains(out, "docker-proxy -proto tcp -host-ip 0.0.0.0 -host-port 12345") { t.Errorf("Failed to find docker-proxy process, got %s", out) } logDone("run - proxy should work with unavailable port") } // Regression test for #7792 func TestRunMountOrdering(t *testing.T) { defer deleteAllContainers() testRequires(t, SameHostDaemon) tmpDir, err := ioutil.TempDir("", "docker_nested_mount_test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) tmpDir2, err := ioutil.TempDir("", "docker_nested_mount_test2") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir2) // Create a temporary tmpfs mount. fooDir := filepath.Join(tmpDir, "foo") if err := os.MkdirAll(filepath.Join(tmpDir, "foo"), 0755); err != nil { t.Fatalf("failed to mkdir at %s - %s", fooDir, err) } if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", fooDir), []byte{}, 0644); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir), []byte{}, 0644); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir2), []byte{}, 0644); err != nil { t.Fatal(err) } cmd := exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp", tmpDir), "-v", fmt.Sprintf("%s:/tmp/foo", fooDir), "-v", fmt.Sprintf("%s:/tmp/tmp2", tmpDir2), "-v", fmt.Sprintf("%s:/tmp/tmp2/foo", fooDir), "busybox:latest", "sh", "-c", "ls /tmp/touch-me && ls /tmp/foo/touch-me && ls /tmp/tmp2/touch-me && ls /tmp/tmp2/foo/touch-me") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(out, err) } logDone("run - volumes are mounted in the correct order") } // Regression test for https://github.com/docker/docker/issues/8259 func TestRunReuseBindVolumeThatIsSymlink(t *testing.T) { defer deleteAllContainers() testRequires(t, SameHostDaemon) tmpDir, err := ioutil.TempDir(os.TempDir(), "testlink") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) linkPath := os.TempDir() + "/testlink2" if err := os.Symlink(tmpDir, linkPath); err != nil { t.Fatal(err) } defer os.RemoveAll(linkPath) // Create first container cmd := exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp/test", linkPath), "busybox", "ls", "-lh", "/tmp/test") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } // Create second container with same symlinked path // This will fail if the referenced issue is hit with a "Volume exists" error cmd = exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp/test", linkPath), "busybox", "ls", "-lh", "/tmp/test") if out, _, err := runCommandWithOutput(cmd); err != nil { t.Fatal(err, out) } logDone("run - can remount old bindmount volume") } //GH#10604: Test an "/etc" volume doesn't overlay special bind mounts in container func TestRunCreateVolumeEtc(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "-v", "/etc", "busybox", "cat", "/etc/resolv.conf") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal("failed to run container: %v, output: %q", err, out) } if !strings.Contains(out, "nameserver 127.0.0.1") { t.Fatal("/etc volume mount hides 
/etc/resolv.conf") } cmd = exec.Command(dockerBinary, "run", "-h=test123", "-v", "/etc", "busybox", "cat", "/etc/hostname") out, _, err = runCommandWithOutput(cmd) if err != nil { t.Fatal("failed to run container: %v, output: %q", err, out) } if !strings.Contains(out, "test123") { t.Fatal("/etc volume mount hides /etc/hostname") } cmd = exec.Command(dockerBinary, "run", "--add-host=test:192.168.0.1", "-v", "/etc", "busybox", "cat", "/etc/hosts") out, _, err = runCommandWithOutput(cmd) if err != nil { t.Fatal("failed to run container: %v, output: %q", err, out) } out = strings.Replace(out, "\n", " ", -1) if !strings.Contains(out, "192.168.0.1\ttest") || !strings.Contains(out, "127.0.0.1\tlocalhost") { t.Fatal("/etc volume mount hides /etc/hosts") } logDone("run - verify /etc volume doesn't hide special bind mounts") } func TestVolumesNoCopyData(t *testing.T) { defer deleteImages("dataimage") defer deleteAllContainers() if _, err := buildImage("dataimage", `FROM busybox RUN mkdir -p /foo RUN touch /foo/bar`, true); err != nil { t.Fatal(err) } cmd := exec.Command(dockerBinary, "run", "--name", "test", "-v", "/foo", "busybox") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } cmd = exec.Command(dockerBinary, "run", "--volumes-from", "test", "dataimage", "ls", "-lh", "/foo/bar") if out, _, err := runCommandWithOutput(cmd); err == nil || !strings.Contains(out, "No such file or directory") { t.Fatalf("Data was copied on volumes-from but shouldn't be:\n%q", out) } tmpDir := randomUnixTmpDirPath("docker_test_bind_mount_copy_data") cmd = exec.Command(dockerBinary, "run", "-v", tmpDir+":/foo", "dataimage", "ls", "-lh", "/foo/bar") if out, _, err := runCommandWithOutput(cmd); err == nil || !strings.Contains(out, "No such file or directory") { t.Fatalf("Data was copied on bind-mount but shouldn't be:\n%q", out) } logDone("run - volumes do not copy data for volumes-from and bindmounts") } func TestRunVolumesNotRecreatedOnStart(t *testing.T) { testRequires(t, SameHostDaemon) // Clear out any remnants from other tests deleteAllContainers() info, err := ioutil.ReadDir(volumesConfigPath) if err != nil { t.Fatal(err) } if len(info) > 0 { for _, f := range info { if err := os.RemoveAll(volumesConfigPath + "/" + f.Name()); err != nil { t.Fatal(err) } } } defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-v", "/foo", "--name", "lone_starr", "busybox") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } cmd = exec.Command(dockerBinary, "start", "lone_starr") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } info, err = ioutil.ReadDir(volumesConfigPath) if err != nil { t.Fatal(err) } if len(info) != 1 { t.Fatalf("Expected only 1 volume have %v", len(info)) } logDone("run - volumes not recreated on start") } func TestRunNoOutputFromPullInStdout(t *testing.T) { defer deleteAllContainers() // just run with unknown image cmd := exec.Command(dockerBinary, "run", "asdfsg") stdout := bytes.NewBuffer(nil) cmd.Stdout = stdout if err := cmd.Run(); err == nil { t.Fatal("Run with unknown image should fail") } if stdout.Len() != 0 { t.Fatalf("Stdout contains output from pull: %s", stdout) } logDone("run - no output from pull in stdout") } func TestRunVolumesCleanPaths(t *testing.T) { if _, err := buildImage("run_volumes_clean_paths", `FROM busybox VOLUME /foo/`, true); err != nil { t.Fatal(err) } defer deleteImages("run_volumes_clean_paths") defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-v", "/foo", "-v", "/bar/", "--name", "dark_helmet", 
"run_volumes_clean_paths") if out, _, err := runCommandWithOutput(cmd); err != nil { t.Fatal(err, out) } out, err := inspectFieldMap("dark_helmet", "Volumes", "/foo/") if err != nil { t.Fatal(err) } if out != "" { t.Fatalf("Found unexpected volume entry for '/foo/' in volumes\n%q", out) } out, err = inspectFieldMap("dark_helmet", "Volumes", "/foo") if err != nil { t.Fatal(err) } if !strings.Contains(out, volumesStoragePath) { t.Fatalf("Volume was not defined for /foo\n%q", out) } out, err = inspectFieldMap("dark_helmet", "Volumes", "/bar/") if err != nil { t.Fatal(err) } if out != "" { t.Fatalf("Found unexpected volume entry for '/bar/' in volumes\n%q", out) } out, err = inspectFieldMap("dark_helmet", "Volumes", "/bar") if err != nil { t.Fatal(err) } if !strings.Contains(out, volumesStoragePath) { t.Fatalf("Volume was not defined for /bar\n%q", out) } logDone("run - volume paths are cleaned") } // Regression test for #3631 func TestRunSlowStdoutConsumer(t *testing.T) { defer deleteAllContainers() c := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "dd if=/dev/zero of=/dev/stdout bs=1024 count=2000 | catv") stdout, err := c.StdoutPipe() if err != nil { t.Fatal(err) } if err := c.Start(); err != nil { t.Fatal(err) } n, err := consumeWithSpeed(stdout, 10000, 5*time.Millisecond, nil) if err != nil { t.Fatal(err) } expected := 2 * 1024 * 2000 if n != expected { t.Fatalf("Expected %d, got %d", expected, n) } logDone("run - slow consumer") } func TestRunAllowPortRangeThroughExpose(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-d", "--expose", "3000-3003", "-P", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err) } id := strings.TrimSpace(out) portstr, err := inspectFieldJSON(id, "NetworkSettings.Ports") if err != nil { t.Fatal(err) } var ports nat.PortMap err = unmarshalJSON([]byte(portstr), &ports) for port, binding := range ports { portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0]) if portnum < 3000 || portnum > 3003 { t.Fatalf("Port is out of range ", portnum, binding, out) } if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 { t.Fatal("Port is not mapped for the port "+port, out) } } if err := deleteContainer(id); err != nil { t.Fatal(err) } logDone("run - allow port range through --expose flag") } // test docker run expose a invalid port func TestRunExposePort(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "--expose", "80000", "busybox") out, _, err := runCommandWithOutput(runCmd) //expose a invalid port should with a error out if err == nil || !strings.Contains(out, "Invalid range format for --expose") { t.Fatalf("run --expose a invalid port should with error out") } logDone("run - can't expose a invalid port") } func TestRunUnknownCommand(t *testing.T) { testRequires(t, NativeExecDriver) defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "create", "busybox", "/bin/nada") cID, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("Failed to create container: %v, output: %q", err, cID) } cID = strings.TrimSpace(cID) runCmd = exec.Command(dockerBinary, "start", cID) _, _, _, _ = runCommandWithStdoutStderr(runCmd) runCmd = exec.Command(dockerBinary, "inspect", "--format={{.State.ExitCode}}", cID) rc, _, _, err2 := runCommandWithStdoutStderr(runCmd) rc = strings.TrimSpace(rc) if err2 != nil { t.Fatalf("Error getting status of container: %v", err2) } if rc == "0" { t.Fatalf("ExitCode(%v) 
cannot be 0", rc) } logDone("run - Unknown Command") } func TestRunModeIpcHost(t *testing.T) { testRequires(t, SameHostDaemon) defer deleteAllContainers() hostIpc, err := os.Readlink("/proc/1/ns/ipc") if err != nil { t.Fatal(err) } cmd := exec.Command(dockerBinary, "run", "--ipc=host", "busybox", "readlink", "/proc/self/ns/ipc") out2, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out2) } out2 = strings.Trim(out2, "\n") if hostIpc != out2 { t.Fatalf("IPC different with --ipc=host %s != %s\n", hostIpc, out2) } cmd = exec.Command(dockerBinary, "run", "busybox", "readlink", "/proc/self/ns/ipc") out2, _, err = runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out2) } out2 = strings.Trim(out2, "\n") if hostIpc == out2 { t.Fatalf("IPC should be different without --ipc=host %s == %s\n", hostIpc, out2) } logDone("run - ipc host mode") } func TestRunModeIpcContainer(t *testing.T) { defer deleteAllContainers() testRequires(t, SameHostDaemon) cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } id := strings.TrimSpace(out) state, err := inspectField(id, "State.Running") if err != nil { t.Fatal(err) } if state != "true" { t.Fatal("Container state is 'not running'") } pid1, err := inspectField(id, "State.Pid") if err != nil { t.Fatal(err) } parentContainerIpc, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/ipc", pid1)) if err != nil { t.Fatal(err) } cmd = exec.Command(dockerBinary, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "readlink", "/proc/self/ns/ipc") out2, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out2) } out2 = strings.Trim(out2, "\n") if parentContainerIpc != out2 { t.Fatalf("IPC different with --ipc=container:%s %s != %s\n", id, parentContainerIpc, out2) } logDone("run - ipc container mode") } func TestContainerNetworkMode(t *testing.T) { defer deleteAllContainers() testRequires(t, SameHostDaemon) cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } id := strings.TrimSpace(out) if err := waitRun(id); err != nil { t.Fatal(err) } pid1, err := inspectField(id, "State.Pid") if err != nil { t.Fatal(err) } parentContainerNet, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1)) if err != nil { t.Fatal(err) } cmd = exec.Command(dockerBinary, "run", fmt.Sprintf("--net=container:%s", id), "busybox", "readlink", "/proc/self/ns/net") out2, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out2) } out2 = strings.Trim(out2, "\n") if parentContainerNet != out2 { t.Fatalf("NET different with --net=container:%s %s != %s\n", id, parentContainerNet, out2) } logDone("run - container shared network namespace") } func TestRunModePidHost(t *testing.T) { testRequires(t, NativeExecDriver, SameHostDaemon) defer deleteAllContainers() hostPid, err := os.Readlink("/proc/1/ns/pid") if err != nil { t.Fatal(err) } cmd := exec.Command(dockerBinary, "run", "--pid=host", "busybox", "readlink", "/proc/self/ns/pid") out2, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out2) } out2 = strings.Trim(out2, "\n") if hostPid != out2 { t.Fatalf("PID different with --pid=host %s != %s\n", hostPid, out2) } cmd = exec.Command(dockerBinary, "run", "busybox", "readlink", "/proc/self/ns/pid") out2, _, err = runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out2) } out2 = strings.Trim(out2, "\n") if hostPid == out2 { t.Fatalf("PID should be different without --pid=host %s 
== %s\n", hostPid, out2) } logDone("run - pid host mode") } func TestRunTLSverify(t *testing.T) { cmd := exec.Command(dockerBinary, "ps") out, ec, err := runCommandWithOutput(cmd) if err != nil || ec != 0 { t.Fatalf("Should have worked: %v:\n%v", err, out) } // Regardless of whether we specify true or false we need to // test to make sure tls is turned on if --tlsverify is specified at all cmd = exec.Command(dockerBinary, "--tlsverify=false", "ps") out, ec, err = runCommandWithOutput(cmd) if err == nil || ec == 0 || !strings.Contains(out, "trying to connect") { t.Fatalf("Should have failed: \nec:%v\nout:%v\nerr:%v", ec, out, err) } cmd = exec.Command(dockerBinary, "--tlsverify=true", "ps") out, ec, err = runCommandWithOutput(cmd) if err == nil || ec == 0 || !strings.Contains(out, "cert") { t.Fatalf("Should have failed: \nec:%v\nout:%v\nerr:%v", ec, out, err) } logDone("run - verify tls is set for --tlsverify") } func TestRunPortFromDockerRangeInUse(t *testing.T) { defer deleteAllContainers() // first find allocator current position cmd := exec.Command(dockerBinary, "run", "-d", "-p", ":80", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(out, err) } id := strings.TrimSpace(out) cmd = exec.Command(dockerBinary, "port", id) out, _, err = runCommandWithOutput(cmd) if err != nil { t.Fatal(out, err) } out = strings.TrimSpace(out) if out == "" { t.Fatal("docker port command output is empty") } out = strings.Split(out, ":")[1] lastPort, err := strconv.Atoi(out) if err != nil { t.Fatal(err) } port := lastPort + 1 l, err := net.Listen("tcp", ":"+strconv.Itoa(port)) if err != nil { t.Fatal(err) } defer l.Close() cmd = exec.Command(dockerBinary, "run", "-d", "-p", ":80", "busybox", "top") out, _, err = runCommandWithOutput(cmd) if err != nil { t.Fatalf(out, err) } id = strings.TrimSpace(out) cmd = exec.Command(dockerBinary, "port", id) out, _, err = runCommandWithOutput(cmd) if err != nil { t.Fatal(out, err) } logDone("run - find another port if port from autorange already bound") } func TestRunTtyWithPipe(t *testing.T) { defer deleteAllContainers() done := make(chan struct{}) go func() { defer close(done) cmd := exec.Command(dockerBinary, "run", "-ti", "busybox", "true") if _, err := cmd.StdinPipe(); err != nil { t.Fatal(err) } expected := "cannot enable tty mode" if out, _, err := runCommandWithOutput(cmd); err == nil { t.Fatal("run should have failed") } else if !strings.Contains(out, expected) { t.Fatalf("run failed with error %q: expected %q", out, expected) } }() select { case <-done: case <-time.After(3 * time.Second): t.Fatal("container is running but should have failed") } logDone("run - forbid piped stdin with tty") } func TestRunNonLocalMacAddress(t *testing.T) { defer deleteAllContainers() addr := "00:16:3E:08:00:50" cmd := exec.Command(dockerBinary, "run", "--mac-address", addr, "busybox", "ifconfig") if out, _, err := runCommandWithOutput(cmd); err != nil || !strings.Contains(out, addr) { t.Fatalf("Output should have contained %q: %s, %v", addr, out, err) } logDone("run - use non-local mac-address") } func TestRunNetHost(t *testing.T) { testRequires(t, SameHostDaemon) defer deleteAllContainers() hostNet, err := os.Readlink("/proc/1/ns/net") if err != nil { t.Fatal(err) } cmd := exec.Command(dockerBinary, "run", "--net=host", "busybox", "readlink", "/proc/self/ns/net") out2, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out2) } out2 = strings.Trim(out2, "\n") if hostNet != out2 { t.Fatalf("Net namespace different with --net=host %s != 
%s\n", hostNet, out2) } cmd = exec.Command(dockerBinary, "run", "busybox", "readlink", "/proc/self/ns/net") out2, _, err = runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out2) } out2 = strings.Trim(out2, "\n") if hostNet == out2 { t.Fatalf("Net namespace should be different without --net=host %s == %s\n", hostNet, out2) } logDone("run - net host mode") } func TestRunNetContainerWhichHost(t *testing.T) { testRequires(t, SameHostDaemon) defer deleteAllContainers() hostNet, err := os.Readlink("/proc/1/ns/net") if err != nil { t.Fatal(err) } cmd := exec.Command(dockerBinary, "run", "-d", "--net=host", "--name=test", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } cmd = exec.Command(dockerBinary, "run", "--net=container:test", "busybox", "readlink", "/proc/self/ns/net") out, _, err = runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } out = strings.Trim(out, "\n") if hostNet != out { t.Fatalf("Container should have host network namespace") } logDone("run - net container mode, where container in host mode") } func TestRunAllowPortRangeThroughPublish(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-d", "--expose", "3000-3003", "-p", "3000-3003", "busybox", "top") out, _, err := runCommandWithOutput(cmd) id := strings.TrimSpace(out) portstr, err := inspectFieldJSON(id, "NetworkSettings.Ports") if err != nil { t.Fatal(err) } var ports nat.PortMap err = unmarshalJSON([]byte(portstr), &ports) for port, binding := range ports { portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0]) if portnum < 3000 || portnum > 3003 { t.Fatalf("Port is out of range ", portnum, binding, out) } if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 { t.Fatal("Port is not mapped for the port "+port, out) } } logDone("run - allow port range through --expose flag") } func TestRunOOMExitCode(t *testing.T) { defer deleteAllContainers() done := make(chan struct{}) go func() { defer close(done) runCmd := exec.Command(dockerBinary, "run", "-m", "4MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x; done") out, exitCode, _ := runCommandWithOutput(runCmd) if expected := 137; exitCode != expected { t.Fatalf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out) } }() select { case <-done: case <-time.After(3 * time.Second): t.Fatal("Timeout waiting for container to die on OOM") } logDone("run - exit code on oom") } func TestRunSetDefaultRestartPolicy(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "test", "busybox", "top") if out, _, err := runCommandWithOutput(runCmd); err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) } cmd := exec.Command(dockerBinary, "inspect", "-f", "{{.HostConfig.RestartPolicy.Name}}", "test") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatalf("failed to inspect container: %v, output: %q", err, out) } out = strings.Trim(out, "\r\n") if out != "no" { t.Fatalf("Set default restart policy failed") } logDone("run - set default restart policy success") } func TestRunRestartMaxRetries(t *testing.T) { defer deleteAllContainers() out, err := exec.Command(dockerBinary, "run", "-d", "--restart=on-failure:3", "busybox", "false").CombinedOutput() if err != nil { t.Fatal(string(out), err) } id := strings.TrimSpace(string(out)) if err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", 5); err != nil { t.Fatal(err) } count, 
err := inspectField(id, "RestartCount") if err != nil { t.Fatal(err) } if count != "3" { t.Fatalf("Container was restarted %s times, expected %d", count, 3) } MaximumRetryCount, err := inspectField(id, "HostConfig.RestartPolicy.MaximumRetryCount") if err != nil { t.Fatal(err) } if MaximumRetryCount != "3" { t.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "3") } logDone("run - test max-retries for --restart") } func TestRunContainerWithWritableRootfs(t *testing.T) { defer deleteAllContainers() out, err := exec.Command(dockerBinary, "run", "--rm", "busybox", "touch", "/file").CombinedOutput() if err != nil { t.Fatal(string(out), err) } logDone("run - writable rootfs") } func TestRunContainerWithReadonlyRootfs(t *testing.T) { testRequires(t, NativeExecDriver) defer deleteAllContainers() out, err := exec.Command(dockerBinary, "run", "--read-only", "--rm", "busybox", "touch", "/file").CombinedOutput() if err == nil { t.Fatal("expected container to error on run with read only error") } expected := "Read-only file system" if !strings.Contains(string(out), expected) { t.Fatalf("expected output from failure to contain %s but contains %s", expected, out) } logDone("run - read only rootfs") } func TestRunVolumesFromRestartAfterRemoved(t *testing.T) { defer deleteAllContainers() out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "voltest", "-v", "/foo", "busybox")) if err != nil { t.Fatal(out, err) } out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "restarter", "--volumes-from", "voltest", "busybox", "top")) if err != nil { t.Fatal(out, err) } // Remove the main volume container and restart the consuming container out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "rm", "-f", "voltest")) if err != nil { t.Fatal(out, err) } // This should not fail since the volumes-from were already applied out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "restart", "restarter")) if err != nil { t.Fatalf("expected container to restart successfully: %v\n%s", err, out) } logDone("run - can restart a volumes-from container after producer is removed") } func TestRunPidHostWithChildIsKillable(t *testing.T) { defer deleteAllContainers() name := "ibuildthecloud" if out, err := exec.Command(dockerBinary, "run", "-d", "--pid=host", "--name", name, "busybox", "sh", "-c", "sleep 30; echo hi").CombinedOutput(); err != nil { t.Fatal(err, out) } time.Sleep(1 * time.Second) errchan := make(chan error) go func() { if out, err := exec.Command(dockerBinary, "kill", name).CombinedOutput(); err != nil { errchan <- fmt.Errorf("%v:\n%s", err, out) } close(errchan) }() select { case err := <-errchan: if err != nil { t.Fatal(err) } case <-time.After(5 * time.Second): t.Fatal("Kill container timed out") } logDone("run - can kill container with pid-host and some childs of pid 1") } func TestRunWithTooSmallMemoryLimit(t *testing.T) { defer deleteAllContainers() // this memory limit is 1 byte less than the min, which is 4MB // https://github.com/docker/docker/blob/v1.5.0/daemon/create.go#L22 out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-m", "4194303", "busybox")) if err == nil || !strings.Contains(out, "Minimum memory limit allowed is 4MB") { t.Fatalf("expected run to fail when using too low a memory limit: %q", out) } logDone("run - can't set too low memory limit") } func TestRunWriteToProcAsound(t *testing.T) { defer deleteAllContainers() code, err := runCommand(exec.Command(dockerBinary, 
"run", "busybox", "sh", "-c", "echo 111 >> /proc/asound/version")) if err == nil || code == 0 { t.Fatal("standard container should not be able to write to /proc/asound") } logDone("run - ro write to /proc/asound") } func TestRunReadProcTimer(t *testing.T) { defer deleteAllContainers() out, code, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "busybox", "cat", "/proc/timer_stats")) if err != nil || code != 0 { t.Fatal(err) } if strings.Trim(out, "\n ") != "" { t.Fatalf("expected to receive no output from /proc/timer_stats but received %q", out) } logDone("run - read /proc/timer_stats") } func TestRunReadProcLatency(t *testing.T) { // some kernels don't have this configured so skip the test if this file is not found // on the host running the tests. if _, err := os.Stat("/proc/latency_stats"); err != nil { t.Skip() return } defer deleteAllContainers() out, code, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "busybox", "cat", "/proc/latency_stats")) if err != nil || code != 0 { t.Fatal(err) } if strings.Trim(out, "\n ") != "" { t.Fatalf("expected to receive no output from /proc/latency_stats but received %q", out) } logDone("run - read /proc/latency_stats") } func TestMountIntoProc(t *testing.T) { defer deleteAllContainers() code, err := runCommand(exec.Command(dockerBinary, "run", "-v", "/proc//sys", "busybox", "true")) if err == nil || code == 0 { t.Fatal("container should not be able to mount into /proc") } logDone("run - mount into proc") } func TestMountIntoSys(t *testing.T) { defer deleteAllContainers() _, err := runCommand(exec.Command(dockerBinary, "run", "-v", "/sys/fs/cgroup", "busybox", "true")) if err != nil { t.Fatal("container should be able to mount into /sys") } logDone("run - mount into sys") } docker-1.6.2/integration-cli/docker_utils.go0000644000175000017500000007234012524223634020450 0ustar tianontianonpackage main import ( "bytes" "encoding/json" "errors" "fmt" "io" "io/ioutil" "net" "net/http" "net/http/httptest" "net/http/httputil" "net/url" "os" "os/exec" "path" "path/filepath" "strconv" "strings" "testing" "time" "github.com/docker/docker/api" ) // Daemon represents a Docker daemon for the testing framework. type Daemon struct { t *testing.T logFile *os.File folder string stdin io.WriteCloser stdout, stderr io.ReadCloser cmd *exec.Cmd storageDriver string execDriver string wait chan error } // NewDaemon returns a Daemon instance to be used for testing. // This will create a directory such as daemon123456789 in the folder specified by $DEST. // The daemon will not automatically start. func NewDaemon(t *testing.T) *Daemon { dest := os.Getenv("DEST") if dest == "" { t.Fatal("Please set the DEST environment variable") } dir := filepath.Join(dest, fmt.Sprintf("daemon%d", time.Now().UnixNano()%100000000)) daemonFolder, err := filepath.Abs(dir) if err != nil { t.Fatalf("Could not make %q an absolute path: %v", dir, err) } if err := os.MkdirAll(filepath.Join(daemonFolder, "graph"), 0600); err != nil { t.Fatalf("Could not create %s/graph directory", daemonFolder) } return &Daemon{ t: t, folder: daemonFolder, storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"), execDriver: os.Getenv("DOCKER_EXECDRIVER"), } } // Start will start the daemon and return once it is ready to receive requests. // You can specify additional daemon flags. 
func (d *Daemon) Start(arg ...string) error { dockerBinary, err := exec.LookPath(dockerBinary) if err != nil { d.t.Fatalf("could not find docker binary in $PATH: %v", err) } args := []string{ "--host", d.sock(), "--daemon", "--graph", fmt.Sprintf("%s/graph", d.folder), "--pidfile", fmt.Sprintf("%s/docker.pid", d.folder), } // If we don't explicitly set the log-level or debug flag (-D) then // turn on debug mode foundIt := false for _, a := range arg { if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") { foundIt = true } } if !foundIt { args = append(args, "--debug") } if d.storageDriver != "" { args = append(args, "--storage-driver", d.storageDriver) } if d.execDriver != "" { args = append(args, "--exec-driver", d.execDriver) } args = append(args, arg...) d.cmd = exec.Command(dockerBinary, args...) d.logFile, err = os.OpenFile(filepath.Join(d.folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) if err != nil { d.t.Fatalf("Could not create %s/docker.log: %v", d.folder, err) } d.cmd.Stdout = d.logFile d.cmd.Stderr = d.logFile if err := d.cmd.Start(); err != nil { return fmt.Errorf("could not start the daemon: %v", err) } wait := make(chan error) go func() { wait <- d.cmd.Wait() d.t.Log("exiting daemon") close(wait) }() d.wait = wait tick := time.Tick(500 * time.Millisecond) // make sure daemon is ready to receive requests startTime := time.Now().Unix() for { d.t.Log("waiting for daemon to start") if time.Now().Unix()-startTime > 5 { // After 5 seconds, give up return errors.New("Daemon exited and never started") } select { case <-time.After(2 * time.Second): return errors.New("timeout: daemon does not respond") case <-tick: c, err := net.Dial("unix", filepath.Join(d.folder, "docker.sock")) if err != nil { continue } client := httputil.NewClientConn(c, nil) defer client.Close() req, err := http.NewRequest("GET", "/_ping", nil) if err != nil { d.t.Fatalf("could not create new request: %v", err) } resp, err := client.Do(req) if err != nil { continue } if resp.StatusCode != http.StatusOK { d.t.Logf("received status != 200 OK: %s", resp.Status) } d.t.Log("daemon started") return nil } } } // StartWithBusybox will first start the daemon with Daemon.Start() // then save the busybox image from the main daemon and load it into this Daemon instance. func (d *Daemon) StartWithBusybox(arg ...string) error { if err := d.Start(arg...); err != nil { return err } bb := filepath.Join(d.folder, "busybox.tar") if _, err := os.Stat(bb); err != nil { if !os.IsNotExist(err) { return fmt.Errorf("unexpected error on busybox.tar stat: %v", err) } // saving busybox image from main daemon if err := exec.Command(dockerBinary, "save", "--output", bb, "busybox:latest").Run(); err != nil { return fmt.Errorf("could not save busybox image: %v", err) } } // loading busybox image into this daemon if _, err := d.Cmd("load", "--input", bb); err != nil { return fmt.Errorf("could not load busybox image: %v", err) } if err := os.Remove(bb); err != nil { d.t.Logf("Could not remove %s: %v", bb, err) } return nil } // Stop will send a SIGINT every second and wait for the daemon to stop. // If it times out, a SIGKILL is sent. // Stop will not delete the daemon directory. If a purged daemon is needed, // instantiate a new one with NewDaemon. 
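//
// The shutdown sequence implemented below, summarized (the timings are the
// ones hard-coded in Stop, not configurable): one SIGINT, wait up to 15
// seconds, then up to three more SIGINTs at one-second intervals, then SIGKILL.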
func (d *Daemon) Stop() error { if d.cmd == nil || d.wait == nil { return errors.New("daemon not started") } defer func() { d.logFile.Close() d.cmd = nil }() i := 1 tick := time.Tick(time.Second) if err := d.cmd.Process.Signal(os.Interrupt); err != nil { return fmt.Errorf("could not send signal: %v", err) } out1: for { select { case err := <-d.wait: return err case <-time.After(15 * time.Second): // allow time for stopping jobs and running onShutdown hooks d.t.Log("timeout") break out1 } } out2: for { select { case err := <-d.wait: return err case <-tick: i++ if i > 4 { d.t.Logf("tried to interrupt the daemon %d times; now trying to kill it", i) break out2 } d.t.Logf("Attempt #%d: daemon is still running with pid %d", i, d.cmd.Process.Pid) if err := d.cmd.Process.Signal(os.Interrupt); err != nil { return fmt.Errorf("could not send signal: %v", err) } } } if err := d.cmd.Process.Kill(); err != nil { d.t.Logf("Could not kill daemon: %v", err) return err } return nil } // Restart will restart the daemon by first stopping it and then starting it. func (d *Daemon) Restart(arg ...string) error { d.Stop() return d.Start(arg...) } func (d *Daemon) sock() string { return fmt.Sprintf("unix://%s/docker.sock", d.folder) } // Cmd will execute a docker CLI command against this Daemon. // Example: d.Cmd("version") will run docker -H unix://path/to/unix.sock version func (d *Daemon) Cmd(name string, arg ...string) (string, error) { args := []string{"--host", d.sock(), name} args = append(args, arg...) c := exec.Command(dockerBinary, args...) b, err := c.CombinedOutput() return string(b), err } func (d *Daemon) LogfileName() string { return d.logFile.Name() } func daemonHost() string { daemonUrlStr := "unix://" + api.DEFAULTUNIXSOCKET if daemonHostVar := os.Getenv("DOCKER_HOST"); daemonHostVar != "" { daemonUrlStr = daemonHostVar } return daemonUrlStr } func sockConn(timeout time.Duration) (net.Conn, error) { daemon := daemonHost() daemonUrl, err := url.Parse(daemon) if err != nil { return nil, fmt.Errorf("could not parse url %q: %v", daemon, err) } var c net.Conn switch daemonUrl.Scheme { case "unix": return net.DialTimeout(daemonUrl.Scheme, daemonUrl.Path, timeout) case "tcp": return net.DialTimeout(daemonUrl.Scheme, daemonUrl.Host, timeout) default: return c, fmt.Errorf("unknown scheme %v (%s)", daemonUrl.Scheme, daemon) } } func sockRequest(method, endpoint string, data interface{}) ([]byte, error) { jsonData := bytes.NewBuffer(nil) if err := json.NewEncoder(jsonData).Encode(data); err != nil { return nil, err } return sockRequestRaw(method, endpoint, jsonData, "application/json") } func sockRequestRaw(method, endpoint string, data io.Reader, ct string) ([]byte, error) { c, err := sockConn(time.Duration(10 * time.Second)) if err != nil { return nil, fmt.Errorf("could not dial docker daemon: %v", err) } client := httputil.NewClientConn(c, nil) defer client.Close() req, err := http.NewRequest(method, endpoint, data) if err != nil { return nil, fmt.Errorf("could not create new request: %v", err) } if ct == "" { ct = "application/json" } req.Header.Set("Content-Type", ct) resp, err := client.Do(req) if err != nil { return nil, fmt.Errorf("could not perform request: %v", err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { body, _ := ioutil.ReadAll(resp.Body) return body, fmt.Errorf("received status != 200 OK: %s", resp.Status) } return ioutil.ReadAll(resp.Body) } func deleteContainer(container string) error { container = strings.Replace(container, "\n", " ", -1) container = strings.Trim(container, " ") 
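// Kill the container(s) first so `docker rm` does not race a still-running
// container; the kill result is deliberately ignored below because the
// containers may already have exited.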
killArgs := fmt.Sprintf("kill %v", container) killSplitArgs := strings.Split(killArgs, " ") killCmd := exec.Command(dockerBinary, killSplitArgs...) runCommand(killCmd) rmArgs := fmt.Sprintf("rm -v %v", container) rmSplitArgs := strings.Split(rmArgs, " ") rmCmd := exec.Command(dockerBinary, rmSplitArgs...) exitCode, err := runCommand(rmCmd) // set error manually if not set if exitCode != 0 && err == nil { err = fmt.Errorf("failed to remove container: `docker rm` exit is non-zero") } return err } func getAllContainers() (string, error) { getContainersCmd := exec.Command(dockerBinary, "ps", "-q", "-a") out, exitCode, err := runCommandWithOutput(getContainersCmd) if exitCode != 0 && err == nil { err = fmt.Errorf("failed to get a list of containers: %v", out) } return out, err } func deleteAllContainers() error { containers, err := getAllContainers() if err != nil { fmt.Println(containers) return err } if err = deleteContainer(containers); err != nil { return err } return nil } func getPausedContainers() (string, error) { getPausedContainersCmd := exec.Command(dockerBinary, "ps", "-f", "status=paused", "-q", "-a") out, exitCode, err := runCommandWithOutput(getPausedContainersCmd) if exitCode != 0 && err == nil { err = fmt.Errorf("failed to get a list of paused containers: %v", out) } return out, err } func getSliceOfPausedContainers() ([]string, error) { out, err := getPausedContainers() if err == nil { slice := strings.Split(strings.TrimSpace(out), "\n") return slice, err } else { return []string{out}, err } } func unpauseContainer(container string) error { unpauseCmd := exec.Command(dockerBinary, "unpause", container) exitCode, err := runCommand(unpauseCmd) if exitCode != 0 && err == nil { err = fmt.Errorf("failed to unpause container") } return err } func unpauseAllContainers() error { containers, err := getPausedContainers() if err != nil { fmt.Println(containers) return err } containers = strings.Replace(containers, "\n", " ", -1) containers = strings.Trim(containers, " ") containerList := strings.Split(containers, " ") for _, value := range containerList { if err = unpauseContainer(value); err != nil { return err } } return nil } func deleteImages(images ...string) error { args := make([]string, 1, 2) args[0] = "rmi" args = append(args, images...) rmiCmd := exec.Command(dockerBinary, args...) 
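// `docker rmi` reports failure only through its exit code here, so a non-zero
// code is turned into an error below even when exec itself reported none.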
exitCode, err := runCommand(rmiCmd) // set error manually if not set if exitCode != 0 && err == nil { err = fmt.Errorf("failed to remove image: `docker rmi` exit is non-zero") } return err } func imageExists(image string) error { inspectCmd := exec.Command(dockerBinary, "inspect", image) exitCode, err := runCommand(inspectCmd) if exitCode != 0 && err == nil { err = fmt.Errorf("couldn't find image %q", image) } return err } func pullImageIfNotExist(image string) error { if err := imageExists(image); err != nil { pullCmd := exec.Command(dockerBinary, "pull", image) if _, exitCode, err := runCommandWithOutput(pullCmd); err != nil || exitCode != 0 { return fmt.Errorf("image %q wasn't found locally and it couldn't be pulled: %s", image, err) } } return nil } func dockerCmd(t *testing.T, args ...string) (string, int, error) { out, status, err := runCommandWithOutput(exec.Command(dockerBinary, args...)) if err != nil { t.Fatalf("%q failed with errors: %s, %v", strings.Join(args, " "), out, err) } return out, status, err } // execute a docker command with a timeout func dockerCmdWithTimeout(timeout time.Duration, args ...string) (string, int, error) { out, status, err := runCommandWithOutputAndTimeout(exec.Command(dockerBinary, args...), timeout) if err != nil { return out, status, fmt.Errorf("%q failed with errors: %v : %q", strings.Join(args, " "), err, out) } return out, status, err } // execute a docker command in a directory func dockerCmdInDir(t *testing.T, path string, args ...string) (string, int, error) { dockerCommand := exec.Command(dockerBinary, args...) dockerCommand.Dir = path out, status, err := runCommandWithOutput(dockerCommand) if err != nil { return out, status, fmt.Errorf("%q failed with errors: %v : %q", strings.Join(args, " "), err, out) } return out, status, err } // execute a docker command in a directory with a timeout func dockerCmdInDirWithTimeout(timeout time.Duration, path string, args ...string) (string, int, error) { dockerCommand := exec.Command(dockerBinary, args...) dockerCommand.Dir = path out, status, err := runCommandWithOutputAndTimeout(dockerCommand, timeout) if err != nil { return out, status, fmt.Errorf("%q failed with errors: %v : %q", strings.Join(args, " "), err, out) } return out, status, err } func findContainerIP(t *testing.T, id string) string { cmd := exec.Command(dockerBinary, "inspect", "--format='{{ .NetworkSettings.IPAddress }}'", id) out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } return strings.Trim(out, " \r\n'") } func getContainerCount() (int, error) { const containers = "Containers:" cmd := exec.Command(dockerBinary, "info") out, _, err := runCommandWithOutput(cmd) if err != nil { return 0, err } lines := strings.Split(out, "\n") for _, line := range lines { if strings.Contains(line, containers) { output := stripTrailingCharacters(line) output = strings.TrimPrefix(output, containers) output = strings.Trim(output, " ") containerCount, err := strconv.Atoi(output) if err != nil { return 0, err } return containerCount, nil } } return 0, fmt.Errorf("couldn't find the Container count in the output") } type FakeContext struct { Dir string } func (f *FakeContext) Add(file, content string) error { filepath := path.Join(f.Dir, file) dirpath := path.Dir(filepath) if dirpath != "." 
{ if err := os.MkdirAll(dirpath, 0755); err != nil { return err } } return ioutil.WriteFile(filepath, []byte(content), 0644) } func (f *FakeContext) Delete(file string) error { filepath := path.Join(f.Dir, file) return os.RemoveAll(filepath) } func (f *FakeContext) Close() error { return os.RemoveAll(f.Dir) } func fakeContextFromDir(dir string) *FakeContext { return &FakeContext{dir} } func fakeContextWithFiles(files map[string]string) (*FakeContext, error) { tmp, err := ioutil.TempDir("", "fake-context") if err != nil { return nil, err } if err := os.Chmod(tmp, 0755); err != nil { return nil, err } ctx := fakeContextFromDir(tmp) for file, content := range files { if err := ctx.Add(file, content); err != nil { ctx.Close() return nil, err } } return ctx, nil } func fakeContextAddDockerfile(ctx *FakeContext, dockerfile string) error { if err := ctx.Add("Dockerfile", dockerfile); err != nil { ctx.Close() return err } return nil } func fakeContext(dockerfile string, files map[string]string) (*FakeContext, error) { ctx, err := fakeContextWithFiles(files) if err != nil { return nil, err } if err := fakeContextAddDockerfile(ctx, dockerfile); err != nil { return nil, err } return ctx, nil } // FakeStorage is a static file server. It might be running locally or remotely // on the test host. type FakeStorage interface { Close() error URL() string CtxDir() string } // fakeStorage returns either a local or remote (at daemon machine) file server func fakeStorage(files map[string]string) (FakeStorage, error) { ctx, err := fakeContextWithFiles(files) if err != nil { return nil, err } return fakeStorageWithContext(ctx) } // fakeStorageWithContext returns either a local or remote (at daemon machine) file server func fakeStorageWithContext(ctx *FakeContext) (FakeStorage, error) { if isLocalDaemon { return newLocalFakeStorage(ctx) } return newRemoteFileServer(ctx) } // localFileStorage is a file storage on the running machine type localFileStorage struct { *FakeContext *httptest.Server } func (s *localFileStorage) URL() string { return s.Server.URL } func (s *localFileStorage) CtxDir() string { return s.FakeContext.Dir } func (s *localFileStorage) Close() error { defer s.Server.Close() return s.FakeContext.Close() } func newLocalFakeStorage(ctx *FakeContext) (*localFileStorage, error) { handler := http.FileServer(http.Dir(ctx.Dir)) server := httptest.NewServer(handler) return &localFileStorage{ FakeContext: ctx, Server: server, }, nil } // remoteFileServer is a containerized static file server started on the remote // testing machine to be used in URL-accepting docker build functionality. type remoteFileServer struct { host string // hostname/port web server is listening to on docker host e.g. 0.0.0.0:43712 container string image string ctx *FakeContext } func (f *remoteFileServer) URL() string { u := url.URL{ Scheme: "http", Host: f.host} return u.String() } func (f *remoteFileServer) CtxDir() string { return f.ctx.Dir } func (f *remoteFileServer) Close() error { defer func() { if f.ctx != nil { f.ctx.Close() } if f.image != "" { deleteImages(f.image) } }() if f.container == "" { return nil } return deleteContainer(f.container) } func newRemoteFileServer(ctx *FakeContext) (*remoteFileServer, error) { var ( image = fmt.Sprintf("fileserver-img-%s", strings.ToLower(makeRandomString(10))) container = fmt.Sprintf("fileserver-cnt-%s", strings.ToLower(makeRandomString(10))) ) // Build the image if err := fakeContextAddDockerfile(ctx, `FROM httpserver
COPY . 
/static`); err != nil { return nil, fmt.Errorf("Cannot add Dockerfile to context: %v", err) } if _, err := buildImageFromContext(image, ctx, false); err != nil { return nil, fmt.Errorf("failed building file storage container image: %v", err) } // Start the container runCmd := exec.Command(dockerBinary, "run", "-d", "-P", "--name", container, image) if out, ec, err := runCommandWithOutput(runCmd); err != nil { return nil, fmt.Errorf("failed to start file storage container. ec=%v\nout=%s\nerr=%v", ec, out, err) } // Find out the system assigned port out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "port", container, "80/tcp")) if err != nil { return nil, fmt.Errorf("failed to find container port: err=%v\nout=%s", err, out) } return &remoteFileServer{ container: container, image: image, host: strings.Trim(out, "\n"), ctx: ctx}, nil } func inspectFieldAndMarshall(name, field string, output interface{}) error { str, err := inspectFieldJSON(name, field) if err != nil { return err } return json.Unmarshal([]byte(str), output) } func inspectFilter(name, filter string) (string, error) { format := fmt.Sprintf("{{%s}}", filter) inspectCmd := exec.Command(dockerBinary, "inspect", "-f", format, name) out, exitCode, err := runCommandWithOutput(inspectCmd) if err != nil || exitCode != 0 { return "", fmt.Errorf("failed to inspect container %s: %s", name, out) } return strings.TrimSpace(out), nil } func inspectField(name, field string) (string, error) { return inspectFilter(name, fmt.Sprintf(".%s", field)) } func inspectFieldJSON(name, field string) (string, error) { return inspectFilter(name, fmt.Sprintf("json .%s", field)) } func inspectFieldMap(name, path, field string) (string, error) { return inspectFilter(name, fmt.Sprintf("index .%s %q", path, field)) } func getIDByName(name string) (string, error) { return inspectField(name, "Id") } // getContainerState returns the exit code of the container // and true if it's running // the exit code should be ignored if it's running func getContainerState(t *testing.T, id string) (int, bool, error) { var ( exitStatus int running bool ) out, exitCode, err := dockerCmd(t, "inspect", "--format={{.State.Running}} {{.State.ExitCode}}", id) if err != nil || exitCode != 0 { return 0, false, fmt.Errorf("%q doesn't exist: %s", id, err) } out = strings.Trim(out, "\n") splitOutput := strings.Split(out, " ") if len(splitOutput) != 2 { return 0, false, fmt.Errorf("failed to get container state: output is broken") } if splitOutput[0] == "true" { running = true } if n, err := strconv.Atoi(splitOutput[1]); err == nil { exitStatus = n } else { return 0, false, fmt.Errorf("failed to get container state: couldn't parse integer") } return exitStatus, running, nil } func buildImageWithOut(name, dockerfile string, useCache bool) (string, string, error) { args := []string{"build", "-t", name} if !useCache { args = append(args, "--no-cache") } args = append(args, "-") buildCmd := exec.Command(dockerBinary, args...) buildCmd.Stdin = strings.NewReader(dockerfile) out, exitCode, err := runCommandWithOutput(buildCmd) if err != nil || exitCode != 0 { return "", out, fmt.Errorf("failed to build the image: %s", out) } id, err := getIDByName(name) if err != nil { return "", out, err } return id, out, nil } func buildImageWithStdoutStderr(name, dockerfile string, useCache bool) (string, string, string, error) { args := []string{"build", "-t", name} if !useCache { args = append(args, "--no-cache") } args = append(args, "-") buildCmd := exec.Command(dockerBinary, args...) 
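// The trailing "-" argument added above tells `docker build` to read the
// build context (here just the Dockerfile) from stdin, which is wired up below.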
buildCmd.Stdin = strings.NewReader(dockerfile) stdout, stderr, exitCode, err := runCommandWithStdoutStderr(buildCmd) if err != nil || exitCode != 0 { return "", stdout, stderr, fmt.Errorf("failed to build the image: %s", stdout) } id, err := getIDByName(name) if err != nil { return "", stdout, stderr, err } return id, stdout, stderr, nil } func buildImage(name, dockerfile string, useCache bool) (string, error) { id, _, err := buildImageWithOut(name, dockerfile, useCache) return id, err } func buildImageFromContext(name string, ctx *FakeContext, useCache bool) (string, error) { args := []string{"build", "-t", name} if !useCache { args = append(args, "--no-cache") } args = append(args, ".") buildCmd := exec.Command(dockerBinary, args...) buildCmd.Dir = ctx.Dir out, exitCode, err := runCommandWithOutput(buildCmd) if err != nil || exitCode != 0 { return "", fmt.Errorf("failed to build the image: %s", out) } return getIDByName(name) } func buildImageFromPath(name, path string, useCache bool) (string, error) { args := []string{"build", "-t", name} if !useCache { args = append(args, "--no-cache") } args = append(args, path) buildCmd := exec.Command(dockerBinary, args...) out, exitCode, err := runCommandWithOutput(buildCmd) if err != nil || exitCode != 0 { return "", fmt.Errorf("failed to build the image: %s", out) } return getIDByName(name) } type GitServer interface { URL() string Close() error } type localGitServer struct { *httptest.Server } func (r *localGitServer) Close() error { r.Server.Close() return nil } func (r *localGitServer) URL() string { return r.Server.URL } type FakeGIT struct { root string server GitServer RepoURL string } func (g *FakeGIT) Close() { g.server.Close() os.RemoveAll(g.root) } func fakeGIT(name string, files map[string]string, enforceLocalServer bool) (*FakeGIT, error) { ctx, err := fakeContextWithFiles(files) if err != nil { return nil, err } defer ctx.Close() curdir, err := os.Getwd() if err != nil { return nil, err } defer os.Chdir(curdir) if output, err := exec.Command("git", "init", ctx.Dir).CombinedOutput(); err != nil { return nil, fmt.Errorf("error trying to init repo: %s (%s)", err, output) } err = os.Chdir(ctx.Dir) if err != nil { return nil, err } if output, err := exec.Command("git", "config", "user.name", "Fake User").CombinedOutput(); err != nil { return nil, fmt.Errorf("error trying to set 'user.name': %s (%s)", err, output) } if output, err := exec.Command("git", "config", "user.email", "fake.user@example.com").CombinedOutput(); err != nil { return nil, fmt.Errorf("error trying to set 'user.email': %s (%s)", err, output) } if output, err := exec.Command("git", "add", "*").CombinedOutput(); err != nil { return nil, fmt.Errorf("error trying to add files to repo: %s (%s)", err, output) } if output, err := exec.Command("git", "commit", "-a", "-m", "Initial commit").CombinedOutput(); err != nil { return nil, fmt.Errorf("error trying to commit to repo: %s (%s)", err, output) } root, err := ioutil.TempDir("", "docker-test-git-repo") if err != nil { return nil, err } repoPath := filepath.Join(root, name+".git") if output, err := exec.Command("git", "clone", "--bare", ctx.Dir, repoPath).CombinedOutput(); err != nil { os.RemoveAll(root) return nil, fmt.Errorf("error trying to clone --bare: %s (%s)", err, output) } err = os.Chdir(repoPath) if err != nil { os.RemoveAll(root) return nil, err } if output, err := exec.Command("git", "update-server-info").CombinedOutput(); err != nil { os.RemoveAll(root) return nil, fmt.Errorf("error trying to git 
update-server-info: %s (%s)", err, output) } err = os.Chdir(curdir) if err != nil { os.RemoveAll(root) return nil, err } var server GitServer if !enforceLocalServer { // use fakeStorage server, which might be local or remote (at test daemon) server, err = fakeStorageWithContext(fakeContextFromDir(root)) if err != nil { return nil, fmt.Errorf("cannot start fake storage: %v", err) } } else { // always start a local http server on the CLI test machine httpServer := httptest.NewServer(http.FileServer(http.Dir(root))) server = &localGitServer{httpServer} } return &FakeGIT{ root: root, server: server, RepoURL: fmt.Sprintf("%s/%s.git", server.URL(), name), }, nil } // Write `content` to the file at path `dst`, creating it if necessary, // as well as any missing directories. // The file is truncated if it already exists. // Call t.Fatal() at the first error. func writeFile(dst, content string, t *testing.T) { // Create subdirectories if necessary if err := os.MkdirAll(path.Dir(dst), 0700); err != nil && !os.IsExist(err) { t.Fatal(err) } f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700) if err != nil { t.Fatal(err) } // Write content (truncate if it exists) if _, err := io.Copy(f, strings.NewReader(content)); err != nil { t.Fatal(err) } f.Close() } // Return the contents of file at path `src`. // Call t.Fatal() at the first error (including if the file doesn't exist) func readFile(src string, t *testing.T) (content string) { data, err := ioutil.ReadFile(src) if err != nil { t.Fatal(err) } return string(data) } func containerStorageFile(containerId, basename string) string { return filepath.Join("/var/lib/docker/containers", containerId, basename) } // docker commands that use this function must be run with the '-d' switch. func runCommandAndReadContainerFile(filename string, cmd *exec.Cmd) ([]byte, error) { out, _, err := runCommandWithOutput(cmd) if err != nil { return nil, fmt.Errorf("%v: %q", err, out) } time.Sleep(1 * time.Second) contID := strings.TrimSpace(out) return readContainerFile(contID, filename) } func readContainerFile(containerId, filename string) ([]byte, error) { f, err := os.Open(containerStorageFile(containerId, filename)) if err != nil { return nil, err } defer f.Close() content, err := ioutil.ReadAll(f) if err != nil { return nil, err } return content, nil } func readContainerFileWithExec(containerId, filename string) ([]byte, error) { out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "exec", containerId, "cat", filename)) return []byte(out), err } // daemonTime provides the current time on the daemon host func daemonTime(t *testing.T) time.Time { if isLocalDaemon { return time.Now() } body, err := sockRequest("GET", "/info", nil) if err != nil { t.Fatalf("daemonTime: failed to get /info: %v", err) } type infoJSON struct { SystemTime string } var info infoJSON if err = json.Unmarshal(body, &info); err != nil { t.Fatalf("unable to unmarshal /info response: %v", err) } dt, err := time.Parse(time.RFC3339Nano, info.SystemTime) if err != nil { t.Fatal(err) } return dt } func setupRegistry(t *testing.T) func() { testRequires(t, RegistryHosting) reg, err := newTestRegistryV2(t) if err != nil { t.Fatal(err) } // Wait for registry to be ready to serve requests. 
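// Poll with short sleeps rather than one long delay: up to five pings at
// 100ms intervals keeps the common (fast) case quick.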
for i := 0; i != 5; i++ { if err = reg.Ping(); err == nil { break } time.Sleep(100 * time.Millisecond) } if err != nil { t.Fatal("Timeout waiting for test registry to become available") } return func() { reg.Close() } } // appendBaseEnv appends the minimum set of environment variables to exec the // docker cli binary for testing with correct configuration to the given env // list. func appendBaseEnv(env []string) []string { preserveList := []string{ // preserve remote test host "DOCKER_HOST", // windows: requires preserving SystemRoot, otherwise dial tcp fails // with "GetAddrInfoW: A non-recoverable error occurred during a database lookup." "SystemRoot", } for _, key := range preserveList { if val := os.Getenv(key); val != "" { env = append(env, fmt.Sprintf("%s=%s", key, val)) } } return env } docker-1.6.2/integration-cli/docker_api_resize_test.go0000644000175000017500000000305112524223634022472 0ustar tianontianonpackage main import ( "os/exec" "strings" "testing" ) func TestResizeApiResponse(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf("%s: %v", out, err) } defer deleteAllContainers() cleanedContainerID := stripTrailingCharacters(out) endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" _, err = sockRequest("POST", endpoint, nil) if err != nil { t.Fatalf("resize Request failed %v", err) } logDone("container resize - when started") } func TestResizeApiResponseWhenContainerNotStarted(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf("%s: %v", out, err) } defer deleteAllContainers() cleanedContainerID := stripTrailingCharacters(out) // make sure the exited container is not running runCmd = exec.Command(dockerBinary, "wait", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatalf("%s: %v", out, err) } endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" body, err := sockRequest("POST", endpoint, nil) if err == nil { t.Fatalf("resize should fail when container is not started") } if !strings.Contains(string(body), "Cannot resize container") && !strings.Contains(string(body), cleanedContainerID) { t.Fatalf("resize should fail with message 'Cannot resize container' but instead received %s", string(body)) } logDone("container resize - when not started should not resize") } docker-1.6.2/integration-cli/docker_cli_import_test.go0000644000175000017500000000215112524223634022501 0ustar tianontianonpackage main import ( "os/exec" "strings" "testing" ) func TestImportDisplay(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatal("failed to create a container", out, err) } cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) out, _, err = runCommandPipelineWithOutput( exec.Command(dockerBinary, "export", cleanedContainerID), exec.Command(dockerBinary, "import", "-"), ) if err != nil { t.Errorf("import failed with errors: %v, output: %q", err, out) } if n := strings.Count(out, "\n"); n != 1 { t.Fatalf("display is messed up: %d '\\n' instead of 1:\n%s", n, out) } image := strings.TrimSpace(out) defer deleteImages(image) runCmd = exec.Command(dockerBinary, "run", "--rm", image, "true") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal("failed to create a container", out, err) } if out != "" { t.Fatalf("command 
output should've been nothing, was %q", out) } logDone("import - display is fine, imported image runs") } docker-1.6.2/integration-cli/docker_cli_login_test.go0000644000175000017500000000070712524223634022304 0ustar tianontianonpackage main import ( "bytes" "os/exec" "testing" ) func TestLoginWithoutTTY(t *testing.T) { cmd := exec.Command(dockerBinary, "login") // Send to stdin so the process does not get the TTY cmd.Stdin = bytes.NewBufferString("buffer test string \n") // run the command and block until it's done if err := cmd.Run(); err == nil { t.Fatal("Expected non-nil err when logging in & TTY not available") } logDone("login - login without TTY") } docker-1.6.2/integration-cli/docker_cli_save_load_test.go0000644000175000017500000003141212524223634023126 0ustar tianontianonpackage main import ( "fmt" "io/ioutil" "os" "os/exec" "path/filepath" "reflect" "sort" "strings" "testing" ) // save a repo using xz compression and try to load it using stdout func TestSaveXzAndLoadRepoStdout(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf("failed to create a container: %v %v", out, err) } cleanedContainerID := stripTrailingCharacters(out) repoName := "foobar-save-load-test-xz-gz" inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) out, _, err = runCommandWithOutput(inspectCmd) if err != nil { t.Fatalf("output should've been a container id: %v %v", cleanedContainerID, err) } commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName) out, _, err = runCommandWithOutput(commitCmd) if err != nil { t.Fatalf("failed to commit container: %v %v", out, err) } inspectCmd = exec.Command(dockerBinary, "inspect", repoName) before, _, err := runCommandWithOutput(inspectCmd) if err != nil { t.Fatalf("the repo should exist before saving it: %v %v", before, err) } repoTarball, _, err := runCommandPipelineWithOutput( exec.Command(dockerBinary, "save", repoName), exec.Command("xz", "-c"), exec.Command("gzip", "-c")) if err != nil { t.Fatalf("failed to save repo: %v %v", out, err) } deleteImages(repoName) loadCmd := exec.Command(dockerBinary, "load") loadCmd.Stdin = strings.NewReader(repoTarball) out, _, err = runCommandWithOutput(loadCmd) if err == nil { t.Fatalf("expected error, but succeeded with no error and output: %v", out) } inspectCmd = exec.Command(dockerBinary, "inspect", repoName) after, _, err := runCommandWithOutput(inspectCmd) if err == nil { t.Fatalf("the repo should not exist: %v", after) } deleteImages(repoName) logDone("load - save a repo with xz compression & load it using stdout") } // save a repo using xz+gz compression and try to load it using stdout func TestSaveXzGzAndLoadRepoStdout(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf("failed to create a container: %v %v", out, err) } cleanedContainerID := stripTrailingCharacters(out) repoName := "foobar-save-load-test-xz-gz" inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) out, _, err = runCommandWithOutput(inspectCmd) if err != nil { t.Fatalf("output should've been a container id: %v %v", cleanedContainerID, err) } commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName) out, _, err = runCommandWithOutput(commitCmd) if err != nil { t.Fatalf("failed to commit container: %v %v", out, err) } inspectCmd = exec.Command(dockerBinary, "inspect", repoName) 
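// inspect before the save/load round trip; the output is only used to prove
// that the repo exists at this point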
before, _, err := runCommandWithOutput(inspectCmd) if err != nil { t.Fatalf("the repo should exist before saving it: %v %v", before, err) } out, _, err = runCommandPipelineWithOutput( exec.Command(dockerBinary, "save", repoName), exec.Command("xz", "-c"), exec.Command("gzip", "-c")) if err != nil { t.Fatalf("failed to save repo: %v %v", out, err) } deleteImages(repoName) loadCmd := exec.Command(dockerBinary, "load") loadCmd.Stdin = strings.NewReader(out) out, _, err = runCommandWithOutput(loadCmd) if err == nil { t.Fatalf("expected error, but succeeded with no error and output: %v", out) } inspectCmd = exec.Command(dockerBinary, "inspect", repoName) after, _, err := runCommandWithOutput(inspectCmd) if err == nil { t.Fatalf("the repo should not exist: %v", after) } deleteContainer(cleanedContainerID) deleteImages(repoName) logDone("load - save a repo with xz+gz compression & load it using stdout") } func TestSaveSingleTag(t *testing.T) { repoName := "foobar-save-single-tag-test" tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", fmt.Sprintf("%v:latest", repoName)) defer deleteImages(repoName) if out, _, err := runCommandWithOutput(tagCmd); err != nil { t.Fatalf("failed to tag repo: %s, %v", out, err) } idCmd := exec.Command(dockerBinary, "images", "-q", "--no-trunc", repoName) out, _, err := runCommandWithOutput(idCmd) if err != nil { t.Fatalf("failed to get repo ID: %s, %v", out, err) } cleanedImageID := stripTrailingCharacters(out) out, _, err = runCommandPipelineWithOutput( exec.Command(dockerBinary, "save", fmt.Sprintf("%v:latest", repoName)), exec.Command("tar", "t"), exec.Command("grep", "-E", fmt.Sprintf("(^repositories$|%v)", cleanedImageID))) if err != nil { t.Fatalf("failed to save repo with image ID and 'repositories' file: %s, %v", out, err) } logDone("save - save a specific image:tag") } func TestSaveImageId(t *testing.T) { repoName := "foobar-save-image-id-test" tagCmd := exec.Command(dockerBinary, "tag", "emptyfs:latest", fmt.Sprintf("%v:latest", repoName)) defer deleteImages(repoName) if out, _, err := runCommandWithOutput(tagCmd); err != nil { t.Fatalf("failed to tag repo: %s, %v", out, err) } idLongCmd := exec.Command(dockerBinary, "images", "-q", "--no-trunc", repoName) out, _, err := runCommandWithOutput(idLongCmd) if err != nil { t.Fatalf("failed to get repo ID: %s, %v", out, err) } cleanedLongImageID := stripTrailingCharacters(out) idShortCmd := exec.Command(dockerBinary, "images", "-q", repoName) out, _, err = runCommandWithOutput(idShortCmd) if err != nil { t.Fatalf("failed to get repo short ID: %s, %v", out, err) } cleanedShortImageID := stripTrailingCharacters(out) saveCmd := exec.Command(dockerBinary, "save", cleanedShortImageID) tarCmd := exec.Command("tar", "t") tarCmd.Stdin, err = saveCmd.StdoutPipe() if err != nil { t.Fatalf("cannot set stdout pipe for tar: %v", err) } grepCmd := exec.Command("grep", cleanedLongImageID) grepCmd.Stdin, err = tarCmd.StdoutPipe() if err != nil { t.Fatalf("cannot set stdout pipe for grep: %v", err) } if err = tarCmd.Start(); err != nil { t.Fatalf("tar failed with error: %v", err) } if err = saveCmd.Start(); err != nil { t.Fatalf("docker save failed with error: %v", err) } defer saveCmd.Wait() defer tarCmd.Wait() out, _, err = runCommandWithOutput(grepCmd) if err != nil { t.Fatalf("failed to save repo with image ID: %s, %v", out, err) } logDone("save - save an image by ID") } // save a repo and try to load it using stdin/stdout func TestSaveAndLoadRepoFlags(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", 
"busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf("failed to create a container: %s, %v", out, err) } cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) repoName := "foobar-save-load-test" inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) if out, _, err = runCommandWithOutput(inspectCmd); err != nil { t.Fatalf("output should've been a container id: %s, %v", out, err) } commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName) deleteImages(repoName) if out, _, err = runCommandWithOutput(commitCmd); err != nil { t.Fatalf("failed to commit container: %s, %v", out, err) } inspectCmd = exec.Command(dockerBinary, "inspect", repoName) before, _, err := runCommandWithOutput(inspectCmd) if err != nil { t.Fatalf("the repo should exist before saving it: %s, %v", before, err) } out, _, err = runCommandPipelineWithOutput( exec.Command(dockerBinary, "save", repoName), exec.Command(dockerBinary, "load")) if err != nil { t.Fatalf("failed to save and load repo: %s, %v", out, err) } inspectCmd = exec.Command(dockerBinary, "inspect", repoName) after, _, err := runCommandWithOutput(inspectCmd) if err != nil { t.Fatalf("the repo should exist after loading it: %s, %v", after, err) } if before != after { t.Fatalf("inspect is not the same after a save / load") } logDone("save - save a repo using -o && load a repo using -i") } func TestSaveMultipleNames(t *testing.T) { repoName := "foobar-save-multi-name-test" // Make one image tagCmd := exec.Command(dockerBinary, "tag", "emptyfs:latest", fmt.Sprintf("%v-one:latest", repoName)) if out, _, err := runCommandWithOutput(tagCmd); err != nil { t.Fatalf("failed to tag repo: %s, %v", out, err) } defer deleteImages(repoName + "-one") // Make two images tagCmd = exec.Command(dockerBinary, "tag", "emptyfs:latest", fmt.Sprintf("%v-two:latest", repoName)) out, _, err := runCommandWithOutput(tagCmd) if err != nil { t.Fatalf("failed to tag repo: %s, %v", out, err) } defer deleteImages(repoName + "-two") out, _, err = runCommandPipelineWithOutput( exec.Command(dockerBinary, "save", fmt.Sprintf("%v-one", repoName), fmt.Sprintf("%v-two:latest", repoName)), exec.Command("tar", "xO", "repositories"), exec.Command("grep", "-q", "-E", "(-one|-two)"), ) if err != nil { t.Fatalf("failed to save multiple repos: %s, %v", out, err) } logDone("save - save by multiple names") } func TestSaveRepoWithMultipleImages(t *testing.T) { makeImage := func(from string, tag string) string { runCmd := exec.Command(dockerBinary, "run", "-d", from, "true") var ( out string err error ) if out, _, err = runCommandWithOutput(runCmd); err != nil { t.Fatalf("failed to create a container: %v %v", out, err) } cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, tag) if out, _, err = runCommandWithOutput(commitCmd); err != nil { t.Fatalf("failed to commit container: %v %v", out, err) } imageID := stripTrailingCharacters(out) return imageID } repoName := "foobar-save-multi-images-test" tagFoo := repoName + ":foo" tagBar := repoName + ":bar" idFoo := makeImage("busybox:latest", tagFoo) defer deleteImages(idFoo) idBar := makeImage("busybox:latest", tagBar) defer deleteImages(idBar) deleteImages(repoName) // create the archive out, _, err := runCommandPipelineWithOutput( exec.Command(dockerBinary, "save", repoName), exec.Command("tar", "t"), exec.Command("grep", "VERSION"), 
exec.Command("cut", "-d", "/", "-f1")) if err != nil { t.Fatalf("failed to save multiple images: %s, %v", out, err) } actual := strings.Split(stripTrailingCharacters(out), "\n") // make the list of expected layers out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "history", "-q", "--no-trunc", "busybox:latest")) if err != nil { t.Fatalf("failed to get history: %s, %v", out, err) } expected := append(strings.Split(stripTrailingCharacters(out), "\n"), idFoo, idBar) sort.Strings(actual) sort.Strings(expected) if !reflect.DeepEqual(expected, actual) { t.Fatalf("achive does not contains the right layers: got %v, expected %v", actual, expected) } logDone("save - save repository with multiple images") } // Issue #6722 #5892 ensure directories are included in changes func TestSaveDirectoryPermissions(t *testing.T) { layerEntries := []string{"opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"} layerEntriesAUFS := []string{"./", ".wh..wh.aufs", ".wh..wh.orph/", ".wh..wh.plnk/", "opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"} name := "save-directory-permissions" tmpDir, err := ioutil.TempDir("", "save-layers-with-directories") if err != nil { t.Errorf("failed to create temporary directory: %s", err) } extractionDirectory := filepath.Join(tmpDir, "image-extraction-dir") os.Mkdir(extractionDirectory, 0777) defer os.RemoveAll(tmpDir) defer deleteImages(name) _, err = buildImage(name, `FROM busybox RUN adduser -D user && mkdir -p /opt/a/b && chown -R user:user /opt/a RUN touch /opt/a/b/c && chown user:user /opt/a/b/c`, true) if err != nil { t.Fatal(err) } if out, _, err := runCommandPipelineWithOutput( exec.Command(dockerBinary, "save", name), exec.Command("tar", "-xf", "-", "-C", extractionDirectory), ); err != nil { t.Errorf("failed to save and extract image: %s", out) } dirs, err := ioutil.ReadDir(extractionDirectory) if err != nil { t.Errorf("failed to get a listing of the layer directories: %s", err) } found := false for _, entry := range dirs { var entriesSansDev []string if entry.IsDir() { layerPath := filepath.Join(extractionDirectory, entry.Name(), "layer.tar") f, err := os.Open(layerPath) if err != nil { t.Fatalf("failed to open %s: %s", layerPath, err) } entries, err := ListTar(f) for _, e := range entries { if !strings.Contains(e, "dev/") { entriesSansDev = append(entriesSansDev, e) } } if err != nil { t.Fatalf("encountered error while listing tar entries: %s", err) } if reflect.DeepEqual(entriesSansDev, layerEntries) || reflect.DeepEqual(entriesSansDev, layerEntriesAUFS) { found = true break } } } if !found { t.Fatalf("failed to find the layer with the right content listing") } logDone("save - ensure directories exist in exported layers") } docker-1.6.2/integration-cli/docker_cli_history_test.go0000644000175000017500000000407512524223634022677 0ustar tianontianonpackage main import ( "fmt" "os/exec" "strings" "testing" ) // This is a heisen-test. Because the created timestamp of images and the behavior of // sort is not predictable it doesn't always fail. 
func TestBuildHistory(t *testing.T) { name := "testbuildhistory" defer deleteImages(name) _, err := buildImage(name, `FROM busybox
RUN echo "A"
RUN echo "B"
RUN echo "C"
RUN echo "D"
RUN echo "E"
RUN echo "F"
RUN echo "G"
RUN echo "H"
RUN echo "I"
RUN echo "J"
RUN echo "K"
RUN echo "L"
RUN echo "M"
RUN echo "N"
RUN echo "O"
RUN echo "P"
RUN echo "Q"
RUN echo "R"
RUN echo "S"
RUN echo "T"
RUN echo "U"
RUN echo "V"
RUN echo "W"
RUN echo "X"
RUN echo "Y"
RUN echo "Z"`, true) if err != nil { t.Fatal(err) } out, exitCode, err := runCommandWithOutput(exec.Command(dockerBinary, "history", "testbuildhistory")) if err != nil || exitCode != 0 { t.Fatalf("failed to get image history: %s, %v", out, err) } actualValues := strings.Split(out, "\n")[1:27] expectedValues := [26]string{"Z", "Y", "X", "W", "V", "U", "T", "S", "R", "Q", "P", "O", "N", "M", "L", "K", "J", "I", "H", "G", "F", "E", "D", "C", "B", "A"} for i := 0; i < 26; i++ { echoValue := fmt.Sprintf("echo \"%s\"", expectedValues[i]) actualValue := actualValues[i] if !strings.Contains(actualValue, echoValue) { t.Fatalf("Expected layer \"%s\", but was: %s", expectedValues[i], actualValue) } } logDone("history - build history") } func TestHistoryExistentImage(t *testing.T) { historyCmd := exec.Command(dockerBinary, "history", "busybox") _, exitCode, err := runCommandWithOutput(historyCmd) if err != nil || exitCode != 0 { t.Fatal("failed to get image history") } logDone("history - history on existent image must pass") } func TestHistoryNonExistentImage(t *testing.T) { historyCmd := exec.Command(dockerBinary, "history", "testHistoryNonExistentImage") _, exitCode, err := runCommandWithOutput(historyCmd) if err == nil || exitCode == 0 { t.Fatal("history on a non-existent image didn't result in a non-zero exit status") } logDone("history - history on non-existent image must fail") } docker-1.6.2/integration-cli/docker_cli_daemon_test.go0000644000175000017500000005517112524223634022444 0ustar tianontianon// +build daemon package main import ( "encoding/json" "fmt" "io/ioutil" "os" "os/exec" "path/filepath" "strings" "testing" "time" "github.com/docker/libtrust" ) func TestDaemonRestartWithRunningContainersPorts(t *testing.T) { d := NewDaemon(t) if err := d.StartWithBusybox(); err != nil { t.Fatalf("Could not start daemon with busybox: %v", err) } defer d.Stop() if out, err := d.Cmd("run", "-d", "--name", "top1", "-p", "1234:80", "--restart", "always", "busybox:latest", "top"); err != nil { t.Fatalf("Could not run top1: err=%v\n%s", err, out) } // --restart=no by default if out, err := d.Cmd("run", "-d", "--name", "top2", "-p", "80", "busybox:latest", "top"); err != nil { t.Fatalf("Could not run top2: err=%v\n%s", err, out) } testRun := func(m map[string]bool, prefix string) { var format string for c, shouldRun := range m { out, err := d.Cmd("ps") if err != nil { t.Fatalf("Could not run ps: err=%v\n%q", err, out) } if shouldRun { format = "%scontainer %q is not running" } else { format = "%scontainer %q is running" } if shouldRun != strings.Contains(out, c) { t.Fatalf(format, prefix, c) } } } testRun(map[string]bool{"top1": true, "top2": true}, "") if err := d.Restart(); err != nil { t.Fatalf("Could not restart daemon: %v", err) } testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ") logDone("daemon - running containers on daemon restart") } func TestDaemonRestartWithVolumesRefs(t *testing.T) { d := NewDaemon(t) if err := d.StartWithBusybox(); err != nil { t.Fatal(err) } defer d.Stop() if out, err := d.Cmd("run", "-d", 
"--name", "volrestarttest1", "-v", "/foo", "busybox"); err != nil { t.Fatal(err, out) } if err := d.Restart(); err != nil { t.Fatal(err) } if _, err := d.Cmd("run", "-d", "--volumes-from", "volrestarttest1", "--name", "volrestarttest2", "busybox", "top"); err != nil { t.Fatal(err) } if out, err := d.Cmd("rm", "-fv", "volrestarttest2"); err != nil { t.Fatal(err, out) } v, err := d.Cmd("inspect", "--format", "{{ json .Volumes }}", "volrestarttest1") if err != nil { t.Fatal(err) } volumes := make(map[string]string) json.Unmarshal([]byte(v), &volumes) if _, err := os.Stat(volumes["/foo"]); err != nil { t.Fatalf("Expected volume to exist: %s - %s", volumes["/foo"], err) } logDone("daemon - volume refs are restored") } func TestDaemonStartIptablesFalse(t *testing.T) { d := NewDaemon(t) if err := d.Start("--iptables=false"); err != nil { t.Fatalf("we should have been able to start the daemon with passing iptables=false: %v", err) } d.Stop() logDone("daemon - started daemon with iptables=false") } // Issue #8444: If docker0 bridge is modified (intentionally or unintentionally) and // no longer has an IP associated, we should gracefully handle that case and associate // an IP with it rather than fail daemon start func TestDaemonStartBridgeWithoutIPAssociation(t *testing.T) { d := NewDaemon(t) // rather than depending on brctl commands to verify docker0 is created and up // let's start the daemon and stop it, and then make a modification to run the // actual test if err := d.Start(); err != nil { t.Fatalf("Could not start daemon: %v", err) } if err := d.Stop(); err != nil { t.Fatalf("Could not stop daemon: %v", err) } // now we will remove the ip from docker0 and then try starting the daemon ipCmd := exec.Command("ip", "addr", "flush", "dev", "docker0") stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd) if err != nil { t.Fatalf("failed to remove docker0 IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr) } if err := d.Start(); err != nil { warning := "**WARNING: Docker bridge network in bad state--delete docker0 bridge interface to fix" t.Fatalf("Could not start daemon when docker0 has no IP address: %v\n%s", err, warning) } // cleanup - stop the daemon if test passed if err := d.Stop(); err != nil { t.Fatalf("Could not stop daemon: %v", err) } logDone("daemon - successful daemon start when bridge has no IP association") } func TestDaemonIptablesClean(t *testing.T) { defer deleteAllContainers() d := NewDaemon(t) if err := d.StartWithBusybox(); err != nil { t.Fatalf("Could not start daemon with busybox: %v", err) } defer d.Stop() if out, err := d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil { t.Fatalf("Could not run top: %s, %v", out, err) } // get output from iptables with container running ipTablesSearchString := "tcp dpt:80" ipTablesCmd := exec.Command("iptables", "-nvL") out, _, err := runCommandWithOutput(ipTablesCmd) if err != nil { t.Fatalf("Could not run iptables -nvL: %s, %v", out, err) } if !strings.Contains(out, ipTablesSearchString) { t.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) } if err := d.Stop(); err != nil { t.Fatalf("Could not stop daemon: %v", err) } // get output from iptables after restart ipTablesCmd = exec.Command("iptables", "-nvL") out, _, err = runCommandWithOutput(ipTablesCmd) if err != nil { t.Fatalf("Could not run iptables -nvL: %s, %v", out, err) } if strings.Contains(out, ipTablesSearchString) { t.Fatalf("iptables output should not have contained %q, but was %q", 
ipTablesSearchString, out) } logDone("daemon - run,iptables - iptables rules cleaned after daemon restart") } func TestDaemonIptablesCreate(t *testing.T) { defer deleteAllContainers() d := NewDaemon(t) if err := d.StartWithBusybox(); err != nil { t.Fatalf("Could not start daemon with busybox: %v", err) } defer d.Stop() if out, err := d.Cmd("run", "-d", "--name", "top", "--restart=always", "-p", "80", "busybox:latest", "top"); err != nil { t.Fatalf("Could not run top: %s, %v", out, err) } // get output from iptables with container running ipTablesSearchString := "tcp dpt:80" ipTablesCmd := exec.Command("iptables", "-nvL") out, _, err := runCommandWithOutput(ipTablesCmd) if err != nil { t.Fatalf("Could not run iptables -nvL: %s, %v", out, err) } if !strings.Contains(out, ipTablesSearchString) { t.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) } if err := d.Restart(); err != nil { t.Fatalf("Could not restart daemon: %v", err) } // make sure the always-restart container is running again runningOut, err := d.Cmd("inspect", "--format='{{.State.Running}}'", "top") if err != nil { t.Fatalf("Could not inspect container: %s, %v", out, err) } if strings.TrimSpace(runningOut) != "true" { t.Fatalf("Container should have been restarted after daemon restart. Status running should have been true but was: %q", strings.TrimSpace(runningOut)) } // get output from iptables after restart ipTablesCmd = exec.Command("iptables", "-nvL") out, _, err = runCommandWithOutput(ipTablesCmd) if err != nil { t.Fatalf("Could not run iptables -nvL: %s, %v", out, err) } if !strings.Contains(out, ipTablesSearchString) { t.Fatalf("iptables output after restart should have contained %q, but was %q", ipTablesSearchString, out) } logDone("daemon - run,iptables - iptables rules for always restarted container created after daemon restart") } func TestDaemonLoggingLevel(t *testing.T) { d := NewDaemon(t) if err := d.Start("--log-level=bogus"); err == nil { t.Fatal("Daemon should not have been able to start") } d = NewDaemon(t) if err := d.Start("--log-level=debug"); err != nil { t.Fatal(err) } d.Stop() content, _ := ioutil.ReadFile(d.logFile.Name()) if !strings.Contains(string(content), `level=debug`) { t.Fatalf("Missing level=\"debug\" in log file:\n%s", string(content)) } d = NewDaemon(t) if err := d.Start("--log-level=fatal"); err != nil { t.Fatal(err) } d.Stop() content, _ = ioutil.ReadFile(d.logFile.Name()) if strings.Contains(string(content), `level=debug`) { t.Fatalf("Should not have level=\"debug\" in log file:\n%s", string(content)) } d = NewDaemon(t) if err := d.Start("-D"); err != nil { t.Fatal(err) } d.Stop() content, _ = ioutil.ReadFile(d.logFile.Name()) if !strings.Contains(string(content), `level=debug`) { t.Fatalf("Missing level=\"debug\" in log file using -D:\n%s", string(content)) } d = NewDaemon(t) if err := d.Start("--debug"); err != nil { t.Fatal(err) } d.Stop() content, _ = ioutil.ReadFile(d.logFile.Name()) if !strings.Contains(string(content), `level=debug`) { t.Fatalf("Missing level=\"debug\" in log file using --debug:\n%s", string(content)) } d = NewDaemon(t) if err := d.Start("--debug", "--log-level=fatal"); err != nil { t.Fatal(err) } d.Stop() content, _ = ioutil.ReadFile(d.logFile.Name()) if !strings.Contains(string(content), `level=debug`) { t.Fatalf("Missing level=\"debug\" in log file when using both --debug and --log-level=fatal:\n%s", string(content)) } logDone("daemon - Logging Level") } func TestDaemonAllocatesListeningPort(t *testing.T) { listeningPorts := [][]string{ {"0.0.0.0", 
"0.0.0.0", "5678"}, {"127.0.0.1", "127.0.0.1", "1234"}, {"localhost", "127.0.0.1", "1235"}, } cmdArgs := []string{} for _, hostDirective := range listeningPorts { cmdArgs = append(cmdArgs, "--host", fmt.Sprintf("tcp://%s:%s", hostDirective[0], hostDirective[2])) } d := NewDaemon(t) if err := d.StartWithBusybox(cmdArgs...); err != nil { t.Fatalf("Could not start daemon with busybox: %v", err) } defer d.Stop() for _, hostDirective := range listeningPorts { output, err := d.Cmd("run", "-p", fmt.Sprintf("%s:%s:80", hostDirective[1], hostDirective[2]), "busybox", "true") if err == nil { t.Fatalf("Container should not start, expected port already allocated error: %q", output) } else if !strings.Contains(output, "port is already allocated") { t.Fatalf("Expected port is already allocated error: %q", output) } } logDone("daemon - daemon listening port is allocated") } // #9629 func TestDaemonVolumesBindsRefs(t *testing.T) { d := NewDaemon(t) if err := d.StartWithBusybox(); err != nil { t.Fatal(err) } tmp, err := ioutil.TempDir(os.TempDir(), "") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) if err := ioutil.WriteFile(tmp+"/test", []byte("testing"), 0655); err != nil { t.Fatal(err) } if out, err := d.Cmd("create", "-v", tmp+":/foo", "--name=voltest", "busybox"); err != nil { t.Fatal(err, out) } if err := d.Restart(); err != nil { t.Fatal(err) } if out, err := d.Cmd("run", "--volumes-from=voltest", "--name=consumer", "busybox", "/bin/sh", "-c", "[ -f /foo/test ]"); err != nil { t.Fatal(err, out) } logDone("daemon - bind refs in data-containers survive daemon restart") } func TestDaemonKeyGeneration(t *testing.T) { // TODO: skip or update for Windows daemon os.Remove("/etc/docker/key.json") d := NewDaemon(t) if err := d.Start(); err != nil { t.Fatalf("Could not start daemon: %v", err) } d.Stop() k, err := libtrust.LoadKeyFile("/etc/docker/key.json") if err != nil { t.Fatalf("Error opening key file") } kid := k.KeyID() // Test Key ID is a valid fingerprint (e.g. 
QQXN:JY5W:TBXI:MK3X:GX6P:PD5D:F56N:NHCS:LVRZ:JA46:R24J:XEFF) if len(kid) != 59 { t.Fatalf("Bad key ID: %s", kid) } logDone("daemon - key generation") } func TestDaemonKeyMigration(t *testing.T) { // TODO: skip or update for Windows daemon os.Remove("/etc/docker/key.json") k1, err := libtrust.GenerateECP256PrivateKey() if err != nil { t.Fatalf("Error generating private key: %s", err) } if err := os.MkdirAll(filepath.Join(os.Getenv("HOME"), ".docker"), 0755); err != nil { t.Fatalf("Error creating .docker directory: %s", err) } if err := libtrust.SaveKey(filepath.Join(os.Getenv("HOME"), ".docker", "key.json"), k1); err != nil { t.Fatalf("Error saving private key: %s", err) } d := NewDaemon(t) if err := d.Start(); err != nil { t.Fatalf("Could not start daemon: %v", err) } d.Stop() k2, err := libtrust.LoadKeyFile("/etc/docker/key.json") if err != nil { t.Fatalf("Error opening key file") } if k1.KeyID() != k2.KeyID() { t.Fatalf("Key not migrated") } logDone("daemon - key migration") } // Simulate an older daemon (pre 1.3) coming up with volumes specified in containers // without corresponding volume json func TestDaemonUpgradeWithVolumes(t *testing.T) { d := NewDaemon(t) graphDir := filepath.Join(os.TempDir(), "docker-test") defer os.RemoveAll(graphDir) if err := d.StartWithBusybox("-g", graphDir); err != nil { t.Fatal(err) } tmpDir := filepath.Join(os.TempDir(), "test") defer os.RemoveAll(tmpDir) if out, err := d.Cmd("create", "-v", tmpDir+":/foo", "--name=test", "busybox"); err != nil { t.Fatal(err, out) } if err := d.Stop(); err != nil { t.Fatal(err) } // Remove this since we're expecting the daemon to re-create it too if err := os.RemoveAll(tmpDir); err != nil { t.Fatal(err) } configDir := filepath.Join(graphDir, "volumes") if err := os.RemoveAll(configDir); err != nil { t.Fatal(err) } if err := d.Start("-g", graphDir); err != nil { t.Fatal(err) } if _, err := os.Stat(tmpDir); os.IsNotExist(err) { t.Fatalf("expected volume path %s to exist but it does not", tmpDir) } dir, err := ioutil.ReadDir(configDir) if err != nil { t.Fatal(err) } if len(dir) == 0 { t.Fatalf("expected volumes config dir to contain data for new volume") } // Now with just removing the volume config and not the volume data if err := d.Stop(); err != nil { t.Fatal(err) } if err := os.RemoveAll(configDir); err != nil { t.Fatal(err) } if err := d.Start("-g", graphDir); err != nil { t.Fatal(err) } dir, err = ioutil.ReadDir(configDir) if err != nil { t.Fatal(err) } if len(dir) == 0 { t.Fatalf("expected volumes config dir to contain data for new volume") } logDone("daemon - volumes from old(pre 1.3) daemon work") } // GH#11320 - verify that the daemon exits on failure properly // Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means // to get a daemon init failure; no other tests for -b/--bip conflict are therefore required func TestDaemonExitOnFailure(t *testing.T) { d := NewDaemon(t) defer d.Stop() //attempt to start daemon with incorrect flags (we know -b and --bip conflict) if err := d.Start("--bridge", "nosuchbridge", "--bip", "1.1.1.1"); err != nil { //verify we got the right error if !strings.Contains(err.Error(), "Daemon exited and never started") { t.Fatalf("Expected daemon not to start, got %v", err) } // look in the log and make sure we got the message that daemon is shutting down runCmd := exec.Command("grep", "Shutting down daemon due to", d.LogfileName()) if out, _, err := runCommandWithOutput(runCmd); err != nil { t.Fatalf("Expected 'shutting down daemon due to error' 
message; but doesn't exist in log: %q, err: %v", out, err) } } else { //if we didn't get an error and the daemon is running, this is a failure d.Stop() t.Fatal("Conflicting options should cause the daemon to error out with a failure") } logDone("daemon - verify no start on daemon init errors") } func TestDaemonUlimitDefaults(t *testing.T) { testRequires(t, NativeExecDriver) d := NewDaemon(t) if err := d.StartWithBusybox("--default-ulimit", "nofile=42:42", "--default-ulimit", "nproc=1024:1024"); err != nil { t.Fatal(err) } out, err := d.Cmd("run", "--ulimit", "nproc=2048", "--name=test", "busybox", "/bin/sh", "-c", "echo $(ulimit -n); echo $(ulimit -p)") if err != nil { t.Fatal(out, err) } outArr := strings.Split(out, "\n") if len(outArr) < 2 { t.Fatalf("got unexpected output: %s", out) } nofile := strings.TrimSpace(outArr[0]) nproc := strings.TrimSpace(outArr[1]) if nofile != "42" { t.Fatalf("expected `ulimit -n` to be `42`, got: %s", nofile) } if nproc != "2048" { t.Fatalf("expected `ulimit -p` to be `2048`, got: %s", nproc) } // Now restart daemon with a new default if err := d.Restart("--default-ulimit", "nofile=43"); err != nil { t.Fatal(err) } out, err = d.Cmd("start", "-a", "test") if err != nil { t.Fatal(err) } outArr = strings.Split(out, "\n") if len(outArr) < 2 { t.Fatalf("got unexpected output: %s", out) } nofile = strings.TrimSpace(outArr[0]) nproc = strings.TrimSpace(outArr[1]) if nofile != "43" { t.Fatalf("expected `ulimit -n` to be `43`, got: %s", nofile) } if nproc != "2048" { t.Fatalf("expected `ulimit -p` to be `2048`, got: %s", nproc) } logDone("daemon - default ulimits are applied") } // #11315 func TestDaemonRestartRenameContainer(t *testing.T) { d := NewDaemon(t) if err := d.StartWithBusybox(); err != nil { t.Fatal(err) } if out, err := d.Cmd("run", "--name=test", "busybox"); err != nil { t.Fatal(err, out) } if out, err := d.Cmd("rename", "test", "test2"); err != nil { t.Fatal(err, out) } if err := d.Restart(); err != nil { t.Fatal(err) } if out, err := d.Cmd("start", "test2"); err != nil { t.Fatal(err, out) } logDone("daemon - rename persists through daemon restart") } func TestDaemonLoggingDriverDefault(t *testing.T) { d := NewDaemon(t) if err := d.StartWithBusybox(); err != nil { t.Fatal(err) } defer d.Stop() out, err := d.Cmd("run", "-d", "busybox", "echo", "testline") if err != nil { t.Fatal(out, err) } id := strings.TrimSpace(out) if out, err := d.Cmd("wait", id); err != nil { t.Fatal(out, err) } logPath := filepath.Join(d.folder, "graph", "containers", id, id+"-json.log") if _, err := os.Stat(logPath); err != nil { t.Fatal(err) } f, err := os.Open(logPath) if err != nil { t.Fatal(err) } // a json-file entry is one JSON object per line: {"log":"...","stream":"stdout","time":"..."} var res struct { Log string `json:"log"` Stream string `json:"stream"` Time time.Time `json:"time"` } if err := json.NewDecoder(f).Decode(&res); err != nil { t.Fatal(err) } if res.Log != "testline\n" { t.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n") } if res.Stream != "stdout" { t.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout") } if !time.Now().After(res.Time) { t.Fatalf("Log time %v in future", res.Time) } logDone("daemon - default 'json-file' logging driver") } func TestDaemonLoggingDriverDefaultOverride(t *testing.T) { d := NewDaemon(t) if err := d.StartWithBusybox(); err != nil { t.Fatal(err) } defer d.Stop() out, err := d.Cmd("run", "-d", "--log-driver=none", "busybox", "echo", "testline") if err != nil { t.Fatal(out, err) } id := strings.TrimSpace(out) if out, err := d.Cmd("wait", id); err != nil { t.Fatal(out, err) }
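// With --log-driver=none the daemon is expected to create no json log file for this container, so the Stat below must fail with an os.IsNotExist error.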
logPath := filepath.Join(d.folder, "graph", "containers", id, id+"-json.log") if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) { t.Fatalf("%s should not exist, error on Stat: %s", logPath, err) } logDone("daemon - default logging driver override in run") } func TestDaemonLoggingDriverNone(t *testing.T) { d := NewDaemon(t) if err := d.StartWithBusybox("--log-driver=none"); err != nil { t.Fatal(err) } defer d.Stop() out, err := d.Cmd("run", "-d", "busybox", "echo", "testline") if err != nil { t.Fatal(out, err) } id := strings.TrimSpace(out) if out, err := d.Cmd("wait", id); err != nil { t.Fatal(out, err) } logPath := filepath.Join(d.folder, "graph", "containers", id, id+"-json.log") if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) { t.Fatalf("%s should not exist, error on Stat: %s", logPath, err) } logDone("daemon - 'none' logging driver") } func TestDaemonLoggingDriverNoneOverride(t *testing.T) { d := NewDaemon(t) if err := d.StartWithBusybox("--log-driver=none"); err != nil { t.Fatal(err) } defer d.Stop() out, err := d.Cmd("run", "-d", "--log-driver=json-file", "busybox", "echo", "testline") if err != nil { t.Fatal(out, err) } id := strings.TrimSpace(out) if out, err := d.Cmd("wait", id); err != nil { t.Fatal(out, err) } logPath := filepath.Join(d.folder, "graph", "containers", id, id+"-json.log") if _, err := os.Stat(logPath); err != nil { t.Fatal(err) } f, err := os.Open(logPath) if err != nil { t.Fatal(err) } var res struct { Log string `json:"log"` Stream string `json:"stream"` Time time.Time `json:"time"` } if err := json.NewDecoder(f).Decode(&res); err != nil { t.Fatal(err) } if res.Log != "testline\n" { t.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n") } if res.Stream != "stdout" { t.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout") } if !time.Now().After(res.Time) { t.Fatalf("Log time %v in future", res.Time) } logDone("daemon - 'none' logging driver override in run") } func TestDaemonLoggingDriverNoneLogsError(t *testing.T) { d := NewDaemon(t) if err := d.StartWithBusybox("--log-driver=none"); err != nil { t.Fatal(err) } defer d.Stop() out, err := d.Cmd("run", "-d", "busybox", "echo", "testline") if err != nil { t.Fatal(out, err) } id := strings.TrimSpace(out) out, err = d.Cmd("logs", id) if err == nil { t.Fatalf("Logs should fail with \"none\" driver") } if !strings.Contains(out, `\"logs\" command is supported only for \"json-file\" logging driver`) { t.Fatalf("There should be an error about the non-json-file driver, got %s", out) } logDone("daemon - logs not available for non-json-file drivers") } func TestDaemonDots(t *testing.T) { defer deleteAllContainers() d := NewDaemon(t) if err := d.StartWithBusybox(); err != nil { t.Fatal(err) } // Now create 4 containers if _, err := d.Cmd("create", "busybox"); err != nil { t.Fatalf("Error creating container: %q", err) } if _, err := d.Cmd("create", "busybox"); err != nil { t.Fatalf("Error creating container: %q", err) } if _, err := d.Cmd("create", "busybox"); err != nil { t.Fatalf("Error creating container: %q", err) } if _, err := d.Cmd("create", "busybox"); err != nil { t.Fatalf("Error creating container: %q", err) } d.Stop() d.Start("--log-level=debug") d.Stop() content, _ := ioutil.ReadFile(d.logFile.Name()) if strings.Contains(string(content), "....") { t.Fatalf("Debug level should not have ....\n%s", string(content)) } d.Start("--log-level=error") d.Stop() content, _ = ioutil.ReadFile(d.logFile.Name()) if strings.Contains(string(content), "....") { t.Fatalf("Error level should not 
have ....\n%s", string(content)) } d.Start("--log-level=info") d.Stop() content, _ = ioutil.ReadFile(d.logFile.Name()) if !strings.Contains(string(content), "....") { t.Fatalf("Info level should have ....\n%s", string(content)) } logDone("daemon - test dots on INFO") } func TestDaemonUnixSockCleanedUp(t *testing.T) { d := NewDaemon(t) dir, err := ioutil.TempDir("", "socket-cleanup-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(dir) sockPath := filepath.Join(dir, "docker.sock") if err := d.Start("--host", "unix://"+sockPath); err != nil { t.Fatal(err) } if _, err := os.Stat(sockPath); err != nil { t.Fatal("socket does not exist") } if err := d.Stop(); err != nil { t.Fatal(err) } if _, err := os.Stat(sockPath); err == nil || !os.IsNotExist(err) { t.Fatal("unix socket is not cleaned up") } logDone("daemon - unix socket is cleaned up") } docker-1.6.2/integration-cli/docker_cli_restart_test.go0000644000175000017500000001541312524223634022660 0ustar tianontianonpackage main import ( "os/exec" "strings" "testing" "time" ) func TestRestartStoppedContainer(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "foobar") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } cleanedContainerID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "wait", cleanedContainerID) if out, _, err = runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } if out != "foobar\n" { t.Errorf("container should've printed 'foobar'") } runCmd = exec.Command(dockerBinary, "restart", cleanedContainerID) if out, _, err = runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } if out != "foobar\nfoobar\n" { t.Errorf("container should've printed 'foobar' twice") } logDone("restart - echo foobar for stopped container") } func TestRestartRunningContainer(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "echo foobar && sleep 30 && echo 'should not print this'") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } cleanedContainerID := stripTrailingCharacters(out) time.Sleep(1 * time.Second) runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } if out != "foobar\n" { t.Errorf("container should've printed 'foobar'") } runCmd = exec.Command(dockerBinary, "restart", "-t", "1", cleanedContainerID) if out, _, err = runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } time.Sleep(1 * time.Second) if out != "foobar\nfoobar\n" { t.Errorf("container should've printed 'foobar' twice") } logDone("restart - echo foobar for running container") } // Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819. 
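// The test compares 'docker inspect --format "{{ .Volumes }}"' output before and after the restart; with one anonymous volume it renders roughly as map[/test:/var/lib/docker/vfs/dir/<id>] (the host path shown here is illustrative), and identical output means no new volume was allocated.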
func TestRestartWithVolumes(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "-d", "-v", "/test", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } cleanedContainerID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ len .Volumes }}", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } if out = strings.Trim(out, " \n\r"); out != "1" { t.Errorf("expected 1 volume, received %s", out) } runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ .Volumes }}", cleanedContainerID) volumes, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatal(volumes, err) } runCmd = exec.Command(dockerBinary, "restart", cleanedContainerID) if out, _, err = runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ len .Volumes }}", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } if out = strings.Trim(out, " \n\r"); out != "1" { t.Errorf("expected 1 volume after restart, received %s", out) } runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ .Volumes }}", cleanedContainerID) volumesAfterRestart, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatal(volumesAfterRestart, err) } if volumes != volumesAfterRestart { volumes = strings.Trim(volumes, " \n\r") volumesAfterRestart = strings.Trim(volumesAfterRestart, " \n\r") t.Errorf("expected volume path: %s, actual path: %s", volumes, volumesAfterRestart) } logDone("restart - does not create a new volume on restart") } func TestRestartPolicyNO(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-d", "--restart=no", "busybox", "false") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } id := strings.TrimSpace(string(out)) name, err := inspectField(id, "HostConfig.RestartPolicy.Name") if err != nil { t.Fatal(err, out) } if name != "no" { t.Fatalf("Container restart policy name is %s, expected %s", name, "no") } logDone("restart - recording restart policy name for --restart=no") } func TestRestartPolicyAlways(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-d", "--restart=always", "busybox", "false") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } id := strings.TrimSpace(string(out)) name, err := inspectField(id, "HostConfig.RestartPolicy.Name") if err != nil { t.Fatal(err, out) } if name != "always" { t.Fatalf("Container restart policy name is %s, expected %s", name, "always") } logDone("restart - recording restart policy name for --restart=always") } func TestRestartPolicyOnFailure(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-d", "--restart=on-failure:1", "busybox", "false") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } id := strings.TrimSpace(string(out)) name, err := inspectField(id, "HostConfig.RestartPolicy.Name") if err != nil { t.Fatal(err, out) } if name != "on-failure" { t.Fatalf("Container restart policy name is %s, expected %s", name, "on-failure") } logDone("restart - recording restart policy name for --restart=on-failure") } // a good container with --restart=on-failure:3 // MaximumRetryCount!=0; RestartCount=0
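// --restart=on-failure:3 only restarts a container that exits with a non-zero status, and gives up after three retries; "true" exits 0, so the daemon should record MaximumRetryCount=3 while RestartCount stays 0.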
"-d", "--restart=on-failure:3", "busybox", "true").CombinedOutput() if err != nil { t.Fatal(string(out), err) } id := strings.TrimSpace(string(out)) if err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", 5); err != nil { t.Fatal(err) } count, err := inspectField(id, "RestartCount") if err != nil { t.Fatal(err) } if count != "0" { t.Fatalf("Container was restarted %s times, expected %d", count, 0) } MaximumRetryCount, err := inspectField(id, "HostConfig.RestartPolicy.MaximumRetryCount") if err != nil { t.Fatal(err) } if MaximumRetryCount != "3" { t.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "3") } logDone("restart - for a good container with restart policy, MaximumRetryCount is not 0 and RestartCount is 0") } docker-1.6.2/integration-cli/docker_cli_rename_test.go0000644000175000017500000000615612524223634022447 0ustar tianontianonpackage main import ( "os/exec" "strings" "testing" ) func TestRenameStoppedContainer(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "--name", "first_name", "-d", "busybox", "sh") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf(out, err) } cleanedContainerID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "wait", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatalf(out, err) } name, err := inspectField(cleanedContainerID, "Name") runCmd = exec.Command(dockerBinary, "rename", "first_name", "new_name") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatalf(out, err) } name, err = inspectField(cleanedContainerID, "Name") if err != nil { t.Fatal(err) } if name != "/new_name" { t.Fatal("Failed to rename container ", name) } logDone("rename - stopped container") } func TestRenameRunningContainer(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "--name", "first_name", "-d", "busybox", "sh") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf(out, err) } cleanedContainerID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "rename", "first_name", "new_name") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatalf(out, err) } name, err := inspectField(cleanedContainerID, "Name") if err != nil { t.Fatal(err) } if name != "/new_name" { t.Fatal("Failed to rename container ") } logDone("rename - running container") } func TestRenameCheckNames(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "--name", "first_name", "-d", "busybox", "sh") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf(out, err) } runCmd = exec.Command(dockerBinary, "rename", "first_name", "new_name") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatalf(out, err) } name, err := inspectField("new_name", "Name") if err != nil { t.Fatal(err) } if name != "/new_name" { t.Fatal("Failed to rename container ") } name, err = inspectField("first_name", "Name") if err == nil && !strings.Contains(err.Error(), "No such image or container: first_name") { t.Fatal(err) } logDone("rename - running container") } func TestRenameInvalidName(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "--name", "myname", "-d", "busybox", "top") if out, _, err := runCommandWithOutput(runCmd); err != nil { t.Fatalf(out, err) } runCmd = exec.Command(dockerBinary, "rename", "myname", "new:invalid") if out, _, err := runCommandWithOutput(runCmd); err == nil 
|| !strings.Contains(out, "Invalid container name") { t.Fatalf("Renaming container to invalid name should have failed: %s\n%v", out, err) } runCmd = exec.Command(dockerBinary, "ps", "-a") if out, _, err := runCommandWithOutput(runCmd); err != nil || !strings.Contains(out, "myname") { t.Fatalf("Output of docker ps should have included 'myname': %s\n%v", out, err) } logDone("rename - invalid container name") } docker-1.6.2/integration-cli/docker_cli_info_test.go0000644000175000017500000000115612524223634022126 0ustar tianontianonpackage main import ( "os/exec" "strings" "testing" ) // ensure docker info succeeds func TestInfoEnsureSucceeds(t *testing.T) { versionCmd := exec.Command(dockerBinary, "info") out, exitCode, err := runCommandWithOutput(versionCmd) if err != nil || exitCode != 0 { t.Fatalf("failed to execute docker info: %s, %v", out, err) } stringsToCheck := []string{"Containers:", "Execution Driver:", "Kernel Version:"} for _, linePrefix := range stringsToCheck { if !strings.Contains(out, linePrefix) { t.Errorf("couldn't find string %v in output", linePrefix) } } logDone("info - verify that it works") } docker-1.6.2/integration-cli/test_vars_exec.go0000644000175000017500000000020012524223634020761 0ustar tianontianon// +build !test_no_exec package main const ( // indicates docker daemon tested supports 'docker exec' supportsExec = true ) docker-1.6.2/integration-cli/docker_cli_nat_test.go0000644000175000017500000000330312524223634021751 0ustar tianontianonpackage main import ( "fmt" "net" "os/exec" "strings" "testing" ) func TestNetworkNat(t *testing.T) { testRequires(t, SameHostDaemon, NativeExecDriver) defer deleteAllContainers() iface, err := net.InterfaceByName("eth0") if err != nil { t.Skipf("Test not running with `make test`. Interface eth0 not found: %s", err) } ifaceAddrs, err := iface.Addrs() if err != nil || len(ifaceAddrs) == 0 { t.Fatalf("Error retrieving addresses for eth0: %v (%d addresses)", err, len(ifaceAddrs)) } ifaceIP, _, err := net.ParseCIDR(ifaceAddrs[0].String()) if err != nil { t.Fatalf("Error retrieving the up for eth0: %s", err) } runCmd := exec.Command(dockerBinary, "run", "-dt", "-p", "8080:8080", "busybox", "nc", "-lp", "8080") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } cleanedContainerID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "run", "busybox", "sh", "-c", fmt.Sprintf("echo hello world | nc -w 30 %s 8080", ifaceIP)) out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatalf("failed to retrieve logs for container: %s, %v", out, err) } out = strings.Trim(out, "\r\n") if expected := "hello world"; out != expected { t.Fatalf("Unexpected output. Expected: %q, received: %q for iface %s", expected, out, ifaceIP) } killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) if out, _, err = runCommandWithOutput(killCmd); err != nil { t.Fatalf("failed to kill container: %s, %v", out, err) } logDone("network - make sure nat works through the host") } docker-1.6.2/integration-cli/docker_cli_proxy_test.go0000644000175000017500000000316212524223634022353 0ustar tianontianonpackage main import ( "net" "os/exec" "strings" "testing" ) func TestCliProxyDisableProxyUnixSock(t *testing.T) { testRequires(t, SameHostDaemon) // test is valid when DOCKER_HOST=unix://.. 
cmd := exec.Command(dockerBinary, "info") cmd.Env = appendBaseEnv([]string{"HTTP_PROXY=http://127.0.0.1:9999"}) if out, _, err := runCommandWithOutput(cmd); err != nil { t.Fatal(err, out) } logDone("cli proxy - HTTP_PROXY is not used when connecting to unix sock") } // Can't use localhost here since go has a special case to not use proxy if connecting to localhost // See http://golang.org/pkg/net/http/#ProxyFromEnvironment func TestCliProxyProxyTCPSock(t *testing.T) { testRequires(t, SameHostDaemon) // get the IP to use to connect since we can't use localhost addrs, err := net.InterfaceAddrs() if err != nil { t.Fatal(err) } var ip string for _, addr := range addrs { sAddr := addr.String() if !strings.Contains(sAddr, "127.0.0.1") { addrArr := strings.Split(sAddr, "/") ip = addrArr[0] break } } if ip == "" { t.Fatal("could not find ip to connect to") } d := NewDaemon(t) if err := d.Start("-H", "tcp://"+ip+":2375"); err != nil { t.Fatal(err) } cmd := exec.Command(dockerBinary, "info") cmd.Env = []string{"DOCKER_HOST=tcp://" + ip + ":2375", "HTTP_PROXY=127.0.0.1:9999"} if out, _, err := runCommandWithOutput(cmd); err == nil { t.Fatal(err, out) } // Test with no_proxy cmd.Env = append(cmd.Env, "NO_PROXY="+ip) if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "info")); err != nil { t.Fatal(err, out) } logDone("cli proxy - HTTP_PROXY is used for TCP sock") } docker-1.6.2/integration-cli/docker_cli_events_test.go0000644000175000017500000003101612524223634022475 0ustar tianontianonpackage main import ( "fmt" "os/exec" "regexp" "strconv" "strings" "testing" "time" ) func TestEventsUntag(t *testing.T) { image := "busybox" dockerCmd(t, "tag", image, "utest:tag1") dockerCmd(t, "tag", image, "utest:tag2") dockerCmd(t, "rmi", "utest:tag1") dockerCmd(t, "rmi", "utest:tag2") eventsCmd := exec.Command(dockerBinary, "events", "--since=1") out, exitCode, _, err := runCommandWithOutputForDuration(eventsCmd, time.Duration(time.Millisecond*200)) if exitCode != 0 || err != nil { t.Fatalf("Failed to get events - exit code %d: %s", exitCode, err) } events := strings.Split(out, "\n") nEvents := len(events) // The last element after the split above will be an empty string, so we // get the two elements before the last, which are the untags we're // looking for. 
for _, v := range events[nEvents-3 : nEvents-1] { if !strings.Contains(v, "untag") { t.Fatalf("event should be untag, not %#v", v) } } logDone("events - untags are logged") } func TestEventsContainerFailStartDie(t *testing.T) { defer deleteAllContainers() out, _, _ := dockerCmd(t, "images", "-q") image := strings.Split(out, "\n")[0] eventsCmd := exec.Command(dockerBinary, "run", "--name", "testeventdie", image, "blerg") _, _, err := runCommandWithOutput(eventsCmd) if err == nil { t.Fatalf("Container run with command blerg should have failed, but it did not") } eventsCmd = exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(t).Unix())) out, _, _ = runCommandWithOutput(eventsCmd) events := strings.Split(out, "\n") if len(events) <= 1 { t.Fatalf("Missing expected event") } startEvent := strings.Fields(events[len(events)-3]) dieEvent := strings.Fields(events[len(events)-2]) if startEvent[len(startEvent)-1] != "start" { t.Fatalf("event should be start, not %#v", startEvent) } if dieEvent[len(dieEvent)-1] != "die" { t.Fatalf("event should be die, not %#v", dieEvent) } logDone("events - container unwilling to start logs die") } func TestEventsLimit(t *testing.T) { defer deleteAllContainers() for i := 0; i < 30; i++ { dockerCmd(t, "run", "busybox", "echo", strconv.Itoa(i)) } eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(t).Unix())) out, _, _ := runCommandWithOutput(eventsCmd) events := strings.Split(out, "\n") nEvents := len(events) - 1 if nEvents != 64 { t.Fatalf("events should be limited to 64, but received %d", nEvents) } logDone("events - limited to 64 entries") } func TestEventsContainerEvents(t *testing.T) { dockerCmd(t, "run", "--rm", "busybox", "true") eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(t).Unix())) out, exitCode, err := runCommandWithOutput(eventsCmd) if exitCode != 0 || err != nil { t.Fatalf("Failed to get events with exit code %d: %s", exitCode, err) } events := strings.Split(out, "\n") events = events[:len(events)-1] if len(events) < 4 { t.Fatalf("Missing expected event") } createEvent := strings.Fields(events[len(events)-4]) startEvent := strings.Fields(events[len(events)-3]) dieEvent := strings.Fields(events[len(events)-2]) destroyEvent := strings.Fields(events[len(events)-1]) if createEvent[len(createEvent)-1] != "create" { t.Fatalf("event should be create, not %#v", createEvent) } if startEvent[len(startEvent)-1] != "start" { t.Fatalf("event should be start, not %#v", startEvent) } if dieEvent[len(dieEvent)-1] != "die" { t.Fatalf("event should be die, not %#v", dieEvent) } if destroyEvent[len(destroyEvent)-1] != "destroy" { t.Fatalf("event should be destroy, not %#v", destroyEvent) } logDone("events - container create, start, die, destroy is logged") } func TestEventsImageUntagDelete(t *testing.T) { name := "testimageevents" defer deleteImages(name) _, err := buildImage(name, `FROM scratch MAINTAINER "docker"`, true) if err != nil { t.Fatal(err) } if err := deleteImages(name); err != nil { t.Fatal(err) } eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(t).Unix())) out, exitCode, err := runCommandWithOutput(eventsCmd) if exitCode != 0 || err != nil { t.Fatalf("Failed to get events with exit code %d: %s", exitCode, err) } events := strings.Split(out, "\n") events = events[:len(events)-1] if len(events) < 2 { t.Fatalf("Missing expected event") } untagEvent := 
strings.Fields(events[len(events)-2]) deleteEvent := strings.Fields(events[len(events)-1]) if untagEvent[len(untagEvent)-1] != "untag" { t.Fatalf("event should be untag, not %#v", untagEvent) } if deleteEvent[len(deleteEvent)-1] != "delete" { t.Fatalf("event should be delete, not %#v", deleteEvent) } logDone("events - image untag, delete is logged") } func TestEventsImagePull(t *testing.T) { since := daemonTime(t).Unix() defer deleteImages("hello-world") pullCmd := exec.Command(dockerBinary, "pull", "hello-world") if out, _, err := runCommandWithOutput(pullCmd); err != nil { t.Fatalf("pulling the hello-world image failed: %s, %v", out, err) } eventsCmd := exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(t).Unix())) out, _, _ := runCommandWithOutput(eventsCmd) events := strings.Split(strings.TrimSpace(out), "\n") event := strings.TrimSpace(events[len(events)-1]) if !strings.HasSuffix(event, "hello-world:latest: pull") { t.Fatalf("Missing pull event - got:%q", event) } logDone("events - image pull is logged") } func TestEventsImageImport(t *testing.T) { defer deleteAllContainers() since := daemonTime(t).Unix() runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatal("failed to create a container", out, err) } cleanedContainerID := stripTrailingCharacters(out) out, _, err = runCommandPipelineWithOutput( exec.Command(dockerBinary, "export", cleanedContainerID), exec.Command(dockerBinary, "import", "-"), ) if err != nil { t.Errorf("import failed with errors: %v, output: %q", err, out) } eventsCmd := exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(t).Unix())) out, _, _ = runCommandWithOutput(eventsCmd) events := strings.Split(strings.TrimSpace(out), "\n") event := strings.TrimSpace(events[len(events)-1]) if !strings.HasSuffix(event, ": import") { t.Fatalf("Missing import event - got:%q", event) } logDone("events - image import is logged") } func TestEventsFilters(t *testing.T) { parseEvents := func(out, match string) { events := strings.Split(out, "\n") events = events[:len(events)-1] for _, event := range events { eventFields := strings.Fields(event) eventName := eventFields[len(eventFields)-1] if ok, err := regexp.MatchString(match, eventName); err != nil || !ok { t.Fatalf("event should match %s, got %#v, err: %v", match, eventFields, err) } } } since := daemonTime(t).Unix() out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--rm", "busybox", "true")) if err != nil { t.Fatal(out, err) } out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "--rm", "busybox", "true")) if err != nil { t.Fatal(out, err) } out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(t).Unix()), "--filter", "event=die")) if err != nil { t.Fatalf("Failed to get events: %s", err) } parseEvents(out, "die") out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(t).Unix()), "--filter", "event=die", "--filter", "event=start")) if err != nil { t.Fatalf("Failed to get events: %s", err) } parseEvents(out, "((die)|(start))") // make sure we got at least 2 start events count := strings.Count(out, "start") if count < 2 { t.Fatalf("should have had at least 2 start events but had %d, out: %s", count, out) } logDone("events - 
filters") } func TestEventsFilterImageName(t *testing.T) { since := daemonTime(t).Unix() defer deleteAllContainers() out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", "container_1", "-d", "busybox", "true")) if err != nil { t.Fatal(out, err) } container1 := stripTrailingCharacters(out) out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", "container_2", "-d", "busybox", "true")) if err != nil { t.Fatal(out, err) } container2 := stripTrailingCharacters(out) for _, s := range []string{"busybox", "busybox:latest"} { eventsCmd := exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(t).Unix()), "--filter", fmt.Sprintf("image=%s", s)) out, _, err := runCommandWithOutput(eventsCmd) if err != nil { t.Fatalf("Failed to get events, error: %s(%s)", err, out) } events := strings.Split(out, "\n") events = events[:len(events)-1] if len(events) == 0 { t.Fatalf("Expected events but found none for the image busybox:latest") } count1 := 0 count2 := 0 for _, e := range events { if strings.Contains(e, container1) { count1++ } else if strings.Contains(e, container2) { count2++ } } if count1 == 0 || count2 == 0 { t.Fatalf("Expected events from each container but got %d from %s and %d from %s", count1, container1, count2, container2) } } logDone("events - filters using image") } func TestEventsFilterContainerID(t *testing.T) { since := daemonTime(t).Unix() defer deleteAllContainers() out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "busybox", "true")) if err != nil { t.Fatal(out, err) } container1 := stripTrailingCharacters(out) out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "busybox", "true")) if err != nil { t.Fatal(out, err) } container2 := stripTrailingCharacters(out) for _, s := range []string{container1, container2, container1[:12], container2[:12]} { eventsCmd := exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(t).Unix()), "--filter", fmt.Sprintf("container=%s", s)) out, _, err := runCommandWithOutput(eventsCmd) if err != nil { t.Fatalf("Failed to get events, error: %s(%s)", err, out) } events := strings.Split(out, "\n") events = events[:len(events)-1] if len(events) == 0 || len(events) > 3 { t.Fatalf("Expected 3 events, got %d: %v", len(events), events) } createEvent := strings.Fields(events[0]) if createEvent[len(createEvent)-1] != "create" { t.Fatalf("first event should be create, not %#v", createEvent) } if len(events) > 1 { startEvent := strings.Fields(events[1]) if startEvent[len(startEvent)-1] != "start" { t.Fatalf("second event should be start, not %#v", startEvent) } } if len(events) == 3 { dieEvent := strings.Fields(events[len(events)-1]) if dieEvent[len(dieEvent)-1] != "die" { t.Fatalf("event should be die, not %#v", dieEvent) } } } logDone("events - filters using container id") } func TestEventsFilterContainerName(t *testing.T) { since := daemonTime(t).Unix() defer deleteAllContainers() _, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", "container_1", "busybox", "true")) if err != nil { t.Fatal(err) } _, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", "container_2", "busybox", "true")) if err != nil { t.Fatal(err) } for _, s := range []string{"container_1", "container_2"} { eventsCmd := exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(t).Unix()), "--filter", 
fmt.Sprintf("container=%s", s)) out, _, err := runCommandWithOutput(eventsCmd) if err != nil { t.Fatalf("Failed to get events, error : %s(%s)", err, out) } events := strings.Split(out, "\n") events = events[:len(events)-1] if len(events) == 0 || len(events) > 3 { t.Fatalf("Expected 3 events, got %d: %v", len(events), events) } createEvent := strings.Fields(events[0]) if createEvent[len(createEvent)-1] != "create" { t.Fatalf("first event should be create, not %#v", createEvent) } if len(events) > 1 { startEvent := strings.Fields(events[1]) if startEvent[len(startEvent)-1] != "start" { t.Fatalf("second event should be start, not %#v", startEvent) } } if len(events) == 3 { dieEvent := strings.Fields(events[len(events)-1]) if dieEvent[len(dieEvent)-1] != "die" { t.Fatalf("event should be die, not %#v", dieEvent) } } } logDone("events - filters using container name") } docker-1.6.2/integration-cli/docker_cli_exec_test.go0000644000175000017500000004125712524223634022125 0ustar tianontianon// +build !test_no_exec package main import ( "bufio" "fmt" "os" "os/exec" "path/filepath" "reflect" "sort" "strings" "sync" "testing" "time" ) func TestExec(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && sleep 100") if out, _, _, err := runCommandWithStdoutStderr(runCmd); err != nil { t.Fatal(out, err) } execCmd := exec.Command(dockerBinary, "exec", "testing", "cat", "/tmp/file") out, _, err := runCommandWithOutput(execCmd) if err != nil { t.Fatal(out, err) } out = strings.Trim(out, "\r\n") if expected := "test"; out != expected { t.Errorf("container exec should've printed %q but printed %q", expected, out) } logDone("exec - basic test") } func TestExecInteractiveStdinClose(t *testing.T) { defer deleteAllContainers() out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-itd", "busybox", "/bin/cat")) if err != nil { t.Fatal(err) } contId := strings.TrimSpace(out) returnchan := make(chan struct{}) go func() { var err error cmd := exec.Command(dockerBinary, "exec", "-i", contId, "/bin/ls", "/") cmd.Stdin = os.Stdin if err != nil { t.Fatal(err) } out, err := cmd.CombinedOutput() if err != nil { t.Fatal(err, string(out)) } if string(out) == "" { t.Fatalf("Output was empty, likely blocked by standard input") } returnchan <- struct{}{} }() select { case <-returnchan: case <-time.After(10 * time.Second): t.Fatal("timed out running docker exec") } logDone("exec - interactive mode closes stdin after execution") } func TestExecInteractive(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && sleep 100") if out, _, _, err := runCommandWithStdoutStderr(runCmd); err != nil { t.Fatal(out, err) } execCmd := exec.Command(dockerBinary, "exec", "-i", "testing", "sh") stdin, err := execCmd.StdinPipe() if err != nil { t.Fatal(err) } stdout, err := execCmd.StdoutPipe() if err != nil { t.Fatal(err) } if err := execCmd.Start(); err != nil { t.Fatal(err) } if _, err := stdin.Write([]byte("cat /tmp/file\n")); err != nil { t.Fatal(err) } r := bufio.NewReader(stdout) line, err := r.ReadString('\n') if err != nil { t.Fatal(err) } line = strings.TrimSpace(line) if line != "test" { t.Fatalf("Output should be 'test', got '%q'", line) } if err := stdin.Close(); err != nil { t.Fatal(err) } finish := make(chan struct{}) go func() { if err := execCmd.Wait(); err != nil { t.Fatal(err) } close(finish) }() 
select { case <-finish: case <-time.After(1 * time.Second): t.Fatal("docker exec failed to exit on stdin close") } logDone("exec - Interactive test") } func TestExecAfterContainerRestart(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } cleanedContainerID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "restart", cleanedContainerID) if out, _, err = runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } runCmd = exec.Command(dockerBinary, "exec", cleanedContainerID, "echo", "hello") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } outStr := strings.TrimSpace(out) if outStr != "hello" { t.Errorf("container should've printed hello, instead printed %q", outStr) } logDone("exec - exec running container after container restart") } func TestExecAfterDaemonRestart(t *testing.T) { testRequires(t, SameHostDaemon) defer deleteAllContainers() d := NewDaemon(t) if err := d.StartWithBusybox(); err != nil { t.Fatalf("Could not start daemon with busybox: %v", err) } defer d.Stop() if out, err := d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil { t.Fatalf("Could not run top: err=%v\n%s", err, out) } if err := d.Restart(); err != nil { t.Fatalf("Could not restart daemon: %v", err) } if out, err := d.Cmd("start", "top"); err != nil { t.Fatalf("Could not start top after daemon restart: err=%v\n%s", err, out) } out, err := d.Cmd("exec", "top", "echo", "hello") if err != nil { t.Fatalf("Could not exec on container top: err=%v\n%s", err, out) } outStr := strings.TrimSpace(string(out)) if outStr != "hello" { t.Errorf("container should've printed hello, instead printed %q", outStr) } logDone("exec - exec running container after daemon restart") } // Regression test for #9155, #9044 func TestExecEnv(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "-e", "LALA=value1", "-e", "LALA=value2", "-d", "--name", "testing", "busybox", "top") if out, _, _, err := runCommandWithStdoutStderr(runCmd); err != nil { t.Fatal(out, err) } execCmd := exec.Command(dockerBinary, "exec", "testing", "env") out, _, err := runCommandWithOutput(execCmd) if err != nil { t.Fatal(out, err) } if strings.Contains(out, "LALA=value1") || !strings.Contains(out, "LALA=value2") || !strings.Contains(out, "HOME=/root") { t.Errorf("exec env(%q), expect %q, %q", out, "LALA=value2", "HOME=/root") } logDone("exec - exec inherits correct env") } func TestExecExitStatus(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "top", "busybox", "top") if out, _, _, err := runCommandWithStdoutStderr(runCmd); err != nil { t.Fatal(out, err) } // Test normal (non-detached) case first cmd := exec.Command(dockerBinary, "exec", "top", "sh", "-c", "exit 23") ec, _ := runCommand(cmd) if ec != 23 { t.Fatalf("Should have had an ExitCode of 23, not: %d", ec) } logDone("exec - exec non-zero ExitStatus") } func TestExecPausedContainer(t *testing.T) { defer deleteAllContainers() defer unpauseAllContainers() runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } ContainerID := stripTrailingCharacters(out) pausedCmd := exec.Command(dockerBinary, "pause", "testing") out, _, _, err = runCommandWithStdoutStderr(pausedCmd) if err != nil { t.Fatal(out, 
err) } execCmd := exec.Command(dockerBinary, "exec", "-i", "-t", ContainerID, "echo", "hello") out, _, err = runCommandWithOutput(execCmd) if err == nil { t.Fatal("container should fail to exec new command if it is paused") } expected := ContainerID + " is paused, unpause the container before exec" if !strings.Contains(out, expected) { t.Fatal("container should not exec new command if it is paused") } logDone("exec - exec should not exec a paused container") } // regression test for #9476 func TestExecTtyCloseStdin(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-d", "-it", "--name", "exec_tty_stdin", "busybox") if out, _, err := runCommandWithOutput(cmd); err != nil { t.Fatal(out, err) } cmd = exec.Command(dockerBinary, "exec", "-i", "exec_tty_stdin", "cat") stdinRw, err := cmd.StdinPipe() if err != nil { t.Fatal(err) } stdinRw.Write([]byte("test")) stdinRw.Close() if out, _, err := runCommandWithOutput(cmd); err != nil { t.Fatal(out, err) } cmd = exec.Command(dockerBinary, "top", "exec_tty_stdin") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(out, err) } outArr := strings.Split(out, "\n") if len(outArr) > 3 || strings.Contains(out, "nsenter-exec") { // This is the really bad part if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rm", "-f", "exec_tty_stdin")); err != nil { t.Fatal(out, err) } t.Fatalf("exec process left running\n\t %s", out) } logDone("exec - stdin is closed properly with tty enabled") } func TestExecTtyWithoutStdin(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-d", "-ti", "busybox") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatalf("failed to start container: %v (%v)", out, err) } id := strings.TrimSpace(out) if err := waitRun(id); err != nil { t.Fatal(err) } defer func() { cmd := exec.Command(dockerBinary, "kill", id) if out, _, err := runCommandWithOutput(cmd); err != nil { t.Fatalf("failed to kill container: %v (%v)", out, err) } }() done := make(chan struct{}) go func() { defer close(done) cmd := exec.Command(dockerBinary, "exec", "-ti", id, "true") if _, err := cmd.StdinPipe(); err != nil { t.Fatal(err) } expected := "cannot enable tty mode" if out, _, err := runCommandWithOutput(cmd); err == nil { t.Fatal("exec should have failed") } else if !strings.Contains(out, expected) { t.Fatalf("exec failed with error %q: expected %q", out, expected) } }() select { case <-done: case <-time.After(3 * time.Second): t.Fatal("exec is running but should have failed") } logDone("exec - forbid piped stdin to tty enabled container") } func TestExecParseError(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "top", "busybox", "top") if out, _, err := runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } // Test normal (non-detached) case first cmd := exec.Command(dockerBinary, "exec", "top") if _, stderr, code, err := runCommandWithStdoutStderr(cmd); err == nil || !strings.Contains(stderr, "See '"+dockerBinary+" exec --help'") || code == 0 { t.Fatalf("Should have thrown an error and pointed to help: %s", stderr) } logDone("exec - error on parseExec should point to help") } func TestExecStopNotHanging(t *testing.T) { defer deleteAllContainers() if out, err := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "top").CombinedOutput(); err != nil { t.Fatal(out, err) } if err := exec.Command(dockerBinary, "exec", "testing", "top").Start(); err != nil { t.Fatal(err) } wait := make(chan 
struct{}) go func() { if out, err := exec.Command(dockerBinary, "stop", "testing").CombinedOutput(); err != nil { t.Fatal(out, err) } close(wait) }() select { case <-time.After(3 * time.Second): t.Fatal("Container stop timed out") case <-wait: } logDone("exec - container with exec not hanging on stop") } func TestExecCgroup(t *testing.T) { defer deleteAllContainers() var cmd *exec.Cmd cmd = exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "top") _, err := runCommand(cmd) if err != nil { t.Fatal(err) } cmd = exec.Command(dockerBinary, "exec", "testing", "cat", "/proc/1/cgroup") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(out, err) } containerCgroups := sort.StringSlice(strings.Split(string(out), "\n")) var wg sync.WaitGroup var s sync.Mutex execCgroups := []sort.StringSlice{} // exec a few times concurrently to get consistent failure for i := 0; i < 5; i++ { wg.Add(1) go func() { cmd := exec.Command(dockerBinary, "exec", "testing", "cat", "/proc/self/cgroup") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(out, err) } cg := sort.StringSlice(strings.Split(string(out), "\n")) s.Lock() execCgroups = append(execCgroups, cg) s.Unlock() wg.Done() }() } wg.Wait() for _, cg := range execCgroups { if !reflect.DeepEqual(cg, containerCgroups) { fmt.Println("exec cgroups:") for _, name := range cg { fmt.Printf(" %s\n", name) } fmt.Println("container cgroups:") for _, name := range containerCgroups { fmt.Printf(" %s\n", name) } t.Fatal("cgroups mismatched") } } logDone("exec - exec has the container cgroups") } func TestInspectExecID(t *testing.T) { defer deleteAllContainers() out, exitCode, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "busybox", "top")) if exitCode != 0 || err != nil { t.Fatalf("failed to run container: %s, %v", out, err) } id := strings.TrimSuffix(out, "\n") out, err = inspectField(id, "ExecIDs") if err != nil { t.Fatalf("failed to inspect container: %s, %v", out, err) } if out != "" { t.Fatalf("ExecIDs should be empty, got: %s", out) } exitCode, err = runCommand(exec.Command(dockerBinary, "exec", "-d", id, "ls", "/")) if exitCode != 0 || err != nil { t.Fatalf("failed to exec in container: %s, %v", out, err) } out, err = inspectField(id, "ExecIDs") if err != nil { t.Fatalf("failed to inspect container: %s, %v", out, err) } out = strings.TrimSuffix(out, "\n") if out == "[]" || out == "" { t.Fatalf("ExecIDs should not be empty, got: %s", out) } logDone("inspect - inspect a container with ExecIDs") } func TestLinksPingLinkedContainersOnRename(t *testing.T) { defer deleteAllContainers() var out string out, _, _ = dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10") idA := stripTrailingCharacters(out) if idA == "" { t.Fatal(out, "id should not be nil") } out, _, _ = dockerCmd(t, "run", "-d", "--link", "container1:alias1", "--name", "container2", "busybox", "sleep", "10") idB := stripTrailingCharacters(out) if idB == "" { t.Fatal(out, "id should not be nil") } execCmd := exec.Command(dockerBinary, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1") out, _, err := runCommandWithOutput(execCmd) if err != nil { t.Fatal(out, err) } dockerCmd(t, "rename", "container1", "container_new") execCmd = exec.Command(dockerBinary, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1") out, _, err = runCommandWithOutput(execCmd) if err != nil { t.Fatal(out, err) } logDone("links - ping linked container upon rename") } func TestRunExecDir(t *testing.T) { testRequires(t, 
SameHostDaemon) cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } id := strings.TrimSpace(out) execDir := filepath.Join(execDriverPath, id) stateFile := filepath.Join(execDir, "state.json") { fi, err := os.Stat(execDir) if err != nil { t.Fatal(err) } if !fi.IsDir() { t.Fatalf("%q must be a directory", execDir) } fi, err = os.Stat(stateFile) if err != nil { t.Fatal(err) } } stopCmd := exec.Command(dockerBinary, "stop", id) out, _, err = runCommandWithOutput(stopCmd) if err != nil { t.Fatal(err, out) } { _, err := os.Stat(execDir) if err == nil { t.Fatalf("Exec directory %q still exists for stopped container!", execDir) } if !os.IsNotExist(err) { t.Fatalf("Error should be about a non-existent path, got %s", err) } } startCmd := exec.Command(dockerBinary, "start", id) out, _, err = runCommandWithOutput(startCmd) if err != nil { t.Fatal(err, out) } { fi, err := os.Stat(execDir) if err != nil { t.Fatal(err) } if !fi.IsDir() { t.Fatalf("%q must be a directory", execDir) } fi, err = os.Stat(stateFile) if err != nil { t.Fatal(err) } } rmCmd := exec.Command(dockerBinary, "rm", "-f", id) out, _, err = runCommandWithOutput(rmCmd) if err != nil { t.Fatal(err, out) } { _, err := os.Stat(execDir) if err == nil { t.Fatalf("Exec directory %q still exists for removed container!", execDir) } if !os.IsNotExist(err) { t.Fatalf("Error should be about a non-existent path, got %s", err) } } logDone("run - check execdriver dir behavior") } func TestRunMutableNetworkFiles(t *testing.T) { testRequires(t, SameHostDaemon) defer deleteAllContainers() for _, fn := range []string{"resolv.conf", "hosts"} { deleteAllContainers() content, err := runCommandAndReadContainerFile(fn, exec.Command(dockerBinary, "run", "-d", "--name", "c1", "busybox", "sh", "-c", fmt.Sprintf("echo success >/etc/%s && top", fn))) if err != nil { t.Fatal(err) } if strings.TrimSpace(string(content)) != "success" { t.Fatal("Content does not match what was written in the container", string(content)) } out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "c2", "busybox", "top")) if err != nil { t.Fatal(err) } contID := strings.TrimSpace(out) netFilePath := containerStorageFile(contID, fn) f, err := os.OpenFile(netFilePath, os.O_WRONLY|os.O_SYNC|os.O_APPEND, 0644) if err != nil { t.Fatal(err) } if _, err := f.Seek(0, 0); err != nil { f.Close() t.Fatal(err) } if err := f.Truncate(0); err != nil { f.Close() t.Fatal(err) } if _, err := f.Write([]byte("success2\n")); err != nil { f.Close() t.Fatal(err) } f.Close() res, err := exec.Command(dockerBinary, "exec", contID, "cat", "/etc/"+fn).CombinedOutput() if err != nil { t.Fatalf("Output: %s, error: %s", res, err) } if string(res) != "success2\n" { t.Fatalf("Expected content of %s: %q, got: %q", fn, "success2\n", res) } } logDone("run - mutable network files") } docker-1.6.2/integration-cli/docker_cli_export_import_test.go0000644000175000017500000000604412524223634024107 0ustar tianontianonpackage main import ( "os" "os/exec" "strings" "testing" ) // export a container and try to import the result as a new image func TestExportContainerAndImportImage(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatal("failed to create a container", out, err) } cleanedContainerID := stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
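// Sanity check: the captured id must be inspectable before piping 'docker export' into 'docker import'.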
out, _, err = runCommandWithOutput(inspectCmd) if err != nil { t.Fatalf("output should've been a container id: %s, %v", cleanedContainerID, err) } exportCmd := exec.Command(dockerBinary, "export", cleanedContainerID) if out, _, err = runCommandWithOutput(exportCmd); err != nil { t.Fatalf("failed to export container: %s, %v", out, err) } importCmd := exec.Command(dockerBinary, "import", "-", "repo/testexp:v1") importCmd.Stdin = strings.NewReader(out) out, _, err = runCommandWithOutput(importCmd) if err != nil { t.Fatalf("failed to import image: %s, %v", out, err) } cleanedImageID := stripTrailingCharacters(out) inspectCmd = exec.Command(dockerBinary, "inspect", cleanedImageID) if out, _, err = runCommandWithOutput(inspectCmd); err != nil { t.Fatalf("output should've been an image id: %s, %v", out, err) } deleteContainer(cleanedContainerID) deleteImages("repo/testexp:v1") logDone("export - export a container") logDone("import - import an image") } // Used to test the output flag in the export command func TestExportContainerWithOutputAndImportImage(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatal("failed to create a container", out, err) } cleanedContainerID := stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) out, _, err = runCommandWithOutput(inspectCmd) if err != nil { t.Fatalf("output should've been a container id: %s, %v", cleanedContainerID, err) } exportCmd := exec.Command(dockerBinary, "export", "--output=testexp.tar", cleanedContainerID) if out, _, err = runCommandWithOutput(exportCmd); err != nil { t.Fatalf("failed to export container: %s, %v", out, err) } out, _, err = runCommandWithOutput(exec.Command("cat", "testexp.tar")) if err != nil { t.Fatal(out, err) } importCmd := exec.Command(dockerBinary, "import", "-", "repo/testexp:v1") importCmd.Stdin = strings.NewReader(out) out, _, err = runCommandWithOutput(importCmd) if err != nil { t.Fatalf("failed to import image: %s, %v", out, err) } cleanedImageID := stripTrailingCharacters(out) inspectCmd = exec.Command(dockerBinary, "inspect", cleanedImageID) if out, _, err = runCommandWithOutput(inspectCmd); err != nil { t.Fatalf("output should've been an image id: %s, %v", out, err) } deleteContainer(cleanedContainerID) deleteImages("repo/testexp:v1") os.Remove("testexp.tar") // the export was written to the working directory, not /tmp logDone("export - export a container with output flag") logDone("import - import an image with output flag") } docker-1.6.2/integration-cli/docker_api_inspect_test.go0000644000175000017500000000336412524223634022645 0ustar tianontianonpackage main import ( "encoding/json" "os/exec" "testing" ) func TestInspectApiContainerResponse(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf("failed to create a container: %s, %v", out, err) } cleanedContainerID := stripTrailingCharacters(out) // test on json marshal version // and latest version testVersions := []string{"v1.11", "latest"} for _, testVersion := range testVersions { endpoint := "/containers/" + cleanedContainerID + "/json" if testVersion != "latest" { endpoint = "/" + testVersion + endpoint } body, err := sockRequest("GET", endpoint, nil) if err != nil { t.Fatalf("sockRequest failed for %s version: %v", testVersion, err) } var inspectJSON map[string]interface{} if err = json.Unmarshal(body, &inspectJSON); err != nil { t.Fatalf("unable to unmarshal body for %s version: %v", testVersion, err) }
t.Fatalf("unable to unmarshal body for %s version: %v", testVersion, err) } keys := []string{"State", "Created", "Path", "Args", "Config", "Image", "NetworkSettings", "ResolvConfPath", "HostnamePath", "HostsPath", "LogPath", "Name", "Driver", "ExecDriver", "MountLabel", "ProcessLabel", "Volumes", "VolumesRW"} if testVersion == "v1.11" { keys = append(keys, "ID") } else { keys = append(keys, "Id") } for _, key := range keys { if _, ok := inspectJSON[key]; !ok { t.Fatalf("%s does not exist in reponse for %s version", key, testVersion) } } //Issue #6830: type not properly converted to JSON/back if _, ok := inspectJSON["Path"].(bool); ok { t.Fatalf("Path of `true` should not be converted to boolean `true` via JSON marshalling") } } logDone("container json - check keys in container json response") } docker-1.6.2/integration-cli/test_vars_windows.go0000644000175000017500000000033612524223634021541 0ustar tianontianon// +build windows package main const ( // identifies if test suite is running on a unix platform isUnixCli = false // this is the expected file permission set on windows: gh#11395 expectedFileChmod = "-rwxr-xr-x" ) docker-1.6.2/integration-cli/docker_cli_tag_test.go0000644000175000017500000001371612524223634021753 0ustar tianontianonpackage main import ( "fmt" "os/exec" "strings" "testing" ) // tagging a named image in a new unprefixed repo should work func TestTagUnprefixedRepoByName(t *testing.T) { if err := pullImageIfNotExist("busybox:latest"); err != nil { t.Fatal("couldn't find the busybox:latest image locally and failed to pull it") } tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", "testfoobarbaz") if out, _, err := runCommandWithOutput(tagCmd); err != nil { t.Fatal(out, err) } deleteImages("testfoobarbaz") logDone("tag - busybox -> testfoobarbaz") } // tagging an image by ID in a new unprefixed repo should work func TestTagUnprefixedRepoByID(t *testing.T) { getIDCmd := exec.Command(dockerBinary, "inspect", "-f", "{{.Id}}", "busybox") out, _, err := runCommandWithOutput(getIDCmd) if err != nil { t.Fatalf("failed to get the image ID of busybox: %s, %v", out, err) } cleanedImageID := stripTrailingCharacters(out) tagCmd := exec.Command(dockerBinary, "tag", cleanedImageID, "testfoobarbaz") if out, _, err = runCommandWithOutput(tagCmd); err != nil { t.Fatal(out, err) } deleteImages("testfoobarbaz") logDone("tag - busybox's image ID -> testfoobarbaz") } // ensure we don't allow the use of invalid repository names; these tag operations should fail func TestTagInvalidUnprefixedRepo(t *testing.T) { invalidRepos := []string{"fo$z$", "Foo@3cc", "Foo$3", "Foo*3", "Fo^3", "Foo!3", "F)xcz(", "fo%asd"} for _, repo := range invalidRepos { tagCmd := exec.Command(dockerBinary, "tag", "busybox", repo) _, _, err := runCommandWithOutput(tagCmd) if err == nil { t.Fatalf("tag busybox %v should have failed", repo) } } logDone("tag - busybox invalid repo names --> must not work") } // ensure we don't allow the use of invalid tags; these tag operations should fail func TestTagInvalidPrefixedRepo(t *testing.T) { long_tag := makeRandomString(121) invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", long_tag} for _, repotag := range invalidTags { tagCmd := exec.Command(dockerBinary, "tag", "busybox", repotag) _, _, err := runCommandWithOutput(tagCmd) if err == nil { t.Fatalf("tag busybox %v should have failed", repotag) } } logDone("tag - busybox with invalid 
repo:tagnames --> must not work") } // ensure we allow the use of valid tags func TestTagValidPrefixedRepo(t *testing.T) { if err := pullImageIfNotExist("busybox:latest"); err != nil { t.Fatal("couldn't find the busybox:latest image locally and failed to pull it") } validRepos := []string{"fooo/bar", "fooaa/test", "foooo:t"} for _, repo := range validRepos { tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", repo) _, _, err := runCommandWithOutput(tagCmd) if err != nil { t.Errorf("tag busybox %v should have worked: %s", repo, err) continue } deleteImages(repo) logMessage := fmt.Sprintf("tag - busybox %v", repo) logDone(logMessage) } } // tag an image with an existed tag name without -f option should fail func TestTagExistedNameWithoutForce(t *testing.T) { if err := pullImageIfNotExist("busybox:latest"); err != nil { t.Fatal("couldn't find the busybox:latest image locally and failed to pull it") } tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", "busybox:test") if out, _, err := runCommandWithOutput(tagCmd); err != nil { t.Fatal(out, err) } tagCmd = exec.Command(dockerBinary, "tag", "busybox:latest", "busybox:test") out, _, err := runCommandWithOutput(tagCmd) if err == nil || !strings.Contains(out, "Conflict: Tag test is already set to image") { t.Fatal("tag busybox busybox:test should have failed,because busybox:test is existed") } deleteImages("busybox:test") logDone("tag - busybox with an existed tag name without -f option --> must not work") } // tag an image with an existed tag name with -f option should work func TestTagExistedNameWithForce(t *testing.T) { if err := pullImageIfNotExist("busybox:latest"); err != nil { t.Fatal("couldn't find the busybox:latest image locally and failed to pull it") } tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", "busybox:test") if out, _, err := runCommandWithOutput(tagCmd); err != nil { t.Fatal(out, err) } tagCmd = exec.Command(dockerBinary, "tag", "-f", "busybox:latest", "busybox:test") if out, _, err := runCommandWithOutput(tagCmd); err != nil { t.Fatal(out, err) } deleteImages("busybox:test") logDone("tag - busybox with an existed tag name with -f option work") } // ensure tagging using official names works // ensure all tags result in the same name func TestTagOfficialNames(t *testing.T) { names := []string{ "docker.io/busybox", "index.docker.io/busybox", "library/busybox", "docker.io/library/busybox", "index.docker.io/library/busybox", } for _, name := range names { tagCmd := exec.Command(dockerBinary, "tag", "-f", "busybox:latest", name+":latest") out, exitCode, err := runCommandWithOutput(tagCmd) if err != nil || exitCode != 0 { t.Errorf("tag busybox %v should have worked: %s, %s", name, err, out) continue } // ensure we don't have multiple tag names. 
imagesCmd := exec.Command(dockerBinary, "images") out, _, err = runCommandWithOutput(imagesCmd) if err != nil { t.Errorf("listing images failed with errors: %v, %s", err, out) } else if strings.Contains(out, name) { t.Errorf("images should not have listed '%s'", name) deleteImages(name + ":latest") } else { logMessage := fmt.Sprintf("tag official name - busybox %v", name) logDone(logMessage) } } for _, name := range names { tagCmd := exec.Command(dockerBinary, "tag", "-f", name+":latest", "fooo/bar:latest") _, exitCode, err := runCommandWithOutput(tagCmd) if err != nil || exitCode != 0 { t.Errorf("tag %v fooo/bar should have worked: %s", name, err) continue } deleteImages("fooo/bar:latest") logMessage := fmt.Sprintf("tag official name - %v fooo/bar", name) logDone(logMessage) } } docker-1.6.2/integration-cli/docker_cli_events_unix_test.go0000644000175000017500000000221412524223634023536 0ustar tianontianon// +build !windows package main import ( "bufio" "fmt" "io/ioutil" "os" "os/exec" "testing" "unicode" "github.com/kr/pty" ) // #5979 func TestEventsRedirectStdout(t *testing.T) { since := daemonTime(t).Unix() dockerCmd(t, "run", "busybox", "true") defer deleteAllContainers() file, err := ioutil.TempFile("", "") if err != nil { t.Fatalf("could not create temp file: %v", err) } defer os.Remove(file.Name()) command := fmt.Sprintf("%s events --since=%d --until=%d > %s", dockerBinary, since, daemonTime(t).Unix(), file.Name()) _, tty, err := pty.Open() if err != nil { t.Fatalf("Could not open pty: %v", err) } cmd := exec.Command("sh", "-c", command) cmd.Stdin = tty cmd.Stdout = tty cmd.Stderr = tty if err := cmd.Run(); err != nil { t.Fatalf("run err for command %q: %v", command, err) } scanner := bufio.NewScanner(file) for scanner.Scan() { for _, c := range scanner.Text() { if unicode.IsControl(c) { t.Fatalf("found control character %v", []byte(string(c))) } } } if err := scanner.Err(); err != nil { t.Fatalf("Scan err for command %q: %v", command, err) } logDone("events - redirect stdout") } docker-1.6.2/integration-cli/docker_cli_rm_test.go0000644000175000017500000001002712524223634021606 0ustar tianontianonpackage main import ( "os" "os/exec" "strings" "testing" ) func TestRmContainerWithRemovedVolume(t *testing.T) { testRequires(t, SameHostDaemon) defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--name", "losemyvolumes", "-v", "/tmp/testing:/test", "busybox", "true") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } if err := os.Remove("/tmp/testing"); err != nil { t.Fatal(err) } cmd = exec.Command(dockerBinary, "rm", "-v", "losemyvolumes") if out, _, err := runCommandWithOutput(cmd); err != nil { t.Fatal(out, err) } logDone("rm - removed volume") } func TestRmContainerWithVolume(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--name", "foo", "-v", "/srv", "busybox", "true") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } cmd = exec.Command(dockerBinary, "rm", "-v", "foo") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } logDone("rm - volume") } func TestRmRunningContainer(t *testing.T) { defer deleteAllContainers() createRunningContainer(t, "foo") // Test cannot remove running container cmd := exec.Command(dockerBinary, "rm", "foo") if _, err := runCommand(cmd); err == nil { t.Fatalf("Expected error, can't rm a running container") } logDone("rm - running container") } func TestRmRunningContainerCheckError409(t *testing.T) { defer deleteAllContainers() createRunningContainer(t, "foo") endpoint := 
"/containers/foo" _, err := sockRequest("DELETE", endpoint, nil) if err == nil { t.Fatalf("Expected error, can't rm a running container") } if !strings.Contains(err.Error(), "409 Conflict") { t.Fatalf("Expected error to contain '409 Conflict' but found %s", err) } logDone("rm - running container") } func TestRmForceRemoveRunningContainer(t *testing.T) { defer deleteAllContainers() createRunningContainer(t, "foo") // Stop then remove with -s cmd := exec.Command(dockerBinary, "rm", "-f", "foo") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } logDone("rm - running container with --force=true") } func TestRmContainerOrphaning(t *testing.T) { defer deleteAllContainers() dockerfile1 := `FROM busybox:latest ENTRYPOINT ["/bin/true"]` img := "test-container-orphaning" dockerfile2 := `FROM busybox:latest ENTRYPOINT ["/bin/true"] MAINTAINER Integration Tests` // build first dockerfile img1, err := buildImage(img, dockerfile1, true) defer deleteImages(img1) if err != nil { t.Fatalf("Could not build image %s: %v", img, err) } // run container on first image if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", img)); err != nil { t.Fatalf("Could not run image %s: %v: %s", img, err, out) } // rebuild dockerfile with a small addition at the end if _, err := buildImage(img, dockerfile2, true); err != nil { t.Fatalf("Could not rebuild image %s: %v", img, err) } // try to remove the image, should error out. if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rmi", img)); err == nil { t.Fatalf("Expected to error out removing the image, but succeeded: %s", out) } // check if we deleted the first image out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "images", "-q", "--no-trunc")) if err != nil { t.Fatalf("%v: %s", err, out) } if !strings.Contains(out, img1) { t.Fatalf("Orphaned container (could not find %q in docker images): %s", img1, out) } logDone("rm - container orphaning") } func TestRmInvalidContainer(t *testing.T) { if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rm", "unknown")); err == nil { t.Fatal("Expected error on rm unknown container, got none") } else if !strings.Contains(out, "failed to remove one or more containers") { t.Fatalf("Expected output to contain 'failed to remove one or more containers', got %q", out) } logDone("rm - delete unknown container") } func createRunningContainer(t *testing.T, name string) { cmd := exec.Command(dockerBinary, "run", "-dt", "--name", name, "busybox", "top") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } } docker-1.6.2/integration-cli/test_vars_unix.go0000644000175000017500000000023412524223634021027 0ustar tianontianon// +build !windows package main const ( // identifies if test suite is running on a unix platform isUnixCli = true expectedFileChmod = "-rw-r--r--" ) docker-1.6.2/integration-cli/docker_cli_help_test.go0000644000175000017500000001141012524223634022115 0ustar tianontianonpackage main import ( "os" "os/exec" "runtime" "strings" "testing" "unicode" "github.com/docker/docker/pkg/homedir" ) func TestHelpTextVerify(t *testing.T) { // Make sure main help text fits within 80 chars and that // on non-windows system we use ~ when possible (to shorten things). 
// Test for HOME set to its default value and set to "/" on linux // Yes, on windows, setting up an array and looping (right now) isn't // necessary because we just have one value, but we'll need the // array/loop on linux so we might as well set it up so that we can // test any number of home dirs later on and all we need to do is // modify the array - the rest of the testing infrastructure should work homes := []string{homedir.Get()} // Non-Windows machines need to test for this special case of $HOME if runtime.GOOS != "windows" { homes = append(homes, "/") } homeKey := homedir.Key() baseEnvs := os.Environ() // Remove HOME env var from list so we can add a new value later. for i, env := range baseEnvs { if strings.HasPrefix(env, homeKey+"=") { baseEnvs = append(baseEnvs[:i], baseEnvs[i+1:]...) break } } for _, home := range homes { // Dup baseEnvs and add our new HOME value newEnvs := make([]string, len(baseEnvs)+1) copy(newEnvs, baseEnvs) newEnvs[len(newEnvs)-1] = homeKey + "=" + home scanForHome := runtime.GOOS != "windows" && home != "/" // Check main help text to make sure it's not over 80 chars helpCmd := exec.Command(dockerBinary, "help") helpCmd.Env = newEnvs out, ec, err := runCommandWithOutput(helpCmd) if err != nil || ec != 0 { t.Fatalf("docker help should have worked\nout:%s\nec:%d", out, ec) } lines := strings.Split(out, "\n") for _, line := range lines { if len(line) > 80 { t.Fatalf("Line is too long (%d chars):\n%s", len(line), line) } // All lines should not end with a space if strings.HasSuffix(line, " ") { t.Fatalf("Line should not end with a space: %s", line) } if scanForHome && strings.Contains(line, `=`+home) { t.Fatalf("Line should use %q instead of %q:\n%s", homedir.GetShortcutString(), home, line) } if runtime.GOOS != "windows" { i := strings.Index(line, homedir.GetShortcutString()) if i >= 0 && i != len(line)-1 && line[i+1] != '/' { t.Fatalf("Main help should not have used home shortcut:\n%s", line) } } } // Make sure each cmd's help text fits within 80 chars and that // on non-windows system we use ~ when possible (to shorten things).
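// (Added sketch: in the "Commands:" section scanned just below, a command
// line is any non-empty line that starts with whitespace; this closure is
// illustrative only and mirrors the unicode.IsSpace test used later.)
isCommandLine := func(s string) bool { return s != "" && unicode.IsSpace(rune(s[0])) }
_ = isCommandLine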
// Pull the list of commands from the "Commands:" section of docker help helpCmd = exec.Command(dockerBinary, "help") helpCmd.Env = newEnvs out, ec, err = runCommandWithOutput(helpCmd) if err != nil || ec != 0 { t.Fatalf("docker help should have worked\nout:%s\nec:%d", out, ec) } i := strings.Index(out, "Commands:") if i < 0 { t.Fatalf("Missing 'Commands:' in:\n%s", out) } // Grab all chars starting at "Commands:" // Skip first line, it's "Commands:" cmds := []string{} for _, cmd := range strings.Split(out[i:], "\n")[1:] { // Stop on blank line or non-indented line if cmd == "" || !unicode.IsSpace(rune(cmd[0])) { break } // Grab just the first word of each line cmd = strings.Split(strings.TrimSpace(cmd), " ")[0] cmds = append(cmds, cmd) helpCmd := exec.Command(dockerBinary, cmd, "--help") helpCmd.Env = newEnvs out, ec, err := runCommandWithOutput(helpCmd) if err != nil || ec != 0 { t.Fatalf("Error on %q help: %s\nexit code:%d", cmd, out, ec) } lines := strings.Split(out, "\n") for _, line := range lines { if len(line) > 80 { t.Fatalf("Help for %q is too long (%d chars):\n%s", cmd, len(line), line) } if scanForHome && strings.Contains(line, `"`+home) { t.Fatalf("Help for %q should use ~ instead of %q on:\n%s", cmd, home, line) } i := strings.Index(line, "~") if i >= 0 && i != len(line)-1 && line[i+1] != '/' { t.Fatalf("Help for %q should not have used ~:\n%s", cmd, line) } // If a line starts with 4 spaces then assume someone // added a multi-line description for an option and we need // to flag it if strings.HasPrefix(line, "    ") { t.Fatalf("Help for %q should not have a multi-line option: %s", cmd, line) } // Options should NOT end with a period if strings.HasPrefix(line, "  -") && strings.HasSuffix(line, ".") { t.Fatalf("Help for %q should not end with a period: %s", cmd, line) } // Options should NOT end with a space if strings.HasSuffix(line, " ") { t.Fatalf("Help for %q should not end with a space: %s", cmd, line) } } } expected := 39 if len(cmds) != expected { t.Fatalf("Wrong # of cmds (%d), it should be: %d\nThe list:\n%q", len(cmds), expected, cmds) } } logDone("help - verify text") } docker-1.6.2/integration-cli/docker_cli_ps_test.go0000644000175000017500000004575512524223634021624 0ustar tianontianonpackage main import ( "fmt" "os/exec" "reflect" "strconv" "strings" "testing" "time" ) func TestPsListContainers(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } firstID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } secondID := stripTrailingCharacters(out) // not long running runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } thirdID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } fourthID := stripTrailingCharacters(out) // make sure third one is not running runCmd = exec.Command(dockerBinary, "wait", thirdID) if out, _, err = runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } // all runCmd = exec.Command(dockerBinary, "ps", "-a") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } if !assertContainerList(out, []string{fourthID, thirdID, secondID, firstID}) {
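// (Added diagnostic, not part of the original test: dumping the listing that
// failed the ordering assertion makes mismatches much easier to read; out is
// the `docker ps -a` output already in scope.)
t.Logf("docker ps -a output:\n%s", out)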
t.Error("Container list is not in the correct order") } // running runCmd = exec.Command(dockerBinary, "ps") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } if !assertContainerList(out, []string{fourthID, secondID, firstID}) { t.Error("Container list is not in the correct order") } // from here all flag '-a' is ignored // limit runCmd = exec.Command(dockerBinary, "ps", "-n=2", "-a") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } expected := []string{fourthID, thirdID} if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") } runCmd = exec.Command(dockerBinary, "ps", "-n=2") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") } // since runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "-a") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } expected = []string{fourthID, thirdID, secondID} if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") } runCmd = exec.Command(dockerBinary, "ps", "--since", firstID) out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") } // before runCmd = exec.Command(dockerBinary, "ps", "--before", thirdID, "-a") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } expected = []string{secondID, firstID} if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") } runCmd = exec.Command(dockerBinary, "ps", "--before", thirdID) out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") } // since & before runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID, "-a") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } expected = []string{thirdID, secondID} if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") } runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID) out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") } // since & limit runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "-n=2", "-a") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } expected = []string{fourthID, thirdID} if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") } runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "-n=2") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") } // before & limit runCmd = exec.Command(dockerBinary, "ps", "--before", fourthID, "-n=1", "-a") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } expected = []string{thirdID} if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") } runCmd = exec.Command(dockerBinary, "ps", "--before", fourthID, "-n=1") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } if !assertContainerList(out, 
expected) { t.Error("Container list is not in the correct order") } // since & before & limit runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID, "-n=1", "-a") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } expected = []string{thirdID} if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") } runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID, "-n=1") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } if !assertContainerList(out, expected) { t.Error("Container list is not in the correct order") } logDone("ps - test ps options") } func assertContainerList(out string, expected []string) bool { lines := strings.Split(strings.Trim(out, "\n "), "\n") if len(lines)-1 != len(expected) { return false } containerIDIndex := strings.Index(lines[0], "CONTAINER ID") for i := 0; i < len(expected); i++ { foundID := lines[i+1][containerIDIndex : containerIDIndex+12] if foundID != expected[i][:12] { return false } } return true } func TestPsListContainersSize(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "hello") runCommandWithOutput(cmd) cmd = exec.Command(dockerBinary, "ps", "-s", "-n=1") base_out, _, err := runCommandWithOutput(cmd) base_lines := strings.Split(strings.Trim(base_out, "\n "), "\n") base_sizeIndex := strings.Index(base_lines[0], "SIZE") base_foundSize := base_lines[1][base_sizeIndex:] base_bytes, err := strconv.Atoi(strings.Split(base_foundSize, " ")[0]) if err != nil { t.Fatal(err) } name := "test_size" runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo 1 > test") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } id, err := getIDByName(name) if err != nil { t.Fatal(err) } runCmd = exec.Command(dockerBinary, "ps", "-s", "-n=1") wait := make(chan struct{}) go func() { out, _, err = runCommandWithOutput(runCmd) close(wait) }() select { case <-wait: case <-time.After(3 * time.Second): t.Fatalf("Calling \"docker ps -s\" timed out!") } if err != nil { t.Fatal(out, err) } lines := strings.Split(strings.Trim(out, "\n "), "\n") sizeIndex := strings.Index(lines[0], "SIZE") idIndex := strings.Index(lines[0], "CONTAINER ID") foundID := lines[1][idIndex : idIndex+12] if foundID != id[:12] { t.Fatalf("Expected id %s, got %s", id[:12], foundID) } expectedSize := fmt.Sprintf("%d B", (2 + base_bytes)) foundSize := lines[1][sizeIndex:] if foundSize != expectedSize { t.Fatalf("Expected size %q, got %q", expectedSize, foundSize) } logDone("ps - test ps size") } func TestPsListContainersFilterStatus(t *testing.T) { // FIXME: this should test paused, but it makes things hang and its wonky // this is because paused containers can't be controlled by signals defer deleteAllContainers() // start exited container runCmd := exec.Command(dockerBinary, "run", "-d", "busybox") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } firstID := stripTrailingCharacters(out) // make sure the exited cintainer is not running runCmd = exec.Command(dockerBinary, "wait", firstID) if out, _, err = runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } // start running container runCmd = exec.Command(dockerBinary, "run", "-itd", "busybox") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } secondID := stripTrailingCharacters(out) // filter containers by exited runCmd = 
exec.Command(dockerBinary, "ps", "-q", "--filter=status=exited") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } containerOut := strings.TrimSpace(out) if containerOut != firstID[:12] { t.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out) } runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--filter=status=running") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } containerOut = strings.TrimSpace(out) if containerOut != secondID[:12] { t.Fatalf("Expected id %s, got %s for running filter, output: %q", secondID[:12], containerOut, out) } logDone("ps - test ps filter status") } func TestPsListContainersFilterID(t *testing.T) { defer deleteAllContainers() // start container runCmd := exec.Command(dockerBinary, "run", "-d", "busybox") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } firstID := stripTrailingCharacters(out) // start another container runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "sleep 360") if out, _, err = runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } // filter containers by id runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--filter=id="+firstID) if out, _, err = runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } containerOut := strings.TrimSpace(out) if containerOut != firstID[:12] { t.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out) } logDone("ps - test ps filter id") } func TestPsListContainersFilterName(t *testing.T) { defer deleteAllContainers() // start container runCmd := exec.Command(dockerBinary, "run", "-d", "--name=a_name_to_match", "busybox") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } firstID := stripTrailingCharacters(out) // start another container runCmd = exec.Command(dockerBinary, "run", "-d", "--name=b_name_to_match", "busybox", "sh", "-c", "sleep 360") if out, _, err = runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } // filter containers by name runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--filter=name=a_name_to_match") if out, _, err = runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } containerOut := strings.TrimSpace(out) if containerOut != firstID[:12] { t.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out) } logDone("ps - test ps filter name") } func TestPsListContainersFilterLabel(t *testing.T) { // start container runCmd := exec.Command(dockerBinary, "run", "-d", "-l", "match=me", "-l", "second=tag", "busybox") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } firstID := stripTrailingCharacters(out) // start another container runCmd = exec.Command(dockerBinary, "run", "-d", "-l", "match=me too", "busybox") if out, _, err = runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } secondID := stripTrailingCharacters(out) // start third container runCmd = exec.Command(dockerBinary, "run", "-d", "-l", "nomatch=me", "busybox") if out, _, err = runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } thirdID := stripTrailingCharacters(out) // filter containers by exact match runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me") if out, _, err = runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } containerOut := strings.TrimSpace(out) if containerOut != firstID { t.Fatalf("Expected id %s, got %s for 
exited filter, output: %q", firstID, containerOut, out) } // filter containers by two labels runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me", "--filter=label=second=tag") if out, _, err = runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } containerOut = strings.TrimSpace(out) if containerOut != firstID { t.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID, containerOut, out) } // filter containers by two labels, but expect not found because of AND behavior runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me", "--filter=label=second=tag-no") if out, _, err = runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } containerOut = strings.TrimSpace(out) if containerOut != "" { t.Fatalf("Expected nothing, got %s for exited filter, output: %q", containerOut, out) } // filter containers by exact key runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--no-trunc", "--filter=label=match") if out, _, err = runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } containerOut = strings.TrimSpace(out) if (!strings.Contains(containerOut, firstID) || !strings.Contains(containerOut, secondID)) || strings.Contains(containerOut, thirdID) { t.Fatalf("Expected ids %s,%s, got %s for exited filter, output: %q", firstID, secondID, containerOut, out) } deleteAllContainers() logDone("ps - test ps filter label") } func TestPsListContainersFilterExited(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "top", "busybox", "top") if out, _, err := runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } runCmd = exec.Command(dockerBinary, "run", "--name", "zero1", "busybox", "true") if out, _, err := runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } firstZero, err := getIDByName("zero1") if err != nil { t.Fatal(err) } runCmd = exec.Command(dockerBinary, "run", "--name", "zero2", "busybox", "true") if out, _, err := runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } secondZero, err := getIDByName("zero2") if err != nil { t.Fatal(err) } runCmd = exec.Command(dockerBinary, "run", "--name", "nonzero1", "busybox", "false") if out, _, err := runCommandWithOutput(runCmd); err == nil { t.Fatal("Should fail.", out, err) } firstNonZero, err := getIDByName("nonzero1") if err != nil { t.Fatal(err) } runCmd = exec.Command(dockerBinary, "run", "--name", "nonzero2", "busybox", "false") if out, _, err := runCommandWithOutput(runCmd); err == nil { t.Fatal("Should fail.", out, err) } secondNonZero, err := getIDByName("nonzero2") if err != nil { t.Fatal(err) } // filter containers by exited=0 runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--no-trunc", "--filter=exited=0") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } ids := strings.Split(strings.TrimSpace(out), "\n") if len(ids) != 2 { t.Fatalf("Should be 2 zero exited containerst got %d", len(ids)) } if ids[0] != secondZero { t.Fatalf("First in list should be %q, got %q", secondZero, ids[0]) } if ids[1] != firstZero { t.Fatalf("Second in list should be %q, got %q", firstZero, ids[1]) } runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--no-trunc", "--filter=exited=1") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } ids = strings.Split(strings.TrimSpace(out), "\n") if len(ids) != 2 { t.Fatalf("Should be 2 zero exited containerst got %d", len(ids)) } if ids[0] != secondNonZero { t.Fatalf("First 
in list should be %q, got %q", secondNonZero, ids[0]) } if ids[1] != firstNonZero { t.Fatalf("Second in list should be %q, got %q", firstNonZero, ids[1]) } logDone("ps - test ps filter exited") } func TestPsRightTagName(t *testing.T) { tag := "asybox:shmatest" defer deleteAllContainers() defer deleteImages(tag) if out, err := exec.Command(dockerBinary, "tag", "busybox", tag).CombinedOutput(); err != nil { t.Fatalf("Failed to tag image: %s, out: %q", err, out) } var id1 string if out, err := exec.Command(dockerBinary, "run", "-d", "busybox", "top").CombinedOutput(); err != nil { t.Fatalf("Failed to run container: %s, out: %q", err, out) } else { id1 = strings.TrimSpace(string(out)) } var id2 string if out, err := exec.Command(dockerBinary, "run", "-d", tag, "top").CombinedOutput(); err != nil { t.Fatalf("Failed to run container: %s, out: %q", err, out) } else { id2 = strings.TrimSpace(string(out)) } out, err := exec.Command(dockerBinary, "ps", "--no-trunc").CombinedOutput() if err != nil { t.Fatalf("Failed to run 'ps': %s, out: %q", err, out) } lines := strings.Split(strings.TrimSpace(string(out)), "\n") // skip header lines = lines[1:] if len(lines) != 2 { t.Fatalf("There should be 2 running container, got %d", len(lines)) } for _, line := range lines { f := strings.Fields(line) switch f[0] { case id1: if f[1] != "busybox:latest" { t.Fatalf("Expected %s tag for id %s, got %s", "busybox", id1, f[1]) } case id2: if f[1] != tag { t.Fatalf("Expected %s tag for id %s, got %s", tag, id1, f[1]) } default: t.Fatalf("Unexpected id %s, expected %s and %s", f[0], id1, id2) } } logDone("ps - right tags for containers") } func TestPsLinkedWithNoTrunc(t *testing.T) { defer deleteAllContainers() if out, err := exec.Command(dockerBinary, "run", "--name=first", "-d", "busybox", "top").CombinedOutput(); err != nil { t.Fatalf("Output: %s, err: %s", out, err) } if out, err := exec.Command(dockerBinary, "run", "--name=second", "--link=first:first", "-d", "busybox", "top").CombinedOutput(); err != nil { t.Fatalf("Output: %s, err: %s", out, err) } out, err := exec.Command(dockerBinary, "ps", "--no-trunc").CombinedOutput() if err != nil { t.Fatalf("Output: %s, err: %s", out, err) } lines := strings.Split(strings.TrimSpace(string(out)), "\n") // strip header lines = lines[1:] expected := []string{"second", "first,second/first"} var names []string for _, l := range lines { fields := strings.Fields(l) names = append(names, fields[len(fields)-1]) } if !reflect.DeepEqual(expected, names) { t.Fatalf("Expected array: %v, got: %v", expected, names) } } func TestPsGroupPortRange(t *testing.T) { defer deleteAllContainers() portRange := "3300-3900" out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "porttest", "-p", portRange+":"+portRange, "busybox", "top")) if err != nil { t.Fatal(out, err) } out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "ps")) if err != nil { t.Fatal(out, err) } // check that the port range is in the output if !strings.Contains(string(out), portRange) { t.Fatalf("docker ps output should have had the port range %q: %s", portRange, string(out)) } logDone("ps - port range") } docker-1.6.2/integration-cli/docker_cli_top_test.go0000644000175000017500000000701412524223634021774 0ustar tianontianonpackage main import ( "os/exec" "strings" "testing" ) func TestTopMultipleArgs(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-i", "-d", "busybox", "sleep", "20") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf("failed to start the 
container: %s, %v", out, err) } cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) topCmd := exec.Command(dockerBinary, "top", cleanedContainerID, "-o", "pid") out, _, err = runCommandWithOutput(topCmd) if err != nil { t.Fatalf("failed to run top: %s, %v", out, err) } if !strings.Contains(out, "PID") { t.Fatalf("did not see PID after top -o pid: %s", out) } logDone("top - multiple arguments") } func TestTopNonPrivileged(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-i", "-d", "busybox", "sleep", "20") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf("failed to start the container: %s, %v", out, err) } cleanedContainerID := stripTrailingCharacters(out) topCmd := exec.Command(dockerBinary, "top", cleanedContainerID) out1, _, err := runCommandWithOutput(topCmd) if err != nil { t.Fatalf("failed to run top: %s, %v", out1, err) } topCmd = exec.Command(dockerBinary, "top", cleanedContainerID) out2, _, err := runCommandWithOutput(topCmd) if err != nil { t.Fatalf("failed to run top: %s, %v", out2, err) } killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) if out, _, err = runCommandWithOutput(killCmd); err != nil { t.Fatalf("failed to kill container: %s, %v", out, err) } deleteContainer(cleanedContainerID) if !strings.Contains(out1, "sleep 20") && !strings.Contains(out2, "sleep 20") { t.Fatal("top should've listed `sleep 20` in the process list, but failed twice") } else if !strings.Contains(out1, "sleep 20") { t.Fatal("top should've listed `sleep 20` in the process list, but failed the first time") } else if !strings.Contains(out2, "sleep 20") { t.Fatal("top should've listed `sleep 20` in the process list, but failed the second itime") } logDone("top - sleep process should be listed in non privileged mode") } func TestTopPrivileged(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "--privileged", "-i", "-d", "busybox", "sleep", "20") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf("failed to start the container: %s, %v", out, err) } cleanedContainerID := stripTrailingCharacters(out) topCmd := exec.Command(dockerBinary, "top", cleanedContainerID) out1, _, err := runCommandWithOutput(topCmd) if err != nil { t.Fatalf("failed to run top: %s, %v", out1, err) } topCmd = exec.Command(dockerBinary, "top", cleanedContainerID) out2, _, err := runCommandWithOutput(topCmd) if err != nil { t.Fatalf("failed to run top: %s, %v", out2, err) } killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) if out, _, err = runCommandWithOutput(killCmd); err != nil { t.Fatalf("failed to kill container: %s, %v", out, err) } deleteContainer(cleanedContainerID) if !strings.Contains(out1, "sleep 20") && !strings.Contains(out2, "sleep 20") { t.Fatal("top should've listed `sleep 20` in the process list, but failed twice") } else if !strings.Contains(out1, "sleep 20") { t.Fatal("top should've listed `sleep 20` in the process list, but failed the first time") } else if !strings.Contains(out2, "sleep 20") { t.Fatal("top should've listed `sleep 20` in the process list, but failed the second itime") } logDone("top - sleep process should be listed in privileged mode") } docker-1.6.2/integration-cli/docker_cli_port_test.go0000644000175000017500000000667512524223634022172 0ustar tianontianonpackage main import ( "os/exec" "sort" "strings" "testing" ) func TestPortList(t *testing.T) { defer deleteAllContainers() // one port runCmd := exec.Command(dockerBinary, "run", "-d", "-p", "9876:80", "busybox", "top") 
out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } firstID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "port", firstID, "80") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } if !assertPortList(t, out, []string{"0.0.0.0:9876"}) { t.Error("Port list is not correct") } runCmd = exec.Command(dockerBinary, "port", firstID) out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } if !assertPortList(t, out, []string{"80/tcp -> 0.0.0.0:9876"}) { t.Error("Port list is not correct") } runCmd = exec.Command(dockerBinary, "rm", "-f", firstID) if out, _, err = runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } // three ports runCmd = exec.Command(dockerBinary, "run", "-d", "-p", "9876:80", "-p", "9877:81", "-p", "9878:82", "busybox", "top") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } ID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "port", ID, "80") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } if !assertPortList(t, out, []string{"0.0.0.0:9876"}) { t.Error("Port list is not correct") } runCmd = exec.Command(dockerBinary, "port", ID) out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } if !assertPortList(t, out, []string{ "80/tcp -> 0.0.0.0:9876", "81/tcp -> 0.0.0.0:9877", "82/tcp -> 0.0.0.0:9878"}) { t.Error("Port list is not correct") } runCmd = exec.Command(dockerBinary, "rm", "-f", ID) out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } // more than one port mapped to the same container port runCmd = exec.Command(dockerBinary, "run", "-d", "-p", "9876:80", "-p", "9999:80", "-p", "9877:81", "-p", "9878:82", "busybox", "top") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } ID = stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "port", ID, "80") out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } if !assertPortList(t, out, []string{"0.0.0.0:9876", "0.0.0.0:9999"}) { t.Error("Port list is not correct") } runCmd = exec.Command(dockerBinary, "port", ID) out, _, err = runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } if !assertPortList(t, out, []string{ "80/tcp -> 0.0.0.0:9876", "80/tcp -> 0.0.0.0:9999", "81/tcp -> 0.0.0.0:9877", "82/tcp -> 0.0.0.0:9878"}) { t.Error("Port list is not correct\n", out) } runCmd = exec.Command(dockerBinary, "rm", "-f", ID) if out, _, err = runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } logDone("port - test port list") } func assertPortList(t *testing.T, out string, expected []string) bool { lines := strings.Split(strings.Trim(out, "\n "), "\n") if len(lines) != len(expected) { t.Errorf("different size lists %s, %d, %d", out, len(lines), len(expected)) return false } sort.Strings(lines) sort.Strings(expected) for i := 0; i < len(expected); i++ { if lines[i] != expected[i] { t.Error("|" + lines[i] + "!=" + expected[i] + "|") return false } } return true } docker-1.6.2/integration-cli/docker_cli_search_test.go0000644000175000017500000000116712524223634022442 0ustar tianontianonpackage main import ( "os/exec" "strings" "testing" ) // search for repos named "busybox" on the central registry func TestSearchOnCentralRegistry(t *testing.T) { searchCmd := exec.Command(dockerBinary, "search", "busybox") out, exitCode, err := runCommandWithOutput(searchCmd) if err != nil || exitCode != 0 {
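// (Added diagnostic, not part of the original test: recording whether the
// binary errored or merely exited non-zero helps triage flaky registry
// searches; exitCode and err are already in scope here.)
t.Logf("docker search exit code: %d, err: %v", exitCode, err)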
t.Fatalf("failed to search on the central registry: %s, %v", out, err) } if !strings.Contains(out, "Busybox base image.") { t.Fatal("couldn't find any repository named (or containing) 'Busybox base image.'") } logDone("search - search for repositories named (or containing) 'Busybox base image.'") } docker-1.6.2/integration-cli/docker_api_exec_test.go0000644000175000017500000000134212524223634022116 0ustar tianontianon// +build !test_no_exec package main import ( "bytes" "fmt" "os/exec" "testing" ) // Regression test for #9414 func TestExecApiCreateNoCmd(t *testing.T) { defer deleteAllContainers() name := "exec_test" runCmd := exec.Command(dockerBinary, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") if out, _, err := runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } body, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": nil}) if err == nil || !bytes.Contains(body, []byte("No exec command specified")) { t.Fatalf("Expected error when creating exec command with no Cmd specified: %q", err) } logDone("exec create API - returns error when missing Cmd") } docker-1.6.2/integration-cli/docker_cli_build_test.go0000644000175000017500000043507012524223634022300 0ustar tianontianonpackage main import ( "archive/tar" "bufio" "bytes" "encoding/json" "fmt" "io/ioutil" "os" "os/exec" "path/filepath" "reflect" "regexp" "runtime" "strconv" "strings" "sync" "testing" "text/template" "time" "github.com/docker/docker/builder/command" "github.com/docker/docker/pkg/archive" ) func TestBuildJSONEmptyRun(t *testing.T) { name := "testbuildjsonemptyrun" defer deleteImages(name) _, err := buildImage( name, ` FROM busybox RUN [] `, true) if err != nil { t.Fatal("error when dealing with a RUN statement with empty JSON array") } logDone("build - RUN with an empty array should not panic") } func TestBuildEmptyWhitespace(t *testing.T) { name := "testbuildemptywhitespace" defer deleteImages(name) _, err := buildImage( name, ` FROM busybox COPY quux \ bar `, true) if err == nil { t.Fatal("no error when dealing with a COPY statement with no content on the same line") } logDone("build - statements with whitespace and no content should generate a parse error") } func TestBuildShCmdJSONEntrypoint(t *testing.T) { name := "testbuildshcmdjsonentrypoint" defer deleteImages(name) _, err := buildImage( name, ` FROM busybox ENTRYPOINT ["/bin/echo"] CMD echo test `, true) if err != nil { t.Fatal(err) } out, _, err := runCommandWithOutput( exec.Command( dockerBinary, "run", "--rm", name)) if err != nil { t.Fatal(err) } if strings.TrimSpace(out) != "/bin/sh -c echo test" { t.Fatal("CMD did not contain /bin/sh -c") } logDone("build - CMD should always contain /bin/sh -c when specified without JSON") } func TestBuildEnvironmentReplacementUser(t *testing.T) { name := "testbuildenvironmentreplacement" defer deleteImages(name) _, err := buildImage(name, ` FROM scratch ENV user foo USER ${user} `, true) if err != nil { t.Fatal(err) } res, err := inspectFieldJSON(name, "Config.User") if err != nil { t.Fatal(err) } if res != `"foo"` { t.Fatal("User foo from environment not in Config.User on image") } logDone("build - user environment replacement") } func TestBuildEnvironmentReplacementVolume(t *testing.T) { name := "testbuildenvironmentreplacement" defer deleteImages(name) _, err := buildImage(name, ` FROM scratch ENV volume /quux VOLUME ${volume} `, true) if err != nil { t.Fatal(err) } res, err := inspectFieldJSON(name, "Config.Volumes") if err != nil { t.Fatal(err) } var 
volumes map[string]interface{} if err := json.Unmarshal([]byte(res), &volumes); err != nil { t.Fatal(err) } if _, ok := volumes["/quux"]; !ok { t.Fatal("Volume /quux from environment not in Config.Volumes on image") } logDone("build - volume environment replacement") } func TestBuildEnvironmentReplacementExpose(t *testing.T) { name := "testbuildenvironmentreplacement" defer deleteImages(name) _, err := buildImage(name, ` FROM scratch ENV port 80 EXPOSE ${port} `, true) if err != nil { t.Fatal(err) } res, err := inspectFieldJSON(name, "Config.ExposedPorts") if err != nil { t.Fatal(err) } var exposedPorts map[string]interface{} if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil { t.Fatal(err) } if _, ok := exposedPorts["80/tcp"]; !ok { t.Fatal("Exposed port 80 from environment not in Config.ExposedPorts on image") } logDone("build - expose environment replacement") } func TestBuildEnvironmentReplacementWorkdir(t *testing.T) { name := "testbuildenvironmentreplacement" defer deleteImages(name) _, err := buildImage(name, ` FROM busybox ENV MYWORKDIR /work RUN mkdir ${MYWORKDIR} WORKDIR ${MYWORKDIR} `, true) if err != nil { t.Fatal(err) } logDone("build - workdir environment replacement") } func TestBuildEnvironmentReplacementAddCopy(t *testing.T) { name := "testbuildenvironmentreplacement" defer deleteImages(name) ctx, err := fakeContext(` FROM scratch ENV baz foo ENV quux bar ENV dot . ADD ${baz} ${dot} COPY ${quux} ${dot} `, map[string]string{ "foo": "test1", "bar": "test2", }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - add/copy environment replacement") } func TestBuildEnvironmentReplacementEnv(t *testing.T) { name := "testbuildenvironmentreplacement" defer deleteImages(name) _, err := buildImage(name, ` FROM busybox ENV foo zzz ENV bar ${foo} ENV abc1='$foo' ENV env1=$foo env2=${foo} env3="$foo" env4="${foo}" RUN [ "$abc1" = '$foo' ] && (echo "$abc1" | grep -q foo) ENV abc2="\$foo" RUN [ "$abc2" = '$foo' ] && (echo "$abc2" | grep -q foo) ENV abc3 '$foo' RUN [ "$abc3" = '$foo' ] && (echo "$abc3" | grep -q foo) ENV abc4 "\$foo" RUN [ "$abc4" = '$foo' ] && (echo "$abc4" | grep -q foo) `, true) if err != nil { t.Fatal(err) } res, err := inspectFieldJSON(name, "Config.Env") if err != nil { t.Fatal(err) } envResult := []string{} if err = unmarshalJSON([]byte(res), &envResult); err != nil { t.Fatal(err) } found := false envCount := 0 for _, env := range envResult { parts := strings.SplitN(env, "=", 2) if parts[0] == "bar" { found = true if parts[1] != "zzz" { t.Fatalf("Could not find replaced var for env `bar`: got %q instead of `zzz`", parts[1]) } } else if strings.HasPrefix(parts[0], "env") { envCount++ if parts[1] != "zzz" { t.Fatalf("%s should be 'zzz' but instead it's %q", parts[0], parts[1]) } } } if !found { t.Fatal("Never found the `bar` env variable") } if envCount != 4 { t.Fatalf("Didn't find all env vars - only saw %d\n%s", envCount, envResult) } logDone("build - env environment replacement") } func TestBuildHandleEscapes(t *testing.T) { name := "testbuildhandleescapes" defer deleteImages(name) _, err := buildImage(name, ` FROM scratch ENV FOO bar VOLUME ${FOO} `, true) if err != nil { t.Fatal(err) } var result map[string]map[string]struct{} res, err := inspectFieldJSON(name, "Config.Volumes") if err != nil { t.Fatal(err) } if err = unmarshalJSON([]byte(res), &result); err != nil { t.Fatal(err) } if _, ok := result["bar"]; !ok { t.Fatal("Could not find volume bar set from
env foo in volumes table") } deleteImages(name) _, err = buildImage(name, ` FROM scratch ENV FOO bar VOLUME \${FOO} `, true) if err != nil { t.Fatal(err) } res, err = inspectFieldJSON(name, "Config.Volumes") if err != nil { t.Fatal(err) } if err = unmarshalJSON([]byte(res), &result); err != nil { t.Fatal(err) } if _, ok := result["${FOO}"]; !ok { t.Fatal("Could not find volume ${FOO} set from env foo in volumes table") } deleteImages(name) // this test in particular provides *7* backslashes and expects 6 to come back. // Like above, the first escape is swallowed and the rest are treated as // literals, this one is just less obvious because of all the character noise. _, err = buildImage(name, ` FROM scratch ENV FOO bar VOLUME \\\\\\\${FOO} `, true) if err != nil { t.Fatal(err) } res, err = inspectFieldJSON(name, "Config.Volumes") if err != nil { t.Fatal(err) } if err = unmarshalJSON([]byte(res), &result); err != nil { t.Fatal(err) } if _, ok := result[`\\\${FOO}`]; !ok { t.Fatal(`Could not find volume \\\${FOO} set from env foo in volumes table`, result) } logDone("build - handle escapes") } func TestBuildOnBuildLowercase(t *testing.T) { name := "testbuildonbuildlowercase" name2 := "testbuildonbuildlowercase2" defer deleteImages(name, name2) _, err := buildImage(name, ` FROM busybox onbuild run echo quux `, true) if err != nil { t.Fatal(err) } _, out, err := buildImageWithOut(name2, fmt.Sprintf(` FROM %s `, name), true) if err != nil { t.Fatal(err) } if !strings.Contains(out, "quux") { t.Fatalf("Did not receive the expected echo text, got %s", out) } if strings.Contains(out, "ONBUILD ONBUILD") { t.Fatalf("Got an ONBUILD ONBUILD error with no error: got %s", out) } logDone("build - handle case-insensitive onbuild statement") } func TestBuildEnvEscapes(t *testing.T) { name := "testbuildenvescapes" defer deleteImages(name) defer deleteAllContainers() _, err := buildImage(name, ` FROM busybox ENV TEST foo CMD echo \$ `, true) out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-t", name)) if err != nil { t.Fatal(err) } if strings.TrimSpace(out) != "$" { t.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) } logDone("build - env should handle \\$ properly") } func TestBuildEnvOverwrite(t *testing.T) { name := "testbuildenvoverwrite" defer deleteImages(name) defer deleteAllContainers() _, err := buildImage(name, ` FROM busybox ENV TEST foo CMD echo ${TEST} `, true) if err != nil { t.Fatal(err) } out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-e", "TEST=bar", "-t", name)) if err != nil { t.Fatal(err) } if strings.TrimSpace(out) != "bar" { t.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) } logDone("build - env should overwrite builder ENV during run") } func TestBuildOnBuildForbiddenMaintainerInSourceImage(t *testing.T) { name := "testbuildonbuildforbiddenmaintainerinsourceimage" defer deleteImages("onbuild") defer deleteImages(name) defer deleteAllContainers() createCmd := exec.Command(dockerBinary, "create", "busybox", "true") out, _, _, err := runCommandWithStdoutStderr(createCmd) if err != nil { t.Fatal(out, err) } cleanedContainerID := stripTrailingCharacters(out) commitCmd := exec.Command(dockerBinary, "commit", "--run", "{\"OnBuild\":[\"MAINTAINER docker.io\"]}", cleanedContainerID, "onbuild") if _, err := runCommand(commitCmd); err != nil { t.Fatal(err) } _, err = buildImage(name, `FROM onbuild`, true) if err != nil 
{ if !strings.Contains(err.Error(), "maintainer isn't allowed as an ONBUILD trigger") { t.Fatalf("Wrong error %v, must be about MAINTAINER and ONBUILD in source image", err) } } else { t.Fatal("Error must not be nil") } logDone("build - onbuild forbidden maintainer in source image") } func TestBuildOnBuildForbiddenFromInSourceImage(t *testing.T) { name := "testbuildonbuildforbiddenfrominsourceimage" defer deleteImages("onbuild") defer deleteImages(name) defer deleteAllContainers() createCmd := exec.Command(dockerBinary, "create", "busybox", "true") out, _, _, err := runCommandWithStdoutStderr(createCmd) if err != nil { t.Fatal(out, err) } cleanedContainerID := stripTrailingCharacters(out) commitCmd := exec.Command(dockerBinary, "commit", "--run", "{\"OnBuild\":[\"FROM busybox\"]}", cleanedContainerID, "onbuild") if _, err := runCommand(commitCmd); err != nil { t.Fatal(err) } _, err = buildImage(name, `FROM onbuild`, true) if err != nil { if !strings.Contains(err.Error(), "from isn't allowed as an ONBUILD trigger") { t.Fatalf("Wrong error %v, must be about FROM and ONBUILD in source image", err) } } else { t.Fatal("Error must not be nil") } logDone("build - onbuild forbidden from in source image") } func TestBuildOnBuildForbiddenChainedInSourceImage(t *testing.T) { name := "testbuildonbuildforbiddenchainedinsourceimage" defer deleteImages("onbuild") defer deleteImages(name) defer deleteAllContainers() createCmd := exec.Command(dockerBinary, "create", "busybox", "true") out, _, _, err := runCommandWithStdoutStderr(createCmd) if err != nil { t.Fatal(out, err) } cleanedContainerID := stripTrailingCharacters(out) commitCmd := exec.Command(dockerBinary, "commit", "--run", "{\"OnBuild\":[\"ONBUILD RUN ls\"]}", cleanedContainerID, "onbuild") if _, err := runCommand(commitCmd); err != nil { t.Fatal(err) } _, err = buildImage(name, `FROM onbuild`, true) if err != nil { if !strings.Contains(err.Error(), "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") { t.Fatalf("Wrong error %v, must be about chaining ONBUILD in source image", err) } } else { t.Fatal("Error must not be nil") } logDone("build - onbuild forbidden chained in source image") } func TestBuildOnBuildCmdEntrypointJSON(t *testing.T) { name1 := "onbuildcmd" name2 := "onbuildgenerated" defer deleteImages(name2) defer deleteImages(name1) defer deleteAllContainers() _, err := buildImage(name1, ` FROM busybox ONBUILD CMD ["hello world"] ONBUILD ENTRYPOINT ["echo"] ONBUILD RUN ["true"]`, false) if err != nil { t.Fatal(err) } _, err = buildImage(name2, fmt.Sprintf(`FROM %s`, name1), false) if err != nil { t.Fatal(err) } out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-t", name2)) if err != nil { t.Fatal(err) } if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) { t.Fatal("did not get echo output from onbuild", out) } logDone("build - onbuild with json entrypoint/cmd") } func TestBuildOnBuildEntrypointJSON(t *testing.T) { name1 := "onbuildcmd" name2 := "onbuildgenerated" defer deleteImages(name2) defer deleteImages(name1) defer deleteAllContainers() _, err := buildImage(name1, ` FROM busybox ONBUILD ENTRYPOINT ["echo"]`, false) if err != nil { t.Fatal(err) } _, err = buildImage(name2, fmt.Sprintf("FROM %s\nCMD [\"hello world\"]\n", name1), false) if err != nil { t.Fatal(err) } out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-t", name2)) if err != nil { t.Fatal(err) } if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) { t.Fatal("got malformed output from onbuild", out) } 
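// (Added sketch, not part of the original test: the ONBUILD trigger stored
// on name1 supplies ENTRYPOINT ["echo"], and name2's own CMD ["hello world"]
// is handed to it, so the first line of the container output is the echoed
// string matched by the regexp above.)
firstLine := strings.SplitN(out, "\n", 2)[0]
_ = firstLine // expected to read "hello world"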
logDone("build - onbuild with json entrypoint") } func TestBuildCacheADD(t *testing.T) { name := "testbuildtwoimageswithadd" defer deleteImages(name) server, err := fakeStorage(map[string]string{ "robots.txt": "hello", "index.html": "world", }) if err != nil { t.Fatal(err) } defer server.Close() if _, err := buildImage(name, fmt.Sprintf(`FROM scratch ADD %s/robots.txt /`, server.URL()), true); err != nil { t.Fatal(err) } if err != nil { t.Fatal(err) } deleteImages(name) _, out, err := buildImageWithOut(name, fmt.Sprintf(`FROM scratch ADD %s/index.html /`, server.URL()), true) if err != nil { t.Fatal(err) } if strings.Contains(out, "Using cache") { t.Fatal("2nd build used cache on ADD, it shouldn't") } logDone("build - build two images with remote ADD") } func TestBuildLastModified(t *testing.T) { name := "testbuildlastmodified" defer deleteImages(name) server, err := fakeStorage(map[string]string{ "file": "hello", }) if err != nil { t.Fatal(err) } defer server.Close() var out, out2 string dFmt := `FROM busybox ADD %s/file / RUN ls -le /file` dockerfile := fmt.Sprintf(dFmt, server.URL()) if _, out, err = buildImageWithOut(name, dockerfile, false); err != nil { t.Fatal(err) } originMTime := regexp.MustCompile(`root.*/file.*\n`).FindString(out) // Make sure our regexp is correct if strings.Index(originMTime, "/file") < 0 { t.Fatalf("Missing ls info on 'file':\n%s", out) } // Build it again and make sure the mtime of the file didn't change. // Wait a few seconds to make sure the time changed enough to notice time.Sleep(2 * time.Second) if _, out2, err = buildImageWithOut(name, dockerfile, false); err != nil { t.Fatal(err) } newMTime := regexp.MustCompile(`root.*/file.*\n`).FindString(out2) if newMTime != originMTime { t.Fatalf("MTime changed:\nOrigin:%s\nNew:%s", originMTime, newMTime) } // Now 'touch' the file and make sure the timestamp DID change this time // Create a new fakeStorage instead of just using Add() to help windows server, err = fakeStorage(map[string]string{ "file": "hello", }) if err != nil { t.Fatal(err) } defer server.Close() dockerfile = fmt.Sprintf(dFmt, server.URL()) if _, out2, err = buildImageWithOut(name, dockerfile, false); err != nil { t.Fatal(err) } newMTime = regexp.MustCompile(`root.*/file.*\n`).FindString(out2) if newMTime == originMTime { t.Fatalf("MTime didn't change:\nOrigin:%s\nNew:%s", originMTime, newMTime) } logDone("build - use Last-Modified header") } func TestBuildSixtySteps(t *testing.T) { name := "foobuildsixtysteps" defer deleteImages(name) ctx, err := fakeContext("FROM scratch\n"+strings.Repeat("ADD foo /\n", 60), map[string]string{ "foo": "test1", }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - build an image with sixty build steps") } func TestBuildAddSingleFileToRoot(t *testing.T) { name := "testaddimg" defer deleteImages(name) ctx, err := fakeContext(fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio /exists ADD test_file / RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), map[string]string{ "test_file": "test1", }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - 
add single file to root") } // Issue #3960: "ADD src ." hangs func TestBuildAddSingleFileToWorkdir(t *testing.T) { name := "testaddsinglefiletoworkdir" defer deleteImages(name) ctx, err := fakeContext(`FROM busybox ADD test_file .`, map[string]string{ "test_file": "test1", }) if err != nil { t.Fatal(err) } defer ctx.Close() done := make(chan struct{}) go func() { if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } close(done) }() select { case <-time.After(5 * time.Second): t.Fatal("Build with adding to workdir timed out") case <-done: } logDone("build - add single file to workdir") } func TestBuildAddSingleFileToExistDir(t *testing.T) { name := "testaddsinglefiletoexistdir" defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir /exists RUN touch /exists/exists_file RUN chown -R dockerio.dockerio /exists ADD test_file /exists/ RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, map[string]string{ "test_file": "test1", }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - add single file to existing dir") } func TestBuildCopyAddMultipleFiles(t *testing.T) { server, err := fakeStorage(map[string]string{ "robots.txt": "hello", }) if err != nil { t.Fatal(err) } defer server.Close() name := "testcopymultiplefilestofile" defer deleteImages(name) ctx, err := fakeContext(fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir /exists RUN touch /exists/exists_file RUN chown -R dockerio.dockerio /exists COPY test_file1 test_file2 /exists/ ADD test_file3 test_file4 %s/robots.txt /exists/ RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/test_file1 | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/test_file2 | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/test_file3 | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/test_file4 | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/robots.txt | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] `, server.URL()), map[string]string{ "test_file1": "test1", "test_file2": "test2", "test_file3": "test3", "test_file4": "test4", }) defer ctx.Close() if err != nil { t.Fatal(err) } if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - multiple file copy/add tests") } func TestBuildAddMultipleFilesToFile(t *testing.T) { name := "testaddmultiplefilestofile" defer deleteImages(name) ctx, err := fakeContext(`FROM scratch ADD file1.txt file2.txt test `, map[string]string{ "file1.txt": "test1", "file2.txt": "test1", }) defer ctx.Close() if err != nil { t.Fatal(err) } expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { t.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) } logDone("build - multiple add files to file") } func TestBuildJSONAddMultipleFilesToFile(t 
*testing.T) { name := "testjsonaddmultiplefilestofile" defer deleteImages(name) ctx, err := fakeContext(`FROM scratch ADD ["file1.txt", "file2.txt", "test"] `, map[string]string{ "file1.txt": "test1", "file2.txt": "test1", }) defer ctx.Close() if err != nil { t.Fatal(err) } expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { t.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) } logDone("build - multiple add files to file json syntax") } func TestBuildAddMultipleFilesToFileWild(t *testing.T) { name := "testaddmultiplefilestofilewild" defer deleteImages(name) ctx, err := fakeContext(`FROM scratch ADD file*.txt test `, map[string]string{ "file1.txt": "test1", "file2.txt": "test1", }) defer ctx.Close() if err != nil { t.Fatal(err) } expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { t.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) } logDone("build - multiple add files to file wild") } func TestBuildJSONAddMultipleFilesToFileWild(t *testing.T) { name := "testjsonaddmultiplefilestofilewild" defer deleteImages(name) ctx, err := fakeContext(`FROM scratch ADD ["file*.txt", "test"] `, map[string]string{ "file1.txt": "test1", "file2.txt": "test1", }) defer ctx.Close() if err != nil { t.Fatal(err) } expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { t.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) } logDone("build - multiple add files to file wild json syntax") } func TestBuildCopyMultipleFilesToFile(t *testing.T) { name := "testcopymultiplefilestofile" defer deleteImages(name) ctx, err := fakeContext(`FROM scratch COPY file1.txt file2.txt test `, map[string]string{ "file1.txt": "test1", "file2.txt": "test1", }) defer ctx.Close() if err != nil { t.Fatal(err) } expected := "When using COPY with more than one source file, the destination must be a directory and end with a /" if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { t.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) } logDone("build - multiple copy files to file") } func TestBuildJSONCopyMultipleFilesToFile(t *testing.T) { name := "testjsoncopymultiplefilestofile" defer deleteImages(name) ctx, err := fakeContext(`FROM scratch COPY ["file1.txt", "file2.txt", "test"] `, map[string]string{ "file1.txt": "test1", "file2.txt": "test1", }) defer ctx.Close() if err != nil { t.Fatal(err) } expected := "When using COPY with more than one source file, the destination must be a directory and end with a /" if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { t.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) } logDone("build - multiple copy files to file json syntax") } func TestBuildAddFileWithWhitespace(t *testing.T) { name := "testaddfilewithwhitespace" defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN mkdir "/test dir" RUN mkdir "/test_dir" ADD [ "test file1", "/test_file1" ] ADD [ "test_file2", "/test file2" ] ADD [ "test 
file3", "/test file3" ] ADD [ "test dir/test_file4", "/test_dir/test_file4" ] ADD [ "test_dir/test_file5", "/test dir/test_file5" ] ADD [ "test dir/test_file6", "/test dir/test_file6" ] RUN [ $(cat "/test_file1") = 'test1' ] RUN [ $(cat "/test file2") = 'test2' ] RUN [ $(cat "/test file3") = 'test3' ] RUN [ $(cat "/test_dir/test_file4") = 'test4' ] RUN [ $(cat "/test dir/test_file5") = 'test5' ] RUN [ $(cat "/test dir/test_file6") = 'test6' ]`, map[string]string{ "test file1": "test1", "test_file2": "test2", "test file3": "test3", "test dir/test_file4": "test4", "test_dir/test_file5": "test5", "test dir/test_file6": "test6", }) defer ctx.Close() if err != nil { t.Fatal(err) } if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - add file with whitespace") } func TestBuildCopyFileWithWhitespace(t *testing.T) { name := "testcopyfilewithwhitespace" defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN mkdir "/test dir" RUN mkdir "/test_dir" COPY [ "test file1", "/test_file1" ] COPY [ "test_file2", "/test file2" ] COPY [ "test file3", "/test file3" ] COPY [ "test dir/test_file4", "/test_dir/test_file4" ] COPY [ "test_dir/test_file5", "/test dir/test_file5" ] COPY [ "test dir/test_file6", "/test dir/test_file6" ] RUN [ $(cat "/test_file1") = 'test1' ] RUN [ $(cat "/test file2") = 'test2' ] RUN [ $(cat "/test file3") = 'test3' ] RUN [ $(cat "/test_dir/test_file4") = 'test4' ] RUN [ $(cat "/test dir/test_file5") = 'test5' ] RUN [ $(cat "/test dir/test_file6") = 'test6' ]`, map[string]string{ "test file1": "test1", "test_file2": "test2", "test file3": "test3", "test dir/test_file4": "test4", "test_dir/test_file5": "test5", "test dir/test_file6": "test6", }) defer ctx.Close() if err != nil { t.Fatal(err) } if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - copy file with whitespace") } func TestBuildAddMultipleFilesToFileWithWhitespace(t *testing.T) { name := "testaddmultiplefilestofilewithwhitespace" defer deleteImages(name) ctx, err := fakeContext(`FROM busybox ADD [ "test file1", "test file2", "test" ] `, map[string]string{ "test file1": "test1", "test file2": "test2", }) defer ctx.Close() if err != nil { t.Fatal(err) } expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { t.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) } logDone("build - multiple add files to file with whitespace") } func TestBuildCopyMultipleFilesToFileWithWhitespace(t *testing.T) { name := "testcopymultiplefilestofilewithwhitespace" defer deleteImages(name) ctx, err := fakeContext(`FROM busybox COPY [ "test file1", "test file2", "test" ] `, map[string]string{ "test file1": "test1", "test file2": "test2", }) defer ctx.Close() if err != nil { t.Fatal(err) } expected := "When using COPY with more than one source file, the destination must be a directory and end with a /" if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { t.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) } logDone("build - multiple copy files to file with whitespace") } func TestBuildCopyWildcard(t *testing.T) { name := "testcopywildcard" defer deleteImages(name) server, err := fakeStorage(map[string]string{ "robots.txt": "hello", "index.html": "world", }) if err != nil { t.Fatal(err) } defer 
server.Close() ctx, err := fakeContext(fmt.Sprintf(`FROM busybox COPY file*.txt /tmp/ RUN ls /tmp/file1.txt /tmp/file2.txt RUN mkdir /tmp1 COPY dir* /tmp1/ RUN ls /tmp1/dirt /tmp1/nested_file /tmp1/nested_dir/nest_nest_file RUN mkdir /tmp2 ADD dir/*dir %s/robots.txt /tmp2/ RUN ls /tmp2/nest_nest_file /tmp2/robots.txt `, server.URL()), map[string]string{ "file1.txt": "test1", "file2.txt": "test2", "dir/nested_file": "nested file", "dir/nested_dir/nest_nest_file": "2 times nested", "dirt": "dirty", }) defer ctx.Close() if err != nil { t.Fatal(err) } id1, err := buildImageFromContext(name, ctx, true) if err != nil { t.Fatal(err) } // Now make sure we use a cache the 2nd time id2, err := buildImageFromContext(name, ctx, true) if err != nil { t.Fatal(err) } if id1 != id2 { t.Fatal("didn't use the cache") } logDone("build - copy wild card") } func TestBuildCopyWildcardNoFind(t *testing.T) { name := "testcopywildcardnofind" defer deleteImages(name) ctx, err := fakeContext(`FROM busybox COPY file*.txt /tmp/ `, nil) defer ctx.Close() if err != nil { t.Fatal(err) } _, err = buildImageFromContext(name, ctx, true) if err == nil { t.Fatal("should have failed to find a file") } if !strings.Contains(err.Error(), "No source files were specified") { t.Fatalf("Wrong error %v, must be about no source files", err) } logDone("build - copy wild card no find") } func TestBuildCopyWildcardCache(t *testing.T) { name := "testcopywildcardcache" defer deleteImages(name) ctx, err := fakeContext(`FROM busybox COPY file1.txt /tmp/`, map[string]string{ "file1.txt": "test1", }) defer ctx.Close() if err != nil { t.Fatal(err) } id1, err := buildImageFromContext(name, ctx, true) if err != nil { t.Fatal(err) } // Now make sure we use a cache the 2nd time even with wild cards. // Use the same context so the file is the same and the checksum will match ctx.Add("Dockerfile", `FROM busybox COPY file*.txt /tmp/`) id2, err := buildImageFromContext(name, ctx, true) if err != nil { t.Fatal(err) } if id1 != id2 { t.Fatal("didn't use the cache") } logDone("build - copy wild card cache") } func TestBuildAddSingleFileToNonExistingDir(t *testing.T) { name := "testaddsinglefiletononexistingdir" defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio /exists ADD test_file /test_dir/ RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, map[string]string{ "test_file": "test1", }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - add single file to non-existing dir") } func TestBuildAddDirContentToRoot(t *testing.T) { name := "testadddircontenttoroot" defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio exists ADD test_dir / RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, map[string]string{ "test_dir/test_file": "test1", }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - add 
directory contents to root") } func TestBuildAddDirContentToExistingDir(t *testing.T) { name := "testadddircontenttoexistingdir" defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir /exists RUN touch /exists/exists_file RUN chown -R dockerio.dockerio /exists ADD test_dir/ /exists/ RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, map[string]string{ "test_dir/test_file": "test1", }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - add directory contents to existing dir") } func TestBuildAddWholeDirToRoot(t *testing.T) { name := "testaddwholedirtoroot" defer deleteImages(name) ctx, err := fakeContext(fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio exists ADD test_dir /test_dir RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), map[string]string{ "test_dir/test_file": "test1", }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - add whole directory to root") } // Testing #5941 func TestBuildAddEtcToRoot(t *testing.T) { name := "testaddetctoroot" defer deleteImages(name) ctx, err := fakeContext(`FROM scratch ADD . 
/`, map[string]string{ "etc/test_file": "test1", }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - add etc directory to root") } // Testing #9401 func TestBuildAddPreservesFilesSpecialBits(t *testing.T) { name := "testaddpreservesfilesspecialbits" defer deleteImages(name) ctx, err := fakeContext(`FROM busybox ADD suidbin /usr/bin/suidbin RUN chmod 4755 /usr/bin/suidbin RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ] ADD ./data/ / RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]`, map[string]string{ "suidbin": "suidbin", "/data/usr/test_file": "test1", }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - add preserves files special bits") } func TestBuildCopySingleFileToRoot(t *testing.T) { name := "testcopysinglefiletoroot" defer deleteImages(name) ctx, err := fakeContext(fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio /exists COPY test_file / RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), map[string]string{ "test_file": "test1", }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - copy single file to root") } // Issue #3960: "ADD src ." hangs - adapted for COPY func TestBuildCopySingleFileToWorkdir(t *testing.T) { name := "testcopysinglefiletoworkdir" defer deleteImages(name) ctx, err := fakeContext(`FROM busybox COPY test_file .`, map[string]string{ "test_file": "test1", }) if err != nil { t.Fatal(err) } defer ctx.Close() done := make(chan struct{}) go func() { if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } close(done) }() select { case <-time.After(5 * time.Second): t.Fatal("Build with adding to workdir timed out") case <-done: } logDone("build - copy single file to workdir") } func TestBuildCopySingleFileToExistDir(t *testing.T) { name := "testcopysinglefiletoexistdir" defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir /exists RUN touch /exists/exists_file RUN chown -R dockerio.dockerio /exists COPY test_file /exists/ RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, map[string]string{ "test_file": "test1", }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - copy single file to existing dir") } func TestBuildCopySingleFileToNonExistDir(t *testing.T) { name := "testcopysinglefiletononexistdir" defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio /exists COPY test_file /test_dir/ RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l 
/test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, map[string]string{ "test_file": "test1", }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - copy single file to non-existing dir") } func TestBuildCopyDirContentToRoot(t *testing.T) { name := "testcopydircontenttoroot" defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio exists COPY test_dir / RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, map[string]string{ "test_dir/test_file": "test1", }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - copy directory contents to root") } func TestBuildCopyDirContentToExistDir(t *testing.T) { name := "testcopydircontenttoexistdir" defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir /exists RUN touch /exists/exists_file RUN chown -R dockerio.dockerio /exists COPY test_dir/ /exists/ RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, map[string]string{ "test_dir/test_file": "test1", }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - copy directory contents to existing dir") } func TestBuildCopyWholeDirToRoot(t *testing.T) { name := "testcopywholedirtoroot" defer deleteImages(name) ctx, err := fakeContext(fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio exists COPY test_dir /test_dir RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), map[string]string{ "test_dir/test_file": "test1", }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - copy whole directory to root") } func TestBuildCopyEtcToRoot(t *testing.T) { name := "testcopyetctoroot" defer deleteImages(name) ctx, err := fakeContext(`FROM scratch COPY . 
/`, map[string]string{ "etc/test_file": "test1", }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - copy etc directory to root") } func TestBuildCopyDisallowRemote(t *testing.T) { name := "testcopydisallowremote" defer deleteImages(name) _, out, err := buildImageWithOut(name, `FROM scratch COPY https://index.docker.io/robots.txt /`, true) if err == nil || !strings.Contains(out, "Source can't be a URL for COPY") { t.Fatalf("Error should be about disallowed remote source, got err: %s, out: %q", err, out) } logDone("build - copy - disallow copy from remote") } func TestBuildAddBadLinks(t *testing.T) { const ( dockerfile = ` FROM scratch ADD links.tar / ADD foo.txt /symlink/ ` targetFile = "foo.txt" ) var ( name = "test-link-absolute" ) defer deleteImages(name) ctx, err := fakeContext(dockerfile, nil) if err != nil { t.Fatal(err) } defer ctx.Close() tempDir, err := ioutil.TempDir("", "test-link-absolute-temp-") if err != nil { t.Fatalf("failed to create temporary directory: %s", tempDir) } defer os.RemoveAll(tempDir) var symlinkTarget string if runtime.GOOS == "windows" { var driveLetter string if abs, err := filepath.Abs(tempDir); err != nil { t.Fatal(err) } else { driveLetter = abs[:1] } tempDirWithoutDrive := tempDir[2:] symlinkTarget = fmt.Sprintf(`%s:\..\..\..\..\..\..\..\..\..\..\..\..%s`, driveLetter, tempDirWithoutDrive) } else { symlinkTarget = fmt.Sprintf("/../../../../../../../../../../../..%s", tempDir) } tarPath := filepath.Join(ctx.Dir, "links.tar") nonExistingFile := filepath.Join(tempDir, targetFile) fooPath := filepath.Join(ctx.Dir, targetFile) tarOut, err := os.Create(tarPath) if err != nil { t.Fatal(err) } tarWriter := tar.NewWriter(tarOut) header := &tar.Header{ Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: symlinkTarget, Mode: 0755, Uid: 0, Gid: 0, } err = tarWriter.WriteHeader(header) if err != nil { t.Fatal(err) } tarWriter.Close() tarOut.Close() foo, err := os.Create(fooPath) if err != nil { t.Fatal(err) } defer foo.Close() if _, err := foo.WriteString("test"); err != nil { t.Fatal(err) } if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { t.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) } logDone("build - ADD must add files in container") } func TestBuildAddBadLinksVolume(t *testing.T) { const ( dockerfileTemplate = ` FROM busybox RUN ln -s /../../../../../../../../%s /x VOLUME /x ADD foo.txt /x/` targetFile = "foo.txt" ) var ( name = "test-link-absolute-volume" dockerfile = "" ) defer deleteImages(name) tempDir, err := ioutil.TempDir("", "test-link-absolute-volume-temp-") if err != nil { t.Fatalf("failed to create temporary directory: %s", tempDir) } defer os.RemoveAll(tempDir) dockerfile = fmt.Sprintf(dockerfileTemplate, tempDir) nonExistingFile := filepath.Join(tempDir, targetFile) ctx, err := fakeContext(dockerfile, nil) if err != nil { t.Fatal(err) } defer ctx.Close() fooPath := filepath.Join(ctx.Dir, targetFile) foo, err := os.Create(fooPath) if err != nil { t.Fatal(err) } defer foo.Close() if _, err := foo.WriteString("test"); err != nil { t.Fatal(err) } if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { t.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) } 
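// Descriptive note (not in the original source): if the build had followed the /x symlink set up
// above, foo.txt would now exist at nonExistingFile on the host; its absence shows that ADD into
// a volume path stayed inside the container filesystem.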
logDone("build - ADD should add files in volume") } // Issue #5270 - ensure we throw a better error than "unexpected EOF" // when we can't access files in the context. func TestBuildWithInaccessibleFilesInContext(t *testing.T) { testRequires(t, UnixCli) // test uses chown/chmod: not available on windows { name := "testbuildinaccessiblefiles" defer deleteImages(name) ctx, err := fakeContext("FROM scratch\nADD . /foo/", map[string]string{"fileWithoutReadAccess": "foo"}) if err != nil { t.Fatal(err) } defer ctx.Close() // This is used to ensure we detect inaccessible files early during build in the cli client pathToFileWithoutReadAccess := filepath.Join(ctx.Dir, "fileWithoutReadAccess") if err = os.Chown(pathToFileWithoutReadAccess, 0, 0); err != nil { t.Fatalf("failed to chown file to root: %s", err) } if err = os.Chmod(pathToFileWithoutReadAccess, 0700); err != nil { t.Fatalf("failed to chmod file to 700: %s", err) } buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) buildCmd.Dir = ctx.Dir out, _, err := runCommandWithOutput(buildCmd) if err == nil { t.Fatalf("build should have failed: %s %s", err, out) } // check if we've detected the failure before we started building if !strings.Contains(out, "no permission to read from ") { t.Fatalf("output should've contained the string: no permission to read from but contained: %s", out) } if !strings.Contains(out, "Error checking context is accessible") { t.Fatalf("output should've contained the string: Error checking context is accessible") } } { name := "testbuildinaccessibledirectory" defer deleteImages(name) ctx, err := fakeContext("FROM scratch\nADD . /foo/", map[string]string{"directoryWeCantStat/bar": "foo"}) if err != nil { t.Fatal(err) } defer ctx.Close() // This is used to ensure we detect inaccessible directories early during build in the cli client pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { t.Fatalf("failed to chown directory to root: %s", err) } if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { t.Fatalf("failed to chmod directory to 444: %s", err) } if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { t.Fatalf("failed to chmod file to 700: %s", err) } buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) buildCmd.Dir = ctx.Dir out, _, err := runCommandWithOutput(buildCmd) if err == nil { t.Fatalf("build should have failed: %s %s", err, out) } // check if we've detected the failure before we started building if !strings.Contains(out, "can't stat") { t.Fatalf("output should've contained the string: can't access %s", out) } if !strings.Contains(out, "Error checking context is accessible") { t.Fatalf("output should've contained the string: Error checking context is accessible") } } { name := "testlinksok" defer deleteImages(name) ctx, err := fakeContext("FROM scratch\nADD . 
/foo/", nil) if err != nil { t.Fatal(err) } defer ctx.Close() target := "../../../../../../../../../../../../../../../../../../../azA" if err := os.Symlink(filepath.Join(ctx.Dir, "g"), target); err != nil { t.Fatal(err) } defer os.Remove(target) // This is used to ensure we don't follow links when checking if everything in the context is accessible // This test doesn't require that we run commands as an unprivileged user if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } } { name := "testbuildignoredinaccessible" defer deleteImages(name) ctx, err := fakeContext("FROM scratch\nADD . /foo/", map[string]string{ "directoryWeCantStat/bar": "foo", ".dockerignore": "directoryWeCantStat", }) if err != nil { t.Fatal(err) } defer ctx.Close() // This is used to ensure we don't try to add inaccessible files when they are ignored by a .dockerignore pattern pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { t.Fatalf("failed to chown directory to root: %s", err) } if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { t.Fatalf("failed to chmod directory to 755: %s", err) } if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { t.Fatalf("failed to chmod file to 444: %s", err) } buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) buildCmd.Dir = ctx.Dir if out, _, err := runCommandWithOutput(buildCmd); err != nil { t.Fatalf("build should have worked: %s %s", err, out) } } logDone("build - ADD from context with inaccessible files must not pass") logDone("build - ADD from context with accessible links must work") logDone("build - ADD from context with ignored inaccessible files must work") } func TestBuildForceRm(t *testing.T) { containerCountBefore, err := getContainerCount() if err != nil { t.Fatalf("failed to get the container count: %s", err) } name := "testbuildforcerm" defer deleteImages(name) ctx, err := fakeContext("FROM scratch\nRUN true\nRUN thiswillfail", nil) if err != nil { t.Fatal(err) } defer ctx.Close() buildCmd := exec.Command(dockerBinary, "build", "-t", name, "--force-rm", ".") buildCmd.Dir = ctx.Dir if out, _, err := runCommandWithOutput(buildCmd); err == nil { t.Fatalf("failed to build the image: %s, %v", out, err) } containerCountAfter, err := getContainerCount() if err != nil { t.Fatalf("failed to get the container count: %s", err) } if containerCountBefore != containerCountAfter { t.Fatalf("--force-rm shouldn't have left containers behind") } logDone("build - ensure --force-rm doesn't leave containers behind") } // Test that an infinite sleep during a build is killed if the client disconnects. // This test is fairly hairy because there are lots of ways to race. // Strategy: // * Monitor the output of docker events starting from before // * Run a 1-year-long sleep from a docker build. // * When docker events sees container start, close the "docker build" command // * Wait for docker events to emit a dying event. func TestBuildCancelationKillsSleep(t *testing.T) { // TODO(jfrazelle): Make this work on Windows. 
testRequires(t, SameHostDaemon) name := "testbuildcancelation" defer deleteImages(name) defer deleteAllContainers() // (Note: one year, will never finish) ctx, err := fakeContext("FROM busybox\nRUN sleep 31536000", nil) if err != nil { t.Fatal(err) } defer ctx.Close() var wg sync.WaitGroup defer wg.Wait() finish := make(chan struct{}) defer close(finish) eventStart := make(chan struct{}) eventDie := make(chan struct{}) // Start one second ago, to avoid rounding problems startEpoch := time.Now().Add(-1 * time.Second) // Goroutine responsible for watching start/die events from `docker events` wg.Add(1) go func() { defer wg.Done() // Watch for events since epoch. eventsCmd := exec.Command(dockerBinary, "events", "-since", fmt.Sprint(startEpoch.Unix())) stdout, err := eventsCmd.StdoutPipe() if err != nil { t.Fatalf("failed to get stdout pipe for 'docker events': %s", err) } err = eventsCmd.Start() if err != nil { t.Fatalf("failed to start 'docker events': %s", err) } go func() { <-finish eventsCmd.Process.Kill() }() var started, died bool matchStart := regexp.MustCompile(" \\(from busybox\\:latest\\) start$") matchDie := regexp.MustCompile(" \\(from busybox\\:latest\\) die$") // // Read lines of `docker events` looking for container start and stop. // scanner := bufio.NewScanner(stdout) for scanner.Scan() { if ok := matchStart.MatchString(scanner.Text()); ok { if started { t.Fatal("assertion fail: more than one container started") } close(eventStart) started = true } if ok := matchDie.MatchString(scanner.Text()); ok { if died { t.Fatal("assertion fail: more than one container died") } close(eventDie) died = true } } err = eventsCmd.Wait() if err != nil && !IsKilled(err) { t.Fatalf("docker events had bad exit status: %s", err) } }() buildCmd := exec.Command(dockerBinary, "build", "-t", name, ".") buildCmd.Dir = ctx.Dir buildCmd.Stdout = os.Stdout err = buildCmd.Start() if err != nil { t.Fatalf("failed to run build: %s", err) } select { case <-time.After(30 * time.Second): t.Fatal("failed to observe build container start in timely fashion") case <-eventStart: // Proceeds from here when we see the container fly past in the // output of "docker events". // Now we know the container is running. } // Send a kill to the `docker build` command. // Causes the underlying build to be cancelled due to socket close. err = buildCmd.Process.Kill() if err != nil { t.Fatalf("error killing build command: %s", err) } // Get the exit status of `docker build`, check it exited because killed. err = buildCmd.Wait() if err != nil && !IsKilled(err) { t.Fatalf("wait failed during build run: %T %s", err, err) } select { case <-time.After(30 * time.Second): // If we don't get here in a timely fashion, it wasn't killed. t.Fatal("container cancel did not succeed") case <-eventDie: // We saw the container shut down in the `docker events` stream, // as expected. 
} logDone("build - ensure canceled job finishes immediately") } func TestBuildRm(t *testing.T) { name := "testbuildrm" defer deleteImages(name) ctx, err := fakeContext("FROM scratch\nADD foo /\nADD foo /", map[string]string{"foo": "bar"}) if err != nil { t.Fatal(err) } defer ctx.Close() { containerCountBefore, err := getContainerCount() if err != nil { t.Fatalf("failed to get the container count: %s", err) } out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "--rm", "-t", name, ".") if err != nil { t.Fatal("failed to build the image", out) } containerCountAfter, err := getContainerCount() if err != nil { t.Fatalf("failed to get the container count: %s", err) } if containerCountBefore != containerCountAfter { t.Fatalf("-rm shouldn't have left containers behind") } deleteImages(name) } { containerCountBefore, err := getContainerCount() if err != nil { t.Fatalf("failed to get the container count: %s", err) } out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", name, ".") if err != nil { t.Fatal("failed to build the image", out) } containerCountAfter, err := getContainerCount() if err != nil { t.Fatalf("failed to get the container count: %s", err) } if containerCountBefore != containerCountAfter { t.Fatalf("--rm shouldn't have left containers behind") } deleteImages(name) } { containerCountBefore, err := getContainerCount() if err != nil { t.Fatalf("failed to get the container count: %s", err) } out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "--rm=false", "-t", name, ".") if err != nil { t.Fatal("failed to build the image", out) } containerCountAfter, err := getContainerCount() if err != nil { t.Fatalf("failed to get the container count: %s", err) } if containerCountBefore == containerCountAfter { t.Fatalf("--rm=false should have left containers behind") } deleteAllContainers() deleteImages(name) } logDone("build - ensure --rm doesn't leave containers behind and that --rm=true is the default") logDone("build - ensure --rm=false overrides the default") } func TestBuildWithVolumes(t *testing.T) { var ( result map[string]map[string]struct{} name = "testbuildvolumes" emptyMap = make(map[string]struct{}) expected = map[string]map[string]struct{}{ "/test1": emptyMap, "/test2": emptyMap, "/test3": emptyMap, "/test4": emptyMap, "/test5": emptyMap, "/test6": emptyMap, "[/test7": emptyMap, "/test8]": emptyMap, } ) defer deleteImages(name) _, err := buildImage(name, `FROM scratch VOLUME /test1 VOLUME /test2 VOLUME /test3 /test4 VOLUME ["/test5", "/test6"] VOLUME [/test7 /test8] `, true) if err != nil { t.Fatal(err) } res, err := inspectFieldJSON(name, "Config.Volumes") if err != nil { t.Fatal(err) } err = unmarshalJSON([]byte(res), &result) if err != nil { t.Fatal(err) } equal := reflect.DeepEqual(&result, &expected) if !equal { t.Fatalf("Volumes %s, expected %s", result, expected) } logDone("build - with volumes") } func TestBuildMaintainer(t *testing.T) { name := "testbuildmaintainer" expected := "dockerio" defer deleteImages(name) _, err := buildImage(name, `FROM scratch MAINTAINER dockerio`, true) if err != nil { t.Fatal(err) } res, err := inspectField(name, "Author") if err != nil { t.Fatal(err) } if res != expected { t.Fatalf("Maintainer %s, expected %s", res, expected) } logDone("build - maintainer") } func TestBuildUser(t *testing.T) { name := "testbuilduser" expected := "dockerio" defer deleteImages(name) _, err := buildImage(name, `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd USER dockerio RUN [ $(whoami) = 'dockerio' ]`, true) if err != nil { 
t.Fatal(err) } res, err := inspectField(name, "Config.User") if err != nil { t.Fatal(err) } if res != expected { t.Fatalf("User %s, expected %s", res, expected) } logDone("build - user") } func TestBuildRelativeWorkdir(t *testing.T) { name := "testbuildrelativeworkdir" expected := "/test2/test3" defer deleteImages(name) _, err := buildImage(name, `FROM busybox RUN [ "$PWD" = '/' ] WORKDIR test1 RUN [ "$PWD" = '/test1' ] WORKDIR /test2 RUN [ "$PWD" = '/test2' ] WORKDIR test3 RUN [ "$PWD" = '/test2/test3' ]`, true) if err != nil { t.Fatal(err) } res, err := inspectField(name, "Config.WorkingDir") if err != nil { t.Fatal(err) } if res != expected { t.Fatalf("Workdir %s, expected %s", res, expected) } logDone("build - relative workdir") } func TestBuildWorkdirWithEnvVariables(t *testing.T) { name := "testbuildworkdirwithenvvariables" expected := "/test1/test2" defer deleteImages(name) _, err := buildImage(name, `FROM busybox ENV DIRPATH /test1 ENV SUBDIRNAME test2 WORKDIR $DIRPATH WORKDIR $SUBDIRNAME/$MISSING_VAR`, true) if err != nil { t.Fatal(err) } res, err := inspectField(name, "Config.WorkingDir") if err != nil { t.Fatal(err) } if res != expected { t.Fatalf("Workdir %s, expected %s", res, expected) } logDone("build - workdir with env variables") } func TestBuildRelativeCopy(t *testing.T) { name := "testbuildrelativecopy" defer deleteImages(name) dockerfile := ` FROM busybox WORKDIR /test1 WORKDIR test2 RUN [ "$PWD" = '/test1/test2' ] COPY foo ./ RUN [ "$(cat /test1/test2/foo)" = 'hello' ] ADD foo ./bar/baz RUN [ "$(cat /test1/test2/bar/baz)" = 'hello' ] COPY foo ./bar/baz2 RUN [ "$(cat /test1/test2/bar/baz2)" = 'hello' ] WORKDIR .. COPY foo ./ RUN [ "$(cat /test1/foo)" = 'hello' ] COPY foo /test3/ RUN [ "$(cat /test3/foo)" = 'hello' ] WORKDIR /test4 COPY . . 
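# (comment not in the original Dockerfile) COPY . . copies the whole build context into the current WORKDIR (/test4)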
RUN [ "$(cat /test4/foo)" = 'hello' ] WORKDIR /test5/test6 COPY foo ../ RUN [ "$(cat /test5/foo)" = 'hello' ] ` ctx, err := fakeContext(dockerfile, map[string]string{ "foo": "hello", }) defer ctx.Close() if err != nil { t.Fatal(err) } _, err = buildImageFromContext(name, ctx, false) if err != nil { t.Fatal(err) } logDone("build - relative copy/add") } func TestBuildEnv(t *testing.T) { name := "testbuildenv" expected := "[PATH=/test:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]" defer deleteImages(name) _, err := buildImage(name, `FROM busybox ENV PATH /test:$PATH ENV PORT 2375 RUN [ $(env | grep PORT) = 'PORT=2375' ]`, true) if err != nil { t.Fatal(err) } res, err := inspectField(name, "Config.Env") if err != nil { t.Fatal(err) } if res != expected { t.Fatalf("Env %s, expected %s", res, expected) } logDone("build - env") } func TestBuildContextCleanup(t *testing.T) { testRequires(t, SameHostDaemon) name := "testbuildcontextcleanup" defer deleteImages(name) entries, err := ioutil.ReadDir("/var/lib/docker/tmp") if err != nil { t.Fatalf("failed to list contents of tmp dir: %s", err) } _, err = buildImage(name, `FROM scratch ENTRYPOINT ["/bin/echo"]`, true) if err != nil { t.Fatal(err) } entriesFinal, err := ioutil.ReadDir("/var/lib/docker/tmp") if err != nil { t.Fatalf("failed to list contents of tmp dir: %s", err) } if err = compareDirectoryEntries(entries, entriesFinal); err != nil { t.Fatalf("context should have been deleted, but wasn't") } logDone("build - verify context cleanup works properly") } func TestBuildContextCleanupFailedBuild(t *testing.T) { testRequires(t, SameHostDaemon) name := "testbuildcontextcleanup" defer deleteImages(name) defer deleteAllContainers() entries, err := ioutil.ReadDir("/var/lib/docker/tmp") if err != nil { t.Fatalf("failed to list contents of tmp dir: %s", err) } _, err = buildImage(name, `FROM scratch RUN /non/existing/command`, true) if err == nil { t.Fatalf("expected build to fail, but it didn't") } entriesFinal, err := ioutil.ReadDir("/var/lib/docker/tmp") if err != nil { t.Fatalf("failed to list contents of tmp dir: %s", err) } if err = compareDirectoryEntries(entries, entriesFinal); err != nil { t.Fatalf("context should have been deleted, but wasn't") } logDone("build - verify context cleanup works properly after an unsuccessful build") } func TestBuildCmd(t *testing.T) { name := "testbuildcmd" expected := "[/bin/echo Hello World]" defer deleteImages(name) _, err := buildImage(name, `FROM scratch CMD ["/bin/echo", "Hello World"]`, true) if err != nil { t.Fatal(err) } res, err := inspectField(name, "Config.Cmd") if err != nil { t.Fatal(err) } if res != expected { t.Fatalf("Cmd %s, expected %s", res, expected) } logDone("build - cmd") } func TestBuildExpose(t *testing.T) { name := "testbuildexpose" expected := "map[2375/tcp:map[]]" defer deleteImages(name) _, err := buildImage(name, `FROM scratch EXPOSE 2375`, true) if err != nil { t.Fatal(err) } res, err := inspectField(name, "Config.ExposedPorts") if err != nil { t.Fatal(err) } if res != expected { t.Fatalf("Exposed ports %s, expected %s", res, expected) } logDone("build - expose") } func TestBuildExposeMorePorts(t *testing.T) { // start building docker file with a large number of ports portList := make([]string, 50) line := make([]string, 100) expectedPorts := make([]int, len(portList)*len(line)) for i := 0; i < len(portList); i++ { for j := 0; j < len(line); j++ { p := i*len(line) + j + 1 line[j] = strconv.Itoa(p) expectedPorts[p-1] = p } if i == len(portList)-1 { 
portList[i] = strings.Join(line, " ") } else { portList[i] = strings.Join(line, " ") + ` \` } } dockerfile := `FROM scratch EXPOSE {{range .}} {{.}} {{end}}` tmpl := template.Must(template.New("dockerfile").Parse(dockerfile)) buf := bytes.NewBuffer(nil) tmpl.Execute(buf, portList) name := "testbuildexpose" defer deleteImages(name) _, err := buildImage(name, buf.String(), true) if err != nil { t.Fatal(err) } // check if all the ports are saved inside Config.ExposedPorts res, err := inspectFieldJSON(name, "Config.ExposedPorts") if err != nil { t.Fatal(err) } var exposedPorts map[string]interface{} if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil { t.Fatal(err) } for _, p := range expectedPorts { ep := fmt.Sprintf("%d/tcp", p) if _, ok := exposedPorts[ep]; !ok { t.Errorf("Port(%s) is not exposed", ep) } else { delete(exposedPorts, ep) } } if len(exposedPorts) != 0 { t.Errorf("Unexpected extra exposed ports %v", exposedPorts) } logDone("build - expose large number of ports") } func TestBuildExposeOrder(t *testing.T) { buildID := func(name, exposed string) string { _, err := buildImage(name, fmt.Sprintf(`FROM scratch EXPOSE %s`, exposed), true) if err != nil { t.Fatal(err) } id, err := inspectField(name, "Id") if err != nil { t.Fatal(err) } return id } id1 := buildID("testbuildexpose1", "80 2375") id2 := buildID("testbuildexpose2", "2375 80") defer deleteImages("testbuildexpose1", "testbuildexpose2") if id1 != id2 { t.Errorf("EXPOSE should invalidate the cache only when ports actually changed") } logDone("build - expose order") } func TestBuildExposeUpperCaseProto(t *testing.T) { name := "testbuildexposeuppercaseproto" expected := "map[5678/udp:map[]]" defer deleteImages(name) _, err := buildImage(name, `FROM scratch EXPOSE 5678/UDP`, true) if err != nil { t.Fatal(err) } res, err := inspectField(name, "Config.ExposedPorts") if err != nil { t.Fatal(err) } if res != expected { t.Fatalf("Exposed ports %s, expected %s", res, expected) } logDone("build - expose port with upper case proto") } func TestBuildExposeHostPort(t *testing.T) { // start building docker file with ip:hostPort:containerPort name := "testbuildexpose" expected := "map[5678/tcp:map[]]" defer deleteImages(name) _, out, err := buildImageWithOut(name, `FROM scratch EXPOSE 192.168.1.2:2375:5678`, true) if err != nil { t.Fatal(err) } if !strings.Contains(out, "to map host ports to container ports (ip:hostPort:containerPort) is deprecated.") { t.Fatal("Missing warning message") } res, err := inspectField(name, "Config.ExposedPorts") if err != nil { t.Fatal(err) } if res != expected { t.Fatalf("Exposed ports %s, expected %s", res, expected) } logDone("build - ignore exposing host's port") } func TestBuildEmptyEntrypointInheritance(t *testing.T) { name := "testbuildentrypointinheritance" name2 := "testbuildentrypointinheritance2" defer deleteImages(name, name2) _, err := buildImage(name, `FROM busybox ENTRYPOINT ["/bin/echo"]`, true) if err != nil { t.Fatal(err) } res, err := inspectField(name, "Config.Entrypoint") if err != nil { t.Fatal(err) } expected := "[/bin/echo]" if res != expected { t.Fatalf("Entrypoint %s, expected %s", res, expected) } _, err = buildImage(name2, fmt.Sprintf(`FROM %s ENTRYPOINT []`, name), true) if err != nil { t.Fatal(err) } res, err = inspectField(name2, "Config.Entrypoint") if err != nil { t.Fatal(err) } expected = "[]" if res != expected { t.Fatalf("Entrypoint %s, expected %s", res, expected) } logDone("build - empty entrypoint inheritance") } func TestBuildEmptyEntrypoint(t *testing.T) { 
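// Descriptive note (not in the original source): unlike the inheritance test above, this checks
// that an explicit ENTRYPOINT [] on a fresh image records an empty ("[]") Config.Entrypoint directly.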
name := "testbuildentrypoint" defer deleteImages(name) expected := "[]" _, err := buildImage(name, `FROM busybox ENTRYPOINT []`, true) if err != nil { t.Fatal(err) } res, err := inspectField(name, "Config.Entrypoint") if err != nil { t.Fatal(err) } if res != expected { t.Fatalf("Entrypoint %s, expected %s", res, expected) } logDone("build - empty entrypoint") } func TestBuildEntrypoint(t *testing.T) { name := "testbuildentrypoint" expected := "[/bin/echo]" defer deleteImages(name) _, err := buildImage(name, `FROM scratch ENTRYPOINT ["/bin/echo"]`, true) if err != nil { t.Fatal(err) } res, err := inspectField(name, "Config.Entrypoint") if err != nil { t.Fatal(err) } if res != expected { t.Fatalf("Entrypoint %s, expected %s", res, expected) } logDone("build - entrypoint") } // #6445 ensure ONBUILD triggers aren't committed to grandchildren func TestBuildOnBuildLimitedInheritence(t *testing.T) { var ( out2, out3 string ) { name1 := "testonbuildtrigger1" dockerfile1 := ` FROM busybox RUN echo "GRANDPARENT" ONBUILD RUN echo "ONBUILD PARENT" ` ctx, err := fakeContext(dockerfile1, nil) if err != nil { t.Fatal(err) } defer ctx.Close() out1, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", name1, ".") if err != nil { t.Fatalf("build failed to complete: %s, %v", out1, err) } defer deleteImages(name1) } { name2 := "testonbuildtrigger2" dockerfile2 := ` FROM testonbuildtrigger1 ` ctx, err := fakeContext(dockerfile2, nil) if err != nil { t.Fatal(err) } defer ctx.Close() out2, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-t", name2, ".") if err != nil { t.Fatalf("build failed to complete: %s, %v", out2, err) } defer deleteImages(name2) } { name3 := "testonbuildtrigger3" dockerfile3 := ` FROM testonbuildtrigger2 ` ctx, err := fakeContext(dockerfile3, nil) if err != nil { t.Fatal(err) } defer ctx.Close() out3, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-t", name3, ".") if err != nil { t.Fatalf("build failed to complete: %s, %v", out3, err) } defer deleteImages(name3) } // ONBUILD should be run in second build. if !strings.Contains(out2, "ONBUILD PARENT") { t.Fatalf("ONBUILD instruction did not run in child of ONBUILD parent") } // ONBUILD should *not* be run in third build. 
if strings.Contains(out3, "ONBUILD PARENT") { t.Fatalf("ONBUILD instruction ran in grandchild of ONBUILD parent") } logDone("build - onbuild") } func TestBuildWithCache(t *testing.T) { name := "testbuildwithcache" defer deleteImages(name) id1, err := buildImage(name, `FROM scratch MAINTAINER dockerio EXPOSE 5432 ENTRYPOINT ["/bin/echo"]`, true) if err != nil { t.Fatal(err) } id2, err := buildImage(name, `FROM scratch MAINTAINER dockerio EXPOSE 5432 ENTRYPOINT ["/bin/echo"]`, true) if err != nil { t.Fatal(err) } if id1 != id2 { t.Fatal("The cache should have been used but hasn't.") } logDone("build - with cache") } func TestBuildWithoutCache(t *testing.T) { name := "testbuildwithoutcache" name2 := "testbuildwithoutcache2" defer deleteImages(name, name2) id1, err := buildImage(name, `FROM scratch MAINTAINER dockerio EXPOSE 5432 ENTRYPOINT ["/bin/echo"]`, true) if err != nil { t.Fatal(err) } id2, err := buildImage(name2, `FROM scratch MAINTAINER dockerio EXPOSE 5432 ENTRYPOINT ["/bin/echo"]`, false) if err != nil { t.Fatal(err) } if id1 == id2 { t.Fatal("The cache should have been invalidated but hasn't.") } logDone("build - without cache") } func TestBuildConditionalCache(t *testing.T) { name := "testbuildconditionalcache" name2 := "testbuildconditionalcache2" defer deleteImages(name, name2) dockerfile := ` FROM busybox ADD foo /tmp/` ctx, err := fakeContext(dockerfile, map[string]string{ "foo": "hello", }) if err != nil { t.Fatal(err) } defer ctx.Close() id1, err := buildImageFromContext(name, ctx, true) if err != nil { t.Fatalf("Error building #1: %s", err) } if err := ctx.Add("foo", "bye"); err != nil { t.Fatalf("Error modifying foo: %s", err) } id2, err := buildImageFromContext(name, ctx, false) if err != nil { t.Fatalf("Error building #2: %s", err) } if id2 == id1 { t.Fatal("Should not have used the cache") } id3, err := buildImageFromContext(name, ctx, true) if err != nil { t.Fatalf("Error building #3: %s", err) } if id3 != id2 { t.Fatal("Should have used the cache") } logDone("build - conditional cache") } func TestBuildADDLocalFileWithCache(t *testing.T) { name := "testbuildaddlocalfilewithcache" name2 := "testbuildaddlocalfilewithcache2" defer deleteImages(name, name2) dockerfile := ` FROM busybox MAINTAINER dockerio ADD foo /usr/lib/bla/bar RUN [ "$(cat /usr/lib/bla/bar)" = "hello" ]` ctx, err := fakeContext(dockerfile, map[string]string{ "foo": "hello", }) defer ctx.Close() if err != nil { t.Fatal(err) } id1, err := buildImageFromContext(name, ctx, true) if err != nil { t.Fatal(err) } id2, err := buildImageFromContext(name2, ctx, true) if err != nil { t.Fatal(err) } if id1 != id2 { t.Fatal("The cache should have been used but hasn't.") } logDone("build - add local file with cache") } func TestBuildADDMultipleLocalFileWithCache(t *testing.T) { name := "testbuildaddmultiplelocalfilewithcache" name2 := "testbuildaddmultiplelocalfilewithcache2" defer deleteImages(name, name2) dockerfile := ` FROM busybox MAINTAINER dockerio ADD foo Dockerfile /usr/lib/bla/ RUN [ "$(cat /usr/lib/bla/foo)" = "hello" ]` ctx, err := fakeContext(dockerfile, map[string]string{ "foo": "hello", }) defer ctx.Close() if err != nil { t.Fatal(err) } id1, err := buildImageFromContext(name, ctx, true) if err != nil { t.Fatal(err) } id2, err := buildImageFromContext(name2, ctx, true) if err != nil { t.Fatal(err) } if id1 != id2 { t.Fatal("The cache should have been used but hasn't.") } logDone("build - add multiple local files with cache") } func TestBuildADDLocalFileWithoutCache(t *testing.T) { name := 
"testbuildaddlocalfilewithoutcache" name2 := "testbuildaddlocalfilewithoutcache2" defer deleteImages(name, name2) dockerfile := ` FROM busybox MAINTAINER dockerio ADD foo /usr/lib/bla/bar RUN [ "$(cat /usr/lib/bla/bar)" = "hello" ]` ctx, err := fakeContext(dockerfile, map[string]string{ "foo": "hello", }) defer ctx.Close() if err != nil { t.Fatal(err) } id1, err := buildImageFromContext(name, ctx, true) if err != nil { t.Fatal(err) } id2, err := buildImageFromContext(name2, ctx, false) if err != nil { t.Fatal(err) } if id1 == id2 { t.Fatal("The cache should have been invalided but hasn't.") } logDone("build - add local file without cache") } func TestBuildCopyDirButNotFile(t *testing.T) { name := "testbuildcopydirbutnotfile" name2 := "testbuildcopydirbutnotfile2" defer deleteImages(name, name2) dockerfile := ` FROM scratch COPY dir /tmp/` ctx, err := fakeContext(dockerfile, map[string]string{ "dir/foo": "hello", }) defer ctx.Close() if err != nil { t.Fatal(err) } id1, err := buildImageFromContext(name, ctx, true) if err != nil { t.Fatal(err) } // Check that adding file with similar name doesn't mess with cache if err := ctx.Add("dir_file", "hello2"); err != nil { t.Fatal(err) } id2, err := buildImageFromContext(name2, ctx, true) if err != nil { t.Fatal(err) } if id1 != id2 { t.Fatal("The cache should have been used but wasn't") } logDone("build - add current directory but not file") } func TestBuildADDCurrentDirWithCache(t *testing.T) { name := "testbuildaddcurrentdirwithcache" name2 := name + "2" name3 := name + "3" name4 := name + "4" name5 := name + "5" defer deleteImages(name, name2, name3, name4, name5) dockerfile := ` FROM scratch MAINTAINER dockerio ADD . /usr/lib/bla` ctx, err := fakeContext(dockerfile, map[string]string{ "foo": "hello", }) defer ctx.Close() if err != nil { t.Fatal(err) } id1, err := buildImageFromContext(name, ctx, true) if err != nil { t.Fatal(err) } // Check that adding file invalidate cache of "ADD ." if err := ctx.Add("bar", "hello2"); err != nil { t.Fatal(err) } id2, err := buildImageFromContext(name2, ctx, true) if err != nil { t.Fatal(err) } if id1 == id2 { t.Fatal("The cache should have been invalided but hasn't.") } // Check that changing file invalidate cache of "ADD ." if err := ctx.Add("foo", "hello1"); err != nil { t.Fatal(err) } id3, err := buildImageFromContext(name3, ctx, true) if err != nil { t.Fatal(err) } if id2 == id3 { t.Fatal("The cache should have been invalided but hasn't.") } // Check that changing file to same content invalidate cache of "ADD ." time.Sleep(1 * time.Second) // wait second because of mtime precision if err := ctx.Add("foo", "hello1"); err != nil { t.Fatal(err) } id4, err := buildImageFromContext(name4, ctx, true) if err != nil { t.Fatal(err) } if id3 == id4 { t.Fatal("The cache should have been invalided but hasn't.") } id5, err := buildImageFromContext(name5, ctx, true) if err != nil { t.Fatal(err) } if id4 != id5 { t.Fatal("The cache should have been used but hasn't.") } logDone("build - add current directory with cache") } func TestBuildADDCurrentDirWithoutCache(t *testing.T) { name := "testbuildaddcurrentdirwithoutcache" name2 := "testbuildaddcurrentdirwithoutcache2" defer deleteImages(name, name2) dockerfile := ` FROM scratch MAINTAINER dockerio ADD . 
/usr/lib/bla` ctx, err := fakeContext(dockerfile, map[string]string{ "foo": "hello", }) defer ctx.Close() if err != nil { t.Fatal(err) } id1, err := buildImageFromContext(name, ctx, true) if err != nil { t.Fatal(err) } id2, err := buildImageFromContext(name2, ctx, false) if err != nil { t.Fatal(err) } if id1 == id2 { t.Fatal("The cache should have been invalidated but wasn't.") } logDone("build - add current directory without cache") } func TestBuildADDRemoteFileWithCache(t *testing.T) { name := "testbuildaddremotefilewithcache" defer deleteImages(name) server, err := fakeStorage(map[string]string{ "baz": "hello", }) if err != nil { t.Fatal(err) } defer server.Close() id1, err := buildImage(name, fmt.Sprintf(`FROM scratch MAINTAINER dockerio ADD %s/baz /usr/lib/baz/quux`, server.URL()), true) if err != nil { t.Fatal(err) } id2, err := buildImage(name, fmt.Sprintf(`FROM scratch MAINTAINER dockerio ADD %s/baz /usr/lib/baz/quux`, server.URL()), true) if err != nil { t.Fatal(err) } if id1 != id2 { t.Fatal("The cache should have been used but wasn't.") } logDone("build - add remote file with cache") } func TestBuildADDRemoteFileWithoutCache(t *testing.T) { name := "testbuildaddremotefilewithoutcache" name2 := "testbuildaddremotefilewithoutcache2" defer deleteImages(name, name2) server, err := fakeStorage(map[string]string{ "baz": "hello", }) if err != nil { t.Fatal(err) } defer server.Close() id1, err := buildImage(name, fmt.Sprintf(`FROM scratch MAINTAINER dockerio ADD %s/baz /usr/lib/baz/quux`, server.URL()), true) if err != nil { t.Fatal(err) } id2, err := buildImage(name2, fmt.Sprintf(`FROM scratch MAINTAINER dockerio ADD %s/baz /usr/lib/baz/quux`, server.URL()), false) if err != nil { t.Fatal(err) } if id1 == id2 { t.Fatal("The cache should have been invalidated but wasn't.") } logDone("build - add remote file without cache") } func TestBuildADDRemoteFileMTime(t *testing.T) { name := "testbuildaddremotefilemtime" name2 := name + "2" name3 := name + "3" name4 := name + "4" defer deleteImages(name, name2, name3, name4) files := map[string]string{"baz": "hello"} server, err := fakeStorage(files) if err != nil { t.Fatal(err) } defer server.Close() ctx, err := fakeContext(fmt.Sprintf(`FROM scratch MAINTAINER dockerio ADD %s/baz /usr/lib/baz/quux`, server.URL()), nil) if err != nil { t.Fatal(err) } defer ctx.Close() id1, err := buildImageFromContext(name, ctx, true) if err != nil { t.Fatal(err) } id2, err := buildImageFromContext(name2, ctx, true) if err != nil { t.Fatal(err) } if id1 != id2 { t.Fatal("The cache should have been used but wasn't - #1") } // Now create a different server with the same contents (which causes a different mtime) // This time the cache should not be used // allow some time for the clock to pass as mtime precision is only 1s time.Sleep(2 * time.Second) server2, err := fakeStorage(files) if err != nil { t.Fatal(err) } defer server2.Close() ctx2, err := fakeContext(fmt.Sprintf(`FROM scratch MAINTAINER dockerio ADD %s/baz /usr/lib/baz/quux`, server2.URL()), nil) if err != nil { t.Fatal(err) } defer ctx2.Close() id3, err := buildImageFromContext(name3, ctx2, true) if err != nil { t.Fatal(err) } if id1 == id3 { t.Fatal("The cache should not have been used but was") } // And for good measure do it again and make sure cache is used this time id4, err := buildImageFromContext(name4, ctx2, true) if err != nil { t.Fatal(err) } if id3 != id4 { t.Fatal("The cache should have been used but wasn't - #2") } logDone("build - add remote file testing mtime") } func TestBuildADDLocalAndRemoteFilesWithCache(t 
*testing.T) { name := "testbuildaddlocalandremotefilewithcache" defer deleteImages(name) server, err := fakeStorage(map[string]string{ "baz": "hello", }) if err != nil { t.Fatal(err) } defer server.Close() ctx, err := fakeContext(fmt.Sprintf(`FROM scratch MAINTAINER dockerio ADD foo /usr/lib/bla/bar ADD %s/baz /usr/lib/baz/quux`, server.URL()), map[string]string{ "foo": "hello world", }) if err != nil { t.Fatal(err) } defer ctx.Close() id1, err := buildImageFromContext(name, ctx, true) if err != nil { t.Fatal(err) } id2, err := buildImageFromContext(name, ctx, true) if err != nil { t.Fatal(err) } if id1 != id2 { t.Fatal("The cache should have been used but wasn't.") } logDone("build - add local and remote file with cache") } func testContextTar(t *testing.T, compression archive.Compression) { ctx, err := fakeContext( `FROM busybox ADD foo /foo CMD ["cat", "/foo"]`, map[string]string{ "foo": "bar", }, ) defer ctx.Close() if err != nil { t.Fatal(err) } context, err := archive.Tar(ctx.Dir, compression) if err != nil { t.Fatalf("failed to build context tar: %v", err) } name := "contexttar" buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-") defer deleteImages(name) buildCmd.Stdin = context if out, _, err := runCommandWithOutput(buildCmd); err != nil { t.Fatalf("build failed to complete: %v %v", out, err) } logDone(fmt.Sprintf("build - build an image with a context tar, compression: %v", compression)) } func TestBuildContextTarGzip(t *testing.T) { testContextTar(t, archive.Gzip) } func TestBuildContextTarNoCompression(t *testing.T) { testContextTar(t, archive.Uncompressed) } func TestBuildNoContext(t *testing.T) { buildCmd := exec.Command(dockerBinary, "build", "-t", "nocontext", "-") buildCmd.Stdin = strings.NewReader("FROM busybox\nCMD echo ok\n") if out, _, err := runCommandWithOutput(buildCmd); err != nil { t.Fatalf("build failed to complete: %v %v", out, err) } if out, _, err := dockerCmd(t, "run", "--rm", "nocontext"); out != "ok\n" || err != nil { t.Fatalf("run produced invalid output: %q, expected %q", out, "ok") } deleteImages("nocontext") logDone("build - build an image with no context") } // TODO: TestCaching func TestBuildADDLocalAndRemoteFilesWithoutCache(t *testing.T) { name := "testbuildaddlocalandremotefilewithoutcache" name2 := "testbuildaddlocalandremotefilewithoutcache2" defer deleteImages(name, name2) server, err := fakeStorage(map[string]string{ "baz": "hello", }) if err != nil { t.Fatal(err) } defer server.Close() ctx, err := fakeContext(fmt.Sprintf(`FROM scratch MAINTAINER dockerio ADD foo /usr/lib/bla/bar ADD %s/baz /usr/lib/baz/quux`, server.URL()), map[string]string{ "foo": "hello world", }) if err != nil { t.Fatal(err) } defer ctx.Close() id1, err := buildImageFromContext(name, ctx, true) if err != nil { t.Fatal(err) } id2, err := buildImageFromContext(name2, ctx, false) if err != nil { t.Fatal(err) } if id1 == id2 { t.Fatal("The cache should have been invalidated but wasn't.") } logDone("build - add local and remote file without cache") } func TestBuildWithVolumeOwnership(t *testing.T) { name := "testbuildimg" defer deleteImages(name) _, err := buildImage(name, `FROM busybox:latest RUN mkdir /test && chown daemon:daemon /test && chmod 0600 /test VOLUME /test`, true) if err != nil { t.Fatal(err) } cmd := exec.Command(dockerBinary, "run", "--rm", "testbuildimg", "ls", "-la", "/test") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(out, err) } if expected := "drw-------"; !strings.Contains(out, expected) { t.Fatalf("expected %s received %s", 
expected, out) } if expected := "daemon daemon"; !strings.Contains(out, expected) { t.Fatalf("expected %s received %s", expected, out) } logDone("build - volume ownership") } // testing #1405 - config.Cmd does not get cleaned up if // utilizing cache func TestBuildEntrypointRunCleanup(t *testing.T) { name := "testbuildcmdcleanup" defer deleteImages(name) if _, err := buildImage(name, `FROM busybox RUN echo "hello"`, true); err != nil { t.Fatal(err) } ctx, err := fakeContext(`FROM busybox RUN echo "hello" ADD foo /foo ENTRYPOINT ["/bin/echo"]`, map[string]string{ "foo": "hello", }) defer ctx.Close() if err != nil { t.Fatal(err) } if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } res, err := inspectField(name, "Config.Cmd") if err != nil { t.Fatal(err) } // Cmd must be cleaned up if expected := ""; res != expected { t.Fatalf("Cmd %s, expected %s", res, expected) } logDone("build - cleanup cmd after RUN") } func TestBuildForbiddenContextPath(t *testing.T) { name := "testbuildforbidpath" defer deleteImages(name) ctx, err := fakeContext(`FROM scratch ADD ../../ test/ `, map[string]string{ "test.txt": "test1", "other.txt": "other", }) defer ctx.Close() if err != nil { t.Fatal(err) } expected := "Forbidden path outside the build context: ../../ " if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { t.Fatalf("Wrong error: (should contain \"%s\") got:\n%v", expected, err) } logDone("build - forbidden context path") } func TestBuildADDFileNotFound(t *testing.T) { name := "testbuildaddnotfound" defer deleteImages(name) ctx, err := fakeContext(`FROM scratch ADD foo /usr/local/bar`, map[string]string{"bar": "hello"}) defer ctx.Close() if err != nil { t.Fatal(err) } if _, err := buildImageFromContext(name, ctx, true); err != nil { if !strings.Contains(err.Error(), "foo: no such file or directory") { t.Fatalf("Wrong error %v, must be about missing foo file or directory", err) } } else { t.Fatal("Error must not be nil") } logDone("build - add file not found") } func TestBuildInheritance(t *testing.T) { name := "testbuildinheritance" defer deleteImages(name) _, err := buildImage(name, `FROM scratch EXPOSE 2375`, true) if err != nil { t.Fatal(err) } ports1, err := inspectField(name, "Config.ExposedPorts") if err != nil { t.Fatal(err) } _, err = buildImage(name, fmt.Sprintf(`FROM %s ENTRYPOINT ["/bin/echo"]`, name), true) if err != nil { t.Fatal(err) } res, err := inspectField(name, "Config.Entrypoint") if err != nil { t.Fatal(err) } if expected := "[/bin/echo]"; res != expected { t.Fatalf("Entrypoint %s, expected %s", res, expected) } ports2, err := inspectField(name, "Config.ExposedPorts") if err != nil { t.Fatal(err) } if ports1 != ports2 { t.Fatalf("Ports must be same: %s != %s", ports1, ports2) } logDone("build - inheritance") } func TestBuildFails(t *testing.T) { name := "testbuildfails" defer deleteImages(name) defer deleteAllContainers() _, err := buildImage(name, `FROM busybox RUN sh -c "exit 23"`, true) if err != nil { if !strings.Contains(err.Error(), "returned a non-zero code: 23") { t.Fatalf("Wrong error %v, must be about non-zero code 23", err) } } else { t.Fatal("Error must not be nil") } logDone("build - unsuccessful") } func TestBuildFailsDockerfileEmpty(t *testing.T) { name := "testbuildfails" defer deleteImages(name) _, err := buildImage(name, ``, true) if err != nil { if !strings.Contains(err.Error(), "Dockerfile cannot be empty") { t.Fatalf("Wrong error %v, must be about empty Dockerfile", err) } } 
else { t.Fatal("Error must not be nil") } logDone("build - unsuccessful with empty dockerfile") } func TestBuildOnBuild(t *testing.T) { name := "testbuildonbuild" defer deleteImages(name) _, err := buildImage(name, `FROM busybox ONBUILD RUN touch foobar`, true) if err != nil { t.Fatal(err) } _, err = buildImage(name, fmt.Sprintf(`FROM %s RUN [ -f foobar ]`, name), true) if err != nil { t.Fatal(err) } logDone("build - onbuild") } func TestBuildOnBuildForbiddenChained(t *testing.T) { name := "testbuildonbuildforbiddenchained" defer deleteImages(name) _, err := buildImage(name, `FROM busybox ONBUILD ONBUILD RUN touch foobar`, true) if err != nil { if !strings.Contains(err.Error(), "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") { t.Fatalf("Wrong error %v, must be about chaining ONBUILD", err) } } else { t.Fatal("Error must not be nil") } logDone("build - onbuild forbidden chained") } func TestBuildOnBuildForbiddenFrom(t *testing.T) { name := "testbuildonbuildforbiddenfrom" defer deleteImages(name) _, err := buildImage(name, `FROM busybox ONBUILD FROM scratch`, true) if err != nil { if !strings.Contains(err.Error(), "FROM isn't allowed as an ONBUILD trigger") { t.Fatalf("Wrong error %v, must be about FROM forbidden", err) } } else { t.Fatal("Error must not be nil") } logDone("build - onbuild forbidden from") } func TestBuildOnBuildForbiddenMaintainer(t *testing.T) { name := "testbuildonbuildforbiddenmaintainer" defer deleteImages(name) _, err := buildImage(name, `FROM busybox ONBUILD MAINTAINER docker.io`, true) if err != nil { if !strings.Contains(err.Error(), "MAINTAINER isn't allowed as an ONBUILD trigger") { t.Fatalf("Wrong error %v, must be about MAINTAINER forbidden", err) } } else { t.Fatal("Error must not be nil") } logDone("build - onbuild forbidden maintainer") } // gh #2446 func TestBuildAddToSymlinkDest(t *testing.T) { name := "testbuildaddtosymlinkdest" defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN mkdir /foo RUN ln -s /foo /bar ADD foo /bar/ RUN [ -f /bar/foo ] RUN [ -f /foo/foo ]`, map[string]string{ "foo": "hello", }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - add to symlink destination") } func TestBuildEscapeWhitespace(t *testing.T) { name := "testbuildescaping" defer deleteImages(name) _, err := buildImage(name, ` FROM busybox MAINTAINER "Docker \ IO " `, true) if err != nil { t.Fatal(err) } res, err := inspectField(name, "Author") if err != nil { t.Fatal(err) } if res != "\"Docker IO \"" { t.Fatalf("Parsed string did not match the escaped string. Got: %q", res) } logDone("build - validate escaping whitespace") } func TestBuildVerifyIntString(t *testing.T) { // Verify that strings that look like ints are still passed as strings name := "testbuildstringing" defer deleteImages(name) _, err := buildImage(name, ` FROM busybox MAINTAINER 123 `, true) if err != nil { t.Fatal(err) } out, rc, err := runCommandWithOutput(exec.Command(dockerBinary, "inspect", name)) if rc != 0 || err != nil { t.Fatalf("Unexpected error from inspect: rc: %v err: %v", rc, err) } if !strings.Contains(out, "\"123\"") { t.Fatalf("Output does not contain the int as a string:\n%s", out) } logDone("build - verify int/strings as strings") } func TestBuildDockerignore(t *testing.T) { name := "testbuilddockerignore" defer deleteImages(name) dockerfile := ` FROM busybox ADD . /bla RUN [[ -f /bla/src/x.go ]] RUN [[ -f /bla/Makefile ]] RUN [[ ! -e /bla/src/_vendor ]] RUN [[ ! -e /bla/.gitignore ]] RUN [[ ! -e /bla/README.md ]] RUN [[ ! 
-e /bla/.git ]]` ctx, err := fakeContext(dockerfile, map[string]string{ "Makefile": "all:", ".git/HEAD": "ref: foo", "src/x.go": "package main", "src/_vendor/v.go": "package main", ".gitignore": "", "README.md": "readme", ".dockerignore": ".git\npkg\n.gitignore\nsrc/_vendor\n*.md", }) defer ctx.Close() if err != nil { t.Fatal(err) } if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - test .dockerignore") } func TestBuildDockerignoreCleanPaths(t *testing.T) { name := "testbuilddockerignorecleanpaths" defer deleteImages(name) dockerfile := ` FROM busybox ADD . /tmp/ RUN (! ls /tmp/foo) && (! ls /tmp/foo2) && (! ls /tmp/dir1/foo)` ctx, err := fakeContext(dockerfile, map[string]string{ "foo": "foo", "foo2": "foo2", "dir1/foo": "foo in dir1", ".dockerignore": "./foo\ndir1//foo\n./dir1/../foo2", }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - test .dockerignore with clean paths") } func TestBuildDockerignoringDockerfile(t *testing.T) { name := "testbuilddockerignoredockerfile" defer deleteImages(name) dockerfile := ` FROM busybox ADD . /tmp/ RUN ! ls /tmp/Dockerfile RUN ls /tmp/.dockerignore` ctx, err := fakeContext(dockerfile, map[string]string{ "Dockerfile": dockerfile, ".dockerignore": "Dockerfile\n", }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err = buildImageFromContext(name, ctx, true); err != nil { t.Fatalf("Didn't ignore Dockerfile correctly:%s", err) } // now try it with ./Dockerfile ctx.Add(".dockerignore", "./Dockerfile\n") if _, err = buildImageFromContext(name, ctx, true); err != nil { t.Fatalf("Didn't ignore ./Dockerfile correctly:%s", err) } logDone("build - test .dockerignore of Dockerfile") } func TestBuildDockerignoringRenamedDockerfile(t *testing.T) { name := "testbuilddockerignoredockerfile" defer deleteImages(name) dockerfile := ` FROM busybox ADD . /tmp/ RUN ls /tmp/Dockerfile RUN ! ls /tmp/MyDockerfile RUN ls /tmp/.dockerignore` ctx, err := fakeContext(dockerfile, map[string]string{ "Dockerfile": "Should not use me", "MyDockerfile": dockerfile, ".dockerignore": "MyDockerfile\n", }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err = buildImageFromContext(name, ctx, true); err != nil { t.Fatalf("Didn't ignore MyDockerfile correctly:%s", err) } // now try it with ./MyDockerfile ctx.Add(".dockerignore", "./MyDockerfile\n") if _, err = buildImageFromContext(name, ctx, true); err != nil { t.Fatalf("Didn't ignore ./MyDockerfile correctly:%s", err) } logDone("build - test .dockerignore of renamed Dockerfile") } func TestBuildDockerignoringDockerignore(t *testing.T) { name := "testbuilddockerignoredockerignore" defer deleteImages(name) dockerfile := ` FROM busybox ADD . /tmp/ RUN ! ls /tmp/.dockerignore RUN ls /tmp/Dockerfile` ctx, err := fakeContext(dockerfile, map[string]string{ "Dockerfile": dockerfile, ".dockerignore": ".dockerignore\n", }) defer ctx.Close() if err != nil { t.Fatal(err) } if _, err = buildImageFromContext(name, ctx, true); err != nil { t.Fatalf("Didn't ignore .dockerignore correctly:%s", err) } logDone("build - test .dockerignore of .dockerignore") } func TestBuildDockerignoreTouchDockerfile(t *testing.T) { var id1 string var id2 string name := "testbuilddockerignoretouchdockerfile" defer deleteImages(name) dockerfile := ` FROM busybox ADD . 
/tmp/` ctx, err := fakeContext(dockerfile, map[string]string{ "Dockerfile": dockerfile, ".dockerignore": "Dockerfile\n", }) defer ctx.Close() if err != nil { t.Fatal(err) } if id1, err = buildImageFromContext(name, ctx, true); err != nil { t.Fatalf("Didn't build it correctly:%s", err) } if id2, err = buildImageFromContext(name, ctx, true); err != nil { t.Fatalf("Didn't build it correctly:%s", err) } if id1 != id2 { t.Fatalf("Didn't use the cache - 1") } // Now make sure touching Dockerfile doesn't invalidate the cache if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { t.Fatalf("Didn't add Dockerfile: %s", err) } if id2, err = buildImageFromContext(name, ctx, true); err != nil { t.Fatalf("Didn't build it correctly:%s", err) } if id1 != id2 { t.Fatalf("Didn't use the cache - 2") } // One more time but just 'touch' it instead of changing the content if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { t.Fatalf("Didn't add Dockerfile: %s", err) } if id2, err = buildImageFromContext(name, ctx, true); err != nil { t.Fatalf("Didn't build it correctly:%s", err) } if id1 != id2 { t.Fatalf("Didn't use the cache - 3") } logDone("build - test .dockerignore touch dockerfile") } func TestBuildDockerignoringWholeDir(t *testing.T) { name := "testbuilddockerignorewholedir" defer deleteImages(name) dockerfile := ` FROM busybox COPY . / RUN [[ ! -e /.gitignore ]] RUN [[ -f /Makefile ]]` ctx, err := fakeContext(dockerfile, map[string]string{ "Dockerfile": "FROM scratch", "Makefile": "all:", ".dockerignore": ".*\n", }) defer ctx.Close() if err != nil { t.Fatal(err) } if _, err = buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - test .dockerignore whole dir with .*") } func TestBuildLineBreak(t *testing.T) { name := "testbuildlinebreak" defer deleteImages(name) _, err := buildImage(name, `FROM busybox RUN sh -c 'echo root:testpass \ > /tmp/passwd' RUN mkdir -p /var/run/sshd RUN [ "$(cat /tmp/passwd)" = "root:testpass" ] RUN [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]`, true) if err != nil { t.Fatal(err) } logDone("build - line break with \\") } func TestBuildEOLInLine(t *testing.T) { name := "testbuildeolinline" defer deleteImages(name) _, err := buildImage(name, `FROM busybox RUN sh -c 'echo root:testpass > /tmp/passwd' RUN echo "foo \n bar"; echo "baz" RUN mkdir -p /var/run/sshd RUN [ "$(cat /tmp/passwd)" = "root:testpass" ] RUN [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]`, true) if err != nil { t.Fatal(err) } logDone("build - end of line in dockerfile instruction") } func TestBuildCommentsShebangs(t *testing.T) { name := "testbuildcomments" defer deleteImages(name) _, err := buildImage(name, `FROM busybox # This is an ordinary comment. RUN { echo '#!/bin/sh'; echo 'echo hello world'; } > /hello.sh RUN [ ! 
-x /hello.sh ] # comment with line break \ RUN chmod +x /hello.sh RUN [ -x /hello.sh ] RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ] RUN [ "$(/hello.sh)" = "hello world" ]`, true) if err != nil { t.Fatal(err) } logDone("build - comments and shebangs") } func TestBuildUsersAndGroups(t *testing.T) { name := "testbuildusers" defer deleteImages(name) _, err := buildImage(name, `FROM busybox # Make sure our defaults work RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ] # TODO decide if "args.user = strconv.Itoa(syscall.Getuid())" is acceptable behavior for changeUser in sysvinit instead of "return nil" when "USER" isn't specified (so that we get the proper group list even if that is the empty list, even in the default case of not supplying an explicit USER to run as, which implies USER 0) USER root RUN [ "$(id -G):$(id -Gn)" = '0 10:root wheel' ] # Setup dockerio user and group RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group # Make sure we can switch to our user and all the information is exactly as we expect it to be USER dockerio RUN id -G RUN id -Gn RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] # Switch back to root and double check that worked exactly as we might expect it to USER root RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '0:0/root:root/0 10:root wheel' ] # Add a "supplementary" group for our dockerio user RUN echo 'supplementary:x:1002:dockerio' >> /etc/group # ... and then go verify that we get it like we expect USER dockerio RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] USER 1001 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] # super test the new "user:group" syntax USER dockerio:dockerio RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] USER 1001:dockerio RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] USER dockerio:1001 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] USER 1001:1001 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] USER dockerio:supplementary RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] USER dockerio:1002 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] USER 1001:supplementary RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] USER 1001:1002 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] # make sure unknown uid/gid still works properly USER 1042:1043 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ]`, true) if err != nil { t.Fatal(err) } logDone("build - users and groups") } func TestBuildEnvUsage(t *testing.T) { name := "testbuildenvusage" defer deleteImages(name) dockerfile := `FROM busybox ENV HOME /root ENV PATH $HOME/bin:$PATH ENV PATH /tmp:$PATH RUN [ "$PATH" = 
"/tmp:$HOME/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ] ENV FOO /foo/baz ENV BAR /bar ENV BAZ $BAR ENV FOOPATH $PATH:$FOO RUN [ "$BAR" = "$BAZ" ] RUN [ "$FOOPATH" = "$PATH:/foo/baz" ] ENV FROM hello/docker/world ENV TO /docker/world/hello ADD $FROM $TO RUN [ "$(cat $TO)" = "hello" ] ENV abc=def ENV ghi=$abc RUN [ "$ghi" = "def" ] ` ctx, err := fakeContext(dockerfile, map[string]string{ "hello/docker/world": "hello", }) if err != nil { t.Fatal(err) } defer ctx.Close() _, err = buildImageFromContext(name, ctx, true) if err != nil { t.Fatal(err) } logDone("build - environment variables usage") } func TestBuildEnvUsage2(t *testing.T) { name := "testbuildenvusage2" defer deleteImages(name) dockerfile := `FROM busybox ENV abc=def RUN [ "$abc" = "def" ] ENV def="hello world" RUN [ "$def" = "hello world" ] ENV def=hello\ world RUN [ "$def" = "hello world" ] ENV v1=abc v2="hi there" RUN [ "$v1" = "abc" ] RUN [ "$v2" = "hi there" ] ENV v3='boogie nights' v4="with'quotes too" RUN [ "$v3" = "boogie nights" ] RUN [ "$v4" = "with'quotes too" ] ENV abc=zzz FROM=hello/docker/world ENV abc=zzz TO=/docker/world/hello ADD $FROM $TO RUN [ "$(cat $TO)" = "hello" ] ENV abc "zzz" RUN [ $abc = "zzz" ] ENV abc 'yyy' RUN [ $abc = 'yyy' ] ENV abc= RUN [ "$abc" = "" ] # use grep to make sure if the builder substitutes \$foo by mistake # we don't get a false positive ENV abc=\$foo RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo) ENV abc \$foo RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo) ENV abc=\'foo\' RUN [ "$abc" = "'foo'" ] ENV abc=\"foo\" RUN [ "$abc" = "\"foo\"" ] ENV abc "foo" RUN [ "$abc" = "foo" ] ENV abc 'foo' RUN [ "$abc" = 'foo' ] ENV abc \'foo\' RUN [ "$abc" = "'foo'" ] ENV abc \"foo\" RUN [ "$abc" = '"foo"' ] ENV e1=bar ENV e2=$e1 ENV e3=$e11 ENV e4=\$e1 ENV e5=\$e11 RUN [ "$e0,$e1,$e2,$e3,$e4,$e5" = ',bar,bar,,$e1,$e11' ] ENV ee1 bar ENV ee2 $ee1 ENV ee3 $ee11 ENV ee4 \$ee1 ENV ee5 \$ee11 RUN [ "$ee1,$ee2,$ee3,$ee4,$ee5" = 'bar,bar,,$ee1,$ee11' ] ENV eee1="foo" ENV eee2='foo' ENV eee3 "foo" ENV eee4 'foo' RUN [ "$eee1,$eee2,$eee3,$eee4" = 'foo,foo,foo,foo' ] ` ctx, err := fakeContext(dockerfile, map[string]string{ "hello/docker/world": "hello", }) if err != nil { t.Fatal(err) } defer ctx.Close() _, err = buildImageFromContext(name, ctx, true) if err != nil { t.Fatal(err) } logDone("build - environment variables usage2") } func TestBuildAddScript(t *testing.T) { name := "testbuildaddscript" defer deleteImages(name) dockerfile := ` FROM busybox ADD test /test RUN ["chmod","+x","/test"] RUN ["/test"] RUN [ "$(cat /testfile)" = 'test!' ]` ctx, err := fakeContext(dockerfile, map[string]string{ "test": "#!/bin/sh\necho 'test!' 
> /testfile", }) if err != nil { t.Fatal(err) } defer ctx.Close() _, err = buildImageFromContext(name, ctx, true) if err != nil { t.Fatal(err) } logDone("build - add and run script") } func TestBuildAddTar(t *testing.T) { name := "testbuildaddtar" defer deleteImages(name) ctx := func() *FakeContext { dockerfile := ` FROM busybox ADD test.tar / RUN cat /test/foo | grep Hi ADD test.tar /test.tar RUN cat /test.tar/test/foo | grep Hi ADD test.tar /unlikely-to-exist RUN cat /unlikely-to-exist/test/foo | grep Hi ADD test.tar /unlikely-to-exist-trailing-slash/ RUN cat /unlikely-to-exist-trailing-slash/test/foo | grep Hi RUN mkdir /existing-directory ADD test.tar /existing-directory RUN cat /existing-directory/test/foo | grep Hi ADD test.tar /existing-directory-trailing-slash/ RUN cat /existing-directory-trailing-slash/test/foo | grep Hi` tmpDir, err := ioutil.TempDir("", "fake-context") testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) if err != nil { t.Fatalf("failed to create test.tar archive: %v", err) } defer testTar.Close() tw := tar.NewWriter(testTar) if err := tw.WriteHeader(&tar.Header{ Name: "test/foo", Size: 2, }); err != nil { t.Fatalf("failed to write tar file header: %v", err) } if _, err := tw.Write([]byte("Hi")); err != nil { t.Fatalf("failed to write tar file content: %v", err) } if err := tw.Close(); err != nil { t.Fatalf("failed to close tar archive: %v", err) } if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { t.Fatalf("failed to open destination dockerfile: %v", err) } return fakeContextFromDir(tmpDir) }() defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatalf("build failed to complete for TestBuildAddTar: %v", err) } logDone("build - ADD tar") } func TestBuildAddTarXz(t *testing.T) { name := "testbuildaddtarxz" defer deleteImages(name) ctx := func() *FakeContext { dockerfile := ` FROM busybox ADD test.tar.xz / RUN cat /test/foo | grep Hi` tmpDir, err := ioutil.TempDir("", "fake-context") testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) if err != nil { t.Fatalf("failed to create test.tar archive: %v", err) } defer testTar.Close() tw := tar.NewWriter(testTar) if err := tw.WriteHeader(&tar.Header{ Name: "test/foo", Size: 2, }); err != nil { t.Fatalf("failed to write tar file header: %v", err) } if _, err := tw.Write([]byte("Hi")); err != nil { t.Fatalf("failed to write tar file content: %v", err) } if err := tw.Close(); err != nil { t.Fatalf("failed to close tar archive: %v", err) } xzCompressCmd := exec.Command("xz", "-k", "test.tar") xzCompressCmd.Dir = tmpDir out, _, err := runCommandWithOutput(xzCompressCmd) if err != nil { t.Fatal(err, out) } if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { t.Fatalf("failed to open destination dockerfile: %v", err) } return fakeContextFromDir(tmpDir) }() defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) } logDone("build - ADD tar.xz") } func TestBuildAddTarXzGz(t *testing.T) { name := "testbuildaddtarxzgz" defer deleteImages(name) ctx := func() *FakeContext { dockerfile := ` FROM busybox ADD test.tar.xz.gz / RUN ls /test.tar.xz.gz` tmpDir, err := ioutil.TempDir("", "fake-context") testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) if err != nil { t.Fatalf("failed to create test.tar archive: %v", err) } defer testTar.Close() tw := tar.NewWriter(testTar) if err := 
tw.WriteHeader(&tar.Header{ Name: "test/foo", Size: 2, }); err != nil { t.Fatalf("failed to write tar file header: %v", err) } if _, err := tw.Write([]byte("Hi")); err != nil { t.Fatalf("failed to write tar file content: %v", err) } if err := tw.Close(); err != nil { t.Fatalf("failed to close tar archive: %v", err) } xzCompressCmd := exec.Command("xz", "-k", "test.tar") xzCompressCmd.Dir = tmpDir out, _, err := runCommandWithOutput(xzCompressCmd) if err != nil { t.Fatal(err, out) } gzipCompressCmd := exec.Command("gzip", "test.tar.xz") gzipCompressCmd.Dir = tmpDir out, _, err = runCommandWithOutput(gzipCompressCmd) if err != nil { t.Fatal(err, out) } if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { t.Fatalf("failed to open destination dockerfile: %v", err) } return fakeContextFromDir(tmpDir) }() defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) } logDone("build - ADD tar.xz.gz") } func TestBuildFromGIT(t *testing.T) { name := "testbuildfromgit" defer deleteImages(name) git, err := fakeGIT("repo", map[string]string{ "Dockerfile": `FROM busybox ADD first /first RUN [ -f /first ] MAINTAINER docker`, "first": "test git data", }, true) if err != nil { t.Fatal(err) } defer git.Close() _, err = buildImageFromPath(name, git.RepoURL, true) if err != nil { t.Fatal(err) } res, err := inspectField(name, "Author") if err != nil { t.Fatal(err) } if res != "docker" { t.Fatalf("Maintainer should be docker, got %s", res) } logDone("build - build from GIT") } func TestBuildCleanupCmdOnEntrypoint(t *testing.T) { name := "testbuildcmdcleanuponentrypoint" defer deleteImages(name) if _, err := buildImage(name, `FROM scratch CMD ["test"] ENTRYPOINT ["echo"]`, true); err != nil { t.Fatal(err) } if _, err := buildImage(name, fmt.Sprintf(`FROM %s ENTRYPOINT ["cat"]`, name), true); err != nil { t.Fatal(err) } res, err := inspectField(name, "Config.Cmd") if err != nil { t.Fatal(err) } if expected := ""; res != expected { t.Fatalf("Cmd %s, expected %s", res, expected) } res, err = inspectField(name, "Config.Entrypoint") if err != nil { t.Fatal(err) } if expected := "[cat]"; res != expected { t.Fatalf("Entrypoint %s, expected %s", res, expected) } logDone("build - cleanup cmd on ENTRYPOINT") } func TestBuildClearCmd(t *testing.T) { name := "testbuildclearcmd" defer deleteImages(name) _, err := buildImage(name, `From scratch ENTRYPOINT ["/bin/bash"] CMD []`, true) if err != nil { t.Fatal(err) } res, err := inspectFieldJSON(name, "Config.Cmd") if err != nil { t.Fatal(err) } if res != "[]" { t.Fatalf("Cmd %s, expected %s", res, "[]") } logDone("build - clearcmd") } func TestBuildEmptyCmd(t *testing.T) { name := "testbuildemptycmd" defer deleteImages(name) if _, err := buildImage(name, "FROM scratch\nMAINTAINER quux\n", true); err != nil { t.Fatal(err) } res, err := inspectFieldJSON(name, "Config.Cmd") if err != nil { t.Fatal(err) } if res != "null" { t.Fatalf("Cmd %s, expected %s", res, "null") } logDone("build - empty cmd") } func TestBuildOnBuildOutput(t *testing.T) { name := "testbuildonbuildparent" defer deleteImages(name) if _, err := buildImage(name, "FROM busybox\nONBUILD RUN echo foo\n", true); err != nil { t.Fatal(err) } childname := "testbuildonbuildchild" defer deleteImages(childname) _, out, err := buildImageWithOut(name, "FROM "+name+"\nMAINTAINER quux\n", true) if err != nil { t.Fatal(err) } if !strings.Contains(out, "Trigger 0, RUN echo foo") { 
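// (Illustrative note, assuming the builder's usual progress output: each inherited ONBUILD
// instruction is replayed and echoed as "Trigger <n>, <instruction>" while the child image
// builds, so the absence of "Trigger 0, RUN echo foo" here means the trigger never fired.)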
t.Fatal("failed to find the ONBUILD output", out) } logDone("build - onbuild output") } func TestBuildInvalidTag(t *testing.T) { name := "abcd:" + makeRandomString(200) defer deleteImages(name) _, out, err := buildImageWithOut(name, "FROM scratch\nMAINTAINER quux\n", true) // if the error doesnt check for illegal tag name, or the image is built // then this should fail if !strings.Contains(out, "Illegal tag name") || strings.Contains(out, "Sending build context to Docker daemon") { t.Fatalf("failed to stop before building. Error: %s, Output: %s", err, out) } logDone("build - invalid tag") } func TestBuildCmdShDashC(t *testing.T) { name := "testbuildcmdshc" defer deleteImages(name) if _, err := buildImage(name, "FROM busybox\nCMD echo cmd\n", true); err != nil { t.Fatal(err) } res, err := inspectFieldJSON(name, "Config.Cmd") if err != nil { t.Fatal(err, res) } expected := `["/bin/sh","-c","echo cmd"]` if res != expected { t.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) } logDone("build - cmd should have sh -c for non-json") } func TestBuildCmdSpaces(t *testing.T) { // Test to make sure that when we strcat arrays we take into account // the arg separator to make sure ["echo","hi"] and ["echo hi"] don't // look the same name := "testbuildcmdspaces" defer deleteImages(name) var id1 string var id2 string var err error if id1, err = buildImage(name, "FROM busybox\nCMD [\"echo hi\"]\n", true); err != nil { t.Fatal(err) } if id2, err = buildImage(name, "FROM busybox\nCMD [\"echo\", \"hi\"]\n", true); err != nil { t.Fatal(err) } if id1 == id2 { t.Fatal("Should not have resulted in the same CMD") } // Now do the same with ENTRYPOINT if id1, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo hi\"]\n", true); err != nil { t.Fatal(err) } if id2, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo\", \"hi\"]\n", true); err != nil { t.Fatal(err) } if id1 == id2 { t.Fatal("Should not have resulted in the same ENTRYPOINT") } logDone("build - cmd with spaces") } func TestBuildCmdJSONNoShDashC(t *testing.T) { name := "testbuildcmdjson" defer deleteImages(name) if _, err := buildImage(name, "FROM busybox\nCMD [\"echo\", \"cmd\"]", true); err != nil { t.Fatal(err) } res, err := inspectFieldJSON(name, "Config.Cmd") if err != nil { t.Fatal(err, res) } expected := `["echo","cmd"]` if res != expected { t.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) } logDone("build - cmd should not have /bin/sh -c for json") } func TestBuildErrorInvalidInstruction(t *testing.T) { name := "testbuildignoreinvalidinstruction" defer deleteImages(name) out, _, err := buildImageWithOut(name, "FROM busybox\nfoo bar", true) if err == nil { t.Fatalf("Should have failed: %s", out) } logDone("build - error invalid Dockerfile instruction") } func TestBuildEntrypointInheritance(t *testing.T) { defer deleteImages("parent", "child") defer deleteAllContainers() if _, err := buildImage("parent", ` FROM busybox ENTRYPOINT exit 130 `, true); err != nil { t.Fatal(err) } status, _ := runCommand(exec.Command(dockerBinary, "run", "parent")) if status != 130 { t.Fatalf("expected exit code 130 but received %d", status) } if _, err := buildImage("child", ` FROM parent ENTRYPOINT exit 5 `, true); err != nil { t.Fatal(err) } status, _ = runCommand(exec.Command(dockerBinary, "run", "child")) if status != 5 { t.Fatalf("expected exit code 5 but received %d", status) } logDone("build - clear entrypoint") } func TestBuildEntrypointInheritanceInspect(t *testing.T) { var ( name = "testbuildepinherit" name2 = 
"testbuildepinherit2" expected = `["/bin/sh","-c","echo quux"]` ) defer deleteImages(name, name2) defer deleteAllContainers() if _, err := buildImage(name, "FROM busybox\nENTRYPOINT /foo/bar", true); err != nil { t.Fatal(err) } if _, err := buildImage(name2, fmt.Sprintf("FROM %s\nENTRYPOINT echo quux", name), true); err != nil { t.Fatal(err) } res, err := inspectFieldJSON(name2, "Config.Entrypoint") if err != nil { t.Fatal(err, res) } if res != expected { t.Fatalf("Expected value %s not in Config.Entrypoint: %s", expected, res) } out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-t", name2)) if err != nil { t.Fatal(err, out) } expected = "quux" if strings.TrimSpace(out) != expected { t.Fatalf("Expected output is %s, got %s", expected, out) } logDone("build - entrypoint override inheritance properly") } func TestBuildRunShEntrypoint(t *testing.T) { name := "testbuildentrypoint" defer deleteImages(name) _, err := buildImage(name, `FROM busybox ENTRYPOINT /bin/echo`, true) if err != nil { t.Fatal(err) } out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--rm", name)) if err != nil { t.Fatal(err, out) } logDone("build - entrypoint with /bin/echo running successfully") } func TestBuildExoticShellInterpolation(t *testing.T) { name := "testbuildexoticshellinterpolation" defer deleteImages(name) _, err := buildImage(name, ` FROM busybox ENV SOME_VAR a.b.c RUN [ "$SOME_VAR" = 'a.b.c' ] RUN [ "${SOME_VAR}" = 'a.b.c' ] RUN [ "${SOME_VAR%.*}" = 'a.b' ] RUN [ "${SOME_VAR%%.*}" = 'a' ] RUN [ "${SOME_VAR#*.}" = 'b.c' ] RUN [ "${SOME_VAR##*.}" = 'c' ] RUN [ "${SOME_VAR/c/d}" = 'a.b.d' ] RUN [ "${#SOME_VAR}" = '5' ] RUN [ "${SOME_UNSET_VAR:-$SOME_VAR}" = 'a.b.c' ] RUN [ "${SOME_VAR:+Version: ${SOME_VAR}}" = 'Version: a.b.c' ] RUN [ "${SOME_UNSET_VAR:+${SOME_VAR}}" = '' ] RUN [ "${SOME_UNSET_VAR:-${SOME_VAR:-d.e.f}}" = 'a.b.c' ] `, false) if err != nil { t.Fatal(err) } logDone("build - exotic shell interpolation") } func TestBuildVerifySingleQuoteFails(t *testing.T) { // This testcase is supposed to generate an error because the // JSON array we're passing in on the CMD uses single quotes instead // of double quotes (per the JSON spec). This means we interpret it // as a "string" insead of "JSON array" and pass it on to "sh -c" and // it should barf on it. 
name := "testbuildsinglequotefails" defer deleteImages(name) defer deleteAllContainers() _, err := buildImage(name, `FROM busybox CMD [ '/bin/sh', '-c', 'echo hi' ]`, true) _, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "--rm", name)) if err == nil { t.Fatal("The image was not supposed to be able to run") } logDone("build - verify single quotes break the build") } func TestBuildVerboseOut(t *testing.T) { name := "testbuildverboseout" defer deleteImages(name) _, out, err := buildImageWithOut(name, `FROM busybox RUN echo 123`, false) if err != nil { t.Fatal(err) } if !strings.Contains(out, "\n123\n") { t.Fatalf("Output should contain %q: %q", "123", out) } logDone("build - verbose output from commands") } func TestBuildWithTabs(t *testing.T) { name := "testbuildwithtabs" defer deleteImages(name) _, err := buildImage(name, "FROM busybox\nRUN echo\tone\t\ttwo", true) if err != nil { t.Fatal(err) } res, err := inspectFieldJSON(name, "ContainerConfig.Cmd") if err != nil { t.Fatal(err) } expected1 := `["/bin/sh","-c","echo\tone\t\ttwo"]` expected2 := `["/bin/sh","-c","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates if res != expected1 && res != expected2 { t.Fatalf("Missing tabs.\nGot: %s\nExp: %s or %s", res, expected1, expected2) } logDone("build - with tabs") } func TestBuildLabels(t *testing.T) { name := "testbuildlabel" expected := `{"License":"GPL","Vendor":"Acme"}` defer deleteImages(name) _, err := buildImage(name, `FROM busybox LABEL Vendor=Acme LABEL License GPL`, true) if err != nil { t.Fatal(err) } res, err := inspectFieldJSON(name, "Config.Labels") if err != nil { t.Fatal(err) } if res != expected { t.Fatalf("Labels %s, expected %s", res, expected) } logDone("build - label") } func TestBuildLabelsCache(t *testing.T) { name := "testbuildlabelcache" defer deleteImages(name) id1, err := buildImage(name, `FROM busybox LABEL Vendor=Acme`, false) if err != nil { t.Fatalf("Build 1 should have worked: %v", err) } id2, err := buildImage(name, `FROM busybox LABEL Vendor=Acme`, true) if err != nil || id1 != id2 { t.Fatalf("Build 2 should have worked & used cache(%s,%s): %v", id1, id2, err) } id2, err = buildImage(name, `FROM busybox LABEL Vendor=Acme1`, true) if err != nil || id1 == id2 { t.Fatalf("Build 3 should have worked & NOT used cache(%s,%s): %v", id1, id2, err) } id2, err = buildImage(name, `FROM busybox LABEL Vendor Acme`, true) // Note: " " and "=" should be same if err != nil || id1 != id2 { t.Fatalf("Build 4 should have worked & used cache(%s,%s): %v", id1, id2, err) } // Now make sure the cache isn't used by mistake id1, err = buildImage(name, `FROM busybox LABEL f1=b1 f2=b2`, false) if err != nil { t.Fatalf("Build 5 should have worked: %q", err) } id2, err = buildImage(name, `FROM busybox LABEL f1="b1 f2=b2"`, true) if err != nil || id1 == id2 { t.Fatalf("Build 6 should have worked & NOT used the cache(%s,%s): %q", id1, id2, err) } logDone("build - label cache") } func TestBuildStderr(t *testing.T) { // This test just makes sure that no non-error output goes // to stderr name := "testbuildstderr" defer deleteImages(name) _, _, stderr, err := buildImageWithStdoutStderr(name, "FROM busybox\nRUN echo one", true) if err != nil { t.Fatal(err) } if runtime.GOOS == "windows" { // stderr might contain a security warning on windows lines := strings.Split(stderr, "\n") for _, v := range lines { if v != "" && !strings.Contains(v, "SECURITY WARNING:") { t.Fatalf("Stderr contains unexpected output line: %q", v) } } } else { if stderr 
!= "" { t.Fatalf("Stderr should have been empty, instead its: %q", stderr) } } logDone("build - testing stderr") } func TestBuildChownSingleFile(t *testing.T) { testRequires(t, UnixCli) // test uses chown: not available on windows name := "testbuildchownsinglefile" defer deleteImages(name) ctx, err := fakeContext(` FROM busybox COPY test / RUN ls -l /test RUN [ $(ls -l /test | awk '{print $3":"$4}') = 'root:root' ] `, map[string]string{ "test": "test", }) if err != nil { t.Fatal(err) } defer ctx.Close() if err := os.Chown(filepath.Join(ctx.Dir, "test"), 4242, 4242); err != nil { t.Fatal(err) } if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - change permission on single file") } func TestBuildSymlinkBreakout(t *testing.T) { name := "testbuildsymlinkbreakout" tmpdir, err := ioutil.TempDir("", name) if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) ctx := filepath.Join(tmpdir, "context") if err := os.MkdirAll(ctx, 0755); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte(` from busybox add symlink.tar / add inject /symlink/ `), 0644); err != nil { t.Fatal(err) } inject := filepath.Join(ctx, "inject") if err := ioutil.WriteFile(inject, nil, 0644); err != nil { t.Fatal(err) } f, err := os.Create(filepath.Join(ctx, "symlink.tar")) if err != nil { t.Fatal(err) } w := tar.NewWriter(f) w.WriteHeader(&tar.Header{ Name: "symlink2", Typeflag: tar.TypeSymlink, Linkname: "/../../../../../../../../../../../../../../", Uid: os.Getuid(), Gid: os.Getgid(), }) w.WriteHeader(&tar.Header{ Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: filepath.Join("symlink2", tmpdir), Uid: os.Getuid(), Gid: os.Getgid(), }) w.Close() f.Close() if _, err := buildImageFromContext(name, fakeContextFromDir(ctx), false); err != nil { t.Fatal(err) } if _, err := os.Lstat(filepath.Join(tmpdir, "inject")); err == nil { t.Fatal("symlink breakout - inject") } else if !os.IsNotExist(err) { t.Fatalf("unexpected error: %v", err) } logDone("build - symlink breakout") } func TestBuildXZHost(t *testing.T) { name := "testbuildxzhost" defer deleteImages(name) ctx, err := fakeContext(` FROM busybox ADD xz /usr/local/sbin/ RUN chmod 755 /usr/local/sbin/xz ADD test.xz / RUN [ ! 
-e /injected ]`, map[string]string{ "test.xz": "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00" + "\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x3f\xfd" + "\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21", "xz": "#!/bin/sh\ntouch /injected", }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { t.Fatal(err) } logDone("build - xz host is being used") } func TestBuildVolumesRetainContents(t *testing.T) { var ( name = "testbuildvolumescontent" expected = "some text" ) defer deleteImages(name) ctx, err := fakeContext(` FROM busybox COPY content /foo/file VOLUME /foo CMD cat /foo/file`, map[string]string{ "content": expected, }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, false); err != nil { t.Fatal(err) } out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--rm", name)) if err != nil { t.Fatal(err) } if out != expected { t.Fatalf("expected file contents for /foo/file to be %q but received %q", expected, out) } logDone("build - volumes retain contents in build") } func TestBuildRenamedDockerfile(t *testing.T) { defer deleteAllContainers() ctx, err := fakeContext(`FROM busybox RUN echo from Dockerfile`, map[string]string{ "Dockerfile": "FROM busybox\nRUN echo from Dockerfile", "files/Dockerfile": "FROM busybox\nRUN echo from files/Dockerfile", "files/dFile": "FROM busybox\nRUN echo from files/dFile", "dFile": "FROM busybox\nRUN echo from dFile", "files/dFile2": "FROM busybox\nRUN echo from files/dFile2", }) defer ctx.Close() if err != nil { t.Fatal(err) } out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", "test1", ".") if err != nil { t.Fatalf("Failed to build: %s\n%s", out, err) } if !strings.Contains(out, "from Dockerfile") { t.Fatalf("test1 should have used Dockerfile, output:%s", out) } out, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-f", filepath.Join("files", "Dockerfile"), "-t", "test2", ".") if err != nil { t.Fatal(err) } if !strings.Contains(out, "from files/Dockerfile") { t.Fatalf("test2 should have used files/Dockerfile, output:%s", out) } out, _, err = dockerCmdInDir(t, ctx.Dir, "build", fmt.Sprintf("--file=%s", filepath.Join("files", "dFile")), "-t", "test3", ".") if err != nil { t.Fatal(err) } if !strings.Contains(out, "from files/dFile") { t.Fatalf("test3 should have used files/dFile, output:%s", out) } out, _, err = dockerCmdInDir(t, ctx.Dir, "build", "--file=dFile", "-t", "test4", ".") if err != nil { t.Fatal(err) } if !strings.Contains(out, "from dFile") { t.Fatalf("test4 should have used dFile, output:%s", out) } dirWithNoDockerfile, _ := ioutil.TempDir(os.TempDir(), "test5") nonDockerfileFile := filepath.Join(dirWithNoDockerfile, "notDockerfile") if _, err = os.Create(nonDockerfileFile); err != nil { t.Fatal(err) } out, _, err = dockerCmdInDir(t, ctx.Dir, "build", fmt.Sprintf("--file=%s", nonDockerfileFile), "-t", "test5", ".") if err == nil { t.Fatalf("test5 was supposed to fail to find the Dockerfile") } if expected := fmt.Sprintf("The Dockerfile (%s) must be within the build context (.)", strings.Replace(nonDockerfileFile, `\`, `\\`, -1)); !strings.Contains(out, expected) { t.Fatalf("wrong error message:%v\nexpected to contain=%v", out, expected) } out, _, err = dockerCmdInDir(t, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test6", "..") if err != nil { t.Fatalf("test6 failed: %s", err) } if !strings.Contains(out, "from Dockerfile") { t.Fatalf("test6 should have used root 
Dockerfile, output:%s", out) } out, _, err = dockerCmdInDir(t, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join(ctx.Dir, "files", "Dockerfile"), "-t", "test7", "..") if err != nil { t.Fatalf("test7 failed: %s", err) } if !strings.Contains(out, "from files/Dockerfile") { t.Fatalf("test7 should have used files Dockerfile, output:%s", out) } out, _, err = dockerCmdInDir(t, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test8", ".") if err == nil || !strings.Contains(out, "must be within the build context") { t.Fatalf("test8 should have failed with Dockerfile out of context: %s", err) } tmpDir := os.TempDir() out, _, err = dockerCmdInDir(t, tmpDir, "build", "-t", "test9", ctx.Dir) if err != nil { t.Fatalf("test9 - failed: %s", err) } if !strings.Contains(out, "from Dockerfile") { t.Fatalf("test9 should have used root Dockerfile, output:%s", out) } out, _, err = dockerCmdInDir(t, filepath.Join(ctx.Dir, "files"), "build", "-f", "dFile2", "-t", "test10", ".") if err != nil { t.Fatalf("test10 should have worked: %s", err) } if !strings.Contains(out, "from files/dFile2") { t.Fatalf("test10 should have used files/dFile2, output:%s", out) } logDone("build - rename dockerfile") } func TestBuildFromMixedcaseDockerfile(t *testing.T) { testRequires(t, UnixCli) // Dockerfile overwrites dockerfile on windows defer deleteImages("test1") ctx, err := fakeContext(`FROM busybox RUN echo from dockerfile`, map[string]string{ "dockerfile": "FROM busybox\nRUN echo from dockerfile", }) defer ctx.Close() if err != nil { t.Fatal(err) } out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", "test1", ".") if err != nil { t.Fatalf("Failed to build: %s\n%s", out, err) } if !strings.Contains(out, "from dockerfile") { t.Fatalf("Missing proper output: %s", out) } logDone("build - mixedcase Dockerfile") } func TestBuildWithTwoDockerfiles(t *testing.T) { testRequires(t, UnixCli) // Dockerfile overwrites dockerfile on windows defer deleteImages("test1") ctx, err := fakeContext(`FROM busybox RUN echo from Dockerfile`, map[string]string{ "dockerfile": "FROM busybox\nRUN echo from dockerfile", }) defer ctx.Close() if err != nil { t.Fatal(err) } out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", "test1", ".") if err != nil { t.Fatalf("Failed to build: %s\n%s", out, err) } if !strings.Contains(out, "from Dockerfile") { t.Fatalf("Missing proper output: %s", out) } logDone("build - two Dockerfiles") } func TestBuildFromURLWithF(t *testing.T) { defer deleteImages("test1") server, err := fakeStorage(map[string]string{"baz": `FROM busybox RUN echo from baz COPY * /tmp/ RUN find /tmp/`}) if err != nil { t.Fatal(err) } defer server.Close() ctx, err := fakeContext(`FROM busybox RUN echo from Dockerfile`, map[string]string{}) defer ctx.Close() if err != nil { t.Fatal(err) } // Make sure that -f is ignored and that we don't use the Dockerfile // that's in the current dir out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-f", "baz", "-t", "test1", server.URL()+"/baz") if err != nil { t.Fatalf("Failed to build: %s\n%s", out, err) } if !strings.Contains(out, "from baz") || strings.Contains(out, "/tmp/baz") || !strings.Contains(out, "/tmp/Dockerfile") { t.Fatalf("Missing proper output: %s", out) } logDone("build - from URL with -f") } func TestBuildFromStdinWithF(t *testing.T) { defer deleteImages("test1") ctx, err := fakeContext(`FROM busybox RUN echo from Dockerfile`, map[string]string{}) defer ctx.Close() if err != nil { t.Fatal(err) } // Make sure that -f is ignored and that we 
don't use the Dockerfile // that's in the current dir dockerCommand := exec.Command(dockerBinary, "build", "-f", "baz", "-t", "test1", "-") dockerCommand.Dir = ctx.Dir dockerCommand.Stdin = strings.NewReader(`FROM busybox RUN echo from baz COPY * /tmp/ RUN find /tmp/`) out, status, err := runCommandWithOutput(dockerCommand) if err != nil || status != 0 { t.Fatalf("Error building: %s", err) } if !strings.Contains(out, "from baz") || strings.Contains(out, "/tmp/baz") || !strings.Contains(out, "/tmp/Dockerfile") { t.Fatalf("Missing proper output: %s", out) } logDone("build - from stdin with -f") } func TestBuildFromOfficialNames(t *testing.T) { name := "testbuildfromofficial" fromNames := []string{ "busybox", "docker.io/busybox", "index.docker.io/busybox", "library/busybox", "docker.io/library/busybox", "index.docker.io/library/busybox", } for idx, fromName := range fromNames { imgName := fmt.Sprintf("%s%d", name, idx) _, err := buildImage(imgName, "FROM "+fromName, true) if err != nil { t.Errorf("Build failed using FROM %s: %s", fromName, err) } deleteImages(imgName) } logDone("build - from official names") } func TestBuildDockerfileOutsideContext(t *testing.T) { testRequires(t, UnixCli) // uses os.Symlink: not implemented in windows at the time of writing (go-1.4.2) name := "testbuilddockerfileoutsidecontext" tmpdir, err := ioutil.TempDir("", name) if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) ctx := filepath.Join(tmpdir, "context") if err := os.MkdirAll(ctx, 0755); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte("FROM scratch\nENV X Y"), 0644); err != nil { t.Fatal(err) } wd, err := os.Getwd() if err != nil { t.Fatal(err) } defer os.Chdir(wd) if err := os.Chdir(ctx); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(tmpdir, "outsideDockerfile"), []byte("FROM scratch\nENV x y"), 0644); err != nil { t.Fatal(err) } if err := os.Symlink(filepath.Join("..", "outsideDockerfile"), filepath.Join(ctx, "dockerfile1")); err != nil { t.Fatal(err) } if err := os.Symlink(filepath.Join(tmpdir, "outsideDockerfile"), filepath.Join(ctx, "dockerfile2")); err != nil { t.Fatal(err) } for _, dockerfilePath := range []string{ filepath.Join("..", "outsideDockerfile"), filepath.Join(ctx, "dockerfile1"), filepath.Join(ctx, "dockerfile2"), } { out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "build", "-t", name, "--no-cache", "-f", dockerfilePath, ".")) if err == nil { t.Fatalf("Expected error with %s. Out: %s", dockerfilePath, out) } if !strings.Contains(out, "must be within the build context") && !strings.Contains(out, "Cannot locate Dockerfile") { t.Fatalf("Unexpected error with %s. Out: %s", dockerfilePath, out) } deleteImages(name) } os.Chdir(tmpdir) // Path to Dockerfile should be resolved relative to working directory, not relative to context. // There is a Dockerfile in the context, but since there is no Dockerfile in the current directory, the following should fail out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "build", "-t", name, "--no-cache", "-f", "Dockerfile", ctx)) if err == nil { t.Fatalf("Expected error. 
Out: %s", out) } deleteImages(name) logDone("build - Dockerfile outside context") } func TestBuildSpaces(t *testing.T) { // Test to make sure that leading/trailing spaces on a command // doesn't change the error msg we get var ( err1 error err2 error ) name := "testspaces" defer deleteImages(name) ctx, err := fakeContext("FROM busybox\nCOPY\n", map[string]string{ "Dockerfile": "FROM busybox\nCOPY\n", }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err1 = buildImageFromContext(name, ctx, false); err1 == nil { t.Fatal("Build 1 was supposed to fail, but didn't") } ctx.Add("Dockerfile", "FROM busybox\nCOPY ") if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { t.Fatal("Build 2 was supposed to fail, but didn't") } removeLogTimestamps := func(s string) string { return regexp.MustCompile(`time="(.*?)"`).ReplaceAllString(s, `time=[TIMESTAMP]`) } // Skip over the times e1 := removeLogTimestamps(err1.Error()) e2 := removeLogTimestamps(err2.Error()) // Ignore whitespace since that's what were verifying doesn't change stuff if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { t.Fatalf("Build 2's error wasn't the same as build 1's\n1:%s\n2:%s", err1, err2) } ctx.Add("Dockerfile", "FROM busybox\n COPY") if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { t.Fatal("Build 3 was supposed to fail, but didn't") } // Skip over the times e1 = removeLogTimestamps(err1.Error()) e2 = removeLogTimestamps(err2.Error()) // Ignore whitespace since that's what were verifying doesn't change stuff if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { t.Fatalf("Build 3's error wasn't the same as build 1's\n1:%s\n3:%s", err1, err2) } ctx.Add("Dockerfile", "FROM busybox\n COPY ") if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { t.Fatal("Build 4 was supposed to fail, but didn't") } // Skip over the times e1 = removeLogTimestamps(err1.Error()) e2 = removeLogTimestamps(err2.Error()) // Ignore whitespace since that's what were verifying doesn't change stuff if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { t.Fatalf("Build 4's error wasn't the same as build 1's\n1:%s\n4:%s", err1, err2) } logDone("build - test spaces") } func TestBuildSpacesWithQuotes(t *testing.T) { // Test to make sure that spaces in quotes aren't lost name := "testspacesquotes" defer deleteImages(name) dockerfile := `FROM busybox RUN echo " \ foo "` _, out, err := buildImageWithOut(name, dockerfile, false) if err != nil { t.Fatal("Build failed:", err) } expecting := "\n foo \n" if !strings.Contains(out, expecting) { t.Fatalf("Bad output: %q expecting to contian %q", out, expecting) } logDone("build - test spaces with quotes") } // #4393 func TestBuildVolumeFileExistsinContainer(t *testing.T) { buildCmd := exec.Command(dockerBinary, "build", "-t", "docker-test-errcreatevolumewithfile", "-") buildCmd.Stdin = strings.NewReader(` FROM busybox RUN touch /foo VOLUME /foo `) out, _, err := runCommandWithOutput(buildCmd) if err == nil || !strings.Contains(out, "file exists") { t.Fatalf("expected build to fail when file exists in container at requested volume path") } logDone("build - errors when volume is specified where a file exists") } func TestBuildMissingArgs(t *testing.T) { // Test to make sure that all Dockerfile commands (except the ones listed // in skipCmds) will generate an error if no args are provided. // Note: INSERT is deprecated so we exclude it because of that. 
skipCmds := map[string]struct{}{ "CMD": {}, "RUN": {}, "ENTRYPOINT": {}, "INSERT": {}, } defer deleteAllContainers() for cmd := range command.Commands { cmd = strings.ToUpper(cmd) if _, ok := skipCmds[cmd]; ok { continue } var dockerfile string if cmd == "FROM" { dockerfile = cmd } else { // Add FROM to make sure we don't complain about it missing dockerfile = "FROM busybox\n" + cmd } ctx, err := fakeContext(dockerfile, map[string]string{}) if err != nil { t.Fatal(err) } defer ctx.Close() var out string if out, err = buildImageFromContext("args", ctx, true); err == nil { t.Fatalf("%s was supposed to fail. Out:%s", cmd, out) } if !strings.Contains(err.Error(), cmd+" requires") { t.Fatalf("%s returned the wrong type of error:%s", cmd, err) } } logDone("build - verify missing args") } func TestBuildEmptyScratch(t *testing.T) { defer deleteImages("sc") _, out, err := buildImageWithOut("sc", "FROM scratch", true) if err == nil { t.Fatalf("Build was supposed to fail") } if !strings.Contains(out, "No image was generated") { t.Fatalf("Wrong error message: %v", out) } logDone("build - empty scratch Dockerfile") } func TestBuildDotDotFile(t *testing.T) { defer deleteImages("sc") ctx, err := fakeContext("FROM busybox\n", map[string]string{ "..gitme": "", }) if err != nil { t.Fatal(err) } defer ctx.Close() if _, err = buildImageFromContext("sc", ctx, false); err != nil { t.Fatalf("Build was supposed to work: %s", err) } logDone("build - ..file") } func TestBuildNotVerbose(t *testing.T) { defer deleteAllContainers() defer deleteImages("verbose") ctx, err := fakeContext("FROM busybox\nENV abc=hi\nRUN echo $abc there", map[string]string{}) if err != nil { t.Fatal(err) } defer ctx.Close() // First do it w/verbose - baseline buildCmd := exec.Command(dockerBinary, "build", "--no-cache", "-t", "verbose", ".") buildCmd.Dir = ctx.Dir out, _, err := runCommandWithOutput(buildCmd) if err != nil { t.Fatalf("failed to build the image w/o -q: %s, %v", out, err) } if !strings.Contains(out, "hi there") { t.Fatalf("missing output:%s\n", out) } // Now do it w/o verbose buildCmd = exec.Command(dockerBinary, "build", "--no-cache", "-q", "-t", "verbose", ".") buildCmd.Dir = ctx.Dir out, _, err = runCommandWithOutput(buildCmd) if err != nil { t.Fatalf("failed to build the image w/ -q: %s, %v", out, err) } if strings.Contains(out, "hi there") { t.Fatalf("Bad output, should not contain 'hi there':%s", out) } logDone("build - not verbose") } func TestBuildRUNoneJSON(t *testing.T) { name := "testbuildrunonejson" defer deleteAllContainers() defer deleteImages(name) ctx, err := fakeContext(`FROM hello-world:frozen RUN [ "/hello" ]`, map[string]string{}) if err != nil { t.Fatal(err) } defer ctx.Close() buildCmd := exec.Command(dockerBinary, "build", "--no-cache", "-t", name, ".") buildCmd.Dir = ctx.Dir out, _, err := runCommandWithOutput(buildCmd) if err != nil { t.Fatalf("failed to build the image: %s, %v", out, err) } if !strings.Contains(out, "Hello from Docker") { t.Fatalf("bad output: %s", out) } logDone("build - RUN with one JSON arg") } func TestBuildResourceConstraintsAreUsed(t *testing.T) { name := "testbuildresourceconstraints" defer deleteAllContainers() defer deleteImages(name) ctx, err := fakeContext(` FROM hello-world:frozen RUN ["/hello"] `, map[string]string{}) if err != nil { t.Fatal(err) } cmd := exec.Command(dockerBinary, "build", "--rm=false", "--memory=64m", "--memory-swap=-1", "--cpuset-cpus=1", "--cpu-shares=100", "-t", name, ".") cmd.Dir = ctx.Dir out, _, err := runCommandWithOutput(cmd) if err != nil { 
t.Fatal(err, out) } out, _, err = dockerCmd(t, "ps", "-lq") if err != nil { t.Fatal(err, out) } cID := stripTrailingCharacters(out) type hostConfig struct { Memory float64 // Use float64 here since the json decoder sees it that way MemorySwap int CpusetCpus string CpuShares int } cfg, err := inspectFieldJSON(cID, "HostConfig") if err != nil { t.Fatal(err) } var c1 hostConfig if err := json.Unmarshal([]byte(cfg), &c1); err != nil { t.Fatal(err, cfg) } mem := int64(c1.Memory) if mem != 67108864 || c1.MemorySwap != -1 || c1.CpusetCpus != "1" || c1.CpuShares != 100 { t.Fatalf("resource constraints not set properly:\nMemory: %d, MemSwap: %d, CpusetCpus: %s, CpuShares: %d", mem, c1.MemorySwap, c1.CpusetCpus, c1.CpuShares) } // Make sure constraints aren't saved to image _, _, err = dockerCmd(t, "run", "--name=test", name) if err != nil { t.Fatal(err) } cfg, err = inspectFieldJSON("test", "HostConfig") if err != nil { t.Fatal(err) } var c2 hostConfig if err := json.Unmarshal([]byte(cfg), &c2); err != nil { t.Fatal(err, cfg) } mem = int64(c2.Memory) if mem == 67108864 || c2.MemorySwap == -1 || c2.CpusetCpus == "1" || c2.CpuShares == 100 { t.Fatalf("resource constraints leaked from build:\nMemory: %d, MemSwap: %d, CpusetCpus: %s, CpuShares: %d", mem, c2.MemorySwap, c2.CpusetCpus, c2.CpuShares) } logDone("build - resource constraints applied") } func TestBuildEmptyStringVolume(t *testing.T) { name := "testbuildemptystringvolume" defer deleteImages(name) _, err := buildImage(name, ` FROM busybox ENV foo="" VOLUME $foo `, false) if err == nil { t.Fatal("Should have failed to build") } logDone("build - empty string volume") } docker-1.6.2/integration-cli/docker_cli_run_unix_test.go0000644000175000017500000001134612524223634023044 0ustar tianontianon// +build !windows package main import ( "fmt" "io/ioutil" "os" "os/exec" "path" "path/filepath" "strings" "testing" "time" "github.com/docker/docker/pkg/mount" "github.com/kr/pty" ) // #6509 func TestRunRedirectStdout(t *testing.T) { defer deleteAllContainers() checkRedirect := func(command string) { _, tty, err := pty.Open() if err != nil { t.Fatalf("Could not open pty: %v", err) } cmd := exec.Command("sh", "-c", command) cmd.Stdin = tty cmd.Stdout = tty cmd.Stderr = tty ch := make(chan struct{}) if err := cmd.Start(); err != nil { t.Fatalf("start err: %v", err) } go func() { if err := cmd.Wait(); err != nil { t.Fatalf("wait err=%v", err) } close(ch) }() select { case <-time.After(10 * time.Second): t.Fatal("command timeout") case <-ch: } } checkRedirect(dockerBinary + " run -i busybox cat /etc/passwd | grep -q root") checkRedirect(dockerBinary + " run busybox cat /etc/passwd | grep -q root") logDone("run - redirect stdout") } // Test recursive bind mount works by default func TestRunWithVolumesIsRecursive(t *testing.T) { defer deleteAllContainers() tmpDir, err := ioutil.TempDir("", "docker_recursive_mount_test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) // Create a temporary tmpfs mount. 
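	// Illustrative sketch (added for clarity, not part of the original test):
	// the setup below nests a tmpfs inside the directory that will be bind
	// mounted, roughly:
	//
	//	mount.Mount("tmpfs", filepath.Join(tmpDir, "tmpfs"), "tmpfs", "") // inner mount
	//	// docker run -v <tmpDir>:/tmp:ro busybox ls /tmp/tmpfs           // outer bind mount
	//
	// With a recursive bind mount (rbind), the file created on the inner
	// tmpfs is visible at /tmp/tmpfs inside the container; with a plain bind
	// it would show up as an empty directory.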
tmpfsDir := filepath.Join(tmpDir, "tmpfs")
	if err := os.MkdirAll(tmpfsDir, 0777); err != nil {
		t.Fatalf("failed to mkdir at %s - %s", tmpfsDir, err)
	}
	if err := mount.Mount("tmpfs", tmpfsDir, "tmpfs", ""); err != nil {
		t.Fatalf("failed to create a tmpfs mount at %s - %s", tmpfsDir, err)
	}

	f, err := ioutil.TempFile(tmpfsDir, "touch-me")
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", fmt.Sprintf("%s:/tmp:ro", tmpDir), "busybox:latest", "ls", "/tmp/tmpfs")
	out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd)
	if err != nil && exitCode != 0 {
		t.Fatal(out, stderr, err)
	}
	if !strings.Contains(out, filepath.Base(f.Name())) {
		t.Fatal("Recursive bind mount test failed. Expected file not found")
	}

	logDone("run - volumes are bind mounted recursively")
}

func TestRunWithUlimits(t *testing.T) {
	testRequires(t, NativeExecDriver)
	defer deleteAllContainers()

	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--name=testulimits", "--ulimit", "nofile=42", "busybox", "/bin/sh", "-c", "ulimit -n"))
	if err != nil {
		t.Fatal(err, out)
	}

	ul := strings.TrimSpace(out)
	if ul != "42" {
		t.Fatalf("expected `ulimit -n` to be 42, got %s", ul)
	}

	logDone("run - ulimits are set")
}

func TestRunContainerWithCgroupParent(t *testing.T) {
	testRequires(t, NativeExecDriver)
	defer deleteAllContainers()

	cgroupParent := "test"
	data, err := ioutil.ReadFile("/proc/self/cgroup")
	if err != nil {
		t.Fatalf("failed to read /proc/self/cgroup - %v", err)
	}
	selfCgroupPaths := parseCgroupPaths(string(data))
	selfMemoryCgroup, found := selfCgroupPaths["memory"]
	if !found {
		t.Fatalf("unable to find self memory cgroup path. CgroupsPath: %v", selfCgroupPaths)
	}

	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--cgroup-parent", cgroupParent, "--rm", "busybox", "cat", "/proc/self/cgroup"))
	if err != nil {
		t.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
	}
	cgroupPaths := parseCgroupPaths(string(out))
	if len(cgroupPaths) == 0 {
		t.Fatalf("unexpected output - %q", string(out))
	}
	found = false
	expectedCgroupPrefix := path.Join(selfMemoryCgroup, cgroupParent)
	for _, path := range cgroupPaths {
		if strings.HasPrefix(path, expectedCgroupPrefix) {
			found = true
			break
		}
	}
	if !found {
		t.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have prefix %q. Cgroup Paths: %v", expectedCgroupPrefix, cgroupPaths)
	}
	logDone("run - cgroup parent")
}

func TestRunContainerWithCgroupParentAbsPath(t *testing.T) {
	testRequires(t, NativeExecDriver)
	defer deleteAllContainers()

	cgroupParent := "/cgroup-parent/test"
	out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--cgroup-parent", cgroupParent, "--rm", "busybox", "cat", "/proc/self/cgroup"))
	if err != nil {
		t.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err)
	}
	cgroupPaths := parseCgroupPaths(string(out))
	if len(cgroupPaths) == 0 {
		t.Fatalf("unexpected output - %q", string(out))
	}
	found := false
	for _, path := range cgroupPaths {
		if strings.HasPrefix(path, cgroupParent) {
			found = true
			break
		}
	}
	if !found {
		t.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have prefix %q.
Cgroup Paths: %v", cgroupParent, cgroupPaths) } logDone("run - cgroup parent with absolute cgroup path") } docker-1.6.2/integration-cli/docker_cli_inspect_test.go0000644000175000017500000000117412524223634022640 0ustar tianontianonpackage main import ( "os/exec" "strings" "testing" ) func TestInspectImage(t *testing.T) { imageTest := "emptyfs" imageTestID := "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" imagesCmd := exec.Command(dockerBinary, "inspect", "--format='{{.Id}}'", imageTest) out, exitCode, err := runCommandWithOutput(imagesCmd) if exitCode != 0 || err != nil { t.Fatalf("failed to inspect image: %s, %v", out, err) } if id := strings.TrimSuffix(out, "\n"); id != imageTestID { t.Fatalf("Expected id: %s for image: %s but received id: %s", imageTestID, imageTest, id) } logDone("inspect - inspect an image") } docker-1.6.2/integration-cli/docker_cli_commit_test.go0000644000175000017500000001744612524223634022474 0ustar tianontianonpackage main import ( "os/exec" "strings" "testing" ) func TestCommitAfterContainerIsDone(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("failed to run container: %s, %v", out, err) } cleanedContainerID := stripTrailingCharacters(out) waitCmd := exec.Command(dockerBinary, "wait", cleanedContainerID) if _, _, err = runCommandWithOutput(waitCmd); err != nil { t.Fatalf("error thrown while waiting for container: %s, %v", out, err) } commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID) out, _, err = runCommandWithOutput(commitCmd) if err != nil { t.Fatalf("failed to commit container to image: %s, %v", out, err) } cleanedImageID := stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedImageID) if out, _, err = runCommandWithOutput(inspectCmd); err != nil { t.Fatalf("failed to inspect image: %s, %v", out, err) } deleteContainer(cleanedContainerID) deleteImages(cleanedImageID) logDone("commit - echo foo and commit the image") } func TestCommitWithoutPause(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("failed to run container: %s, %v", out, err) } cleanedContainerID := stripTrailingCharacters(out) waitCmd := exec.Command(dockerBinary, "wait", cleanedContainerID) if _, _, err = runCommandWithOutput(waitCmd); err != nil { t.Fatalf("error thrown while waiting for container: %s, %v", out, err) } commitCmd := exec.Command(dockerBinary, "commit", "-p=false", cleanedContainerID) out, _, err = runCommandWithOutput(commitCmd) if err != nil { t.Fatalf("failed to commit container to image: %s, %v", out, err) } cleanedImageID := stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedImageID) if out, _, err = runCommandWithOutput(inspectCmd); err != nil { t.Fatalf("failed to inspect image: %s, %v", out, err) } deleteContainer(cleanedContainerID) deleteImages(cleanedImageID) logDone("commit - echo foo and commit the image with --pause=false") } //test commit a paused container should not unpause it after commit func TestCommitPausedContainer(t *testing.T) { defer deleteAllContainers() defer unpauseAllContainers() cmd := exec.Command(dockerBinary, "run", "-i", "-d", "busybox") out, _, _, err := runCommandWithStdoutStderr(cmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) } 
cleanedContainerID := stripTrailingCharacters(out) cmd = exec.Command(dockerBinary, "pause", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(cmd) if err != nil { t.Fatalf("failed to pause container: %v, output: %q", err, out) } commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID) out, _, err = runCommandWithOutput(commitCmd) if err != nil { t.Fatalf("failed to commit container to image: %s, %v", out, err) } cleanedImageID := stripTrailingCharacters(out) defer deleteImages(cleanedImageID) cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.State.Paused}}", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(cmd) if err != nil { t.Fatalf("failed to inspect container: %v, output: %q", err, out) } if !strings.Contains(out, "true") { t.Fatalf("commit should not unpause a paused container") } logDone("commit - commit a paused container will not unpause it") } func TestCommitNewFile(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--name", "commiter", "busybox", "/bin/sh", "-c", "echo koye > /foo") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } cmd = exec.Command(dockerBinary, "commit", "commiter") imageID, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err) } imageID = strings.Trim(imageID, "\r\n") defer deleteImages(imageID) cmd = exec.Command(dockerBinary, "run", imageID, "cat", "/foo") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual != "koye" { t.Fatalf("expected output koye received %q", actual) } logDone("commit - commit file and read") } func TestCommitHardlink(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-t", "--name", "hardlinks", "busybox", "sh", "-c", "touch file1 && ln file1 file2 && ls -di file1 file2") firstOuput, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err) } chunks := strings.Split(strings.TrimSpace(firstOuput), " ") inode := chunks[0] found := false for _, chunk := range chunks[1:] { if chunk == inode { found = true break } } if !found { t.Fatalf("Failed to create hardlink in a container. Expected to find %q in %q", inode, chunks[1:]) } cmd = exec.Command(dockerBinary, "commit", "hardlinks", "hardlinks") imageID, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(imageID, err) } imageID = strings.Trim(imageID, "\r\n") defer deleteImages(imageID) cmd = exec.Command(dockerBinary, "run", "-t", "hardlinks", "ls", "-di", "file1", "file2") secondOuput, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err) } chunks = strings.Split(strings.TrimSpace(secondOuput), " ") inode = chunks[0] found = false for _, chunk := range chunks[1:] { if chunk == inode { found = true break } } if !found { t.Fatalf("Failed to create hardlink in a container. 
Expected to find %q in %q", inode, chunks[1:]) } logDone("commit - commit hardlinks") } func TestCommitTTY(t *testing.T) { defer deleteImages("ttytest") defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-t", "--name", "tty", "busybox", "/bin/ls") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } cmd = exec.Command(dockerBinary, "commit", "tty", "ttytest") imageID, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err) } imageID = strings.Trim(imageID, "\r\n") cmd = exec.Command(dockerBinary, "run", "ttytest", "/bin/ls") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } logDone("commit - commit tty") } func TestCommitWithHostBindMount(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--name", "bind-commit", "-v", "/dev/null:/winning", "busybox", "true") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } cmd = exec.Command(dockerBinary, "commit", "bind-commit", "bindtest") imageID, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(imageID, err) } imageID = strings.Trim(imageID, "\r\n") defer deleteImages(imageID) cmd = exec.Command(dockerBinary, "run", "bindtest", "true") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } logDone("commit - commit bind mounted file") } func TestCommitChange(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "--name", "test", "busybox", "true") if _, err := runCommand(cmd); err != nil { t.Fatal(err) } cmd = exec.Command(dockerBinary, "commit", "--change", "EXPOSE 8080", "--change", "ENV DEBUG true", "--change", "ENV test 1", "--change", "ENV PATH /foo", "test", "test-commit") imageId, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(imageId, err) } imageId = strings.Trim(imageId, "\r\n") defer deleteImages(imageId) expected := map[string]string{ "Config.ExposedPorts": "map[8080/tcp:map[]]", "Config.Env": "[DEBUG=true test=1 PATH=/foo]", } for conf, value := range expected { res, err := inspectField(imageId, conf) if err != nil { t.Errorf("failed to get value %s, error: %s", conf, err) } if res != value { t.Errorf("%s('%s'), expected %s", conf, res, value) } } logDone("commit - commit --change") } docker-1.6.2/integration-cli/docker_cli_by_digest_test.go0000644000175000017500000003540012524223634023143 0ustar tianontianonpackage main import ( "fmt" "os/exec" "regexp" "strings" "testing" "github.com/docker/docker/utils" ) var ( repoName = fmt.Sprintf("%v/dockercli/busybox-by-dgst", privateRegistryURL) digestRegex = regexp.MustCompile("Digest: ([^\n]+)") ) func setupImage() (string, error) { return setupImageWithTag("latest") } func setupImageWithTag(tag string) (string, error) { containerName := "busyboxbydigest" c := exec.Command(dockerBinary, "run", "-d", "-e", "digest=1", "--name", containerName, "busybox") if _, err := runCommand(c); err != nil { return "", err } // tag the image to upload it to the private registry repoAndTag := utils.ImageReference(repoName, tag) c = exec.Command(dockerBinary, "commit", containerName, repoAndTag) if out, _, err := runCommandWithOutput(c); err != nil { return "", fmt.Errorf("image tagging failed: %s, %v", out, err) } defer deleteImages(repoAndTag) // delete the container as we don't need it any more if err := deleteContainer(containerName); err != nil { return "", err } // push the image c = exec.Command(dockerBinary, "push", repoAndTag) out, _, err := runCommandWithOutput(c) if err != nil { return "", fmt.Errorf("pushing the image to the private registry has failed: %s, %v", 
out, err) } // delete our local repo that we previously tagged c = exec.Command(dockerBinary, "rmi", repoAndTag) if out, _, err := runCommandWithOutput(c); err != nil { return "", fmt.Errorf("error deleting images prior to real test: %s, %v", out, err) } // the push output includes "Digest: ", so find that matches := digestRegex.FindStringSubmatch(out) if len(matches) != 2 { return "", fmt.Errorf("unable to parse digest from push output: %s", out) } pushDigest := matches[1] return pushDigest, nil } func TestPullByTagDisplaysDigest(t *testing.T) { defer setupRegistry(t)() pushDigest, err := setupImage() if err != nil { t.Fatalf("error setting up image: %v", err) } // pull from the registry using the tag c := exec.Command(dockerBinary, "pull", repoName) out, _, err := runCommandWithOutput(c) if err != nil { t.Fatalf("error pulling by tag: %s, %v", out, err) } defer deleteImages(repoName) // the pull output includes "Digest: ", so find that matches := digestRegex.FindStringSubmatch(out) if len(matches) != 2 { t.Fatalf("unable to parse digest from pull output: %s", out) } pullDigest := matches[1] // make sure the pushed and pull digests match if pushDigest != pullDigest { t.Fatalf("push digest %q didn't match pull digest %q", pushDigest, pullDigest) } logDone("by_digest - pull by tag displays digest") } func TestPullByDigest(t *testing.T) { defer setupRegistry(t)() pushDigest, err := setupImage() if err != nil { t.Fatalf("error setting up image: %v", err) } // pull from the registry using the @ reference imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) c := exec.Command(dockerBinary, "pull", imageReference) out, _, err := runCommandWithOutput(c) if err != nil { t.Fatalf("error pulling by digest: %s, %v", out, err) } defer deleteImages(imageReference) // the pull output includes "Digest: ", so find that matches := digestRegex.FindStringSubmatch(out) if len(matches) != 2 { t.Fatalf("unable to parse digest from pull output: %s", out) } pullDigest := matches[1] // make sure the pushed and pull digests match if pushDigest != pullDigest { t.Fatalf("push digest %q didn't match pull digest %q", pushDigest, pullDigest) } logDone("by_digest - pull by digest") } func TestCreateByDigest(t *testing.T) { defer setupRegistry(t)() pushDigest, err := setupImage() if err != nil { t.Fatalf("error setting up image: %v", err) } imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) containerName := "createByDigest" c := exec.Command(dockerBinary, "create", "--name", containerName, imageReference) out, _, err := runCommandWithOutput(c) if err != nil { t.Fatalf("error creating by digest: %s, %v", out, err) } defer deleteContainer(containerName) res, err := inspectField(containerName, "Config.Image") if err != nil { t.Fatalf("failed to get Config.Image: %s, %v", out, err) } if res != imageReference { t.Fatalf("unexpected Config.Image: %s (expected %s)", res, imageReference) } logDone("by_digest - create by digest") } func TestRunByDigest(t *testing.T) { defer setupRegistry(t)() pushDigest, err := setupImage() if err != nil { t.Fatalf("error setting up image: %v", err) } imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) containerName := "runByDigest" c := exec.Command(dockerBinary, "run", "--name", containerName, imageReference, "sh", "-c", "echo found=$digest") out, _, err := runCommandWithOutput(c) if err != nil { t.Fatalf("error run by digest: %s, %v", out, err) } defer deleteContainer(containerName) foundRegex := regexp.MustCompile("found=([^\n]+)") matches := 
foundRegex.FindStringSubmatch(out) if len(matches) != 2 { t.Fatalf("error locating expected 'found=1' output: %s", out) } if matches[1] != "1" { t.Fatalf("Expected %q, got %q", "1", matches[1]) } res, err := inspectField(containerName, "Config.Image") if err != nil { t.Fatalf("failed to get Config.Image: %s, %v", out, err) } if res != imageReference { t.Fatalf("unexpected Config.Image: %s (expected %s)", res, imageReference) } logDone("by_digest - run by digest") } func TestRemoveImageByDigest(t *testing.T) { defer setupRegistry(t)() digest, err := setupImage() if err != nil { t.Fatalf("error setting up image: %v", err) } imageReference := fmt.Sprintf("%s@%s", repoName, digest) // pull from the registry using the @ reference c := exec.Command(dockerBinary, "pull", imageReference) out, _, err := runCommandWithOutput(c) if err != nil { t.Fatalf("error pulling by digest: %s, %v", out, err) } // make sure inspect runs ok if _, err := inspectField(imageReference, "Id"); err != nil { t.Fatalf("failed to inspect image: %v", err) } // do the delete if err := deleteImages(imageReference); err != nil { t.Fatalf("unexpected error deleting image: %v", err) } // try to inspect again - it should error this time if _, err := inspectField(imageReference, "Id"); err == nil { t.Fatalf("unexpected nil err trying to inspect what should be a non-existent image") } else if !strings.Contains(err.Error(), "No such image") { t.Fatalf("expected 'No such image' output, got %v", err) } logDone("by_digest - remove image by digest") } func TestBuildByDigest(t *testing.T) { defer setupRegistry(t)() digest, err := setupImage() if err != nil { t.Fatalf("error setting up image: %v", err) } imageReference := fmt.Sprintf("%s@%s", repoName, digest) // pull from the registry using the @ reference c := exec.Command(dockerBinary, "pull", imageReference) out, _, err := runCommandWithOutput(c) if err != nil { t.Fatalf("error pulling by digest: %s, %v", out, err) } // get the image id imageID, err := inspectField(imageReference, "Id") if err != nil { t.Fatalf("error getting image id: %v", err) } // do the build name := "buildbydigest" defer deleteImages(name) _, err = buildImage(name, fmt.Sprintf( `FROM %s CMD ["/bin/echo", "Hello World"]`, imageReference), true) if err != nil { t.Fatal(err) } // get the build's image id res, err := inspectField(name, "Config.Image") if err != nil { t.Fatal(err) } // make sure they match if res != imageID { t.Fatalf("Image %s, expected %s", res, imageID) } logDone("by_digest - build by digest") } func TestTagByDigest(t *testing.T) { defer setupRegistry(t)() digest, err := setupImage() if err != nil { t.Fatalf("error setting up image: %v", err) } imageReference := fmt.Sprintf("%s@%s", repoName, digest) // pull from the registry using the @ reference c := exec.Command(dockerBinary, "pull", imageReference) out, _, err := runCommandWithOutput(c) if err != nil { t.Fatalf("error pulling by digest: %s, %v", out, err) } // tag it tag := "tagbydigest" c = exec.Command(dockerBinary, "tag", imageReference, tag) if _, err := runCommand(c); err != nil { t.Fatalf("unexpected error tagging: %v", err) } expectedID, err := inspectField(imageReference, "Id") if err != nil { t.Fatalf("error getting original image id: %v", err) } tagID, err := inspectField(tag, "Id") if err != nil { t.Fatalf("error getting tagged image id: %v", err) } if tagID != expectedID { t.Fatalf("expected image id %q, got %q", expectedID, tagID) } logDone("by_digest - tag by digest") } func TestListImagesWithoutDigests(t *testing.T) { defer 
setupRegistry(t)()

	digest, err := setupImage()
	if err != nil {
		t.Fatalf("error setting up image: %v", err)
	}

	imageReference := fmt.Sprintf("%s@%s", repoName, digest)
	// pull from the registry using the @ reference
	c := exec.Command(dockerBinary, "pull", imageReference)
	out, _, err := runCommandWithOutput(c)
	if err != nil {
		t.Fatalf("error pulling by digest: %s, %v", out, err)
	}

	c = exec.Command(dockerBinary, "images")
	out, _, err = runCommandWithOutput(c)
	if err != nil {
		t.Fatalf("error listing images: %s, %v", out, err)
	}

	if strings.Contains(out, "DIGEST") {
		t.Fatalf("list output should not have contained DIGEST header: %s", out)
	}

	logDone("by_digest - list images - digest header not displayed by default")
}

func TestListImagesWithDigests(t *testing.T) {
	defer setupRegistry(t)()
	defer deleteImages(repoName+":tag1", repoName+":tag2")

	// setup image1
	digest1, err := setupImageWithTag("tag1")
	if err != nil {
		t.Fatalf("error setting up image: %v", err)
	}
	imageReference1 := fmt.Sprintf("%s@%s", repoName, digest1)
	defer deleteImages(imageReference1)
	t.Logf("imageReference1 = %s", imageReference1)

	// pull image1 by digest
	c := exec.Command(dockerBinary, "pull", imageReference1)
	out, _, err := runCommandWithOutput(c)
	if err != nil {
		t.Fatalf("error pulling by digest: %s, %v", out, err)
	}

	// list images
	c = exec.Command(dockerBinary, "images", "--digests")
	out, _, err = runCommandWithOutput(c)
	if err != nil {
		t.Fatalf("error listing images: %s, %v", out, err)
	}

	// make sure repo shown, tag=<none>, digest = $digest1
	re1 := regexp.MustCompile(`\s*` + repoName + `\s*<none>\s*` + digest1 + `\s`)
	if !re1.MatchString(out) {
		t.Fatalf("expected %q: %s", re1.String(), out)
	}

	// setup image2
	digest2, err := setupImageWithTag("tag2")
	if err != nil {
		t.Fatalf("error setting up image: %v", err)
	}
	imageReference2 := fmt.Sprintf("%s@%s", repoName, digest2)
	defer deleteImages(imageReference2)
	t.Logf("imageReference2 = %s", imageReference2)

	// pull image1 by digest
	c = exec.Command(dockerBinary, "pull", imageReference1)
	out, _, err = runCommandWithOutput(c)
	if err != nil {
		t.Fatalf("error pulling by digest: %s, %v", out, err)
	}

	// pull image2 by digest
	c = exec.Command(dockerBinary, "pull", imageReference2)
	out, _, err = runCommandWithOutput(c)
	if err != nil {
		t.Fatalf("error pulling by digest: %s, %v", out, err)
	}

	// list images
	c = exec.Command(dockerBinary, "images", "--digests")
	out, _, err = runCommandWithOutput(c)
	if err != nil {
		t.Fatalf("error listing images: %s, %v", out, err)
	}

	// make sure repo shown, tag=<none>, digest = $digest1
	if !re1.MatchString(out) {
		t.Fatalf("expected %q: %s", re1.String(), out)
	}

	// make sure repo shown, tag=<none>, digest = $digest2
	re2 := regexp.MustCompile(`\s*` + repoName + `\s*<none>\s*` + digest2 + `\s`)
	if !re2.MatchString(out) {
		t.Fatalf("expected %q: %s", re2.String(), out)
	}

	// pull tag1
	c = exec.Command(dockerBinary, "pull", repoName+":tag1")
	out, _, err = runCommandWithOutput(c)
	if err != nil {
		t.Fatalf("error pulling tag1: %s, %v", out, err)
	}

	// list images
	c = exec.Command(dockerBinary, "images", "--digests")
	out, _, err = runCommandWithOutput(c)
	if err != nil {
		t.Fatalf("error listing images: %s, %v", out, err)
	}

	// make sure image 1 has repo, tag, <none> AND repo, <none>, digest
	reWithTag1 := regexp.MustCompile(`\s*` + repoName + `\s*tag1\s*<none>\s`)
	reWithDigest1 := regexp.MustCompile(`\s*` + repoName + `\s*<none>\s*` + digest1 + `\s`)
	if !reWithTag1.MatchString(out) {
		t.Fatalf("expected %q: %s", reWithTag1.String(), out)
	}
	if !reWithDigest1.MatchString(out) {
		t.Fatalf("expected %q: %s", reWithDigest1.String(), out)
	}
	// make sure image 2 has repo, <none>, digest
	if !re2.MatchString(out) {
		t.Fatalf("expected %q: %s", re2.String(), out)
	}

	// pull tag 2
	c = exec.Command(dockerBinary, "pull", repoName+":tag2")
	out, _, err = runCommandWithOutput(c)
	if err != nil {
		t.Fatalf("error pulling tag2: %s, %v", out, err)
	}

	// list images
	c = exec.Command(dockerBinary, "images", "--digests")
	out, _, err = runCommandWithOutput(c)
	if err != nil {
		t.Fatalf("error listing images: %s, %v", out, err)
	}

	// make sure image 1 has repo, tag, digest
	if !reWithTag1.MatchString(out) {
		t.Fatalf("expected %q: %s", reWithTag1.String(), out)
	}

	// make sure image 2 has repo, tag, digest
	reWithTag2 := regexp.MustCompile(`\s*` + repoName + `\s*tag2\s*<none>\s`)
	reWithDigest2 := regexp.MustCompile(`\s*` + repoName + `\s*<none>\s*` + digest2 + `\s`)
	if !reWithTag2.MatchString(out) {
		t.Fatalf("expected %q: %s", reWithTag2.String(), out)
	}
	if !reWithDigest2.MatchString(out) {
		t.Fatalf("expected %q: %s", reWithDigest2.String(), out)
	}

	// list images
	c = exec.Command(dockerBinary, "images", "--digests")
	out, _, err = runCommandWithOutput(c)
	if err != nil {
		t.Fatalf("error listing images: %s, %v", out, err)
	}

	// make sure image 1 has repo, tag, digest
	if !reWithTag1.MatchString(out) {
		t.Fatalf("expected %q: %s", reWithTag1.String(), out)
	}
	// make sure image 2 has repo, tag, digest
	if !reWithTag2.MatchString(out) {
		t.Fatalf("expected %q: %s", reWithTag2.String(), out)
	}
	// make sure busybox has tag, but not digest
	busyboxRe := regexp.MustCompile(`\s*busybox\s*latest\s*<none>\s`)
	if !busyboxRe.MatchString(out) {
		t.Fatalf("expected %q: %s", busyboxRe.String(), out)
	}

	logDone("by_digest - list images with digests")
}

func TestDeleteImageByIDOnlyPulledByDigest(t *testing.T) {
	defer setupRegistry(t)()

	pushDigest, err := setupImage()
	if err != nil {
		t.Fatalf("error setting up image: %v", err)
	}

	// pull from the registry using the @ reference
	imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest)
	c := exec.Command(dockerBinary, "pull", imageReference)
	out, _, err := runCommandWithOutput(c)
	if err != nil {
		t.Fatalf("error pulling by digest: %s, %v", out, err)
	}

	// just in case...
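	// Clarifying note (added; the digest value shown is hypothetical): an
	// "@ reference" has the form repo@digest, e.g.
	//
	//	<repoName>@sha256:<64 hex characters>
	//
	// An image pulled only by digest carries no tag, so deleting it by image
	// ID, as this test does, is the natural way to remove it.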
defer deleteImages(imageReference) imageID, err := inspectField(imageReference, ".Id") if err != nil { t.Fatalf("error inspecting image id: %v", err) } c = exec.Command(dockerBinary, "rmi", imageID) if _, err := runCommand(c); err != nil { t.Fatalf("error deleting image by id: %v", err) } logDone("by_digest - delete image by id only pulled by digest") } docker-1.6.2/integration-cli/docker_cli_rmi_test.go0000644000175000017500000001274412524223634021767 0ustar tianontianonpackage main import ( "os/exec" "strings" "testing" ) func TestRmiWithContainerFails(t *testing.T) { errSubstr := "is using it" // create a container runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf("failed to create a container: %s, %v", out, err) } cleanedContainerID := stripTrailingCharacters(out) // try to delete the image runCmd = exec.Command(dockerBinary, "rmi", "busybox") out, _, err = runCommandWithOutput(runCmd) if err == nil { t.Fatalf("Container %q is using image, should not be able to rmi: %q", cleanedContainerID, out) } if !strings.Contains(out, errSubstr) { t.Fatalf("Container %q is using image, error message should contain %q: %v", cleanedContainerID, errSubstr, out) } // make sure it didn't delete the busybox name images, _, _ := dockerCmd(t, "images") if !strings.Contains(images, "busybox") { t.Fatalf("The name 'busybox' should not have been removed from images: %q", images) } deleteContainer(cleanedContainerID) logDone("rmi- container using image while rmi, should not remove image name") } func TestRmiTag(t *testing.T) { imagesBefore, _, _ := dockerCmd(t, "images", "-a") dockerCmd(t, "tag", "busybox", "utest:tag1") dockerCmd(t, "tag", "busybox", "utest/docker:tag2") dockerCmd(t, "tag", "busybox", "utest:5000/docker:tag3") { imagesAfter, _, _ := dockerCmd(t, "images", "-a") if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+3 { t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) } } dockerCmd(t, "rmi", "utest/docker:tag2") { imagesAfter, _, _ := dockerCmd(t, "images", "-a") if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+2 { t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) } } dockerCmd(t, "rmi", "utest:5000/docker:tag3") { imagesAfter, _, _ := dockerCmd(t, "images", "-a") if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+1 { t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) } } dockerCmd(t, "rmi", "utest:tag1") { imagesAfter, _, _ := dockerCmd(t, "images", "-a") if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+0 { t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) } } logDone("rmi - tag,rmi- tagging the same images multiple times then removing tags") } func TestRmiTagWithExistingContainers(t *testing.T) { defer deleteAllContainers() container := "test-delete-tag" newtag := "busybox:newtag" bb := "busybox:latest" if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "tag", bb, newtag)); err != nil { t.Fatalf("Could not tag busybox: %v: %s", err, out) } if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", container, bb, "/bin/true")); err != nil { t.Fatalf("Could not run busybox: %v: %s", err, out) } out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rmi", newtag)) if err != nil { t.Fatalf("Could not remove tag %s: %v: %s", newtag, err, out) } if d := strings.Count(out, "Untagged: "); d != 1 { t.Fatalf("Expected 1 
untagged entry got %d: %q", d, out) } logDone("rmi - delete tag with existing containers") } func TestRmiForceWithExistingContainers(t *testing.T) { defer deleteAllContainers() image := "busybox-clone" cmd := exec.Command(dockerBinary, "build", "--no-cache", "-t", image, "-") cmd.Stdin = strings.NewReader(`FROM busybox MAINTAINER foo`) if out, _, err := runCommandWithOutput(cmd); err != nil { t.Fatalf("Could not build %s: %s, %v", image, out, err) } if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", "test-force-rmi", image, "/bin/true")); err != nil { t.Fatalf("Could not run container: %s, %v", out, err) } out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rmi", "-f", image)) if err != nil { t.Fatalf("Could not remove image %s: %s, %v", image, out, err) } logDone("rmi - force delete with existing containers") } func TestRmiWithMultipleRepositories(t *testing.T) { defer deleteAllContainers() newRepo := "127.0.0.1:5000/busybox" oldRepo := "busybox" newTag := "busybox:test" cmd := exec.Command(dockerBinary, "tag", oldRepo, newRepo) out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatalf("Could not tag busybox: %v: %s", err, out) } cmd = exec.Command(dockerBinary, "run", "--name", "test", oldRepo, "touch", "/home/abcd") out, _, err = runCommandWithOutput(cmd) if err != nil { t.Fatalf("failed to run container: %v, output: %s", err, out) } cmd = exec.Command(dockerBinary, "commit", "test", newTag) out, _, err = runCommandWithOutput(cmd) if err != nil { t.Fatalf("failed to commit container: %v, output: %s", err, out) } cmd = exec.Command(dockerBinary, "rmi", newTag) out, _, err = runCommandWithOutput(cmd) if err != nil { t.Fatalf("failed to remove image: %v, output: %s", err, out) } if !strings.Contains(out, "Untagged: "+newTag) { t.Fatalf("Could not remove image %s: %s, %v", newTag, out, err) } logDone("rmi - delete a image which its dependency tagged to multiple repositories success") } func TestRmiBlank(t *testing.T) { // try to delete a blank image name runCmd := exec.Command(dockerBinary, "rmi", "") out, _, err := runCommandWithOutput(runCmd) if err == nil { t.Fatal("Should have failed to delete '' image") } if strings.Contains(out, "No such image") { t.Fatalf("Wrong error message generated: %s", out) } logDone("rmi- blank image name") } docker-1.6.2/integration-cli/docker_cli_push_test.go0000644000175000017500000001257212524223634022156 0ustar tianontianonpackage main import ( "fmt" "io/ioutil" "os" "os/exec" "strings" "testing" "time" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) // pulling an image from the central registry should work func TestPushBusyboxImage(t *testing.T) { defer setupRegistry(t)() repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) // tag the image to upload it to the private registry tagCmd := exec.Command(dockerBinary, "tag", "busybox", repoName) if out, _, err := runCommandWithOutput(tagCmd); err != nil { t.Fatalf("image tagging failed: %s, %v", out, err) } defer deleteImages(repoName) pushCmd := exec.Command(dockerBinary, "push", repoName) if out, _, err := runCommandWithOutput(pushCmd); err != nil { t.Fatalf("pushing the image to the private registry has failed: %s, %v", out, err) } logDone("push - busybox to private registry") } // pushing an image without a prefix should throw an error func TestPushUnprefixedRepo(t *testing.T) { pushCmd := exec.Command(dockerBinary, "push", "busybox") if out, _, err := runCommandWithOutput(pushCmd); err == nil { 
t.Fatalf("pushing an unprefixed repo didn't result in a non-zero exit status: %s", out) } logDone("push - unprefixed busybox repo must not pass") } func TestPushUntagged(t *testing.T) { defer setupRegistry(t)() repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) expected := "Repository does not exist" pushCmd := exec.Command(dockerBinary, "push", repoName) if out, _, err := runCommandWithOutput(pushCmd); err == nil { t.Fatalf("pushing the image to the private registry should have failed: outuput %q", out) } else if !strings.Contains(out, expected) { t.Fatalf("pushing the image failed with an unexpected message: expected %q, got %q", expected, out) } logDone("push - untagged image") } func TestPushBadTag(t *testing.T) { defer setupRegistry(t)() repoName := fmt.Sprintf("%v/dockercli/busybox:latest", privateRegistryURL) expected := "does not exist" pushCmd := exec.Command(dockerBinary, "push", repoName) if out, _, err := runCommandWithOutput(pushCmd); err == nil { t.Fatalf("pushing the image to the private registry should have failed: outuput %q", out) } else if !strings.Contains(out, expected) { t.Fatalf("pushing the image failed with an unexpected message: expected %q, got %q", expected, out) } logDone("push - image with bad tag") } func TestPushMultipleTags(t *testing.T) { defer setupRegistry(t)() repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) repoTag1 := fmt.Sprintf("%v/dockercli/busybox:t1", privateRegistryURL) repoTag2 := fmt.Sprintf("%v/dockercli/busybox:t2", privateRegistryURL) // tag the image to upload it tot he private registry tagCmd1 := exec.Command(dockerBinary, "tag", "busybox", repoTag1) if out, _, err := runCommandWithOutput(tagCmd1); err != nil { t.Fatalf("image tagging failed: %s, %v", out, err) } defer deleteImages(repoTag1) tagCmd2 := exec.Command(dockerBinary, "tag", "busybox", repoTag2) if out, _, err := runCommandWithOutput(tagCmd2); err != nil { t.Fatalf("image tagging failed: %s, %v", out, err) } defer deleteImages(repoTag2) pushCmd := exec.Command(dockerBinary, "push", repoName) if out, _, err := runCommandWithOutput(pushCmd); err != nil { t.Fatalf("pushing the image to the private registry has failed: %s, %v", out, err) } logDone("push - multiple tags to private registry") } func TestPushInterrupt(t *testing.T) { defer setupRegistry(t)() repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) // tag the image to upload it tot he private registry tagCmd := exec.Command(dockerBinary, "tag", "busybox", repoName) if out, _, err := runCommandWithOutput(tagCmd); err != nil { t.Fatalf("image tagging failed: %s, %v", out, err) } defer deleteImages(repoName) pushCmd := exec.Command(dockerBinary, "push", repoName) if err := pushCmd.Start(); err != nil { t.Fatalf("Failed to start pushing to private registry: %v", err) } // Interrupt push (yes, we have no idea at what point it will get killed). 
time.Sleep(200 * time.Millisecond)
	if err := pushCmd.Process.Kill(); err != nil {
		t.Fatalf("Failed to kill push process: %v", err)
	}

	// Try again
	pushCmd = exec.Command(dockerBinary, "push", repoName)
	if err := pushCmd.Start(); err != nil {
		t.Fatalf("Failed to start pushing to private registry: %v", err)
	}

	logDone("push - interrupted")
}

func TestPushEmptyLayer(t *testing.T) {
	defer setupRegistry(t)()
	repoName := fmt.Sprintf("%v/dockercli/emptylayer", privateRegistryURL)
	emptyTarball, err := ioutil.TempFile("", "empty_tarball")
	if err != nil {
		t.Fatalf("Unable to create test file: %v", err)
	}
	tw := tar.NewWriter(emptyTarball)
	err = tw.Close()
	if err != nil {
		t.Fatalf("Error creating empty tarball: %v", err)
	}
	freader, err := os.Open(emptyTarball.Name())
	if err != nil {
		t.Fatalf("Could not open test tarball: %v", err)
	}

	importCmd := exec.Command(dockerBinary, "import", "-", repoName)
	importCmd.Stdin = freader
	out, _, err := runCommandWithOutput(importCmd)
	if err != nil {
		t.Errorf("import failed with errors: %v, output: %q", err, out)
	}

	// Now verify we can push it
	pushCmd := exec.Command(dockerBinary, "push", repoName)
	if out, _, err := runCommandWithOutput(pushCmd); err != nil {
		t.Fatalf("pushing the image to the private registry has failed: %s, %v", out, err)
	}
	logDone("push - empty layer config to private registry")
}
docker-1.6.2/integration-cli/docker_api_containers_test.go0000644000175000017500000003662012524223634023346 0ustar tianontianon
package main

import (
	"bytes"
	"encoding/json"
	"io"
	"os/exec"
	"strings"
	"testing"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
)

func TestContainerApiGetAll(t *testing.T) {
	defer deleteAllContainers()

	startCount, err := getContainerCount()
	if err != nil {
		t.Fatalf("Cannot query container count: %v", err)
	}

	name := "getall"
	runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "true")
	out, _, err := runCommandWithOutput(runCmd)
	if err != nil {
		t.Fatalf("Error on container creation: %v, output: %q", err, out)
	}

	body, err := sockRequest("GET", "/containers/json?all=1", nil)
	if err != nil {
		t.Fatalf("GET all containers sockRequest failed: %v", err)
	}

	var inspectJSON []struct {
		Names []string
	}
	if err = json.Unmarshal(body, &inspectJSON); err != nil {
		t.Fatalf("unable to unmarshal response body: %v", err)
	}

	if len(inspectJSON) != startCount+1 {
		t.Fatalf("Expected %d container(s), %d found (started with: %d)", startCount+1, len(inspectJSON), startCount)
	}

	if actual := inspectJSON[0].Names[0]; actual != "/"+name {
		t.Fatalf("Container Name mismatch.
Expected: %q, received: %q\n", "/"+name, actual) } logDone("container REST API - check GET json/all=1") } func TestContainerApiGetExport(t *testing.T) { defer deleteAllContainers() name := "exportcontainer" runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "touch", "/test") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf("Error on container creation: %v, output: %q", err, out) } body, err := sockRequest("GET", "/containers/"+name+"/export", nil) if err != nil { t.Fatalf("GET containers/export sockRequest failed: %v", err) } found := false for tarReader := tar.NewReader(bytes.NewReader(body)); ; { h, err := tarReader.Next() if err != nil { if err == io.EOF { break } t.Fatal(err) } if h.Name == "test" { found = true break } } if !found { t.Fatalf("The created test file has not been found in the exported image") } logDone("container REST API - check GET containers/export") } func TestContainerApiGetChanges(t *testing.T) { defer deleteAllContainers() name := "changescontainer" runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "rm", "/etc/passwd") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf("Error on container creation: %v, output: %q", err, out) } body, err := sockRequest("GET", "/containers/"+name+"/changes", nil) if err != nil { t.Fatalf("GET containers/changes sockRequest failed: %v", err) } changes := []struct { Kind int Path string }{} if err = json.Unmarshal(body, &changes); err != nil { t.Fatalf("unable to unmarshal response body: %v", err) } // Check the changelog for removal of /etc/passwd success := false for _, elem := range changes { if elem.Path == "/etc/passwd" && elem.Kind == 2 { success = true } } if !success { t.Fatalf("/etc/passwd has been removed but is not present in the diff") } logDone("container REST API - check GET containers/changes") } func TestContainerApiStartVolumeBinds(t *testing.T) { defer deleteAllContainers() name := "testing" config := map[string]interface{}{ "Image": "busybox", "Volumes": map[string]struct{}{"/tmp": {}}, } if _, err := sockRequest("POST", "/containers/create?name="+name, config); err != nil && !strings.Contains(err.Error(), "201 Created") { t.Fatal(err) } bindPath := randomUnixTmpDirPath("test") config = map[string]interface{}{ "Binds": []string{bindPath + ":/tmp"}, } if _, err := sockRequest("POST", "/containers/"+name+"/start", config); err != nil && !strings.Contains(err.Error(), "204 No Content") { t.Fatal(err) } pth, err := inspectFieldMap(name, "Volumes", "/tmp") if err != nil { t.Fatal(err) } if pth != bindPath { t.Fatalf("expected volume host path to be %s, got %s", bindPath, pth) } logDone("container REST API - check volume binds on start") } // Test for GH#10618 func TestContainerApiStartDupVolumeBinds(t *testing.T) { defer deleteAllContainers() name := "testdups" config := map[string]interface{}{ "Image": "busybox", "Volumes": map[string]struct{}{"/tmp": {}}, } if _, err := sockRequest("POST", "/containers/create?name="+name, config); err != nil && !strings.Contains(err.Error(), "201 Created") { t.Fatal(err) } bindPath1 := randomUnixTmpDirPath("test1") bindPath2 := randomUnixTmpDirPath("test2") config = map[string]interface{}{ "Binds": []string{bindPath1 + ":/tmp", bindPath2 + ":/tmp"}, } if body, err := sockRequest("POST", "/containers/"+name+"/start", config); err == nil { t.Fatal("expected container start to fail when duplicate volume binds to same container path") } else { if !strings.Contains(string(body), "Duplicate volume") { 
t.Fatalf("Expected failure due to duplicate bind mounts to same path, instead got: %q with error: %v", string(body), err) } } logDone("container REST API - check for duplicate volume binds error on start") } func TestContainerApiStartVolumesFrom(t *testing.T) { defer deleteAllContainers() volName := "voltst" volPath := "/tmp" if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", volName, "-v", volPath, "busybox")); err != nil { t.Fatal(out, err) } name := "testing" config := map[string]interface{}{ "Image": "busybox", "Volumes": map[string]struct{}{volPath: {}}, } if _, err := sockRequest("POST", "/containers/create?name="+name, config); err != nil && !strings.Contains(err.Error(), "201 Created") { t.Fatal(err) } config = map[string]interface{}{ "VolumesFrom": []string{volName}, } if _, err := sockRequest("POST", "/containers/"+name+"/start", config); err != nil && !strings.Contains(err.Error(), "204 No Content") { t.Fatal(err) } pth, err := inspectFieldMap(name, "Volumes", volPath) if err != nil { t.Fatal(err) } pth2, err := inspectFieldMap(volName, "Volumes", volPath) if err != nil { t.Fatal(err) } if pth != pth2 { t.Fatalf("expected volume host path to be %s, got %s", pth, pth2) } logDone("container REST API - check VolumesFrom on start") } // Ensure that volumes-from has priority over binds/anything else // This is pretty much the same as TestRunApplyVolumesFromBeforeVolumes, except with passing the VolumesFrom and the bind on start func TestVolumesFromHasPriority(t *testing.T) { defer deleteAllContainers() volName := "voltst" volPath := "/tmp" if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", volName, "-v", volPath, "busybox")); err != nil { t.Fatal(out, err) } name := "testing" config := map[string]interface{}{ "Image": "busybox", "Volumes": map[string]struct{}{volPath: {}}, } if _, err := sockRequest("POST", "/containers/create?name="+name, config); err != nil && !strings.Contains(err.Error(), "201 Created") { t.Fatal(err) } bindPath := randomUnixTmpDirPath("test") config = map[string]interface{}{ "VolumesFrom": []string{volName}, "Binds": []string{bindPath + ":/tmp"}, } if _, err := sockRequest("POST", "/containers/"+name+"/start", config); err != nil && !strings.Contains(err.Error(), "204 No Content") { t.Fatal(err) } pth, err := inspectFieldMap(name, "Volumes", volPath) if err != nil { t.Fatal(err) } pth2, err := inspectFieldMap(volName, "Volumes", volPath) if err != nil { t.Fatal(err) } if pth != pth2 { t.Fatalf("expected volume host path to be %s, got %s", pth, pth2) } logDone("container REST API - check VolumesFrom has priority") } func TestGetContainerStats(t *testing.T) { defer deleteAllContainers() var ( name = "statscontainer" runCmd = exec.Command(dockerBinary, "run", "-d", "--name", name, "busybox", "top") ) out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf("Error on container creation: %v, output: %q", err, out) } type b struct { body []byte err error } bc := make(chan b, 1) go func() { body, err := sockRequest("GET", "/containers/"+name+"/stats", nil) bc <- b{body, err} }() // allow some time to stream the stats from the container time.Sleep(4 * time.Second) if _, err := runCommand(exec.Command(dockerBinary, "rm", "-f", name)); err != nil { t.Fatal(err) } // collect the results from the stats stream or timeout and fail // if the stream was not disconnected. 
select { case <-time.After(2 * time.Second): t.Fatal("stream was not closed after container was removed") case sr := <-bc: if sr.err != nil { t.Fatal(sr.err) } dec := json.NewDecoder(bytes.NewBuffer(sr.body)) var s *types.Stats // decode only one object from the stream if err := dec.Decode(&s); err != nil { t.Fatal(err) } } logDone("container REST API - check GET containers/stats") } func TestGetStoppedContainerStats(t *testing.T) { defer deleteAllContainers() var ( name = "statscontainer" runCmd = exec.Command(dockerBinary, "create", "--name", name, "busybox", "top") ) out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf("Error on container creation: %v, output: %q", err, out) } go func() { // We'll never get return for GET stats from sockRequest as of now, // just send request and see if panic or error would happen on daemon side. _, err := sockRequest("GET", "/containers/"+name+"/stats", nil) if err != nil { t.Fatal(err) } }() // allow some time to send request and let daemon deal with it time.Sleep(1 * time.Second) logDone("container REST API - check GET stopped containers/stats") } func TestBuildApiDockerfilePath(t *testing.T) { // Test to make sure we stop people from trying to leave the // build context when specifying the path to the dockerfile buffer := new(bytes.Buffer) tw := tar.NewWriter(buffer) defer tw.Close() dockerfile := []byte("FROM busybox") if err := tw.WriteHeader(&tar.Header{ Name: "Dockerfile", Size: int64(len(dockerfile)), }); err != nil { t.Fatalf("failed to write tar file header: %v", err) } if _, err := tw.Write(dockerfile); err != nil { t.Fatalf("failed to write tar file content: %v", err) } if err := tw.Close(); err != nil { t.Fatalf("failed to close tar archive: %v", err) } out, err := sockRequestRaw("POST", "/build?dockerfile=../Dockerfile", buffer, "application/x-tar") if err == nil { t.Fatalf("Build was supposed to fail: %s", out) } if !strings.Contains(string(out), "must be within the build context") { t.Fatalf("Didn't complain about leaving build context: %s", out) } logDone("container REST API - check build w/bad Dockerfile path") } func TestBuildApiDockerFileRemote(t *testing.T) { server, err := fakeStorage(map[string]string{ "testD": `FROM busybox COPY * /tmp/ RUN find / -name ba* RUN find /tmp/`, }) if err != nil { t.Fatal(err) } defer server.Close() buf, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+server.URL()+"/testD", nil, "application/json") if err != nil { t.Fatalf("Build failed: %s", err) } // Make sure Dockerfile exists. 
// Make sure 'baz' doesn't exist ANYWHERE despite being mentioned in the URL out := string(buf) if !strings.Contains(out, "/tmp/Dockerfile") || strings.Contains(out, "baz") { t.Fatalf("Incorrect output: %s", out) } logDone("container REST API - check build with -f from remote") } func TestBuildApiLowerDockerfile(t *testing.T) { git, err := fakeGIT("repo", map[string]string{ "dockerfile": `FROM busybox RUN echo from dockerfile`, }, false) if err != nil { t.Fatal(err) } defer git.Close() buf, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json") if err != nil { t.Fatalf("Build failed: %s\n%q", err, buf) } out := string(buf) if !strings.Contains(out, "from dockerfile") { t.Fatalf("Incorrect output: %s", out) } logDone("container REST API - check build with lower dockerfile") } func TestBuildApiBuildGitWithF(t *testing.T) { git, err := fakeGIT("repo", map[string]string{ "baz": `FROM busybox RUN echo from baz`, "Dockerfile": `FROM busybox RUN echo from Dockerfile`, }, false) if err != nil { t.Fatal(err) } defer git.Close() // Make sure it tries to 'dockerfile' query param value buf, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+git.RepoURL, nil, "application/json") if err != nil { t.Fatalf("Build failed: %s\n%q", err, buf) } out := string(buf) if !strings.Contains(out, "from baz") { t.Fatalf("Incorrect output: %s", out) } logDone("container REST API - check build from git w/F") } func TestBuildApiDoubleDockerfile(t *testing.T) { testRequires(t, UnixCli) // dockerfile overwrites Dockerfile on Windows git, err := fakeGIT("repo", map[string]string{ "Dockerfile": `FROM busybox RUN echo from Dockerfile`, "dockerfile": `FROM busybox RUN echo from dockerfile`, }, false) if err != nil { t.Fatal(err) } defer git.Close() // Make sure it tries to 'dockerfile' query param value buf, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json") if err != nil { t.Fatalf("Build failed: %s", err) } out := string(buf) if !strings.Contains(out, "from Dockerfile") { t.Fatalf("Incorrect output: %s", out) } logDone("container REST API - check build with two dockerfiles") } func TestBuildApiDockerfileSymlink(t *testing.T) { // Test to make sure we stop people from trying to leave the // build context when specifying a symlink as the path to the dockerfile buffer := new(bytes.Buffer) tw := tar.NewWriter(buffer) defer tw.Close() if err := tw.WriteHeader(&tar.Header{ Name: "Dockerfile", Typeflag: tar.TypeSymlink, Linkname: "/etc/passwd", }); err != nil { t.Fatalf("failed to write tar file header: %v", err) } if err := tw.Close(); err != nil { t.Fatalf("failed to close tar archive: %v", err) } out, err := sockRequestRaw("POST", "/build", buffer, "application/x-tar") if err == nil { t.Fatalf("Build was supposed to fail: %s", out) } // The reason the error is "Cannot locate specified Dockerfile" is because // in the builder, the symlink is resolved within the context, therefore // Dockerfile -> /etc/passwd becomes etc/passwd from the context which is // a nonexistent file. 
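	// Illustrative sketch of that resolution (added for clarity; this is not
	// the builder's actual code, and contextRoot/resolved are hypothetical
	// names for this sketch):
	//
	//	// tar entry: Dockerfile -> /etc/passwd
	//	resolved := filepath.Join(contextRoot, "/etc/passwd") // contextRoot/etc/passwd
	//	// no such file exists in the uploaded context, hence
	//	// "Cannot locate specified Dockerfile"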
if !strings.Contains(string(out), "Cannot locate specified Dockerfile: Dockerfile") { t.Fatalf("Didn't complain about leaving build context: %s", out) } logDone("container REST API - check build w/bad Dockerfile symlink path") } // #9981 - Allow a docker created volume (ie, one in /var/lib/docker/volumes) to be used to overwrite (via passing in Binds on api start) an existing volume func TestPostContainerBindNormalVolume(t *testing.T) { defer deleteAllContainers() out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "create", "-v", "/foo", "--name=one", "busybox")) if err != nil { t.Fatal(err, out) } fooDir, err := inspectFieldMap("one", "Volumes", "/foo") if err != nil { t.Fatal(err) } out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "create", "-v", "/foo", "--name=two", "busybox")) if err != nil { t.Fatal(err, out) } bindSpec := map[string][]string{"Binds": {fooDir + ":/foo"}} _, err = sockRequest("POST", "/containers/two/start", bindSpec) if err != nil && !strings.Contains(err.Error(), "204 No Content") { t.Fatal(err) } fooDir2, err := inspectFieldMap("two", "Volumes", "/foo") if err != nil { t.Fatal(err) } if fooDir2 != fooDir { t.Fatalf("expected volume path to be %s, got: %s", fooDir, fooDir2) } logDone("container REST API - can use path from normal volume as bind-mount to overwrite another volume") } docker-1.6.2/integration-cli/docker_cli_logs_test.go0000644000175000017500000002274512524223634022146 0ustar tianontianonpackage main import ( "fmt" "os/exec" "regexp" "strings" "testing" "time" "github.com/docker/docker/pkg/timeutils" ) // This used to work; it tests a log of PageSize-1 (gh#4851) func TestLogsContainerSmallerThanPage(t *testing.T) { testLen := 32767 runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen)) out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("run failed with errors: %s, %v", out, err) } cleanedContainerID := stripTrailingCharacters(out) exec.Command(dockerBinary, "wait", cleanedContainerID).Run() logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) if err != nil { t.Fatalf("failed to log container: %s, %v", out, err) } if len(out) != testLen+1 { t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) } deleteContainer(cleanedContainerID) logDone("logs - logs container running echo smaller than page size") } // Regression test: When going over the PageSize, it used to panic (gh#4851) func TestLogsContainerBiggerThanPage(t *testing.T) { testLen := 32768 runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen)) out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("run failed with errors: %s, %v", out, err) } cleanedContainerID := stripTrailingCharacters(out) exec.Command(dockerBinary, "wait", cleanedContainerID).Run() logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) if err != nil { t.Fatalf("failed to log container: %s, %v", out, err) } if len(out) != testLen+1 { t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) } deleteContainer(cleanedContainerID) logDone("logs - logs container running echo bigger than page size") } // Regression test: When going much over the PageSize, it used to block (gh#4851) func TestLogsContainerMuchBiggerThanPage(t
*testing.T) { testLen := 33000 runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen)) out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("run failed with errors: %s, %v", out, err) } cleanedContainerID := stripTrailingCharacters(out) exec.Command(dockerBinary, "wait", cleanedContainerID).Run() logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) if err != nil { t.Fatalf("failed to log container: %s, %v", out, err) } if len(out) != testLen+1 { t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) } deleteContainer(cleanedContainerID) logDone("logs - logs container running echo much bigger than page size") } func TestLogsTimestamps(t *testing.T) { testLen := 100 runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)) out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("run failed with errors: %s, %v", out, err) } cleanedContainerID := stripTrailingCharacters(out) exec.Command(dockerBinary, "wait", cleanedContainerID).Run() logsCmd := exec.Command(dockerBinary, "logs", "-t", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) if err != nil { t.Fatalf("failed to log container: %s, %v", out, err) } lines := strings.Split(out, "\n") if len(lines) != testLen+1 { t.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines)) } ts := regexp.MustCompile(`^.* `) for _, l := range lines { if l != "" { _, err := time.Parse(timeutils.RFC3339NanoFixed+" ", ts.FindString(l)) if err != nil { t.Fatalf("Failed to parse timestamp from %v: %v", l, err) } if l[29] != 'Z' { // ensure we have padded 0's t.Fatalf("Timestamp isn't padded properly: %s", l) } } } deleteContainer(cleanedContainerID) logDone("logs - logs with timestamps") } func TestLogsSeparateStderr(t *testing.T) { msg := "stderr_log" runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("run failed with errors: %s, %v", out, err) } cleanedContainerID := stripTrailingCharacters(out) exec.Command(dockerBinary, "wait", cleanedContainerID).Run() logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) stdout, stderr, _, err := runCommandWithStdoutStderr(logsCmd) if err != nil { t.Fatalf("failed to log container: %s, %v", out, err) } if stdout != "" { t.Fatalf("Expected empty stdout stream, got %v", stdout) } stderr = strings.TrimSpace(stderr) if stderr != msg { t.Fatalf("Expected %v in stderr stream, got %v", msg, stderr) } deleteContainer(cleanedContainerID) logDone("logs - separate stderr (without pseudo-tty)") } func TestLogsStderrInStdout(t *testing.T) { msg := "stderr_log" runCmd := exec.Command(dockerBinary, "run", "-d", "-t", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("run failed with errors: %s, %v", out, err) } cleanedContainerID := stripTrailingCharacters(out) exec.Command(dockerBinary, "wait", cleanedContainerID).Run() logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) stdout, stderr, _, err := runCommandWithStdoutStderr(logsCmd) if err != nil { t.Fatalf("failed to log container: %s, %v", out, err) } if stderr != "" { t.Fatalf("Expected empty stderr stream, got %v", 
stdout) } stdout = strings.TrimSpace(stdout) if stdout != msg { t.Fatalf("Expected %v in stdout stream, got %v", msg, stdout) } deleteContainer(cleanedContainerID) logDone("logs - stderr in stdout (with pseudo-tty)") } func TestLogsTail(t *testing.T) { testLen := 100 runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)) out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("run failed with errors: %s, %v", out, err) } cleanedContainerID := stripTrailingCharacters(out) exec.Command(dockerBinary, "wait", cleanedContainerID).Run() logsCmd := exec.Command(dockerBinary, "logs", "--tail", "5", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) if err != nil { t.Fatalf("failed to log container: %s, %v", out, err) } lines := strings.Split(out, "\n") if len(lines) != 6 { t.Fatalf("Expected log %d lines, received %d\n", 6, len(lines)) } logsCmd = exec.Command(dockerBinary, "logs", "--tail", "all", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) if err != nil { t.Fatalf("failed to log container: %s, %v", out, err) } lines = strings.Split(out, "\n") if len(lines) != testLen+1 { t.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines)) } logsCmd = exec.Command(dockerBinary, "logs", "--tail", "random", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) if err != nil { t.Fatalf("failed to log container: %s, %v", out, err) } lines = strings.Split(out, "\n") if len(lines) != testLen+1 { t.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines)) } deleteContainer(cleanedContainerID) logDone("logs - logs tail") } func TestLogsFollowStopped(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "hello") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("run failed with errors: %s, %v", out, err) } cleanedContainerID := stripTrailingCharacters(out) exec.Command(dockerBinary, "wait", cleanedContainerID).Run() logsCmd := exec.Command(dockerBinary, "logs", "-f", cleanedContainerID) if err := logsCmd.Start(); err != nil { t.Fatal(err) } c := make(chan struct{}) go func() { if err := logsCmd.Wait(); err != nil { t.Fatal(err) } close(c) }() select { case <-c: case <-time.After(1 * time.Second): t.Fatal("Following logs hung") } deleteContainer(cleanedContainerID) logDone("logs - logs follow stopped container") } // Regression test for #8832 func TestLogsFollowSlowStdoutConsumer(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "/bin/sh", "-c", `usleep 200000;yes X | head -c 200000`) out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("run failed with errors: %s, %v", out, err) } cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) stopSlowRead := make(chan bool) go func() { exec.Command(dockerBinary, "wait", cleanedContainerID).Run() stopSlowRead <- true }() logCmd := exec.Command(dockerBinary, "logs", "-f", cleanedContainerID) stdout, err := logCmd.StdoutPipe() if err != nil { t.Fatal(err) } if err := logCmd.Start(); err != nil { t.Fatal(err) } // First read slowly bytes1, err := consumeWithSpeed(stdout, 10, 50*time.Millisecond, stopSlowRead) if err != nil { t.Fatal(err) } // After the container has finished we can continue reading fast bytes2, err := consumeWithSpeed(stdout, 32*1024, 0, nil) if err != nil { t.Fatal(err) } actual := bytes1 + bytes2 expected :=
200000 if actual != expected { t.Fatalf("Invalid bytes read: %d, expected %d", actual, expected) } logDone("logs - follow slow consumer") } docker-1.6.2/integration-cli/docker_cli_pull_test.go0000644000175000017500000001125712524223634022152 0ustar tianontianonpackage main import ( "fmt" "os/exec" "strings" "testing" ) // See issue docker/docker#8141 func TestPullImageWithAliases(t *testing.T) { defer setupRegistry(t)() repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) defer deleteImages(repoName) repos := []string{} for _, tag := range []string{"recent", "fresh"} { repos = append(repos, fmt.Sprintf("%v:%v", repoName, tag)) } // Tag and push the same image multiple times. for _, repo := range repos { if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "tag", "busybox", repo)); err != nil { t.Fatalf("Failed to tag image %v: error %v, output %q", repos, err, out) } defer deleteImages(repo) if out, err := exec.Command(dockerBinary, "push", repo).CombinedOutput(); err != nil { t.Fatalf("Failed to push image %v: error %v, output %q", repo, err, string(out)) } } // Clear local images store. args := append([]string{"rmi"}, repos...) if out, err := exec.Command(dockerBinary, args...).CombinedOutput(); err != nil { t.Fatalf("Failed to clean images: error %v, output %q", err, string(out)) } // Pull a single tag and verify it doesn't bring down all aliases. pullCmd := exec.Command(dockerBinary, "pull", repos[0]) if out, _, err := runCommandWithOutput(pullCmd); err != nil { t.Fatalf("Failed to pull %v: error %v, output %q", repoName, err, out) } if err := exec.Command(dockerBinary, "inspect", repos[0]).Run(); err != nil { t.Fatalf("Image %v was not pulled down", repos[0]) } for _, repo := range repos[1:] { if err := exec.Command(dockerBinary, "inspect", repo).Run(); err == nil { t.Fatalf("Image %v shouldn't have been pulled down", repo) } } logDone("pull - image with aliases") } // pulling library/hello-world should show verified message func TestPullVerified(t *testing.T) { // Image must be pulled from central repository to get verified message // unless keychain is manually updated to contain the daemon's sign key. verifiedName := "hello-world" defer deleteImages(verifiedName) // pull it expected := "The image you are pulling has been verified" pullCmd := exec.Command(dockerBinary, "pull", verifiedName) if out, exitCode, err := runCommandWithOutput(pullCmd); err != nil || !strings.Contains(out, expected) { if err != nil || exitCode != 0 { t.Skipf("pulling the '%s' image from the registry has failed: %s", verifiedName, err) } t.Fatalf("pulling a verified image failed. expected: %s\ngot: %s, %v", expected, out, err) } // pull it again pullCmd = exec.Command(dockerBinary, "pull", verifiedName) if out, exitCode, err := runCommandWithOutput(pullCmd); err != nil || strings.Contains(out, expected) { if err != nil || exitCode != 0 { t.Skipf("pulling the '%s' image from the registry has failed: %s", verifiedName, err) } t.Fatalf("pulling a verified image failed. 
unexpected verify message\ngot: %s, %v", out, err) } logDone("pull - pull verified") } // pulling an image from the central registry should work func TestPullImageFromCentralRegistry(t *testing.T) { defer deleteImages("hello-world") pullCmd := exec.Command(dockerBinary, "pull", "hello-world") if out, _, err := runCommandWithOutput(pullCmd); err != nil { t.Fatalf("pulling the hello-world image from the registry has failed: %s, %v", out, err) } logDone("pull - pull hello-world") } // pulling a non-existing image from the central registry should return a non-zero exit code func TestPullNonExistingImage(t *testing.T) { pullCmd := exec.Command(dockerBinary, "pull", "fooblahblah1234") if out, _, err := runCommandWithOutput(pullCmd); err == nil { t.Fatalf("expected non-zero exit status when pulling non-existing image: %s", out) } logDone("pull - pull fooblahblah1234 (non-existing image)") } // pulling an image from the central registry using official names should work // ensure all pulls result in the same image func TestPullImageOfficialNames(t *testing.T) { names := []string{ "docker.io/hello-world", "index.docker.io/hello-world", "library/hello-world", "docker.io/library/hello-world", "index.docker.io/library/hello-world", } for _, name := range names { pullCmd := exec.Command(dockerBinary, "pull", name) out, exitCode, err := runCommandWithOutput(pullCmd) if err != nil || exitCode != 0 { t.Errorf("pulling the '%s' image from the registry has failed: %s", name, err) continue } // ensure we don't have multiple image names. imagesCmd := exec.Command(dockerBinary, "images") out, _, err = runCommandWithOutput(imagesCmd) if err != nil { t.Errorf("listing images failed with errors: %v", err) } else if strings.Contains(out, name) { t.Errorf("images should not have listed '%s'", name) } } logDone("pull - pull official names") } docker-1.6.2/integration-cli/docker_cli_start_test.go0000644000175000017500000002360212524223634022330 0ustar tianontianonpackage main import ( "fmt" "os/exec" "strings" "testing" "time" ) // Regression test for https://github.com/docker/docker/issues/7843 func TestStartAttachReturnsOnError(t *testing.T) { defer deleteAllContainers() dockerCmd(t, "run", "-d", "--name", "test", "busybox") dockerCmd(t, "wait", "test") // Expect this to fail because the above container is stopped, this is what we want if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "test2", "--link", "test:test", "busybox")); err == nil { t.Fatal("Expected error but got none") } ch := make(chan struct{}) go func() { // Attempt to start attached to the container that won't start // This should return an error immediately since the container can't be started if _, err := runCommand(exec.Command(dockerBinary, "start", "-a", "test2")); err == nil { t.Fatal("Expected error but got none") } close(ch) }() select { case <-ch: case <-time.After(time.Second): t.Fatalf("Attach did not exit properly") } logDone("start - error on start with attach exits") } // gh#8555: Exit code should be passed through when using start -a func TestStartAttachCorrectExitCode(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "sleep 2; exit 1") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) } out = stripTrailingCharacters(out) // make sure the container has exited before trying the "start -a" waitCmd := exec.Command(dockerBinary, "wait", out) if _, _, err = 
runCommandWithOutput(waitCmd); err != nil { t.Fatalf("Failed to wait on container: %v", err) } startCmd := exec.Command(dockerBinary, "start", "-a", out) startOut, exitCode, err := runCommandWithOutput(startCmd) if err != nil && !strings.Contains(err.Error(), "exit status 1") { t.Fatalf("start command failed unexpectedly with error: %v, output: %q", err, startOut) } if exitCode != 1 { t.Fatalf("start -a did not respond with proper exit code: expected 1, got %d", exitCode) } logDone("start - correct exit code returned with -a") } func TestStartSilentAttach(t *testing.T) { defer deleteAllContainers() name := "teststartattachcorrectexitcode" runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "echo", "test") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatalf("failed to run container: %v, output: %q", err, out) } // make sure the container has exited before trying the "start -a" waitCmd := exec.Command(dockerBinary, "wait", name) if _, _, err = runCommandWithOutput(waitCmd); err != nil { t.Fatalf("wait command failed with error: %v", err) } startCmd := exec.Command(dockerBinary, "start", "-a", name) startOut, _, err := runCommandWithOutput(startCmd) if err != nil { t.Fatalf("start command failed unexpectedly with error: %v, output: %q", err, startOut) } if expected := "test\n"; startOut != expected { t.Fatalf("start -a produced unexpected output: expected %q, got %q", expected, startOut) } logDone("start - don't echo container ID when attaching") } func TestStartRecordError(t *testing.T) { defer deleteAllContainers() // when container runs successfully, we should not have state.Error dockerCmd(t, "run", "-d", "-p", "9999:9999", "--name", "test", "busybox", "top") stateErr, err := inspectField("test", "State.Error") if err != nil { t.Fatalf("Failed to inspect %q state's error, got error %q", "test", err) } if stateErr != "" { t.Fatalf("Expected to not have state error but got state.Error(%q)", stateErr) } // Expect this to fail and record an error because of the port conflict out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "test2", "-p", "9999:9999", "busybox", "top")) if err == nil { t.Fatalf("Expected error but got none, output %q", out) } stateErr, err = inspectField("test2", "State.Error") if err != nil { t.Fatalf("Failed to inspect %q state's error, got error %q", "test2", err) } expected := "port is already allocated" if stateErr == "" || !strings.Contains(stateErr, expected) { t.Fatalf("State.Error(%q) does not include %q", stateErr, expected) } // Expect the conflict to be resolved when we stop the initial container dockerCmd(t, "stop", "test") dockerCmd(t, "start", "test2") stateErr, err = inspectField("test2", "State.Error") if err != nil { t.Fatalf("Failed to inspect %q state's error, got error %q", "test", err) } if stateErr != "" { t.Fatalf("Expected to not have state error but got state.Error(%q)", stateErr) } logDone("start - set state error when start is unsuccessful") } // gh#8726: a failed Start() breaks --volumes-from on subsequent Start()'s func TestStartVolumesFromFailsCleanly(t *testing.T) { defer deleteAllContainers() // Create the first data volume dockerCmd(t, "run", "-d", "--name", "data_before", "-v", "/foo", "busybox") // Expect this to fail because the data_after container doesn't exist yet if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "consumer", "--volumes-from", "data_before", "--volumes-from", "data_after", "busybox")); err == nil {
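// What "fails cleanly" means here, sketched as illustrative CLI steps: the
// failed run above must not corrupt the stored volume config, so once
// data_after exists the container can be started normally (this is what the
// rest of the test verifies):
//
//	docker start consumer                             # fails: data_after missing
//	docker run -d --name data_after -v /bar busybox   # create the missing source
//	docker start consumer                             # now succeeds with both volumes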
t.Fatal("Expected error but got none") } // Create the second data volume dockerCmd(t, "run", "-d", "--name", "data_after", "-v", "/bar", "busybox") // Now, all the volumes should be there dockerCmd(t, "start", "consumer") // Check that we have the volumes we want out, _, _ := dockerCmd(t, "inspect", "--format='{{ len .Volumes }}'", "consumer") n_volumes := strings.Trim(out, " \r\n'") if n_volumes != "2" { t.Fatalf("Missing volumes: expected 2, got %s", n_volumes) } logDone("start - missing containers in --volumes-from did not affect subsequent runs") } func TestStartPausedContainer(t *testing.T) { defer deleteAllContainers() defer unpauseAllContainers() runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "top") if out, _, err := runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } runCmd = exec.Command(dockerBinary, "pause", "testing") if out, _, err := runCommandWithOutput(runCmd); err != nil { t.Fatal(out, err) } runCmd = exec.Command(dockerBinary, "start", "testing") if out, _, err := runCommandWithOutput(runCmd); err == nil || !strings.Contains(out, "Cannot start a paused container, try unpause instead.") { t.Fatalf("an error should have been shown that you cannot start a paused container: %s\n%v", out, err) } logDone("start - error should show if trying to start paused container") } func TestStartMultipleContainers(t *testing.T) { defer deleteAllContainers() // run a container named 'parent' and create two containers linked to `parent` cmd := exec.Command(dockerBinary, "run", "-d", "--name", "parent", "busybox", "top") if out, _, err := runCommandWithOutput(cmd); err != nil { t.Fatal(out, err) } for _, container := range []string{"child_first", "child_second"} { cmd = exec.Command(dockerBinary, "create", "--name", container, "--link", "parent:parent", "busybox", "top") if out, _, err := runCommandWithOutput(cmd); err != nil { t.Fatal(out, err) } } // stop 'parent' container cmd = exec.Command(dockerBinary, "stop", "parent") if out, _, err := runCommandWithOutput(cmd); err != nil { t.Fatal(out, err) } cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.State.Running}}", "parent") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(out, err) } out = strings.Trim(out, "\r\n") if out != "false" { t.Fatal("Container should be stopped") } // start all three containers; `child_first` starts first and should fail, // then 'parent' starts, and then 'child_second' cmd = exec.Command(dockerBinary, "start", "child_first", "parent", "child_second") out, _, err = runCommandWithOutput(cmd) if !strings.Contains(out, "Cannot start container child_first") || err == nil { t.Fatal("Expected error but got none") } for container, expected := range map[string]string{"parent": "true", "child_first": "false", "child_second": "true"} { cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.State.Running}}", container) out, _, err = runCommandWithOutput(cmd) if err != nil { t.Fatal(out, err) } out = strings.Trim(out, "\r\n") if out != expected { t.Fatal("Container running state wrong") } } logDone("start - start multiple containers continue on one failed") } func TestStartAttachMultipleContainers(t *testing.T) { var cmd *exec.Cmd defer deleteAllContainers() // run multiple containers to test for _, container := range []string{"test1", "test2", "test3"} { cmd = exec.Command(dockerBinary, "run", "-d", "--name", container, "busybox", "top") if out, _, err := runCommandWithOutput(cmd); err != nil { t.Fatal(out, err) } } // stop all the containers
for _, container := range []string{"test1", "test2", "test3"} { cmd = exec.Command(dockerBinary, "stop", container) if out, _, err := runCommandWithOutput(cmd); err != nil { t.Fatal(out, err) } } // test start and attach multiple containers at once, expecting an error for _, option := range []string{"-a", "-i", "-ai"} { cmd = exec.Command(dockerBinary, "start", option, "test1", "test2", "test3") out, _, err := runCommandWithOutput(cmd) if !strings.Contains(out, "You cannot start and attach multiple containers at once.") || err == nil { t.Fatal("Expected error but got none") } } // confirm that all the containers are still stopped for container, expected := range map[string]string{"test1": "false", "test2": "false", "test3": "false"} { cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.State.Running}}", container) out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(out, err) } out = strings.Trim(out, "\r\n") if out != expected { t.Fatal("Container running state wrong") } } logDone("start - error on start and attach multiple containers at once") } docker-1.6.2/integration-cli/docker_cli_images_test.go0000644000175000017500000001307312524223634022441 0ustar tianontianonpackage main import ( "fmt" "os/exec" "reflect" "sort" "strings" "testing" "time" "github.com/docker/docker/pkg/common" ) func TestImagesEnsureImageIsListed(t *testing.T) { imagesCmd := exec.Command(dockerBinary, "images") out, _, err := runCommandWithOutput(imagesCmd) if err != nil { t.Fatalf("listing images failed with errors: %s, %v", out, err) } if !strings.Contains(out, "busybox") { t.Fatal("images should've listed busybox") } logDone("images - busybox should be listed") } func TestImagesOrderedByCreationDate(t *testing.T) { defer deleteImages("order:test_a") defer deleteImages("order:test_c") defer deleteImages("order:test_b") id1, err := buildImage("order:test_a", `FROM scratch MAINTAINER dockerio1`, true) if err != nil { t.Fatal(err) } time.Sleep(time.Second) id2, err := buildImage("order:test_c", `FROM scratch MAINTAINER dockerio2`, true) if err != nil { t.Fatal(err) } time.Sleep(time.Second) id3, err := buildImage("order:test_b", `FROM scratch MAINTAINER dockerio3`, true) if err != nil { t.Fatal(err) } out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "images", "-q", "--no-trunc")) if err != nil { t.Fatalf("listing images failed with errors: %s, %v", out, err) } imgs := strings.Split(out, "\n") if imgs[0] != id3 { t.Fatalf("First image must be %s, got %s", id3, imgs[0]) } if imgs[1] != id2 { t.Fatalf("Second image must be %s, got %s", id2, imgs[1]) } if imgs[2] != id1 { t.Fatalf("Third image must be %s, got %s", id1, imgs[2]) } logDone("images - ordering by creation date") } func TestImagesErrorWithInvalidFilterNameTest(t *testing.T) { imagesCmd := exec.Command(dockerBinary, "images", "-f", "FOO=123") out, _, err := runCommandWithOutput(imagesCmd) if !strings.Contains(out, "Invalid filter") { t.Fatalf("error should occur when listing images with invalid filter name FOO, %s, %v", out, err) } logDone("images - invalid filter name check working") } func TestImagesFilterLabel(t *testing.T) { imageName1 := "images_filter_test1" imageName2 := "images_filter_test2" imageName3 := "images_filter_test3" defer deleteAllContainers() defer deleteImages(imageName1) defer deleteImages(imageName2) defer deleteImages(imageName3) image1ID, err := buildImage(imageName1, `FROM scratch LABEL match me`, true) if err != nil { t.Fatal(err) } image2ID, err := buildImage(imageName2, `FROM
scratch LABEL match="me too"`, true) if err != nil { t.Fatal(err) } image3ID, err := buildImage(imageName3, `FROM scratch LABEL nomatch me`, true) if err != nil { t.Fatal(err) } cmd := exec.Command(dockerBinary, "images", "--no-trunc", "-q", "-f", "label=match") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(out, err) } out = strings.TrimSpace(out) if (!strings.Contains(out, image1ID) && !strings.Contains(out, image2ID)) || strings.Contains(out, image3ID) { t.Fatalf("Expected ids %s,%s got %s", image1ID, image2ID, out) } cmd = exec.Command(dockerBinary, "images", "--no-trunc", "-q", "-f", "label=match=me too") out, _, err = runCommandWithOutput(cmd) if err != nil { t.Fatal(out, err) } out = strings.TrimSpace(out) if out != image2ID { t.Fatalf("Expected %s got %s", image2ID, out) } logDone("images - filter label") } func TestImagesFilterWhiteSpaceTrimmingAndLowerCasingWorking(t *testing.T) { imageName := "images_filter_test" defer deleteAllContainers() defer deleteImages(imageName) buildImage(imageName, `FROM scratch RUN touch /test/foo RUN touch /test/bar RUN touch /test/baz`, true) filters := []string{ "dangling=true", "Dangling=true", " dangling=true", "dangling=true ", "dangling = true", } imageListings := make([][]string, 5, 5) for idx, filter := range filters { cmd := exec.Command(dockerBinary, "images", "-q", "-f", filter) out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatal(err) } listing := strings.Split(out, "\n") sort.Strings(listing) imageListings[idx] = listing } for idx, listing := range imageListings { if idx < 4 && !reflect.DeepEqual(listing, imageListings[idx+1]) { for idx, errListing := range imageListings { fmt.Printf("out %d", idx) for _, image := range errListing { fmt.Print(image) } fmt.Print("") } t.Fatalf("All output must be the same") } } logDone("images - white space trimming and lower casing") } func TestImagesEnsureDanglingImageOnlyListedOnce(t *testing.T) { defer deleteAllContainers() // create container 1 c := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(c) if err != nil { t.Fatalf("error running busybox: %s, %v", out, err) } containerId1 := strings.TrimSpace(out) // tag as foobox c = exec.Command(dockerBinary, "commit", containerId1, "foobox") out, _, err = runCommandWithOutput(c) if err != nil { t.Fatalf("error tagging foobox: %s", err) } imageId := common.TruncateID(strings.TrimSpace(out)) defer deleteImages(imageId) // overwrite the tag, making the previous image dangling c = exec.Command(dockerBinary, "tag", "-f", "busybox", "foobox") out, _, err = runCommandWithOutput(c) if err != nil { t.Fatalf("error tagging foobox: %s", err) } defer deleteImages("foobox") c = exec.Command(dockerBinary, "images", "-q", "-f", "dangling=true") out, _, err = runCommandWithOutput(c) if err != nil { t.Fatalf("listing images failed with errors: %s, %v", out, err) } if e, a := 1, strings.Count(out, imageId); e != a { t.Fatalf("expected 1 dangling image, got %d: %s", a, out) } logDone("images - dangling image only listed once") } docker-1.6.2/integration-cli/docker_cli_attach_unix_test.go0000644000175000017500000000536012524223634023503 0ustar tianontianon// +build !windows package main import ( "os/exec" "strings" "testing" "time" "github.com/kr/pty" ) // #9860 func TestAttachClosedOnContainerStop(t *testing.T) { defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-dti", "busybox", "sleep", "2") out, _, err := runCommandWithOutput(cmd) if err != nil { t.Fatalf("failed to start 
container: %v (%v)", out, err) } id := stripTrailingCharacters(out) if err := waitRun(id); err != nil { t.Fatal(err) } done := make(chan struct{}) go func() { defer close(done) _, tty, err := pty.Open() if err != nil { t.Fatalf("could not open pty: %v", err) } attachCmd := exec.Command(dockerBinary, "attach", id) attachCmd.Stdin = tty attachCmd.Stdout = tty attachCmd.Stderr = tty if err := attachCmd.Run(); err != nil { t.Fatalf("attach returned error %s", err) } }() waitCmd := exec.Command(dockerBinary, "wait", id) if out, _, err = runCommandWithOutput(waitCmd); err != nil { t.Fatalf("error thrown while waiting for container: %s, %v", out, err) } select { case <-done: case <-time.After(attachWait): t.Fatal("timed out without attach returning") } logDone("attach - return after container finished") } func TestAttachAfterDetach(t *testing.T) { defer deleteAllContainers() name := "detachtest" cpty, tty, err := pty.Open() if err != nil { t.Fatalf("Could not open pty: %v", err) } cmd := exec.Command(dockerBinary, "run", "-ti", "--name", name, "busybox") cmd.Stdin = tty cmd.Stdout = tty cmd.Stderr = tty detached := make(chan struct{}) go func() { if err := cmd.Run(); err != nil { t.Fatalf("attach returned error %s", err) } close(detached) }() time.Sleep(500 * time.Millisecond) if err := waitRun(name); err != nil { t.Fatal(err) } cpty.Write([]byte{16}) time.Sleep(100 * time.Millisecond) cpty.Write([]byte{17}) <-detached cpty, tty, err = pty.Open() if err != nil { t.Fatalf("Could not open pty: %v", err) } cmd = exec.Command(dockerBinary, "attach", name) cmd.Stdin = tty cmd.Stdout = tty cmd.Stderr = tty if err := cmd.Start(); err != nil { t.Fatal(err) } bytes := make([]byte, 10) var nBytes int readErr := make(chan error, 1) go func() { time.Sleep(500 * time.Millisecond) cpty.Write([]byte("\n")) time.Sleep(500 * time.Millisecond) nBytes, err = cpty.Read(bytes) cpty.Close() readErr <- err }() select { case err := <-readErr: if err != nil { t.Fatal(err) } case <-time.After(2 * time.Second): t.Fatal("timeout waiting for attach read") } if err := cmd.Wait(); err != nil { t.Fatal(err) } if !strings.Contains(string(bytes[:nBytes]), "/ #") { t.Fatalf("failed to get a new prompt. 
got %s", string(bytes[:nBytes])) } logDone("attach - reconnect after detaching") } docker-1.6.2/integration-cli/docker_cli_cp_test.go0000644000175000017500000003556112524223634021604 0ustar tianontianonpackage main import ( "bytes" "fmt" "io/ioutil" "os" "os/exec" "path" "path/filepath" "strings" "testing" ) const ( cpTestPathParent = "/some" cpTestPath = "/some/path" cpTestName = "test" cpFullPath = "/some/path/test" cpContainerContents = "holla, i am the container" cpHostContents = "hello, i am the host" ) // Test for #5656 // Check that garbage paths don't escape the container's rootfs func TestCpGarbagePath(t *testing.T) { out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) if err != nil || exitCode != 0 { t.Fatal("failed to create a container", out, err) } cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) out, _, err = dockerCmd(t, "wait", cleanedContainerID) if err != nil || stripTrailingCharacters(out) != "0" { t.Fatal("failed to set up container", out, err) } if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { t.Fatal(err) } hostFile, err := os.Create(cpFullPath) if err != nil { t.Fatal(err) } defer hostFile.Close() defer os.RemoveAll(cpTestPathParent) fmt.Fprintf(hostFile, "%s", cpHostContents) tmpdir, err := ioutil.TempDir("", "docker-integration") if err != nil { t.Fatal(err) } tmpname := filepath.Join(tmpdir, cpTestName) defer os.RemoveAll(tmpdir) path := path.Join("../../../../../../../../../../../../", cpFullPath) _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir) if err != nil { t.Fatalf("couldn't copy from garbage path: %s:%s %s", cleanedContainerID, path, err) } file, _ := os.Open(tmpname) defer file.Close() test, err := ioutil.ReadAll(file) if err != nil { t.Fatal(err) } if string(test) == cpHostContents { t.Errorf("output matched host file -- garbage path can escape container rootfs") } if string(test) != cpContainerContents { t.Errorf("output doesn't match the input for garbage path") } logDone("cp - garbage paths relative to container's rootfs") } // Check that relative paths are relative to the container's rootfs func TestCpRelativePath(t *testing.T) { out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) if err != nil || exitCode != 0 { t.Fatal("failed to create a container", out, err) } cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) out, _, err = dockerCmd(t, "wait", cleanedContainerID) if err != nil || stripTrailingCharacters(out) != "0" { t.Fatal("failed to set up container", out, err) } if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { t.Fatal(err) } hostFile, err := os.Create(cpFullPath) if err != nil { t.Fatal(err) } defer hostFile.Close() defer os.RemoveAll(cpTestPathParent) fmt.Fprintf(hostFile, "%s", cpHostContents) tmpdir, err := ioutil.TempDir("", "docker-integration") if err != nil { t.Fatal(err) } tmpname := filepath.Join(tmpdir, cpTestName) defer os.RemoveAll(tmpdir) var relPath string if path.IsAbs(cpFullPath) { // normally this is `filepath.Rel("/", cpFullPath)` but we cannot // get this unix-path manipulation on windows with filepath. 
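// A minimal sketch of the filepath.Rel equivalent mentioned in the comment
// above, for a unix-only build (kept out of the real code for the portability
// reason just described):
//
//	relPath, err := filepath.Rel("/", cpFullPath) // "some/path/test"
//	if err != nil {
//		t.Fatal(err)
//	}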
relPath = cpFullPath[1:] } else { t.Fatalf("path %s was assumed to be an absolute path", cpFullPath) } _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+relPath, tmpdir) if err != nil { t.Fatalf("couldn't copy from relative path: %s:%s %s", cleanedContainerID, relPath, err) } file, _ := os.Open(tmpname) defer file.Close() test, err := ioutil.ReadAll(file) if err != nil { t.Fatal(err) } if string(test) == cpHostContents { t.Errorf("output matched host file -- relative path can escape container rootfs") } if string(test) != cpContainerContents { t.Errorf("output doesn't match the input for relative path") } logDone("cp - relative paths relative to container's rootfs") } // Check that absolute paths are relative to the container's rootfs func TestCpAbsolutePath(t *testing.T) { out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) if err != nil || exitCode != 0 { t.Fatal("failed to create a container", out, err) } cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) out, _, err = dockerCmd(t, "wait", cleanedContainerID) if err != nil || stripTrailingCharacters(out) != "0" { t.Fatal("failed to set up container", out, err) } if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { t.Fatal(err) } hostFile, err := os.Create(cpFullPath) if err != nil { t.Fatal(err) } defer hostFile.Close() defer os.RemoveAll(cpTestPathParent) fmt.Fprintf(hostFile, "%s", cpHostContents) tmpdir, err := ioutil.TempDir("", "docker-integration") if err != nil { t.Fatal(err) } tmpname := filepath.Join(tmpdir, cpTestName) defer os.RemoveAll(tmpdir) path := cpFullPath _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir) if err != nil { t.Fatalf("couldn't copy from absolute path: %s:%s %s", cleanedContainerID, path, err) } file, _ := os.Open(tmpname) defer file.Close() test, err := ioutil.ReadAll(file) if err != nil { t.Fatal(err) } if string(test) == cpHostContents { t.Errorf("output matched host file -- absolute path can escape container rootfs") } if string(test) != cpContainerContents { t.Errorf("output doesn't match the input for absolute path") } logDone("cp - absolute paths relative to container's rootfs") } // Test for #5619 // Check that absolute symlinks are still relative to the container's rootfs func TestCpAbsoluteSymlink(t *testing.T) { out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" container_path") if err != nil || exitCode != 0 { t.Fatal("failed to create a container", out, err) } cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) out, _, err = dockerCmd(t, "wait", cleanedContainerID) if err != nil || stripTrailingCharacters(out) != "0" { t.Fatal("failed to set up container", out, err) } if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { t.Fatal(err) } hostFile, err := os.Create(cpFullPath) if err != nil { t.Fatal(err) } defer hostFile.Close() defer os.RemoveAll(cpTestPathParent) fmt.Fprintf(hostFile, "%s", cpHostContents) tmpdir, err := ioutil.TempDir("", "docker-integration") if err != nil { t.Fatal(err) } tmpname := filepath.Join(tmpdir, cpTestName) defer os.RemoveAll(tmpdir) path := path.Join("/", "container_path") _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir) if err != nil { t.Fatalf("couldn't copy from absolute path: %s:%s %s", 
cleanedContainerID, path, err) } file, _ := os.Open(tmpname) defer file.Close() test, err := ioutil.ReadAll(file) if err != nil { t.Fatal(err) } if string(test) == cpHostContents { t.Errorf("output matched host file -- absolute symlink can escape container rootfs") } if string(test) != cpContainerContents { t.Errorf("output doesn't match the input for absolute symlink") } logDone("cp - absolute symlink relative to container's rootfs") } // Test for #5619 // Check that symlinks which are part of the resource path are still relative to the container's rootfs func TestCpSymlinkComponent(t *testing.T) { out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPath+" container_path") if err != nil || exitCode != 0 { t.Fatal("failed to create a container", out, err) } cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) out, _, err = dockerCmd(t, "wait", cleanedContainerID) if err != nil || stripTrailingCharacters(out) != "0" { t.Fatal("failed to set up container", out, err) } if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { t.Fatal(err) } hostFile, err := os.Create(cpFullPath) if err != nil { t.Fatal(err) } defer hostFile.Close() defer os.RemoveAll(cpTestPathParent) fmt.Fprintf(hostFile, "%s", cpHostContents) tmpdir, err := ioutil.TempDir("", "docker-integration") if err != nil { t.Fatal(err) } tmpname := filepath.Join(tmpdir, cpTestName) defer os.RemoveAll(tmpdir) path := path.Join("/", "container_path", cpTestName) _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir) if err != nil { t.Fatalf("couldn't copy from symlink path component: %s:%s %s", cleanedContainerID, path, err) } file, _ := os.Open(tmpname) defer file.Close() test, err := ioutil.ReadAll(file) if err != nil { t.Fatal(err) } if string(test) == cpHostContents { t.Errorf("output matched host file -- symlink path component can escape container rootfs") } if string(test) != cpContainerContents { t.Errorf("output doesn't match the input for symlink path component") } logDone("cp - symlink path components relative to container's rootfs") } // Check that cp with unprivileged user doesn't return any error func TestCpUnprivilegedUser(t *testing.T) { testRequires(t, UnixCli) // uses chmod/su: not available on windows out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName) if err != nil || exitCode != 0 { t.Fatal("failed to create a container", out, err) } cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) out, _, err = dockerCmd(t, "wait", cleanedContainerID) if err != nil || stripTrailingCharacters(out) != "0" { t.Fatal("failed to set up container", out, err) } tmpdir, err := ioutil.TempDir("", "docker-integration") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err = os.Chmod(tmpdir, 0777); err != nil { t.Fatal(err) } path := cpTestName _, _, err = runCommandWithOutput(exec.Command("su", "unprivilegeduser", "-c", dockerBinary+" cp "+cleanedContainerID+":"+path+" "+tmpdir)) if err != nil { t.Fatalf("couldn't copy with unprivileged user: %s:%s %s", cleanedContainerID, path, err) } logDone("cp - unprivileged user") } func TestCpVolumePath(t *testing.T) { testRequires(t, SameHostDaemon) tmpDir, err := ioutil.TempDir("", "cp-test-volumepath") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) outDir, err := ioutil.TempDir("", "cp-test-volumepath-out") if err != 
nil { t.Fatal(err) } defer os.RemoveAll(outDir) _, err = os.Create(tmpDir + "/test") if err != nil { t.Fatal(err) } out, exitCode, err := dockerCmd(t, "run", "-d", "-v", "/foo", "-v", tmpDir+"/test:/test", "-v", tmpDir+":/baz", "busybox", "/bin/sh", "-c", "touch /foo/bar") if err != nil || exitCode != 0 { t.Fatal("failed to create a container", out, err) } cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) out, _, err = dockerCmd(t, "wait", cleanedContainerID) if err != nil || stripTrailingCharacters(out) != "0" { t.Fatal("failed to set up container", out, err) } // Copy actual volume path _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/foo", outDir) if err != nil { t.Fatalf("couldn't copy from volume path: %s:%s %v", cleanedContainerID, "/foo", err) } stat, err := os.Stat(outDir + "/foo") if err != nil { t.Fatal(err) } if !stat.IsDir() { t.Fatal("expected copied content to be dir") } stat, err = os.Stat(outDir + "/foo/bar") if err != nil { t.Fatal(err) } if stat.IsDir() { t.Fatal("Expected file `bar` to be a file") } // Copy file nested in volume _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/foo/bar", outDir) if err != nil { t.Fatalf("couldn't copy from volume path: %s:%s %v", cleanedContainerID, "/foo", err) } stat, err = os.Stat(outDir + "/bar") if err != nil { t.Fatal(err) } if stat.IsDir() { t.Fatal("Expected file `bar` to be a file") } // Copy Bind-mounted dir _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/baz", outDir) if err != nil { t.Fatalf("couldn't copy from bind-mounted volume path: %s:%s %v", cleanedContainerID, "/baz", err) } stat, err = os.Stat(outDir + "/baz") if err != nil { t.Fatal(err) } if !stat.IsDir() { t.Fatal("Expected `baz` to be a dir") } // Copy file nested in bind-mounted dir _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/baz/test", outDir) fb, err := ioutil.ReadFile(outDir + "/baz/test") if err != nil { t.Fatal(err) } fb2, err := ioutil.ReadFile(tmpDir + "/test") if err != nil { t.Fatal(err) } if !bytes.Equal(fb, fb2) { t.Fatalf("Expected copied file to be duplicate of bind-mounted file") } // Copy bind-mounted file _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/test", outDir) fb, err = ioutil.ReadFile(outDir + "/test") if err != nil { t.Fatal(err) } fb2, err = ioutil.ReadFile(tmpDir + "/test") if err != nil { t.Fatal(err) } if !bytes.Equal(fb, fb2) { t.Fatalf("Expected copied file to be duplicate of bind-mounted file") } logDone("cp - volume path") } func TestCpToDot(t *testing.T) { out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test") if err != nil || exitCode != 0 { t.Fatal("failed to create a container", out, err) } cleanedContainerID := stripTrailingCharacters(out) defer deleteContainer(cleanedContainerID) out, _, err = dockerCmd(t, "wait", cleanedContainerID) if err != nil || stripTrailingCharacters(out) != "0" { t.Fatal("failed to set up container", out, err) } tmpdir, err := ioutil.TempDir("", "docker-integration") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) cwd, err := os.Getwd() if err != nil { t.Fatal(err) } defer os.Chdir(cwd) if err := os.Chdir(tmpdir); err != nil { t.Fatal(err) } _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/test", ".") if err != nil { t.Fatalf("couldn't docker cp to \".\" path: %s", err) } content, err := ioutil.ReadFile("./test") if string(content) != "lololol\n" { t.Fatalf("Wrong content in copied file %q, should be %q", content, "lololol\n") } logDone("cp - to dot path") } func 
TestCpToStdout(t *testing.T) { out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test") if err != nil || exitCode != 0 { t.Fatalf("failed to create a container:%s\n%s", out, err) } cID := stripTrailingCharacters(out) defer deleteContainer(cID) out, _, err = dockerCmd(t, "wait", cID) if err != nil || stripTrailingCharacters(out) != "0" { t.Fatalf("failed to set up container:%s\n%s", out, err) } out, _, err = runCommandPipelineWithOutput( exec.Command(dockerBinary, "cp", cID+":/test", "-"), exec.Command("tar", "-vtf", "-")) if err != nil { t.Fatalf("Failed to run commands: %s", err) } if !strings.Contains(out, "test") || !strings.Contains(out, "-rw") { t.Fatalf("Missing file from tar TOC:\n%s", out) } logDone("cp - to stdout") } docker-1.6.2/integration-cli/requirements.go0000644000175000017500000000423712524223634020504 0ustar tianontianonpackage main import ( "encoding/json" "fmt" "log" "os/exec" "strings" "testing" ) type TestCondition func() bool type TestRequirement struct { Condition TestCondition SkipMessage string } // List test requirements var ( daemonExecDriver string SameHostDaemon = TestRequirement{ func() bool { return isLocalDaemon }, "Test requires the docker daemon to run on the same machine as the CLI", } UnixCli = TestRequirement{ func() bool { return isUnixCli }, "Test requires posix utilities or functionality to run.", } ExecSupport = TestRequirement{ func() bool { return supportsExec }, "Test requires 'docker exec' capabilities on the tested daemon.", } RegistryHosting = TestRequirement{ func() bool { // for now the registry binary is built only if we're running inside a // container through `make test`. Figure that out by testing if the // registry binary is in PATH. _, err := exec.LookPath(v2binary) return err == nil }, fmt.Sprintf("Test requires an environment that can host %s on the same host", v2binary), } NativeExecDriver = TestRequirement{ func() bool { if daemonExecDriver == "" { // get daemon info body, err := sockRequest("GET", "/info", nil) if err != nil { log.Fatalf("sockRequest failed for /info: %v", err) } type infoJSON struct { ExecutionDriver string } var info infoJSON if err = json.Unmarshal(body, &info); err != nil { log.Fatalf("unable to unmarshal body: %v", err) } daemonExecDriver = info.ExecutionDriver } return strings.HasPrefix(daemonExecDriver, "native") }, "Test requires the native (libcontainer) exec driver.", } NotOverlay = TestRequirement{ func() bool { cmd := exec.Command("grep", "^overlay / overlay", "/proc/mounts") if err := cmd.Run(); err != nil { return true } return false }, "Test requires that the underlying root filesystem not be backed by overlay.", } ) // testRequires checks if the environment satisfies the requirements // for the test to run, or skips the test.
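// Example usage of testRequires (a hypothetical test; the requirement values
// come from the list above):
//
//	func TestSomethingNeedingExec(t *testing.T) {
//		testRequires(t, SameHostDaemon, ExecSupport)
//		// the body only runs when every condition holds; otherwise t.Skip fires
//	}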
func testRequires(t *testing.T, requirements ...TestRequirement) { for _, r := range requirements { if !r.Condition() { t.Skip(r.SkipMessage) } } } docker-1.6.2/integration-cli/docker_cli_kill_test.go0000644000175000017500000000431112524223634022122 0ustar tianontianonpackage main import ( "os/exec" "strings" "testing" ) func TestKillContainer(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "sleep 10") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } cleanedContainerID := stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) if out, _, err = runCommandWithOutput(inspectCmd); err != nil { t.Fatalf("out should've been a container id: %s, %v", out, err) } killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) if out, _, err = runCommandWithOutput(killCmd); err != nil { t.Fatalf("failed to kill container: %s, %v", out, err) } listRunningContainersCmd := exec.Command(dockerBinary, "ps", "-q") out, _, err = runCommandWithOutput(listRunningContainersCmd) if err != nil { t.Fatalf("failed to list running containers: %s, %v", out, err) } if strings.Contains(out, cleanedContainerID) { t.Fatal("killed container is still running") } deleteContainer(cleanedContainerID) logDone("kill - kill container running sleep 10") } func TestKillDifferentUserContainer(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-u", "daemon", "-d", "busybox", "sh", "-c", "sleep 10") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } cleanedContainerID := stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) if out, _, err = runCommandWithOutput(inspectCmd); err != nil { t.Fatalf("out should've been a container id: %s, %v", out, err) } killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) if out, _, err = runCommandWithOutput(killCmd); err != nil { t.Fatalf("failed to kill container: %s, %v", out, err) } listRunningContainersCmd := exec.Command(dockerBinary, "ps", "-q") out, _, err = runCommandWithOutput(listRunningContainersCmd) if err != nil { t.Fatalf("failed to list running containers: %s, %v", out, err) } if strings.Contains(out, cleanedContainerID) { t.Fatal("killed container is still running") } deleteContainer(cleanedContainerID) logDone("kill - kill container running sleep 10 from a different user") } docker-1.6.2/integration-cli/docker_test_vars_cli.go0000644000175000017500000000020512524223634022140 0ustar tianontianon// +build !daemon package main const ( // tests should not assume the daemon runs on the same machine as the CLI isLocalDaemon = false ) docker-1.6.2/integration-cli/test_vars_noexec.go0000644000175000017500000000020012524223634021326 0ustar tianontianon// +build test_no_exec package main const ( // indicates whether the tested docker daemon supports 'docker exec' supportsExec = false ) docker-1.6.2/integration-cli/utils.go0000644000175000017500000001775312524223634017124 0ustar tianontianonpackage main import ( "bytes" "encoding/json" "errors" "fmt" "io" "math/rand" "net/http" "net/http/httptest" "os" "os/exec" "path" "reflect" "strings" "syscall" "time" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) func getExitCode(err error) (int, error) { exitCode := 0 if exiterr, ok := err.(*exec.ExitError); ok { // use the comma-ok form so a non-WaitStatus Sys() cannot panic if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { return procExit.ExitStatus(), nil } } return exitCode, fmt.Errorf("failed to get exit code") } func
processExitCode(err error) (exitCode int) { if err != nil { var exiterr error if exitCode, exiterr = getExitCode(err); exiterr != nil { // TODO: Fix this so we check the error's text. // we've failed to retrieve exit code, so we set it to 127 exitCode = 127 } } return } func IsKilled(err error) bool { if exitErr, ok := err.(*exec.ExitError); ok { sys := exitErr.ProcessState.Sys() status, ok := sys.(syscall.WaitStatus) if !ok { return false } return status.Signaled() && status.Signal() == os.Kill } return false } func runCommandWithOutput(cmd *exec.Cmd) (output string, exitCode int, err error) { exitCode = 0 out, err := cmd.CombinedOutput() exitCode = processExitCode(err) output = string(out) return } func runCommandWithStdoutStderr(cmd *exec.Cmd) (stdout string, stderr string, exitCode int, err error) { var ( stderrBuffer, stdoutBuffer bytes.Buffer ) exitCode = 0 cmd.Stderr = &stderrBuffer cmd.Stdout = &stdoutBuffer err = cmd.Run() exitCode = processExitCode(err) stdout = stdoutBuffer.String() stderr = stderrBuffer.String() return } func runCommandWithOutputForDuration(cmd *exec.Cmd, duration time.Duration) (output string, exitCode int, timedOut bool, err error) { var outputBuffer bytes.Buffer if cmd.Stdout != nil { err = errors.New("cmd.Stdout already set") return } cmd.Stdout = &outputBuffer if cmd.Stderr != nil { err = errors.New("cmd.Stderr already set") return } cmd.Stderr = &outputBuffer done := make(chan error) go func() { exitErr := cmd.Run() exitCode = processExitCode(exitErr) done <- exitErr }() select { case <-time.After(duration): killErr := cmd.Process.Kill() if killErr != nil { fmt.Printf("failed to kill (pid=%d): %v\n", cmd.Process.Pid, killErr) } timedOut = true break case err = <-done: break } output = outputBuffer.String() return } var ErrCmdTimeout = fmt.Errorf("command timed out") func runCommandWithOutputAndTimeout(cmd *exec.Cmd, timeout time.Duration) (output string, exitCode int, err error) { var timedOut bool output, exitCode, timedOut, err = runCommandWithOutputForDuration(cmd, timeout) if timedOut { err = ErrCmdTimeout } return } func runCommand(cmd *exec.Cmd) (exitCode int, err error) { exitCode = 0 err = cmd.Run() exitCode = processExitCode(err) return } func runCommandPipelineWithOutput(cmds ...*exec.Cmd) (output string, exitCode int, err error) { if len(cmds) < 2 { return "", 0, errors.New("pipeline does not have multiple cmds") } // connect stdin of each cmd to stdout pipe of previous cmd for i, cmd := range cmds { if i > 0 { prevCmd := cmds[i-1] cmd.Stdin, err = prevCmd.StdoutPipe() if err != nil { return "", 0, fmt.Errorf("cannot set stdout pipe for %s: %v", cmd.Path, err) } } } // start all cmds except the last for _, cmd := range cmds[:len(cmds)-1] { if err = cmd.Start(); err != nil { return "", 0, fmt.Errorf("starting %s failed with error: %v", cmd.Path, err) } } defer func() { // wait all cmds except the last to release their resources for _, cmd := range cmds[:len(cmds)-1] { cmd.Wait() } }() // wait on last cmd return runCommandWithOutput(cmds[len(cmds)-1]) } func logDone(message string) { fmt.Printf("[PASSED]: %s\n", message) } func stripTrailingCharacters(target string) string { return strings.TrimSpace(target) } func unmarshalJSON(data []byte, result interface{}) error { err := json.Unmarshal(data, result) if err != nil { return err } return nil } func convertSliceOfStringsToMap(input []string) map[string]struct{} { output := make(map[string]struct{}) for _, v := range input { output[v] = struct{}{} } return output } func waitForContainer(contID 
string, args ...string) error { args = append([]string{"run", "--name", contID}, args...) cmd := exec.Command(dockerBinary, args...) if _, err := runCommand(cmd); err != nil { return err } if err := waitRun(contID); err != nil { return err } return nil } func waitRun(contID string) error { return waitInspect(contID, "{{.State.Running}}", "true", 5) } func waitInspect(name, expr, expected string, timeout int) error { after := time.After(time.Duration(timeout) * time.Second) for { cmd := exec.Command(dockerBinary, "inspect", "-f", expr, name) out, _, err := runCommandWithOutput(cmd) if err != nil { return fmt.Errorf("error executing docker inspect: %v", err) } out = strings.TrimSpace(out) if out == expected { break } select { case <-after: return fmt.Errorf("condition \"%q == %q\" not true in time", out, expected) default: } time.Sleep(100 * time.Millisecond) } return nil } func compareDirectoryEntries(e1 []os.FileInfo, e2 []os.FileInfo) error { var ( e1Entries = make(map[string]struct{}) e2Entries = make(map[string]struct{}) ) for _, e := range e1 { e1Entries[e.Name()] = struct{}{} } for _, e := range e2 { e2Entries[e.Name()] = struct{}{} } if !reflect.DeepEqual(e1Entries, e2Entries) { return fmt.Errorf("entries differ") } return nil } func ListTar(f io.Reader) ([]string, error) { tr := tar.NewReader(f) var entries []string for { th, err := tr.Next() if err == io.EOF { // end of tar archive return entries, nil } if err != nil { return entries, err } entries = append(entries, th.Name) } } type FileServer struct { *httptest.Server } func fileServer(files map[string]string) (*FileServer, error) { var handler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) { if filePath, found := files[r.URL.Path]; found { http.ServeFile(w, r, filePath) } else { http.Error(w, http.StatusText(404), 404) } } for _, file := range files { if _, err := os.Stat(file); err != nil { return nil, err } } server := httptest.NewServer(handler) return &FileServer{ Server: server, }, nil } func copyWithCP(source, target string) error { copyCmd := exec.Command("cp", "-rp", source, target) out, exitCode, err := runCommandWithOutput(copyCmd) if err != nil || exitCode != 0 { return fmt.Errorf("failed to copy: error: %q ,output: %q", err, out) } return nil } func makeRandomString(n int) string { // make a really long string letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") b := make([]byte, n) r := rand.New(rand.NewSource(time.Now().UTC().UnixNano())) for i := range b { b[i] = letters[r.Intn(len(letters))] } return string(b) } // randomUnixTmpDirPath provides a temporary unix path with rand string appended. // does not create or checks if it exists. func randomUnixTmpDirPath(s string) string { return path.Join("/tmp", fmt.Sprintf("%s.%s", s, makeRandomString(10))) } // Reads chunkSize bytes from reader after every interval. // Returns total read bytes. func consumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, stop chan bool) (n int, err error) { buffer := make([]byte, chunkSize) for { select { case <-stop: return default: var readBytes int readBytes, err = reader.Read(buffer) n += readBytes if err != nil { if err == io.EOF { err = nil } return } time.Sleep(interval) } } } // Parses 'procCgroupData', which is output of '/proc//cgroup', and returns // a map which cgroup name as key and path as value. 
func parseCgroupPaths(procCgroupData string) map[string]string { cgroupPaths := map[string]string{} for _, line := range strings.Split(procCgroupData, "\n") { parts := strings.Split(line, ":") if len(parts) != 3 { continue } cgroupPaths[parts[1]] = parts[2] } return cgroupPaths } docker-1.6.2/integration-cli/docker_cli_diff_test.go0000644000175000017500000000622512524223634022105 0ustar tianontianonpackage main import ( "os/exec" "strings" "testing" ) // ensure that an added file shows up in docker diff func TestDiffFilenameShownInOutput(t *testing.T) { containerCmd := `echo foo > /root/bar` runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", containerCmd) out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatalf("failed to start the container: %s, %v", out, err) } cleanCID := stripTrailingCharacters(out) diffCmd := exec.Command(dockerBinary, "diff", cleanCID) out, _, err = runCommandWithOutput(diffCmd) if err != nil { t.Fatalf("failed to run diff: %s %v", out, err) } found := false for _, line := range strings.Split(out, "\n") { if strings.Contains("A /root/bar", line) { found = true break } } if !found { t.Errorf("couldn't find the new file in docker diff's output: %v", out) } deleteContainer(cleanCID) logDone("diff - check if created file shows up") } // test to ensure GH #3840 doesn't occur any more func TestDiffEnsureDockerinitFilesAreIgnored(t *testing.T) { // this is a list of files which shouldn't show up in `docker diff` dockerinitFiles := []string{"/etc/resolv.conf", "/etc/hostname", "/etc/hosts", "/.dockerinit", "/.dockerenv"} // we might not run into this problem from the first run, so start a few containers for i := 0; i < 20; i++ { containerCmd := `echo foo > /root/bar` runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", containerCmd) out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } cleanCID := stripTrailingCharacters(out) diffCmd := exec.Command(dockerBinary, "diff", cleanCID) out, _, err = runCommandWithOutput(diffCmd) if err != nil { t.Fatalf("failed to run diff: %s, %v", out, err) } deleteContainer(cleanCID) for _, filename := range dockerinitFiles { if strings.Contains(out, filename) { t.Errorf("found file which should've been ignored %v in diff output", filename) } } } logDone("diff - check if ignored files show up in diff") } func TestDiffEnsureOnlyKmsgAndPtmx(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sleep", "0") out, _, err := runCommandWithOutput(runCmd) if err != nil { t.Fatal(out, err) } cleanCID := stripTrailingCharacters(out) diffCmd := exec.Command(dockerBinary, "diff", cleanCID) out, _, err = runCommandWithOutput(diffCmd) if err != nil { t.Fatalf("failed to run diff: %s, %v", out, err) } deleteContainer(cleanCID) expected := map[string]bool{ "C /dev": true, "A /dev/full": true, // busybox "C /dev/ptmx": true, // libcontainer "A /dev/kmsg": true, // lxc "A /dev/fd": true, "A /dev/fuse": true, "A /dev/ptmx": true, "A /dev/null": true, "A /dev/random": true, "A /dev/stdout": true, "A /dev/stderr": true, "A /dev/tty1": true, "A /dev/stdin": true, "A /dev/tty": true, "A /dev/urandom": true, "A /dev/zero": true, } for _, line := range strings.Split(out, "\n") { if line != "" && !expected[line] { t.Errorf("%q is shown in the diff but shouldn't", line) } } logDone("diff - ensure that only kmsg and ptmx in diff") } docker-1.6.2/integration-cli/docker_cli_create_test.go0000644000175000017500000001766012524223634022445 0ustar tianontianonpackage main 
import ( "encoding/json" "os" "os/exec" "reflect" "testing" "time" "github.com/docker/docker/nat" ) // Make sure we can create a simple container with some args func TestCreateArgs(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "create", "busybox", "command", "arg1", "arg2", "arg with space") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatal(out, err) } cleanedContainerID := stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) out, _, err = runCommandWithOutput(inspectCmd) if err != nil { t.Fatalf("out should've been a container id: %s, %v", out, err) } containers := []struct { ID string Created time.Time Path string Args []string Image string }{} if err := json.Unmarshal([]byte(out), &containers); err != nil { t.Fatalf("Error inspecting the container: %s", err) } if len(containers) != 1 { t.Fatalf("Unexpected container count. Expected 0, received: %d", len(containers)) } c := containers[0] if c.Path != "command" { t.Fatalf("Unexpected container path. Expected command, received: %s", c.Path) } b := false expected := []string{"arg1", "arg2", "arg with space"} for i, arg := range expected { if arg != c.Args[i] { b = true break } } if len(c.Args) != len(expected) || b { t.Fatalf("Unexpected args. Expected %v, received: %v", expected, c.Args) } logDone("create - args") } // Make sure we can set hostconfig options too func TestCreateHostConfig(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "create", "-P", "busybox", "echo") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatal(out, err) } cleanedContainerID := stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) out, _, err = runCommandWithOutput(inspectCmd) if err != nil { t.Fatalf("out should've been a container id: %s, %v", out, err) } containers := []struct { HostConfig *struct { PublishAllPorts bool } }{} if err := json.Unmarshal([]byte(out), &containers); err != nil { t.Fatalf("Error inspecting the container: %s", err) } if len(containers) != 1 { t.Fatalf("Unexpected container count. Expected 0, received: %d", len(containers)) } c := containers[0] if c.HostConfig == nil { t.Fatalf("Expected HostConfig, got none") } if !c.HostConfig.PublishAllPorts { t.Fatalf("Expected PublishAllPorts, got false") } logDone("create - hostconfig") } func TestCreateWithPortRange(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "create", "-p", "3300-3303:3300-3303/tcp", "busybox", "echo") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatal(out, err) } cleanedContainerID := stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) out, _, err = runCommandWithOutput(inspectCmd) if err != nil { t.Fatalf("out should've been a container id: %s, %v", out, err) } containers := []struct { HostConfig *struct { PortBindings map[nat.Port][]nat.PortBinding } }{} if err := json.Unmarshal([]byte(out), &containers); err != nil { t.Fatalf("Error inspecting the container: %s", err) } if len(containers) != 1 { t.Fatalf("Unexpected container count. 
Expected 0, received: %d", len(containers)) } c := containers[0] if c.HostConfig == nil { t.Fatalf("Expected HostConfig, got none") } if len(c.HostConfig.PortBindings) != 4 { t.Fatalf("Expected 4 ports bindings, got %d", len(c.HostConfig.PortBindings)) } for k, v := range c.HostConfig.PortBindings { if len(v) != 1 { t.Fatalf("Expected 1 ports binding, for the port %s but found %s", k, v) } if k.Port() != v[0].HostPort { t.Fatalf("Expected host port %d to match published port %d", k.Port(), v[0].HostPort) } } logDone("create - port range") } func TestCreateWithiLargePortRange(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "create", "-p", "1-65535:1-65535/tcp", "busybox", "echo") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatal(out, err) } cleanedContainerID := stripTrailingCharacters(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) out, _, err = runCommandWithOutput(inspectCmd) if err != nil { t.Fatalf("out should've been a container id: %s, %v", out, err) } containers := []struct { HostConfig *struct { PortBindings map[nat.Port][]nat.PortBinding } }{} if err := json.Unmarshal([]byte(out), &containers); err != nil { t.Fatalf("Error inspecting the container: %s", err) } if len(containers) != 1 { t.Fatalf("Unexpected container count. Expected 0, received: %d", len(containers)) } c := containers[0] if c.HostConfig == nil { t.Fatalf("Expected HostConfig, got none") } if len(c.HostConfig.PortBindings) != 65535 { t.Fatalf("Expected 65535 ports bindings, got %d", len(c.HostConfig.PortBindings)) } for k, v := range c.HostConfig.PortBindings { if len(v) != 1 { t.Fatalf("Expected 1 ports binding, for the port %s but found %s", k, v) } if k.Port() != v[0].HostPort { t.Fatalf("Expected host port %d to match published port %d", k.Port(), v[0].HostPort) } } logDone("create - large port range") } // "test123" should be printed by docker create + start func TestCreateEchoStdout(t *testing.T) { defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "create", "busybox", "echo", "test123") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatal(out, err) } cleanedContainerID := stripTrailingCharacters(out) runCmd = exec.Command(dockerBinary, "start", "-ai", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(runCmd) if err != nil { t.Fatal(out, err) } if out != "test123\n" { t.Errorf("container should've printed 'test123', got %q", out) } logDone("create - echo test123") } func TestCreateVolumesCreated(t *testing.T) { testRequires(t, SameHostDaemon) defer deleteAllContainers() name := "test_create_volume" if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "create", "--name", name, "-v", "/foo", "busybox")); err != nil { t.Fatal(out, err) } dir, err := inspectFieldMap(name, "Volumes", "/foo") if err != nil { t.Fatalf("Error getting volume host path: %q", err) } if _, err := os.Stat(dir); err != nil && os.IsNotExist(err) { t.Fatalf("Volume was not created") } if err != nil { t.Fatalf("Error statting volume host path: %q", err) } logDone("create - volumes are created") } func TestCreateLabels(t *testing.T) { name := "test_create_labels" expected := map[string]string{"k1": "v1", "k2": "v2"} if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "create", "--name", name, "-l", "k1=v1", "--label", "k2=v2", "busybox")); err != nil { t.Fatal(out, err) } actual := make(map[string]string) err := inspectFieldAndMarshall(name, "Config.Labels", &actual) if err != 
nil { t.Fatal(err) } if !reflect.DeepEqual(expected, actual) { t.Fatalf("Expected %s got %s", expected, actual) } deleteAllContainers() logDone("create - labels") } func TestCreateLabelFromImage(t *testing.T) { imageName := "testcreatebuildlabel" defer deleteImages(imageName) _, err := buildImage(imageName, `FROM busybox
		LABEL k1=v1 k2=v2`, true) if err != nil { t.Fatal(err) } name := "test_create_labels_from_image" expected := map[string]string{"k2": "x", "k3": "v3", "k1": "v1"} if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "create", "--name", name, "-l", "k2=x", "--label", "k3=v3", imageName)); err != nil { t.Fatal(out, err) } actual := make(map[string]string) err = inspectFieldAndMarshall(name, "Config.Labels", &actual) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(expected, actual) { t.Fatalf("Expected %s got %s", expected, actual) } deleteAllContainers() logDone("create - labels from image") } docker-1.6.2/image/0000755000175000017500000000000012524223634013416 5ustar tianontianondocker-1.6.2/image/spec/0000755000175000017500000000000012524223634014350 5ustar tianontianondocker-1.6.2/image/spec/v1.md0000644000175000017500000005012212524223634015220 0ustar tianontianon# Docker Image Specification v1.0.0

An *Image* is an ordered collection of root filesystem changes and the corresponding execution parameters for use within a container runtime. This specification outlines the format of these filesystem changes and corresponding parameters and describes how to create and use them with a container runtime and execution tool.

## Terminology

This specification uses the following terms:
Layer
Images are composed of layers. Image layer is a general term which may be used to refer to one or both of the following:
  1. The metadata for the layer, described in the JSON format.
  2. The filesystem changes described by a layer.
To refer to the former you may use the term Layer JSON or Layer Metadata. To refer to the latter you may use the term Image Filesystem Changeset or Image Diff.
Image JSON
Each layer has an associated JSON structure which describes some basic information about the image such as date created, author, and the ID of its parent image, as well as execution/runtime configuration like its entry point, default arguments, CPU/memory shares, networking, and volumes.
Image Filesystem Changeset
Each layer has an archive of the files which have been added, changed, or deleted relative to its parent layer. Using a layer-based or union filesystem such as AUFS, or by computing the diff from filesystem snapshots, the filesystem changeset can be used to present a series of image layers as if they were one cohesive filesystem.
Image ID
Each layer is given an ID upon its creation. It is represented as a hexadecimal encoding of 256 bits, e.g., a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. Image IDs should be sufficiently random so as to be globally unique. 32 bytes read from /dev/urandom is sufficient for all practical purposes (see the sketch following this terminology list). Alternatively, an image ID may be derived as a cryptographic hash of image contents, as the result is considered indistinguishable from random. The choice is left up to implementors.
Image Parent
Most layer metadata structs contain a parent field which refers to the Image from which it directly descends. An image contains a separate JSON metadata file and a set of changes relative to the filesystem of its parent image. Image Ancestor and Image Descendant are also common terms.
Image Checksum
Layer metadata structs contain a cryptographic hash of the contents of the layer's filesystem changeset. Though the set of changes exists as a simple Tar archive, two archives with identical filenames and content will have different SHA digests if the last-access or last-modified times of any entries differ. For this reason, image checksums are generated using the TarSum algorithm which produces a cryptographic hash of file contents and selected headers only. Details of this algorithm are described in the separate [TarSum specification](https://github.com/docker/docker/blob/master/pkg/tarsum/tarsum_spec.md).
Tag
A tag serves to map a descriptive, user-given name to any single image ID. An image name suffix (the name component after :) is often referred to as a tag as well, though it strictly refers to the full name of an image. Acceptable values for a tag suffix are implementation specific, but they SHOULD be limited to the set of alphanumeric characters [a-zA-Z0-9] and punctuation characters [._-], and MUST NOT contain a : character.
Repository
A collection of tags grouped under a common prefix (the name component before :). For example, in an image tagged with the name my-app:3.1.4, my-app is the Repository component of the name. Acceptable values for a repository name are implementation specific, but they SHOULD be limited to the set of alphanumeric characters [a-zA-Z0-9] and punctuation characters [._-]; however, a name MAY contain additional / and : characters for organizational purposes, with the last : character being interpreted as dividing the repository component of the name from the tag suffix component.
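Both ID options above are straightforward to illustrate. Here is a minimal Go sketch of the random variant — 32 bytes from a cryptographically secure source, hex-encoded into the 64-character form used throughout this specification (the helper name is illustrative, not part of Docker's code):

```
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// newImageID returns a 256-bit identifier as 64 hexadecimal characters,
// per the "random bytes" option described above.
func newImageID() (string, error) {
	b := make([]byte, 32) // 32 bytes = 256 bits
	if _, err := rand.Read(b); err != nil {
		return "", err
	}
	return hex.EncodeToString(b), nil
}

func main() {
	id, err := newImageID()
	if err != nil {
		panic(err)
	}
	fmt.Println(id) // e.g. a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9
}
```

Likewise, the repository/tag split just described — the last : divides the repository component from the tag suffix — can be sketched in a few lines. splitRepoTag is a hypothetical helper; note that a colon belonging to a registry address (e.g. localhost:5000/my-app) is followed by a /, so it is not treated as the tag separator:

```
package main

import (
	"fmt"
	"strings"
)

// splitRepoTag separates the repository component from the tag suffix at
// the last ':' of the name, as described in the Repository entry above.
func splitRepoTag(name string) (repo, tag string) {
	i := strings.LastIndex(name, ":")
	if i < 0 || strings.Contains(name[i+1:], "/") {
		return name, "" // no tag suffix present
	}
	return name[:i], name[i+1:]
}

func main() {
	fmt.Println(splitRepoTag("my-app:3.1.4"))          // my-app 3.1.4
	fmt.Println(splitRepoTag("localhost:5000/my-app")) // localhost:5000/my-app
}
```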
## Image JSON Description

Here is an example image JSON file:

```
{
    "id": "a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9",
    "parent": "c6e3cedcda2e3982a1a6760e178355e8e65f7b80e4e5248743fa3549d284e024",
    "checksum": "tarsum.v1+sha256:e58fcf7418d2390dec8e8fb69d88c06ec07039d651fedc3aa72af9972e7d046b",
    "created": "2014-10-13T21:19:18.674353812Z",
    "author": "Alyssa P. Hacker <alyspdev@example.com>",
    "architecture": "amd64",
    "os": "linux",
    "Size": 271828,
    "config": {
        "User": "alice",
        "Memory": 2048,
        "MemorySwap": 4096,
        "CpuShares": 8,
        "ExposedPorts": {
            "8080/tcp": {}
        },
        "Env": [
            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
            "FOO=docker_is_a_really",
            "BAR=great_tool_you_know"
        ],
        "Entrypoint": [
            "/bin/my-app-binary"
        ],
        "Cmd": [
            "--foreground",
            "--config",
            "/etc/my-app.d/default.cfg"
        ],
        "Volumes": {
            "/var/job-result-data": {},
            "/var/log/my-app-logs": {}
        },
        "WorkingDir": "/home/alice"
    }
}
```

### Image JSON Field Descriptions
id string
Randomly generated, 256-bit, hexadecimal encoded. Uniquely identifies the image.
parent string
ID of the parent image. If there is no parent image then this field should be omitted. A collection of images may share many of the same ancestor layers. This organizational structure is strictly a tree, with any one layer having either no parent or a single parent and zero or more descendant layers. Cycles are not allowed and implementations should be careful to avoid creating them or iterating through a cycle indefinitely.
created string
ISO-8601 formatted combined date and time at which the image was created.
author string
Gives the name and/or email address of the person or entity which created and is responsible for maintaining the image.
architecture string
The CPU architecture which the binaries in this image are built to run on. Possible values include:
  • 386
  • amd64
  • arm
More values may be supported in the future and any of these may or may not be supported by a given container runtime implementation.
os string
The name of the operating system which the image is built to run on. Possible values include:
  • darwin
  • freebsd
  • linux
More values may be supported in the future and any of these may or may not be supported by a given container runtime implementation.
checksum string
Image Checksum of the filesystem changeset associated with the image layer.
Size integer
The size in bytes of the filesystem changeset associated with the image layer.
config struct
The execution parameters which should be used as a base when running a container using the image. This field can be null, in which case any execution parameters should be specified at creation of the container.

Container RunConfig Field Descriptions

User string

The username or UID which the process in the container should run as. This acts as a default value to use when the value is not specified when creating a container.

All of the following are valid:

  • user
  • uid
  • user:group
  • uid:gid
  • uid:group
  • user:gid

If group/gid is not specified, the default group and supplementary groups of the given user/uid in /etc/passwd from the container are applied.

Memory integer
Memory limit (in bytes). This acts as a default value to use when the value is not specified when creating a container.
MemorySwap integer
Total memory usage (memory + swap); set to -1 to disable swap. This acts as a default value to use when the value is not specified when creating a container.
CpuShares integer
CPU shares (relative weight vs. other containers). This acts as a default value to use when the value is not specified when creating a container.
ExposedPorts struct
A set of ports to expose from a container running this image. This JSON structure value is unusual because it is a direct JSON serialization of the Go type map[string]struct{} and is represented in JSON as an object mapping its keys to an empty object. Here is an example:
{
    "8080": {},
    "53/udp": {},
    "2356/tcp": {}
}
Its keys can be in the format of:
  • "port/tcp"
  • "port/udp"
  • "port"
with the default protocol being "tcp" if not specified. These values act as defaults and are merged with any specified when creating a container (a short serialization sketch follows this field list).
Env array of strings
Entries are in the format of VARNAME="var value". These values act as defaults and are merged with any specified when creating a container.
Entrypoint array of strings
A list of arguments to use as the command to execute when the container starts. This value acts as a default and is replaced by an entrypoint specified when creating a container.
Cmd array of strings
Default arguments to the entry point of the container. These values act as defaults and are replaced with any specified when creating a container. If an Entrypoint value is not specified, then the first entry of the Cmd array should be interpreted as the executable to run (a short resolution sketch follows this field list).
Volumes struct
A set of directories which should be created as data volumes in a container running this image. This JSON structure value is unusual because it is a direct JSON serialization of the Go type map[string]struct{} and is represented in JSON as an object mapping its keys to an empty object. Here is an example:
{
    "/var/my-app-data/": {},
    "/etc/some-config.d/": {},
}
WorkingDir string
Sets the current working directory of the entry point process in the container. This value acts as a default and is replaced by a working directory specified when creating a container.
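Two of the config fields above are easiest to understand through Go's own behavior. First, ExposedPorts and Volumes are described as direct JSON serializations of the Go type map[string]struct{}; the following minimal, self-contained sketch shows why every value renders as the empty object:

```
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// The empty struct carries no fields, so each map value
	// marshals to the empty JSON object {}.
	exposed := map[string]struct{}{
		"8080/tcp": {},
		"53/udp":   {},
	}
	out, err := json.Marshal(exposed)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"53/udp":{},"8080/tcp":{}}
}
```

Second, the Entrypoint/Cmd precedence rule can be expressed in a few lines. This is a sketch of the spec text only, not Docker's actual implementation; effectiveArgv is a hypothetical helper:

```
package main

import "fmt"

// effectiveArgv applies the rule above: Entrypoint (if any) supplies the
// executable and leading arguments, Cmd supplies defaults appended after
// it; with no Entrypoint, Cmd[0] becomes the executable.
func effectiveArgv(entrypoint, cmd []string) []string {
	return append(append([]string{}, entrypoint...), cmd...)
}

func main() {
	fmt.Println(effectiveArgv(
		[]string{"/bin/my-app-binary"},
		[]string{"--foreground", "--config", "/etc/my-app.d/default.cfg"},
	)) // [/bin/my-app-binary --foreground --config /etc/my-app.d/default.cfg]

	fmt.Println(effectiveArgv(nil, []string{"echo", "hello"})) // [echo hello]
}
```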
Any extra fields in the Image JSON struct are considered implementation specific and should be ignored by any implementations which are unable to interpret them.

## Creating an Image Filesystem Changeset

An example of creating an Image Filesystem Changeset follows.

An image root filesystem is first created as an empty directory named with the ID of the image being created. Here is the initial empty directory structure for the changeset for an image with ID `c3167915dc9d` ([real IDs are much longer](#id_desc), but this example uses a truncated one here for brevity. Implementations need not name the rootfs directory in this way, but it may be convenient for keeping a record of a large number of image layers.):

```
c3167915dc9d/
```

Files and directories are then created:

```
c3167915dc9d/
    etc/
        my-app-config
    bin/
        my-app-binary
        my-app-tools
```

The `c3167915dc9d` directory is then committed as a plain Tar archive with entries for the following files:

```
etc/my-app-config
bin/my-app-binary
bin/my-app-tools
```

The TarSum checksum for the archive file is then computed and placed in the JSON metadata along with the execution parameters.

To make changes to the filesystem of this container image, create a new directory named with a new ID, such as `f60c56784b83`, and initialize it with a snapshot of the parent image's root filesystem, so that the directory is identical to that of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem can make this very efficient:

```
f60c56784b83/
    etc/
        my-app-config
    bin/
        my-app-binary
        my-app-tools
```

This example change is going to add a configuration directory at `/etc/my-app.d` which contains a default config file. There's also a change to the `my-app-tools` binary to handle the config layout change. The `f60c56784b83` directory then looks like this:

```
f60c56784b83/
    etc/
        my-app.d/
            default.cfg
    bin/
        my-app-binary
        my-app-tools
```

This reflects the removal of `/etc/my-app-config` and creation of a file and directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been replaced with an updated version. Before committing this directory to a changeset, because it has a parent image, it is first compared with the directory tree of the parent snapshot, `c3167915dc9d`, looking for files and directories that have been added, modified, or removed. The following changeset is found:

```
Added:      /etc/my-app.d/default.cfg
Modified:   /bin/my-app-tools
Deleted:    /etc/my-app-config
```

A Tar Archive is then created which contains *only* this changeset: the added and modified files and directories in their entirety, and for each deleted item an entry for an empty file at the same location but with the basename of the deleted file or directory prefixed with `.wh.`. The filenames prefixed with `.wh.` are known as "whiteout" files (see the sketch at the end of this section). NOTE: For this reason, it is not possible to create an image root filesystem which contains a file or directory with a name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has the following entries:

```
/etc/my-app.d/default.cfg
/bin/my-app-tools
/etc/.wh.my-app-config
```

Any given image is likely to be composed of several of these Image Filesystem Changeset tar archives.
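The whiteout convention is small enough to sketch in Go. The snippet below records the deletion of `/etc/my-app-config` from the example above as the empty entry `etc/.wh.my-app-config`; it illustrates the convention only — a real implementation would also write the added and modified files into the same archive:

```
package main

import (
	"archive/tar"
	"os"
	"path"
)

// writeWhiteout appends the empty ".wh."-prefixed marker entry for a path
// that was deleted relative to the parent layer.
func writeWhiteout(tw *tar.Writer, deleted string) error {
	dir, base := path.Split(deleted)
	return tw.WriteHeader(&tar.Header{
		Name:     path.Join(dir, ".wh."+base),
		Typeflag: tar.TypeReg,
		Mode:     0600,
		Size:     0, // whiteouts are empty; only the name matters
	})
}

func main() {
	f, err := os.Create("changeset.tar")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	tw := tar.NewWriter(f)
	defer tw.Close()
	// Record the deletion of /etc/my-app-config as "etc/.wh.my-app-config".
	if err := writeWhiteout(tw, "etc/my-app-config"); err != nil {
		panic(err)
	}
}
```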
## Combined Image JSON + Filesystem Changeset Format

There is also a format for a single archive which contains complete information about an image, including:

 - repository names/tags
 - all image layer JSON files
 - the tar archive of each layer's filesystem changeset

For example, here's what the full archive of `library/busybox` is (displayed in `tree` format):

```
.
├── 5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e
│   ├── VERSION
│   ├── json
│   └── layer.tar
├── a7b8b41220991bfc754d7ad445ad27b7f272ab8b4a2c175b9512b97471d02a8a
│   ├── VERSION
│   ├── json
│   └── layer.tar
├── a936027c5ca8bf8f517923169a233e391cbb38469a75de8383b5228dc2d26ceb
│   ├── VERSION
│   ├── json
│   └── layer.tar
├── f60c56784b832dd990022afc120b8136ab3da9528094752ae13fe63a2d28dc8c
│   ├── VERSION
│   ├── json
│   └── layer.tar
└── repositories
```

There are one or more directories named with the ID for each layer in a full image. Each of these directories contains 3 files:

 * `VERSION` - The schema version of the `json` file
 * `json` - The JSON metadata for an image layer
 * `layer.tar` - The Tar archive of the filesystem changeset for an image layer.

The content of the `VERSION` files is simply the semantic version of the JSON metadata schema:

```
1.0
```

And the `repositories` file is another JSON file which describes names/tags:

```
{
    "busybox":{
        "latest":"5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e"
    }
}
```

Every key in this object is the name of a repository, and maps to a collection of tag suffixes. Each tag maps to the ID of the image represented by that tag (a decoding sketch follows at the end of this section).

## Loading an Image Filesystem Changeset

Unpacking a bundle of image layer JSON files and their corresponding filesystem changesets can be done using a series of steps:

1. Follow the parent IDs of image layers to find the root ancestor (an image with no parent ID specified).
2. For every image layer, in order from root ancestor and descending down, extract the contents of that layer's filesystem changeset archive into a directory which will be used as the root of a container filesystem.
   - Extract all contents of each archive.
   - Walk the directory tree once more, removing any files with the prefix `.wh.` and the corresponding file or directory named without this prefix.

## Implementations

This specification is an admittedly imperfect description of an imperfectly-understood problem. The Docker project is, in turn, an attempt to implement this specification. Our goal and our execution toward it will evolve over time, but our primary concern in this specification and in our implementation is compatibility and interoperability.
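The `repositories` file shown above maps repository name → tag → image ID, so it decodes directly into a nested string map. A minimal sketch, assuming the file sits in the current directory (`ioutil` matches the standard library of the Go versions this codebase targets):

```
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
)

func main() {
	data, err := ioutil.ReadFile("repositories")
	if err != nil {
		panic(err)
	}
	// repository name -> tag suffix -> image ID
	repos := map[string]map[string]string{}
	if err := json.Unmarshal(data, &repos); err != nil {
		panic(err)
	}
	fmt.Println(repos["busybox"]["latest"])
	// 5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e
}
```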
docker-1.6.2/image/image.go0000644000175000017500000001545212524223634015036 0ustar tianontianonpackage image import ( "encoding/json" "fmt" "io/ioutil" "os" "path" "strconv" "time" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) // Set the max depth to the aufs default that most // kernels are compiled with // For more information see: http://sourceforge.net/p/aufs/aufs3-standalone/ci/aufs3.12/tree/config.mk const MaxImageDepth = 127 type Image struct { ID string `json:"id"` Parent string `json:"parent,omitempty"` Comment string `json:"comment,omitempty"` Created time.Time `json:"created"` Container string `json:"container,omitempty"` ContainerConfig runconfig.Config `json:"container_config,omitempty"` DockerVersion string `json:"docker_version,omitempty"` Author string `json:"author,omitempty"` Config *runconfig.Config `json:"config,omitempty"` Architecture string `json:"architecture,omitempty"` OS string `json:"os,omitempty"` Size int64 graph Graph } func LoadImage(root string) (*Image, error) { // Open the JSON file to decode by streaming jsonSource, err := os.Open(jsonPath(root)) if err != nil { return nil, err } defer jsonSource.Close() img := &Image{} dec := json.NewDecoder(jsonSource) // Decode the JSON data if err := dec.Decode(img); err != nil { return nil, err } if err := utils.ValidateID(img.ID); err != nil { return nil, err } if buf, err := ioutil.ReadFile(path.Join(root, "layersize")); err != nil { if !os.IsNotExist(err) { return nil, err } // If the layersize file does not exist then set the size to a negative number // because a layer size of 0 (zero) is valid img.Size = -1 } else { // Using Atoi here instead would temporarily convert the size to a machine // dependent integer type, which causes images larger than 2^31 bytes to // display negative sizes on 32-bit machines: size, err := strconv.ParseInt(string(buf), 10, 64) if err != nil { return nil, err } img.Size = int64(size) } return img, nil } // StoreImage stores file system layer data for the given image to the // image's registered storage driver. Image metadata is stored in a file // at the specified root directory. func StoreImage(img *Image, layerData archive.ArchiveReader, root string) (err error) { // Store the layer. If layerData is not nil, unpack it into the new layer if layerData != nil { if img.Size, err = img.graph.Driver().ApplyDiff(img.ID, img.Parent, layerData); err != nil { return err } } if err := img.SaveSize(root); err != nil { return err } f, err := os.OpenFile(jsonPath(root), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600)) if err != nil { return err } defer f.Close() return json.NewEncoder(f).Encode(img) } func (img *Image) SetGraph(graph Graph) { img.graph = graph } // SaveSize stores the current `size` value of `img` in the directory `root`. 
func (img *Image) SaveSize(root string) error { if err := ioutil.WriteFile(path.Join(root, "layersize"), []byte(strconv.Itoa(int(img.Size))), 0600); err != nil { return fmt.Errorf("Error storing image size in %s/layersize: %s", root, err) } return nil } func (img *Image) SaveCheckSum(root, checksum string) error { if err := ioutil.WriteFile(path.Join(root, "checksum"), []byte(checksum), 0600); err != nil { return fmt.Errorf("Error storing checksum in %s/checksum: %s", root, err) } return nil } func (img *Image) GetCheckSum(root string) (string, error) { cs, err := ioutil.ReadFile(path.Join(root, "checksum")) if err != nil { if os.IsNotExist(err) { return "", nil } return "", err } return string(cs), err } func jsonPath(root string) string { return path.Join(root, "json") } func (img *Image) RawJson() ([]byte, error) { root, err := img.root() if err != nil { return nil, fmt.Errorf("Failed to get root for image %s: %s", img.ID, err) } buf, err := ioutil.ReadFile(jsonPath(root)) if err != nil { return nil, fmt.Errorf("Failed to read json for image %s: %s", img.ID, err) } return buf, nil } // TarLayer returns a tar archive of the image's filesystem layer. func (img *Image) TarLayer() (arch archive.Archive, err error) { if img.graph == nil { return nil, fmt.Errorf("Can't load storage driver for unregistered image %s", img.ID) } driver := img.graph.Driver() return driver.Diff(img.ID, img.Parent) } // Image includes convenience proxy functions to its graph // These functions will return an error if the image is not registered // (ie. if image.graph == nil) func (img *Image) History() ([]*Image, error) { var parents []*Image if err := img.WalkHistory( func(img *Image) error { parents = append(parents, img) return nil }, ); err != nil { return nil, err } return parents, nil } func (img *Image) WalkHistory(handler func(*Image) error) (err error) { currentImg := img for currentImg != nil { if handler != nil { if err := handler(currentImg); err != nil { return err } } currentImg, err = currentImg.GetParent() if err != nil { return fmt.Errorf("Error while getting parent image: %v", err) } } return nil } func (img *Image) GetParent() (*Image, error) { if img.Parent == "" { return nil, nil } if img.graph == nil { return nil, fmt.Errorf("Can't lookup parent of unregistered image") } return img.graph.Get(img.Parent) } func (img *Image) root() (string, error) { if img.graph == nil { return "", fmt.Errorf("Can't lookup root of unregistered image") } return img.graph.ImageRoot(img.ID), nil } func (img *Image) GetParentsSize(size int64) int64 { parentImage, err := img.GetParent() if err != nil || parentImage == nil { return size } size += parentImage.Size return parentImage.GetParentsSize(size) } // Depth returns the number of parents for a // current image func (img *Image) Depth() (int, error) { var ( count = 0 parent = img err error ) for parent != nil { count++ parent, err = parent.GetParent() if err != nil { return -1, err } } return count, nil } // CheckDepth returns an error if the depth of an image, as returned // by ImageDepth, is too large to support creating a container from it // on this daemon. 
func (img *Image) CheckDepth() error { // We add 2 layers to the depth because the container's rw and // init layer add to the restriction depth, err := img.Depth() if err != nil { return err } if depth+2 >= MaxImageDepth { return fmt.Errorf("Cannot create container with more than %d parents", MaxImageDepth) } return nil } // Build an Image object from raw json data func NewImgJSON(src []byte) (*Image, error) { ret := &Image{} // FIXME: Is there a cleaner way to "purify" the input json? if err := json.Unmarshal(src, ret); err != nil { return nil, err } return ret, nil } docker-1.6.2/image/graph.go0000644000175000017500000000027512524223634015052 0ustar tianontianonpackage image import ( "github.com/docker/docker/daemon/graphdriver" ) type Graph interface { Get(id string) (*Image, error) ImageRoot(id string) string Driver() graphdriver.Driver } docker-1.6.2/README.md0000644000175000017500000002455712524223634013630 0ustar tianontianonDocker: the Linux container engine ================================== Docker is an open source project to pack, ship and run any application as a lightweight container Docker containers are both *hardware-agnostic* and *platform-agnostic*. This means they can run anywhere, from your laptop to the largest EC2 compute instance and everything in between - and they don't require you to use a particular language, framework or packaging system. That makes them great building blocks for deploying and scaling web apps, databases, and backend services without depending on a particular stack or provider. Docker began as an open-source implementation of the deployment engine which powers [dotCloud](http://dotcloud.com), a popular Platform-as-a-Service. It benefits directly from the experience accumulated over several years of large-scale operation and support of hundreds of thousands of applications and databases. ![Docker L](docs/sources/static_files/docker-logo-compressed.png "Docker") ## Security Disclosure Security is very important to us. If you have any issue regarding security, please disclose the information responsibly by sending an email to security@docker.com and not by creating a github issue. ## Better than VMs A common method for distributing applications and sandboxing their execution is to use virtual machines, or VMs. Typical VM formats are VMWare's vmdk, Oracle Virtualbox's vdi, and Amazon EC2's ami. In theory these formats should allow every developer to automatically package their application into a "machine" for easy distribution and deployment. In practice, that almost never happens, for a few reasons: * *Size*: VMs are very large which makes them impractical to store and transfer. * *Performance*: running VMs consumes significant CPU and memory, which makes them impractical in many scenarios, for example local development of multi-tier applications, and large-scale deployment of cpu and memory-intensive applications on large numbers of machines. * *Portability*: competing VM environments don't play well with each other. Although conversion tools do exist, they are limited and add even more overhead. * *Hardware-centric*: VMs were designed with machine operators in mind, not software developers. As a result, they offer very limited tooling for what developers need most: building, testing and running their software. For example, VMs offer no facilities for application versioning, monitoring, configuration, logging or service discovery. By contrast, Docker relies on a different sandboxing method known as *containerization*. 
Unlike traditional virtualization, containerization takes place at the kernel level. Most modern operating system kernels now support the primitives necessary for containerization, including Linux with [openvz](http://openvz.org), [vserver](http://linux-vserver.org) and more recently [lxc](http://lxc.sourceforge.net), Solaris with [zones](http://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc), and FreeBSD with [Jails](http://www.freebsd.org/doc/handbook/jails.html). Docker builds on top of these low-level primitives to offer developers a portable format and runtime environment that solves all four problems. Docker containers are small (and their transfer can be optimized with layers), they have basically zero memory and cpu overhead, they are completely portable, and are designed from the ground up with an application-centric design. Perhaps best of all, because Docker operates at the OS level, it can still be run inside a VM! ## Plays well with others Docker does not require you to buy into a particular programming language, framework, packaging system, or configuration language. Is your application a Unix process? Does it use files, tcp connections, environment variables, standard Unix streams and command-line arguments as inputs and outputs? Then Docker can run it. Can your application's build be expressed as a sequence of such commands? Then Docker can build it. ## Escape dependency hell A common problem for developers is the difficulty of managing all their application's dependencies in a simple and automated way. This is usually difficult for several reasons: * *Cross-platform dependencies*. Modern applications often depend on a combination of system libraries and binaries, language-specific packages, framework-specific modules, internal components developed for another project, etc. These dependencies live in different "worlds" and require different tools - these tools typically don't work well with each other, requiring awkward custom integrations. * *Conflicting dependencies*. Different applications may depend on different versions of the same dependency. Packaging tools handle these situations with various degrees of ease - but they all handle them in different and incompatible ways, which again forces the developer to do extra work. * *Custom dependencies*. A developer may need to prepare a custom version of their application's dependency. Some packaging systems can handle custom versions of a dependency, others can't - and all of them handle it differently. Docker solves the problem of dependency hell by giving the developer a simple way to express *all* their application's dependencies in one place, while streamlining the process of assembling them. If this makes you think of [XKCD 927](http://xkcd.com/927/), don't worry. Docker doesn't *replace* your favorite packaging systems. It simply orchestrates their use in a simple and repeatable way. How does it do that? With layers. Docker defines a build as running a sequence of Unix commands, one after the other, in the same container. Build commands modify the contents of the container (usually by installing new files on the filesystem), the next command modifies it some more, etc. Since each build command inherits the result of the previous commands, the *order* in which the commands are executed expresses *dependencies*. 
Here's a typical Docker build process: ```bash FROM ubuntu:12.04 RUN apt-get update && apt-get install -y python python-pip curl RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv RUN cd helloflask-master && pip install -r requirements.txt ``` Note that Docker doesn't care *how* dependencies are built - as long as they can be built by running a Unix command in a container. Getting started =============== Docker can be installed on your local machine as well as servers - both bare metal and virtualized. It is available as a binary on most modern Linux systems, or as a VM on Windows, Mac and other systems. We also offer an [interactive tutorial](http://www.docker.com/tryit/) for quickly learning the basics of using Docker. For up-to-date install instructions, see the [Docs](http://docs.docker.com). Usage examples ============== Docker can be used to run short-lived commands, long-running daemons (app servers, databases etc.), interactive shell sessions, etc. You can find a [list of real-world examples](http://docs.docker.com/examples/) in the documentation. Under the hood -------------- Under the hood, Docker is built on the following components: * The [cgroup](http://blog.dotcloud.com/kernel-secrets-from-the-paas-garage-part-24-c) and [namespacing](http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part) capabilities of the Linux kernel; * The [Go](http://golang.org) programming language. * The [Docker Image Specification] (https://github.com/docker/docker/blob/master/image/spec/v1.md) * The [Libcontainer Specification] (https://github.com/docker/libcontainer/blob/master/SPEC.md) Contributing to Docker ====================== [![GoDoc](https://godoc.org/github.com/docker/docker?status.png)](https://godoc.org/github.com/docker/docker) [![Jenkins Build Status](https://jenkins.dockerproject.com/job/Docker%20Master/badge/icon)](https://jenkins.dockerproject.com/job/Docker%20Master/) Want to hack on Docker? Awesome! We have [instructions to help you get started contributing code or documentation.](https://docs.docker.com/project/who-written-for/). These instructions are probably not perfect, please let us know if anything feels wrong or incomplete. Better yet, submit a PR and improve them yourself. Getting the development builds ============================== Want to run Docker from a master build? You can download master builds at [master.dockerproject.com](https://master.dockerproject.com). They are updated with each commit merged into the master branch. Don't know how to use that super cool new feature in the master build? Check out the master docs at [docs.master.dockerproject.com](http://docs.master.dockerproject.com). How the project is run ====================== Docker is a very, very active project. If you want to learn more about how it is run, or want to get more involved, the best place to start is [the project directory](https://github.com/docker/docker/tree/master/project). We are always open to suggestions on process improvements, and are always looking for more maintainers. ### Legal *Brought to you courtesy of our legal counsel. For more context, please see the "NOTICE" document in this repo.* Use and transfer of Docker may be subject to certain restrictions by the United States and other governments. It is your responsibility to ensure that your use and/or transfer does not violate applicable laws. For more information, please see http://www.bis.doc.gov Licensing ========= Docker is licensed under the Apache License, Version 2.0. 
See [LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full license text. Other Docker Related Projects ============================= There are a number of projects under development that are based on Docker's core technology. These projects expand the tooling built around the Docker platform to broaden its application and utility. If you know of another project underway that should be listed here, please help us keep this list up-to-date by submitting a PR. * [Docker Registry](https://github.com/docker/distribution): Registry server for Docker (hosting/delivery of repositories and images) * [Docker Machine](https://github.com/docker/machine): Machine management for a container-centric world * [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering system * [Docker Compose](https://github.com/docker/compose) (formerly Fig): Define and run multi-container apps docker-1.6.2/CONTRIBUTING.md0000644000175000017500000003527512524223634014601 0ustar tianontianon# Contributing to Docker Want to hack on Docker? Awesome! We have a contributor's guide that explains [setting up a Docker development environment and the contribution process](https://docs.docker.com/project/who-written-for/). ![Contributors guide](docs/sources/static_files/contributors.png) This page contains information about reporting issues as well as some tips and guidelines useful to experienced open source contributors. Finally, make sure you read our [community guidelines](#docker-community-guidelines) before you start participating. ## Topics * [Reporting Security Issues](#reporting-security-issues) * [Design and Cleanup Proposals](#design-and-cleanup-proposals) * [Reporting Issues](#reporting-other-issues) * [Quick Contribution Tips and Guidelines](#quick-contribution-tips-and-guidelines) * [Community Guidelines](#docker-community-guidelines) ## Reporting security issues The Docker maintainers take security seriously. If you discover a security issue, please bring it to their attention right away! Please **DO NOT** file a public issue, instead send your report privately to [security@docker.com](mailto:security@docker.com), Security reports are greatly appreciated and we will publicly thank you for it. We also like to send gifts—if you're into Docker schwag make sure to let us know We currently do not offer a paid security bounty program, but are not ruling it out in the future. ## Reporting other issues A great way to contribute to the project is to send a detailed report when you encounter an issue. We always appreciate a well-written, thorough bug report, and will thank you for it! Check that [our issue database](https://github.com/docker/docker/issues) doesn't already include that problem or suggestion before submitting an issue. If you find a match, add a quick "+1" or "I have this problem too." Doing this helps prioritize the most common problems and requests. When reporting issues, please include your host OS (Ubuntu 12.04, Fedora 19, etc). Please include: * The output of `uname -a`. * The output of `docker version`. * The output of `docker -D info`. Please also include the steps required to reproduce the problem if possible and applicable. This information will help us review and fix your issue faster. **Issue Report Template**: ``` Description of problem: `docker version`: `docker info`: `uname -a`: Environment details (AWS, VirtualBox, physical, etc.): How reproducible: Steps to Reproduce: 1. 2. 3. 
Actual Results: Expected Results: Additional info: ``` ##Quick contribution tips and guidelines This section gives the experienced contributor some tips and guidelines. ###Pull requests are always welcome Not sure if that typo is worth a pull request? Found a bug and know how to fix it? Do it! We will appreciate it. Any significant improvement should be documented as [a GitHub issue](https://github.com/docker/docker/issues) before anybody starts working on it. We are always thrilled to receive pull requests. We do our best to process them quickly. If your pull request is not accepted on the first try, don't get discouraged! Our contributor's guide explains [the review process we use for simple changes](https://docs.docker.com/project/make-a-contribution/). ### Design and cleanup proposals You can propose new designs for existing Docker features. You can also design entirely new features. We really appreciate contributors who want to refactor or otherwise cleanup our project. For information on making these types of contributions, see [the advanced contribution section](https://docs.docker.com/project/advanced-contributing/) in the contributors guide. We try hard to keep Docker lean and focused. Docker can't do everything for everybody. This means that we might decide against incorporating a new feature. However, there might be a way to implement that feature *on top of* Docker. ### Talking to other Docker users and contributors
Internet Relay Chat (IRC)

IRC is a direct line to our most knowledgeable Docker users; we have both the #docker and #docker-dev group on irc.freenode.net. IRC is a rich chat protocol, but it can overwhelm new users. You can search our chat archives.

Read our IRC quickstart guide for an easy way to get started.
Google Groups There are two groups. Docker-user is for people using Docker containers. The docker-dev group is for contributors working on the Docker project.
Twitter You can follow Docker's Twitter feed to get updates on our products. You can also tweet us questions or just share blogs or stories.
Stack Overflow Stack Overflow has over 7,000 Docker questions listed. We regularly monitor Docker questions and so do many other knowledgeable Docker users.
### Conventions Fork the repository and make changes on your fork in a feature branch: - If it's a bug fix branch, name it XXXX-something where XXXX is the number of the issue. - If it's a feature branch, create an enhancement issue to announce your intentions, and name it XXXX-something where XXXX is the number of the issue. Submit unit tests for your changes. Go has a great test framework built in; use it! Take a look at existing tests for inspiration. [Run the full test suite](https://docs.docker.com/project/test-and-docs/) on your branch before submitting a pull request. Update the documentation when creating or modifying features. Test your documentation changes for clarity, concision, and correctness, as well as a clean documentation build. See our contributors guide for [our style guide](https://docs.docker.com/project/doc-style) and instructions on [building the documentation](https://docs.docker.com/project/test-and-docs/#build-and-test-the-documentation). Write clean code. Universally formatted code promotes ease of writing, reading, and maintenance. Always run `gofmt -s -w file.go` on each changed file before committing your changes. Most editors have plug-ins that do this automatically. Pull request descriptions should be as clear as possible and include a reference to all the issues that they address. Commit messages must start with a capitalized and short summary (max. 50 chars) written in the imperative, followed by an optional, more detailed explanatory text which is separated from the summary by an empty line. Code review comments may be added to your pull request. Discuss, then make the suggested modifications and push additional commits to your feature branch. Post a comment after pushing. New commits show up in the pull request automatically, but the reviewers are notified only when you comment. Pull requests must be cleanly rebased on top of master without multiple branches mixed into the PR. **Git tip**: If your PR no longer merges cleanly, use `git rebase master` in your feature branch to update your pull request rather than `git merge master`. Before you make a pull request, squash your commits into logical units of work using `git rebase -i` and `git push -f`. A logical unit of work is a consistent set of patches that should be reviewed together: for example, upgrading the version of a vendored dependency and taking advantage of its newly available feature constitute two separate units of work. Implementing a new function and calling it in another file constitute a single logical unit of work. The vast majority of submissions should have a single commit, so if in doubt: squash down to one. After every commit, [make sure the test suite passes](https://docs.docker.com/project/test-and-docs/). Include documentation changes in the same pull request so that a revert would remove all traces of the feature or fix. Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in commits that close an issue. Including references automatically closes the issue on a merge. Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly from the Git history. ### Merge approval Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to indicate acceptance. A change requires LGTMs from an absolute majority of the maintainers of each component affected.
For example, if a change affects `docs/` and `registry/`, it needs an absolute majority from the maintainers of `docs/` AND, separately, an absolute majority of the maintainers of `registry/`. For more details, see the [MAINTAINERS](MAINTAINERS) page. ### Sign your work The sign-off is a simple line at the end of the explanation for the patch. Your signature certifies that you wrote the patch or otherwise have the right to pass it on as an open-source patch. The rules are pretty simple: if you can certify the below (from [developercertificate.org](http://developercertificate.org/)): ``` Developer Certificate of Origin Version 1.1 Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 660 York Street, Suite 102, San Francisco, CA 94110 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Developer's Certificate of Origin 1.1 By making a contribution to this project, I certify that: (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. (d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved. ``` Then you just add a line to every git commit message: Signed-off-by: Joe Smith Use your real name (sorry, no pseudonyms or anonymous contributions.) If you set your `user.name` and `user.email` git configs, you can sign your commit automatically with `git commit -s`. Note that the old-style `Docker-DCO-1.1-Signed-off-by: ...` format is still accepted, so there is no need to update outstanding pull requests to the new format right away, but please do adjust your processes for future contributions. ### How can I become a maintainer? * Step 1: Learn the component inside out * Step 2: Make yourself useful by contributing code, bug fixes, support etc. * Step 3: Volunteer on the IRC channel (#docker at Freenode) * Step 4: Propose yourself at a scheduled docker meeting in #docker-dev Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available. You don't have to be a maintainer to make a difference on the project! ### IRC meetings There are two monthly meetings taking place on #docker-dev IRC to accomodate all timezones. Anybody can propose a topic for discussion prior to the meeting. If you feel the conversation is going off-topic, feel free to point it out. For the exact dates and times, have a look at [the irc-minutes repo](https://github.com/docker/irc-minutes). The minutes also contain all the notes from previous meetings. ## Docker community guidelines We want to keep the Docker community awesome, growing and collaborative. We need your help to keep it that way. 
To help with this we've come up with some general guidelines for the community as a whole:

* Be nice: Be courteous, respectful and polite to fellow community members: no regional, racial, gender, or other abuse will be tolerated. We like nice people way better than mean ones!

* Encourage diversity and participation: Make everyone in our community feel welcome, regardless of their background and the extent of their contributions, and do everything possible to encourage participation in our community.

* Keep it legal: Basically, don't get us in trouble. Share only content that you own, do not share private or sensitive information, and don't break the law.

* Stay on topic: Make sure that you are posting to the correct channel and avoid off-topic discussions. Remember when you update an issue or respond to an email you are potentially sending to a large number of people. Please consider this before you update. Also remember that nobody likes spam.

### Guideline violations — 3 strikes method

The point of this section is not to find opportunities to punish people, but we do need a fair way to deal with people who are making our community suck.

1. First occurrence: We'll give you a friendly, but public reminder that the behavior is inappropriate according to our guidelines.

2. Second occurrence: We will send you a private message with a warning that any additional violations will result in removal from the community.

3. Third occurrence: Depending on the violation, we may need to delete or ban your account.

**Notes:**

* Obvious spammers are banned on first occurrence. If we don't do this, we'll have spam all over the place.

* Violations are forgiven after 6 months of good behavior, and we won't hold a grudge.

* People who commit minor infractions will get some education, rather than hammering them in the 3 strikes process.

* The rules apply equally to everyone in the community, no matter how much you've contributed.

* Extreme violations of a threatening, abusive, destructive or illegal nature will be addressed immediately and are not subject to 3 strikes or forgiveness.

* Contact abuse@docker.com to report abuse or appeal violations. In the case of appeals, we know that mistakes happen, and we'll work with you to come up with a fair solution if there has been a misunderstanding.
docker-1.6.2/daemon/0000755000175000017500000000000012524223634013577 5ustar tianontianondocker-1.6.2/daemon/daemon_devicemapper.go0000644000175000017500000000020312524223634020110 0ustar tianontianon// +build !exclude_graphdriver_devicemapper package daemon import ( _ "github.com/docker/docker/daemon/graphdriver/devmapper" ) docker-1.6.2/daemon/pause.go0000644000175000017500000000163212524223634015245 0ustar tianontianonpackage daemon import ( "github.com/docker/docker/engine" ) func (daemon *Daemon) ContainerPause(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("Usage: %s CONTAINER", job.Name) } name := job.Args[0] container, err := daemon.Get(name) if err != nil { return job.Error(err) } if err := container.Pause(); err != nil { return job.Errorf("Cannot pause container %s: %s", name, err) } container.LogEvent("pause") return engine.StatusOK } func (daemon *Daemon) ContainerUnpause(job *engine.Job) engine.Status { if n := len(job.Args); n < 1 || n > 2 { return job.Errorf("Usage: %s CONTAINER", job.Name) } name := job.Args[0] container, err := daemon.Get(name) if err != nil { return job.Error(err) } if err := container.Unpause(); err != nil { return job.Errorf("Cannot unpause container %s: %s", name, err) } container.LogEvent("unpause") return engine.StatusOK } docker-1.6.2/daemon/daemon_no_aufs.go0000644000175000017500000000030012524223634017104 0ustar tianontianon// +build exclude_graphdriver_aufs package daemon import ( "github.com/docker/docker/daemon/graphdriver" ) func migrateIfAufs(driver graphdriver.Driver, root string) error { return nil } docker-1.6.2/daemon/network_settings.go0000644000175000017500000000233612524223634017543 0ustar tianontianonpackage daemon import ( "github.com/docker/docker/engine" "github.com/docker/docker/nat" ) // FIXME: move deprecated port stuff to nat to clean up the core. type PortMapping map[string]string // Deprecated type NetworkSettings struct { IPAddress string IPPrefixLen int MacAddress string LinkLocalIPv6Address string LinkLocalIPv6PrefixLen int GlobalIPv6Address string GlobalIPv6PrefixLen int Gateway string IPv6Gateway string Bridge string PortMapping map[string]PortMapping // Deprecated Ports nat.PortMap } func (settings *NetworkSettings) PortMappingAPI() *engine.Table { var outs = engine.NewTable("", 0) for port, bindings := range settings.Ports { p, _ := nat.ParsePort(port.Port()) if len(bindings) == 0 { out := &engine.Env{} out.SetInt("PrivatePort", p) out.Set("Type", port.Proto()) outs.Add(out) continue } for _, binding := range bindings { out := &engine.Env{} h, _ := nat.ParsePort(binding.HostPort) out.SetInt("PrivatePort", p) out.SetInt("PublicPort", h) out.Set("Type", port.Proto()) out.Set("IP", binding.HostIp) outs.Add(out) } } return outs } docker-1.6.2/daemon/stats_collector.go0000644000175000017500000000656112524223634017342 0ustar tianontianonpackage daemon import ( "bufio" "fmt" "os" "strconv" "strings" "sync" "time" log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/pkg/pubsub" "github.com/docker/libcontainer/system" ) // newStatsCollector returns a new statsCollector that collects // network and cgroup stats for a registered container at the specified // interval. The collector allows non-running containers to be added // and will start processing stats when they are started.
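//
// A minimal usage sketch (hypothetical wiring; in the real daemon the
// collector is created once and shared across containers). The values
// received on the channel are the samples published by run(), i.e. the
// result of container.Stats() with SystemUsage filled in:
//
//	collector := newStatsCollector(1 * time.Second)
//	ch := collector.collect(container)
//	for sample := range ch {
//		_ = sample // one resource-stats sample per tick
//	}
//	collector.unsubscribe(container, ch)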
func newStatsCollector(interval time.Duration) *statsCollector { s := &statsCollector{ interval: interval, publishers: make(map[*Container]*pubsub.Publisher), clockTicks: uint64(system.GetClockTicks()), } go s.run() return s } // statsCollector manages and provides container resource stats type statsCollector struct { m sync.Mutex interval time.Duration clockTicks uint64 publishers map[*Container]*pubsub.Publisher } // collect registers the container with the collector and adds it to // the event loop for collection on the specified interval, returning // a channel for the subscriber to receive on. func (s *statsCollector) collect(c *Container) chan interface{} { s.m.Lock() defer s.m.Unlock() publisher, exists := s.publishers[c] if !exists { publisher = pubsub.NewPublisher(100*time.Millisecond, 1024) s.publishers[c] = publisher } return publisher.Subscribe() } // stopCollection closes the channels for all subscribers and removes // the container from metrics collection. func (s *statsCollector) stopCollection(c *Container) { s.m.Lock() if publisher, exists := s.publishers[c]; exists { publisher.Close() delete(s.publishers, c) } s.m.Unlock() } // unsubscribe removes a specific subscriber from receiving updates for a container's stats. func (s *statsCollector) unsubscribe(c *Container, ch chan interface{}) { s.m.Lock() publisher := s.publishers[c] if publisher != nil { publisher.Evict(ch) if publisher.Len() == 0 { delete(s.publishers, c) } } s.m.Unlock() } func (s *statsCollector) run() { for _ = range time.Tick(s.interval) { for container, publisher := range s.publishers { systemUsage, err := s.getSystemCpuUsage() if err != nil { log.Errorf("collecting system cpu usage for %s: %v", container.ID, err) continue } stats, err := container.Stats() if err != nil { if err != execdriver.ErrNotRunning { log.Errorf("collecting stats for %s: %v", container.ID, err) } continue } stats.SystemUsage = systemUsage publisher.Publish(stats) } } } const nanoSeconds = 1e9 // getSystemCpuUsage returns the host system's cpu usage in nanoseconds, // in the same format as the cgroup readings.
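//
// For example (hypothetical numbers): with clockTicks = 100 (a common
// USER_HZ value) and a /proc/stat line of
//
//	cpu  4705 356 584 3699 23 23 0
//
// the seven fields sum to 9390 ticks, so the function returns
// 9390 * 1e9 / 100 = 93,900,000,000 nanoseconds of aggregate cpu time.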
func (s *statsCollector) getSystemCpuUsage() (uint64, error) { f, err := os.Open("/proc/stat") if err != nil { return 0, err } defer f.Close() sc := bufio.NewScanner(f) for sc.Scan() { parts := strings.Fields(sc.Text()) switch parts[0] { case "cpu": if len(parts) < 8 { return 0, fmt.Errorf("invalid number of cpu fields") } var sum uint64 for _, i := range parts[1:8] { v, err := strconv.ParseUint(i, 10, 64) if err != nil { return 0, fmt.Errorf("Unable to convert value %s to int: %s", i, err) } sum += v } return (sum * nanoSeconds) / s.clockTicks, nil } } return 0, fmt.Errorf("invalid stat format") } docker-1.6.2/daemon/networkdriver/0000755000175000017500000000000012524223634016504 5ustar tianontianondocker-1.6.2/daemon/networkdriver/portallocator/0000755000175000017500000000000012524223634021371 5ustar tianontianondocker-1.6.2/daemon/networkdriver/portallocator/portallocator.go0000644000175000017500000000775512524223634024613 0ustar tianontianonpackage portallocator import ( "bufio" "errors" "fmt" "net" "os" "sync" log "github.com/Sirupsen/logrus" ) const ( DefaultPortRangeStart = 49153 DefaultPortRangeEnd = 65535 ) type ipMapping map[string]protoMap var ( ErrAllPortsAllocated = errors.New("all ports are allocated") ErrUnknownProtocol = errors.New("unknown protocol") defaultIP = net.ParseIP("0.0.0.0") ) type ErrPortAlreadyAllocated struct { ip string port int } func NewErrPortAlreadyAllocated(ip string, port int) ErrPortAlreadyAllocated { return ErrPortAlreadyAllocated{ ip: ip, port: port, } } func (e ErrPortAlreadyAllocated) IP() string { return e.ip } func (e ErrPortAlreadyAllocated) Port() int { return e.port } func (e ErrPortAlreadyAllocated) IPPort() string { return fmt.Sprintf("%s:%d", e.ip, e.port) } func (e ErrPortAlreadyAllocated) Error() string { return fmt.Sprintf("Bind for %s:%d failed: port is already allocated", e.ip, e.port) } type ( PortAllocator struct { mutex sync.Mutex ipMap ipMapping Begin int End int } portMap struct { p map[int]struct{} begin, end int last int } protoMap map[string]*portMap ) func New() *PortAllocator { start, end, err := getDynamicPortRange() if err != nil { log.Warn(err) start, end = DefaultPortRangeStart, DefaultPortRangeEnd } return &PortAllocator{ ipMap: ipMapping{}, Begin: start, End: end, } } func getDynamicPortRange() (start int, end int, err error) { const portRangeKernelParam = "/proc/sys/net/ipv4/ip_local_port_range" portRangeFallback := fmt.Sprintf("using fallback port range %d-%d", DefaultPortRangeStart, DefaultPortRangeEnd) file, err := os.Open(portRangeKernelParam) if err != nil { return 0, 0, fmt.Errorf("port allocator - %s due to error: %v", portRangeFallback, err) } defer file.Close() n, err := fmt.Fscanf(bufio.NewReader(file), "%d\t%d", &start, &end) if n != 2 || err != nil { if err == nil { err = fmt.Errorf("unexpected count of parsed numbers (%d)", n) } return 0, 0, fmt.Errorf("port allocator - failed to parse system ephemeral port range from %s - %s: %v", portRangeKernelParam, portRangeFallback, err) } return start, end, nil } // RequestPort requests a new port from the global ports pool for the specified ip and proto. // If port is 0 it returns the first free port. Otherwise it checks port availability // in the pool and returns that port, or an error if the port is already busy.
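//
// A short sketch of typical calls (error handling elided; a nil IP is
// treated as 0.0.0.0 throughout this package):
//
//	a := New()
//	p1, _ := a.RequestPort(nil, "tcp", 0)    // dynamic: first free port, starting at a.Begin
//	p2, _ := a.RequestPort(nil, "tcp", 8080) // specific: 8080, or an ErrPortAlreadyAllocated error
//	_ = a.ReleasePort(nil, "tcp", p2)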
func (p *PortAllocator) RequestPort(ip net.IP, proto string, port int) (int, error) { p.mutex.Lock() defer p.mutex.Unlock() if proto != "tcp" && proto != "udp" { return 0, ErrUnknownProtocol } if ip == nil { ip = defaultIP } ipstr := ip.String() protomap, ok := p.ipMap[ipstr] if !ok { protomap = protoMap{ "tcp": p.newPortMap(), "udp": p.newPortMap(), } p.ipMap[ipstr] = protomap } mapping := protomap[proto] if port > 0 { if _, ok := mapping.p[port]; !ok { mapping.p[port] = struct{}{} return port, nil } return 0, NewErrPortAlreadyAllocated(ipstr, port) } port, err := mapping.findPort() if err != nil { return 0, err } return port, nil } // ReleasePort releases port from global ports pool for specified ip and proto. func (p *PortAllocator) ReleasePort(ip net.IP, proto string, port int) error { p.mutex.Lock() defer p.mutex.Unlock() if ip == nil { ip = defaultIP } protomap, ok := p.ipMap[ip.String()] if !ok { return nil } delete(protomap[proto].p, port) return nil } func (p *PortAllocator) newPortMap() *portMap { return &portMap{ p: map[int]struct{}{}, begin: p.Begin, end: p.End, last: p.End, } } // ReleaseAll releases all ports for all ips. func (p *PortAllocator) ReleaseAll() error { p.mutex.Lock() p.ipMap = ipMapping{} p.mutex.Unlock() return nil } func (pm *portMap) findPort() (int, error) { port := pm.last for i := 0; i <= pm.end-pm.begin; i++ { port++ if port > pm.end { port = pm.begin } if _, ok := pm.p[port]; !ok { pm.p[port] = struct{}{} pm.last = port return port, nil } } return 0, ErrAllPortsAllocated } docker-1.6.2/daemon/networkdriver/portallocator/portallocator_test.go0000644000175000017500000001241512524223634025647 0ustar tianontianonpackage portallocator import ( "net" "testing" ) func TestRequestNewPort(t *testing.T) { p := New() port, err := p.RequestPort(defaultIP, "tcp", 0) if err != nil { t.Fatal(err) } if expected := p.Begin; port != expected { t.Fatalf("Expected port %d got %d", expected, port) } } func TestRequestSpecificPort(t *testing.T) { p := New() port, err := p.RequestPort(defaultIP, "tcp", 5000) if err != nil { t.Fatal(err) } if port != 5000 { t.Fatalf("Expected port 5000 got %d", port) } } func TestReleasePort(t *testing.T) { p := New() port, err := p.RequestPort(defaultIP, "tcp", 5000) if err != nil { t.Fatal(err) } if port != 5000 { t.Fatalf("Expected port 5000 got %d", port) } if err := p.ReleasePort(defaultIP, "tcp", 5000); err != nil { t.Fatal(err) } } func TestReuseReleasedPort(t *testing.T) { p := New() port, err := p.RequestPort(defaultIP, "tcp", 5000) if err != nil { t.Fatal(err) } if port != 5000 { t.Fatalf("Expected port 5000 got %d", port) } if err := p.ReleasePort(defaultIP, "tcp", 5000); err != nil { t.Fatal(err) } port, err = p.RequestPort(defaultIP, "tcp", 5000) if err != nil { t.Fatal(err) } } func TestReleaseUnreadledPort(t *testing.T) { p := New() port, err := p.RequestPort(defaultIP, "tcp", 5000) if err != nil { t.Fatal(err) } if port != 5000 { t.Fatalf("Expected port 5000 got %d", port) } port, err = p.RequestPort(defaultIP, "tcp", 5000) switch err.(type) { case ErrPortAlreadyAllocated: default: t.Fatalf("Expected port allocation error got %s", err) } } func TestUnknowProtocol(t *testing.T) { if _, err := New().RequestPort(defaultIP, "tcpp", 0); err != ErrUnknownProtocol { t.Fatalf("Expected error %s got %s", ErrUnknownProtocol, err) } } func TestAllocateAllPorts(t *testing.T) { p := New() for i := 0; i <= p.End-p.Begin; i++ { port, err := p.RequestPort(defaultIP, "tcp", 0) if err != nil { t.Fatal(err) } if expected := p.Begin + i; port != 
expected { t.Fatalf("Expected port %d got %d", expected, port) } } if _, err := p.RequestPort(defaultIP, "tcp", 0); err != ErrAllPortsAllocated { t.Fatalf("Expected error %s got %s", ErrAllPortsAllocated, err) } _, err := p.RequestPort(defaultIP, "udp", 0) if err != nil { t.Fatal(err) } // release a port in the middle and ensure we get another tcp port port := p.Begin + 5 if err := p.ReleasePort(defaultIP, "tcp", port); err != nil { t.Fatal(err) } newPort, err := p.RequestPort(defaultIP, "tcp", 0) if err != nil { t.Fatal(err) } if newPort != port { t.Fatalf("Expected port %d got %d", port, newPort) } // now pm.last == newPort, release it so that it's the only free port of // the range, and ensure we get it back if err := p.ReleasePort(defaultIP, "tcp", newPort); err != nil { t.Fatal(err) } port, err = p.RequestPort(defaultIP, "tcp", 0) if err != nil { t.Fatal(err) } if newPort != port { t.Fatalf("Expected port %d got %d", newPort, port) } } func BenchmarkAllocatePorts(b *testing.B) { p := New() for i := 0; i < b.N; i++ { for i := 0; i <= p.End-p.Begin; i++ { port, err := p.RequestPort(defaultIP, "tcp", 0) if err != nil { b.Fatal(err) } if expected := p.Begin + i; port != expected { b.Fatalf("Expected port %d got %d", expected, port) } } p.ReleaseAll() } } func TestPortAllocation(t *testing.T) { p := New() ip := net.ParseIP("192.168.0.1") ip2 := net.ParseIP("192.168.0.2") if port, err := p.RequestPort(ip, "tcp", 80); err != nil { t.Fatal(err) } else if port != 80 { t.Fatalf("Acquire(80) should return 80, not %d", port) } port, err := p.RequestPort(ip, "tcp", 0) if err != nil { t.Fatal(err) } if port <= 0 { t.Fatalf("Acquire(0) should return a non-zero port") } if _, err := p.RequestPort(ip, "tcp", port); err == nil { t.Fatalf("Acquiring a port already in use should return an error") } if newPort, err := p.RequestPort(ip, "tcp", 0); err != nil { t.Fatal(err) } else if newPort == port { t.Fatalf("Acquire(0) allocated the same port twice: %d", port) } if _, err := p.RequestPort(ip, "tcp", 80); err == nil { t.Fatalf("Acquiring a port already in use should return an error") } if _, err := p.RequestPort(ip2, "tcp", 80); err != nil { t.Fatalf("It should be possible to allocate the same port on a different interface") } if _, err := p.RequestPort(ip2, "tcp", 80); err == nil { t.Fatalf("Acquiring a port already in use should return an error") } if err := p.ReleasePort(ip, "tcp", 80); err != nil { t.Fatal(err) } if _, err := p.RequestPort(ip, "tcp", 80); err != nil { t.Fatal(err) } port, err = p.RequestPort(ip, "tcp", 0) if err != nil { t.Fatal(err) } port2, err := p.RequestPort(ip, "tcp", port+1) if err != nil { t.Fatal(err) } port3, err := p.RequestPort(ip, "tcp", 0) if err != nil { t.Fatal(err) } if port3 == port2 { t.Fatal("Requesting a dynamic port should never allocate a used port") } } func TestNoDuplicateBPR(t *testing.T) { p := New() if port, err := p.RequestPort(defaultIP, "tcp", p.Begin); err != nil { t.Fatal(err) } else if port != p.Begin { t.Fatalf("Expected port %d got %d", p.Begin, port) } if port, err := p.RequestPort(defaultIP, "tcp", 0); err != nil { t.Fatal(err) } else if port == p.Begin { t.Fatalf("Acquire(0) allocated the same port twice: %d", port) } } docker-1.6.2/daemon/networkdriver/bridge/0000755000175000017500000000000012524223634017740 5ustar tianontianondocker-1.6.2/daemon/networkdriver/bridge/driver.go0000644000175000017500000005272612524223634021576 0ustar tianontianonpackage bridge import ( "encoding/hex" "errors" "fmt" "io/ioutil" "net" "os" "os/exec" "strings" "sync" 
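// The third-party and intra-project imports below do the heavy lifting:
// netlink manipulates the bridge device and its addresses directly, while
// pkg/iptables wraps the host's iptables command for NAT and filter rules.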
log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/networkdriver" "github.com/docker/docker/daemon/networkdriver/ipallocator" "github.com/docker/docker/daemon/networkdriver/portmapper" "github.com/docker/docker/engine" "github.com/docker/docker/nat" "github.com/docker/docker/pkg/iptables" "github.com/docker/docker/pkg/networkfs/resolvconf" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/libcontainer/netlink" ) const ( DefaultNetworkBridge = "docker0" MaxAllocatedPortAttempts = 10 ) // Network interface represents the networking stack of a container type networkInterface struct { IP net.IP IPv6 net.IP PortMappings []net.Addr // There are mappings to the host interfaces } type ifaces struct { c map[string]*networkInterface sync.Mutex } func (i *ifaces) Set(key string, n *networkInterface) { i.Lock() i.c[key] = n i.Unlock() } func (i *ifaces) Get(key string) *networkInterface { i.Lock() res := i.c[key] i.Unlock() return res } var ( addrs = []string{ // Here we don't follow the convention of using the 1st IP of the range for the gateway. // This is to use the same gateway IPs as the /24 ranges, which predate the /16 ranges. // In theory this shouldn't matter - in practice there's bound to be a few scripts relying // on the internal addressing or other stupid things like that. // They shouldn't, but hey, let's not break them unless we really have to. "172.17.42.1/16", // Don't use 172.16.0.0/16, it conflicts with EC2 DNS 172.16.0.23 "10.0.42.1/16", // Don't even try using the entire /8, that's too intrusive "10.1.42.1/16", "10.42.42.1/16", "172.16.42.1/24", "172.16.43.1/24", "172.16.44.1/24", "10.0.42.1/24", "10.0.43.1/24", "192.168.42.1/24", "192.168.43.1/24", "192.168.44.1/24", } bridgeIface string bridgeIPv4Network *net.IPNet bridgeIPv6Addr net.IP globalIPv6Network *net.IPNet portMapper *portmapper.PortMapper once sync.Once defaultBindingIP = net.ParseIP("0.0.0.0") currentInterfaces = ifaces{c: make(map[string]*networkInterface)} ) func initPortMapper() { once.Do(func() { portMapper = portmapper.New() }) } func InitDriver(job *engine.Job) engine.Status { var ( networkv4 *net.IPNet networkv6 *net.IPNet addrv4 net.Addr addrsv6 []net.Addr enableIPTables = job.GetenvBool("EnableIptables") enableIPv6 = job.GetenvBool("EnableIPv6") icc = job.GetenvBool("InterContainerCommunication") ipMasq = job.GetenvBool("EnableIpMasq") ipForward = job.GetenvBool("EnableIpForward") bridgeIP = job.Getenv("BridgeIP") bridgeIPv6 = "fe80::1/64" fixedCIDR = job.Getenv("FixedCIDR") fixedCIDRv6 = job.Getenv("FixedCIDRv6") ) // try to modprobe bridge first // see gh#12177 if out, err := exec.Command("modprobe", "-va", "bridge", "nf_nat").Output(); err != nil { log.Warnf("Running modprobe bridge nf_nat failed with message: %s, error: %v", out, err) } initPortMapper() if defaultIP := job.Getenv("DefaultBindingIP"); defaultIP != "" { defaultBindingIP = net.ParseIP(defaultIP) } bridgeIface = job.Getenv("BridgeIface") usingDefaultBridge := false if bridgeIface == "" { usingDefaultBridge = true bridgeIface = DefaultNetworkBridge } addrv4, addrsv6, err := networkdriver.GetIfaceAddr(bridgeIface) if err != nil { // No Bridge existent, create one // If we're not using the default bridge, fail without trying to create it if !usingDefaultBridge { return job.Error(err) } // If the iface is not found, try to create it if err := configureBridge(bridgeIP, bridgeIPv6, enableIPv6); err != nil { return job.Error(err) } addrv4, addrsv6, err = networkdriver.GetIfaceAddr(bridgeIface) if err != nil { return 
job.Error(err) } if fixedCIDRv6 != "" { // Setting route to global IPv6 subnet log.Infof("Adding route to IPv6 network %q via device %q", fixedCIDRv6, bridgeIface) if err := netlink.AddRoute(fixedCIDRv6, "", "", bridgeIface); err != nil { log.Fatalf("Could not add route to IPv6 network %q via device %q", fixedCIDRv6, bridgeIface) } } } else { // Bridge exists already, getting info... // Validate that the bridge ip matches the ip specified by BridgeIP if bridgeIP != "" { networkv4 = addrv4.(*net.IPNet) bip, _, err := net.ParseCIDR(bridgeIP) if err != nil { return job.Error(err) } if !networkv4.IP.Equal(bip) { return job.Errorf("Bridge ip (%s) does not match existing bridge configuration %s", networkv4.IP, bip) } } // A bridge might exist but not have any IPv6 addr associated with it yet // (for example, an existing Docker installation that has only been used // with IPv4 and docker0 already is set up) In that case, we can perform // the bridge init for IPv6 here, else we will error out below if --ipv6=true if len(addrsv6) == 0 && enableIPv6 { if err := setupIPv6Bridge(bridgeIPv6); err != nil { return job.Error(err) } // Recheck addresses now that IPv6 is setup on the bridge addrv4, addrsv6, err = networkdriver.GetIfaceAddr(bridgeIface) if err != nil { return job.Error(err) } } // TODO: Check if route to fixedCIDRv6 is set } if enableIPv6 { bip6, _, err := net.ParseCIDR(bridgeIPv6) if err != nil { return job.Error(err) } found := false for _, addrv6 := range addrsv6 { networkv6 = addrv6.(*net.IPNet) if networkv6.IP.Equal(bip6) { found = true break } } if !found { return job.Errorf("Bridge IPv6 does not match existing bridge configuration %s", bip6) } } networkv4 = addrv4.(*net.IPNet) if enableIPv6 { if len(addrsv6) == 0 { return job.Error(errors.New("IPv6 enabled but no IPv6 detected")) } bridgeIPv6Addr = networkv6.IP } // Configure iptables for link support if enableIPTables { if err := setupIPTables(addrv4, icc, ipMasq); err != nil { return job.Error(err) } } if ipForward { // Enable IPv4 forwarding if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte{'1', '\n'}, 0644); err != nil { job.Logf("WARNING: unable to enable IPv4 forwarding: %s\n", err) } if fixedCIDRv6 != "" { // Enable IPv6 forwarding if err := ioutil.WriteFile("/proc/sys/net/ipv6/conf/default/forwarding", []byte{'1', '\n'}, 0644); err != nil { job.Logf("WARNING: unable to enable IPv6 default forwarding: %s\n", err) } if err := ioutil.WriteFile("/proc/sys/net/ipv6/conf/all/forwarding", []byte{'1', '\n'}, 0644); err != nil { job.Logf("WARNING: unable to enable IPv6 all forwarding: %s\n", err) } } } // We can always try removing the iptables if err := iptables.RemoveExistingChain("DOCKER", iptables.Nat); err != nil { return job.Error(err) } if enableIPTables { _, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Nat) if err != nil { return job.Error(err) } chain, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Filter) if err != nil { return job.Error(err) } portMapper.SetIptablesChain(chain) } bridgeIPv4Network = networkv4 if fixedCIDR != "" { _, subnet, err := net.ParseCIDR(fixedCIDR) if err != nil { return job.Error(err) } log.Debugf("Subnet: %v", subnet) if err := ipallocator.RegisterSubnet(bridgeIPv4Network, subnet); err != nil { return job.Error(err) } } if fixedCIDRv6 != "" { _, subnet, err := net.ParseCIDR(fixedCIDRv6) if err != nil { return job.Error(err) } log.Debugf("Subnet: %v", subnet) if err := ipallocator.RegisterSubnet(subnet, subnet); err != nil { return job.Error(err) } 
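// Note that the fixed IPv6 CIDR is registered against itself (network ==
// subnet), so the allocator serves addresses from the whole fixed range
// rather than from a sub-range of an enclosing network.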
globalIPv6Network = subnet } // Block BridgeIP in IP allocator ipallocator.RequestIP(bridgeIPv4Network, bridgeIPv4Network.IP) // https://github.com/docker/docker/issues/2768 job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeIPv4Network.IP) for name, f := range map[string]engine.Handler{ "allocate_interface": Allocate, "release_interface": Release, "allocate_port": AllocatePort, "link": LinkContainers, } { if err := job.Eng.Register(name, f); err != nil { return job.Error(err) } } return engine.StatusOK } func setupIPTables(addr net.Addr, icc, ipmasq bool) error { // Enable NAT if ipmasq { natArgs := []string{"-s", addr.String(), "!", "-o", bridgeIface, "-j", "MASQUERADE"} if !iptables.Exists(iptables.Nat, "POSTROUTING", natArgs...) { if output, err := iptables.Raw(append([]string{ "-t", string(iptables.Nat), "-I", "POSTROUTING"}, natArgs...)...); err != nil { return fmt.Errorf("Unable to enable network bridge NAT: %s", err) } else if len(output) != 0 { return &iptables.ChainError{Chain: "POSTROUTING", Output: output} } } } var ( args = []string{"-i", bridgeIface, "-o", bridgeIface, "-j"} acceptArgs = append(args, "ACCEPT") dropArgs = append(args, "DROP") ) if !icc { iptables.Raw(append([]string{"-D", "FORWARD"}, acceptArgs...)...) if !iptables.Exists(iptables.Filter, "FORWARD", dropArgs...) { log.Debugf("Disable inter-container communication") if output, err := iptables.Raw(append([]string{"-I", "FORWARD"}, dropArgs...)...); err != nil { return fmt.Errorf("Unable to prevent intercontainer communication: %s", err) } else if len(output) != 0 { return fmt.Errorf("Error disabling intercontainer communication: %s", output) } } } else { iptables.Raw(append([]string{"-D", "FORWARD"}, dropArgs...)...) if !iptables.Exists(iptables.Filter, "FORWARD", acceptArgs...) { log.Debugf("Enable inter-container communication") if output, err := iptables.Raw(append([]string{"-I", "FORWARD"}, acceptArgs...)...); err != nil { return fmt.Errorf("Unable to allow intercontainer communication: %s", err) } else if len(output) != 0 { return fmt.Errorf("Error enabling intercontainer communication: %s", output) } } } // Accept all non-intercontainer outgoing packets outgoingArgs := []string{"-i", bridgeIface, "!", "-o", bridgeIface, "-j", "ACCEPT"} if !iptables.Exists(iptables.Filter, "FORWARD", outgoingArgs...) { if output, err := iptables.Raw(append([]string{"-I", "FORWARD"}, outgoingArgs...)...); err != nil { return fmt.Errorf("Unable to allow outgoing packets: %s", err) } else if len(output) != 0 { return &iptables.ChainError{Chain: "FORWARD outgoing", Output: output} } } // Accept incoming packets for existing connections existingArgs := []string{"-o", bridgeIface, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"} if !iptables.Exists(iptables.Filter, "FORWARD", existingArgs...) 
{ if output, err := iptables.Raw(append([]string{"-I", "FORWARD"}, existingArgs...)...); err != nil { return fmt.Errorf("Unable to allow incoming packets: %s", err) } else if len(output) != 0 { return &iptables.ChainError{Chain: "FORWARD incoming", Output: output} } } return nil } func RequestPort(ip net.IP, proto string, port int) (int, error) { initPortMapper() return portMapper.Allocator.RequestPort(ip, proto, port) } // configureBridge attempts to create and configure a network bridge interface named `bridgeIface` on the host // If bridgeIP is empty, it will try to find a non-conflicting IP from the Docker-specified private ranges // If the bridge `bridgeIface` already exists, it will only perform the IP address association with the existing // bridge (fixes issue #8444) // If an address which doesn't conflict with existing interfaces can't be found, an error is returned. func configureBridge(bridgeIP string, bridgeIPv6 string, enableIPv6 bool) error { nameservers := []string{} resolvConf, _ := resolvconf.Get() // We don't check for an error here, because we don't really care // if we can't read /etc/resolv.conf. So instead we skip the append // if resolvConf is nil. It either doesn't exist, or we can't read it // for some reason. if resolvConf != nil { nameservers = append(nameservers, resolvconf.GetNameserversAsCIDR(resolvConf)...) } var ifaceAddr string if len(bridgeIP) != 0 { _, _, err := net.ParseCIDR(bridgeIP) if err != nil { return err } ifaceAddr = bridgeIP } else { for _, addr := range addrs { _, dockerNetwork, err := net.ParseCIDR(addr) if err != nil { return err } if err := networkdriver.CheckNameserverOverlaps(nameservers, dockerNetwork); err == nil { if err := networkdriver.CheckRouteOverlaps(dockerNetwork); err == nil { ifaceAddr = addr break } else { log.Debugf("%s %s", addr, err) } } } } if ifaceAddr == "" { return fmt.Errorf("Could not find a free IP address range for interface '%s'. 
Please configure its address manually and run 'docker -b %s'", bridgeIface, bridgeIface) } log.Debugf("Creating bridge %s with network %s", bridgeIface, ifaceAddr) if err := createBridgeIface(bridgeIface); err != nil { // The bridge may already exist, therefore we can ignore an "exists" error if !os.IsExist(err) { return err } } iface, err := net.InterfaceByName(bridgeIface) if err != nil { return err } ipAddr, ipNet, err := net.ParseCIDR(ifaceAddr) if err != nil { return err } if err := netlink.NetworkLinkAddIp(iface, ipAddr, ipNet); err != nil { return fmt.Errorf("Unable to add private network: %s", err) } if enableIPv6 { if err := setupIPv6Bridge(bridgeIPv6); err != nil { return err } } if err := netlink.NetworkLinkUp(iface); err != nil { return fmt.Errorf("Unable to start network bridge: %s", err) } return nil } func setupIPv6Bridge(bridgeIPv6 string) error { iface, err := net.InterfaceByName(bridgeIface) if err != nil { return err } // Enable IPv6 on the bridge procFile := "/proc/sys/net/ipv6/conf/" + iface.Name + "/disable_ipv6" if err := ioutil.WriteFile(procFile, []byte{'0', '\n'}, 0644); err != nil { return fmt.Errorf("Unable to enable IPv6 addresses on bridge: %v", err) } ipAddr6, ipNet6, err := net.ParseCIDR(bridgeIPv6) if err != nil { return fmt.Errorf("Unable to parse bridge IPv6 address: %q, error: %v", bridgeIPv6, err) } if err := netlink.NetworkLinkAddIp(iface, ipAddr6, ipNet6); err != nil { return fmt.Errorf("Unable to add private IPv6 network: %v", err) } return nil } func createBridgeIface(name string) error { kv, err := kernel.GetKernelVersion() // Only set the bridge's mac address if the kernel version is > 3.3 // before that it was not supported setBridgeMacAddr := err == nil && (kv.Kernel >= 3 && kv.Major >= 3) log.Debugf("setting bridge mac address = %v", setBridgeMacAddr) return netlink.CreateBridge(name, setBridgeMacAddr) } // Generate a IEEE802 compliant MAC address from the given IP address. // // The generator is guaranteed to be consistent: the same IP will always yield the same // MAC address. This is to avoid ARP cache issues. func generateMacAddr(ip net.IP) net.HardwareAddr { hw := make(net.HardwareAddr, 6) // The first byte of the MAC address has to comply with these rules: // 1. Unicast: Set the least-significant bit to 0. // 2. Address is locally administered: Set the second-least-significant bit (U/L) to 1. // 3. As "small" as possible: The veth address has to be "smaller" than the bridge address. hw[0] = 0x02 // The first 24 bits of the MAC represent the Organizationally Unique Identifier (OUI). // Since this address is locally administered, we can do whatever we want as long as // it doesn't conflict with other addresses. hw[1] = 0x42 // Insert the IP address into the last 32 bits of the MAC address. // This is a simple way to guarantee the address will be consistent and unique. 
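// For example (worked case): the IPv4 address 172.17.0.2, whose bytes are
// ac:11:00:02, yields the MAC address 02:42:ac:11:00:02.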
copy(hw[2:], ip.To4()) return hw } func linkLocalIPv6FromMac(mac string) (string, error) { hx := strings.Replace(mac, ":", "", -1) hw, err := hex.DecodeString(hx) if err != nil { return "", errors.New("Could not parse MAC address " + mac) } hw[0] ^= 0x2 return fmt.Sprintf("fe80::%x%x:%xff:fe%x:%x%x/64", hw[0], hw[1], hw[2], hw[3], hw[4], hw[5]), nil } // Allocate a network interface func Allocate(job *engine.Job) engine.Status { var ( ip net.IP mac net.HardwareAddr err error id = job.Args[0] requestedIP = net.ParseIP(job.Getenv("RequestedIP")) requestedIPv6 = net.ParseIP(job.Getenv("RequestedIPv6")) globalIPv6 net.IP ) ip, err = ipallocator.RequestIP(bridgeIPv4Network, requestedIP) if err != nil { return job.Error(err) } // If no explicit mac address was given, generate a random one. if mac, err = net.ParseMAC(job.Getenv("RequestedMac")); err != nil { mac = generateMacAddr(ip) } if globalIPv6Network != nil { // If globalIPv6Network Size is at least a /80 subnet generate IPv6 address from MAC address netmask_ones, _ := globalIPv6Network.Mask.Size() if requestedIPv6 == nil && netmask_ones <= 80 { requestedIPv6 = make(net.IP, len(globalIPv6Network.IP)) copy(requestedIPv6, globalIPv6Network.IP) for i, h := range mac { requestedIPv6[i+10] = h } } globalIPv6, err = ipallocator.RequestIP(globalIPv6Network, requestedIPv6) if err != nil { log.Errorf("Allocator: RequestIP v6: %v", err) return job.Error(err) } log.Infof("Allocated IPv6 %s", globalIPv6) } out := engine.Env{} out.Set("IP", ip.String()) out.Set("Mask", bridgeIPv4Network.Mask.String()) out.Set("Gateway", bridgeIPv4Network.IP.String()) out.Set("MacAddress", mac.String()) out.Set("Bridge", bridgeIface) size, _ := bridgeIPv4Network.Mask.Size() out.SetInt("IPPrefixLen", size) // If linklocal IPv6 localIPv6Net, err := linkLocalIPv6FromMac(mac.String()) if err != nil { return job.Error(err) } localIPv6, _, _ := net.ParseCIDR(localIPv6Net) out.Set("LinkLocalIPv6", localIPv6.String()) out.Set("MacAddress", mac.String()) if globalIPv6Network != nil { out.Set("GlobalIPv6", globalIPv6.String()) sizev6, _ := globalIPv6Network.Mask.Size() out.SetInt("GlobalIPv6PrefixLen", sizev6) out.Set("IPv6Gateway", bridgeIPv6Addr.String()) } currentInterfaces.Set(id, &networkInterface{ IP: ip, IPv6: globalIPv6, }) out.WriteTo(job.Stdout) return engine.StatusOK } // Release an interface for a select ip func Release(job *engine.Job) engine.Status { var ( id = job.Args[0] containerInterface = currentInterfaces.Get(id) ) if containerInterface == nil { return job.Errorf("No network information to release for %s", id) } for _, nat := range containerInterface.PortMappings { if err := portMapper.Unmap(nat); err != nil { log.Infof("Unable to unmap port %s: %s", nat, err) } } if err := ipallocator.ReleaseIP(bridgeIPv4Network, containerInterface.IP); err != nil { log.Infof("Unable to release IPv4 %s", err) } if globalIPv6Network != nil { if err := ipallocator.ReleaseIP(globalIPv6Network, containerInterface.IPv6); err != nil { log.Infof("Unable to release IPv6 %s", err) } } return engine.StatusOK } // Allocate an external port and map it to the interface func AllocatePort(job *engine.Job) engine.Status { var ( err error ip = defaultBindingIP id = job.Args[0] hostIP = job.Getenv("HostIP") hostPort = job.GetenvInt("HostPort") containerPort = job.GetenvInt("ContainerPort") proto = job.Getenv("Proto") network = currentInterfaces.Get(id) ) if hostIP != "" { ip = net.ParseIP(hostIP) if ip == nil { return job.Errorf("Bad parameter: invalid host ip %s", hostIP) } } // host ip, 
proto, and host port var container net.Addr switch proto { case "tcp": container = &net.TCPAddr{IP: network.IP, Port: containerPort} case "udp": container = &net.UDPAddr{IP: network.IP, Port: containerPort} default: return job.Errorf("unsupported address type %s", proto) } // // Try up to 10 times to get a port that's not already allocated. // // In the event of failure to bind, return the error that portmapper.Map // yields. // var host net.Addr for i := 0; i < MaxAllocatedPortAttempts; i++ { if host, err = portMapper.Map(container, ip, hostPort); err == nil { break } // There is no point in immediately retrying to map an explicitly // chosen port. if hostPort != 0 { job.Logf("Failed to allocate and map port %d: %s", hostPort, err) break } job.Logf("Failed to allocate and map port: %s, retry: %d", err, i+1) } if err != nil { return job.Error(err) } network.PortMappings = append(network.PortMappings, host) out := engine.Env{} switch netAddr := host.(type) { case *net.TCPAddr: out.Set("HostIP", netAddr.IP.String()) out.SetInt("HostPort", netAddr.Port) case *net.UDPAddr: out.Set("HostIP", netAddr.IP.String()) out.SetInt("HostPort", netAddr.Port) } if _, err := out.WriteTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK } func LinkContainers(job *engine.Job) engine.Status { var ( action = job.Args[0] nfAction iptables.Action childIP = job.Getenv("ChildIP") parentIP = job.Getenv("ParentIP") ignoreErrors = job.GetenvBool("IgnoreErrors") ports = job.GetenvList("Ports") ) switch action { case "-A": nfAction = iptables.Append case "-I": nfAction = iptables.Insert case "-D": nfAction = iptables.Delete default: return job.Errorf("Invalid action '%s' specified", action) } ip1 := net.ParseIP(parentIP) if ip1 == nil { return job.Errorf("Parent IP '%s' is invalid", parentIP) } ip2 := net.ParseIP(childIP) if ip2 == nil { return job.Errorf("Child IP '%s' is invalid", childIP) } chain := iptables.Chain{Name: "DOCKER", Bridge: bridgeIface} for _, p := range ports { port := nat.Port(p) if err := chain.Link(nfAction, ip1, ip2, port.Int(), port.Proto()); !ignoreErrors && err != nil { return job.Error(err) } } return engine.StatusOK } docker-1.6.2/daemon/networkdriver/bridge/driver_test.go0000644000175000017500000001607212524223634022627 0ustar tianontianonpackage bridge import ( "fmt" "net" "strconv" "testing" "github.com/docker/docker/daemon/networkdriver/portmapper" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/iptables" ) func init() { // reset the new proxy command for mocking out the userland proxy in tests portmapper.NewProxy = portmapper.NewMockProxyCommand } func findFreePort(t *testing.T) int { l, err := net.Listen("tcp", ":0") if err != nil { t.Fatal("Failed to find a free port") } defer l.Close() result, err := net.ResolveTCPAddr("tcp", l.Addr().String()) if err != nil { t.Fatal("Failed to resolve address to identify free port") } return result.Port } func newPortAllocationJob(eng *engine.Engine, port int) (job *engine.Job) { strPort := strconv.Itoa(port) job = eng.Job("allocate_port", "container_id") job.Setenv("HostIP", "127.0.0.1") job.Setenv("HostPort", strPort) job.Setenv("Proto", "tcp") job.Setenv("ContainerPort", strPort) return } func newPortAllocationJobWithInvalidHostIP(eng *engine.Engine, port int) (job *engine.Job) { strPort := strconv.Itoa(port) job = eng.Job("allocate_port", "container_id") job.Setenv("HostIP", "localhost") job.Setenv("HostPort", strPort) job.Setenv("Proto", "tcp") job.Setenv("ContainerPort", strPort) return } func 
TestAllocatePortDetection(t *testing.T) { eng := engine.New() eng.Logging = false freePort := findFreePort(t) // Init driver job := eng.Job("initdriver") if res := InitDriver(job); res != engine.StatusOK { t.Fatal("Failed to initialize network driver") } // Allocate interface job = eng.Job("allocate_interface", "container_id") if res := Allocate(job); res != engine.StatusOK { t.Fatal("Failed to allocate network interface") } // Allocate same port twice, expect failure on second call job = newPortAllocationJob(eng, freePort) if res := AllocatePort(job); res != engine.StatusOK { t.Fatal("Failed to find a free port to allocate") } if res := AllocatePort(job); res == engine.StatusOK { t.Fatal("Duplicate port allocation granted by AllocatePort") } } func TestHostnameFormatChecking(t *testing.T) { eng := engine.New() eng.Logging = false freePort := findFreePort(t) // Init driver job := eng.Job("initdriver") if res := InitDriver(job); res != engine.StatusOK { t.Fatal("Failed to initialize network driver") } // Allocate interface job = eng.Job("allocate_interface", "container_id") if res := Allocate(job); res != engine.StatusOK { t.Fatal("Failed to allocate network interface") } // Allocate port with invalid HostIP, expect failure with Bad Request http status job = newPortAllocationJobWithInvalidHostIP(eng, freePort) if res := AllocatePort(job); res == engine.StatusOK { t.Fatal("Failed to check invalid HostIP") } } func newInterfaceAllocation(t *testing.T, input engine.Env) (output engine.Env) { eng := engine.New() eng.Logging = false done := make(chan bool) // set IPv6 global if given if input.Exists("globalIPv6Network") { _, globalIPv6Network, _ = net.ParseCIDR(input.Get("globalIPv6Network")) } job := eng.Job("allocate_interface", "container_id") job.Env().Init(&input) reader, _ := job.Stdout.AddPipe() go func() { output.Decode(reader) done <- true }() res := Allocate(job) job.Stdout.Close() <-done if input.Exists("expectFail") && input.GetBool("expectFail") { if res == engine.StatusOK { t.Fatal("Doesn't fail to allocate network interface") } } else { if res != engine.StatusOK { t.Fatal("Failed to allocate network interface") } } if input.Exists("globalIPv6Network") { // check for bug #11427 _, subnet, _ := net.ParseCIDR(input.Get("globalIPv6Network")) if globalIPv6Network.IP.String() != subnet.IP.String() { t.Fatal("globalIPv6Network was modified during allocation") } // clean up IPv6 global globalIPv6Network = nil } return } func TestIPv6InterfaceAllocationAutoNetmaskGt80(t *testing.T) { input := engine.Env{} _, subnet, _ := net.ParseCIDR("2001:db8:1234:1234:1234::/81") // set global ipv6 input.Set("globalIPv6Network", subnet.String()) output := newInterfaceAllocation(t, input) // ensure low manually assigend global ip ip := net.ParseIP(output.Get("GlobalIPv6")) _, subnet, _ = net.ParseCIDR(fmt.Sprintf("%s/%d", subnet.IP.String(), 120)) if !subnet.Contains(ip) { t.Fatalf("Error ip %s not in subnet %s", ip.String(), subnet.String()) } } func TestIPv6InterfaceAllocationAutoNetmaskLe80(t *testing.T) { input := engine.Env{} _, subnet, _ := net.ParseCIDR("2001:db8:1234:1234:1234::/80") // set global ipv6 input.Set("globalIPv6Network", subnet.String()) input.Set("RequestedMac", "ab:cd:ab:cd:ab:cd") output := newInterfaceAllocation(t, input) // ensure global ip with mac ip := net.ParseIP(output.Get("GlobalIPv6")) expected_ip := net.ParseIP("2001:db8:1234:1234:1234:abcd:abcd:abcd") if ip.String() != expected_ip.String() { t.Fatalf("Error ip %s should be %s", ip.String(), expected_ip.String()) } // 
ensure link local format ip = net.ParseIP(output.Get("LinkLocalIPv6")) expected_ip = net.ParseIP("fe80::a9cd:abff:fecd:abcd") if ip.String() != expected_ip.String() { t.Fatalf("Error ip %s should be %s", ip.String(), expected_ip.String()) } } func TestIPv6InterfaceAllocationRequest(t *testing.T) { input := engine.Env{} _, subnet, _ := net.ParseCIDR("2001:db8:1234:1234:1234::/80") expected_ip := net.ParseIP("2001:db8:1234:1234:1234::1328") // set global ipv6 input.Set("globalIPv6Network", subnet.String()) input.Set("RequestedIPv6", expected_ip.String()) output := newInterfaceAllocation(t, input) // ensure global ip with mac ip := net.ParseIP(output.Get("GlobalIPv6")) if ip.String() != expected_ip.String() { t.Fatalf("Error ip %s should be %s", ip.String(), expected_ip.String()) } // retry -> fails for duplicated address input.SetBool("expectFail", true) output = newInterfaceAllocation(t, input) } func TestMacAddrGeneration(t *testing.T) { ip := net.ParseIP("192.168.0.1") mac := generateMacAddr(ip).String() // Should be consistent. if generateMacAddr(ip).String() != mac { t.Fatal("Inconsistent MAC address") } // Should be unique. ip2 := net.ParseIP("192.168.0.2") if generateMacAddr(ip2).String() == mac { t.Fatal("Non-unique MAC address") } } func TestLinkContainers(t *testing.T) { eng := engine.New() eng.Logging = false // Init driver job := eng.Job("initdriver") if res := InitDriver(job); res != engine.StatusOK { t.Fatal("Failed to initialize network driver") } // Allocate interface job = eng.Job("allocate_interface", "container_id") if res := Allocate(job); res != engine.StatusOK { t.Fatal("Failed to allocate network interface") } job.Args[0] = "-I" job.Setenv("ChildIP", "172.17.0.2") job.Setenv("ParentIP", "172.17.0.1") job.SetenvBool("IgnoreErrors", false) job.SetenvList("Ports", []string{"1234"}) bridgeIface = "lo" _, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Filter) if err != nil { t.Fatal(err) } if res := LinkContainers(job); res != engine.StatusOK { t.Fatalf("LinkContainers failed") } // flush rules if _, err = iptables.Raw([]string{"-F", "DOCKER"}...); err != nil { t.Fatal(err) } } docker-1.6.2/daemon/networkdriver/network_test.go0000644000175000017500000001126612524223634021571 0ustar tianontianonpackage networkdriver import ( "github.com/docker/libcontainer/netlink" "net" "testing" ) func TestNonOverlapingNameservers(t *testing.T) { network := &net.IPNet{ IP: []byte{192, 168, 0, 1}, Mask: []byte{255, 255, 255, 0}, } nameservers := []string{ "127.0.0.1/32", } if err := CheckNameserverOverlaps(nameservers, network); err != nil { t.Fatal(err) } } func TestOverlapingNameservers(t *testing.T) { network := &net.IPNet{ IP: []byte{192, 168, 0, 1}, Mask: []byte{255, 255, 255, 0}, } nameservers := []string{ "192.168.0.1/32", } if err := CheckNameserverOverlaps(nameservers, network); err == nil { t.Fatalf("Expected error %s got %s", ErrNetworkOverlapsWithNameservers, err) } } func TestCheckRouteOverlaps(t *testing.T) { orig := networkGetRoutesFct defer func() { networkGetRoutesFct = orig }() networkGetRoutesFct = func() ([]netlink.Route, error) { routesData := []string{"10.0.2.0/32", "10.0.3.0/24", "10.0.42.0/24", "172.16.42.0/24", "192.168.142.0/24"} routes := []netlink.Route{} for _, addr := range routesData { _, netX, _ := net.ParseCIDR(addr) routes = append(routes, netlink.Route{IPNet: netX}) } return routes, nil } _, netX, _ := net.ParseCIDR("172.16.0.1/24") if err := CheckRouteOverlaps(netX); err != nil { t.Fatal(err) } _, netX, _ = net.ParseCIDR("10.0.2.0/24") if err := 
CheckRouteOverlaps(netX); err == nil { t.Fatalf("10.0.2.0/24 and 10.0.2.0 should overlap but it doesn't") } } func TestCheckNameserverOverlaps(t *testing.T) { nameservers := []string{"10.0.2.3/32", "192.168.102.1/32"} _, netX, _ := net.ParseCIDR("10.0.2.3/32") if err := CheckNameserverOverlaps(nameservers, netX); err == nil { t.Fatalf("%s should overlap 10.0.2.3/32 but doesn't", netX) } _, netX, _ = net.ParseCIDR("192.168.102.2/32") if err := CheckNameserverOverlaps(nameservers, netX); err != nil { t.Fatalf("%s should not overlap %v but it does", netX, nameservers) } } func AssertOverlap(CIDRx string, CIDRy string, t *testing.T) { _, netX, _ := net.ParseCIDR(CIDRx) _, netY, _ := net.ParseCIDR(CIDRy) if !NetworkOverlaps(netX, netY) { t.Errorf("%v and %v should overlap", netX, netY) } } func AssertNoOverlap(CIDRx string, CIDRy string, t *testing.T) { _, netX, _ := net.ParseCIDR(CIDRx) _, netY, _ := net.ParseCIDR(CIDRy) if NetworkOverlaps(netX, netY) { t.Errorf("%v and %v should not overlap", netX, netY) } } func TestNetworkOverlaps(t *testing.T) { //netY starts at same IP and ends within netX AssertOverlap("172.16.0.1/24", "172.16.0.1/25", t) //netY starts within netX and ends at same IP AssertOverlap("172.16.0.1/24", "172.16.0.128/25", t) //netY starts and ends within netX AssertOverlap("172.16.0.1/24", "172.16.0.64/25", t) //netY starts at same IP and ends outside of netX AssertOverlap("172.16.0.1/24", "172.16.0.1/23", t) //netY starts before and ends at same IP of netX AssertOverlap("172.16.1.1/24", "172.16.0.1/23", t) //netY starts before and ends outside of netX AssertOverlap("172.16.1.1/24", "172.16.0.1/22", t) //netY starts and ends before netX AssertNoOverlap("172.16.1.1/25", "172.16.0.1/24", t) //netX starts and ends before netY AssertNoOverlap("172.16.1.1/25", "172.16.2.1/24", t) } func TestNetworkRange(t *testing.T) { // Simple class C test _, network, _ := net.ParseCIDR("192.168.0.1/24") first, last := NetworkRange(network) if !first.Equal(net.ParseIP("192.168.0.0")) { t.Error(first.String()) } if !last.Equal(net.ParseIP("192.168.0.255")) { t.Error(last.String()) } // Class A test _, network, _ = net.ParseCIDR("10.0.0.1/8") first, last = NetworkRange(network) if !first.Equal(net.ParseIP("10.0.0.0")) { t.Error(first.String()) } if !last.Equal(net.ParseIP("10.255.255.255")) { t.Error(last.String()) } // Class A, random IP address _, network, _ = net.ParseCIDR("10.1.2.3/8") first, last = NetworkRange(network) if !first.Equal(net.ParseIP("10.0.0.0")) { t.Error(first.String()) } if !last.Equal(net.ParseIP("10.255.255.255")) { t.Error(last.String()) } // 32bit mask _, network, _ = net.ParseCIDR("10.1.2.3/32") first, last = NetworkRange(network) if !first.Equal(net.ParseIP("10.1.2.3")) { t.Error(first.String()) } if !last.Equal(net.ParseIP("10.1.2.3")) { t.Error(last.String()) } // 31bit mask _, network, _ = net.ParseCIDR("10.1.2.3/31") first, last = NetworkRange(network) if !first.Equal(net.ParseIP("10.1.2.2")) { t.Error(first.String()) } if !last.Equal(net.ParseIP("10.1.2.3")) { t.Error(last.String()) } // 26bit mask _, network, _ = net.ParseCIDR("10.1.2.3/26") first, last = NetworkRange(network) if !first.Equal(net.ParseIP("10.1.2.0")) { t.Error(first.String()) } if !last.Equal(net.ParseIP("10.1.2.63")) { t.Error(last.String()) } } docker-1.6.2/daemon/networkdriver/network.go0000644000175000017500000000036712524223634020532 0ustar tianontianonpackage networkdriver import ( "errors" ) var ( ErrNetworkOverlapsWithNameservers = errors.New("requested network overlaps with nameserver") 
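// ErrNetworkOverlaps is the failure mode exercised in network_test.go:
// a candidate network collides with a route that already exists on the host.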
ErrNetworkOverlaps = errors.New("requested network overlaps with existing network") ) docker-1.6.2/daemon/networkdriver/ipallocator/0000755000175000017500000000000012524223634021015 5ustar tianontianondocker-1.6.2/daemon/networkdriver/ipallocator/allocator.go0000644000175000017500000001062012524223634023323 0ustar tianontianonpackage ipallocator import ( "errors" "math/big" "net" "sync" log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/networkdriver" ) // allocatedMap is thread-unsafe set of allocated IP type allocatedMap struct { p map[string]struct{} last *big.Int begin *big.Int end *big.Int } func newAllocatedMap(network *net.IPNet) *allocatedMap { firstIP, lastIP := networkdriver.NetworkRange(network) begin := big.NewInt(0).Add(ipToBigInt(firstIP), big.NewInt(1)) end := big.NewInt(0).Sub(ipToBigInt(lastIP), big.NewInt(1)) return &allocatedMap{ p: make(map[string]struct{}), begin: begin, end: end, last: big.NewInt(0).Sub(begin, big.NewInt(1)), // so first allocated will be begin } } type networkSet map[string]*allocatedMap var ( ErrNoAvailableIPs = errors.New("no available ip addresses on network") ErrIPAlreadyAllocated = errors.New("ip already allocated") ErrIPOutOfRange = errors.New("requested ip is out of range") ErrNetworkAlreadyRegistered = errors.New("network already registered") ErrBadSubnet = errors.New("network does not contain specified subnet") ) var ( lock = sync.Mutex{} allocatedIPs = networkSet{} ) // RegisterSubnet registers network in global allocator with bounds // defined by subnet. If you want to use network range you must call // this method before first RequestIP, otherwise full network range will be used func RegisterSubnet(network *net.IPNet, subnet *net.IPNet) error { lock.Lock() defer lock.Unlock() key := network.String() if _, ok := allocatedIPs[key]; ok { return ErrNetworkAlreadyRegistered } n := newAllocatedMap(network) beginIP, endIP := networkdriver.NetworkRange(subnet) begin := big.NewInt(0).Add(ipToBigInt(beginIP), big.NewInt(1)) end := big.NewInt(0).Sub(ipToBigInt(endIP), big.NewInt(1)) // Check that subnet is within network if !(begin.Cmp(n.begin) >= 0 && end.Cmp(n.end) <= 0 && begin.Cmp(end) == -1) { return ErrBadSubnet } n.begin.Set(begin) n.end.Set(end) n.last.Sub(begin, big.NewInt(1)) allocatedIPs[key] = n return nil } // RequestIP requests an available ip from the given network. It // will return the next available ip if the ip provided is nil. If the // ip provided is not nil it will validate that the provided ip is available // for use or return an error func RequestIP(network *net.IPNet, ip net.IP) (net.IP, error) { lock.Lock() defer lock.Unlock() key := network.String() allocated, ok := allocatedIPs[key] if !ok { allocated = newAllocatedMap(network) allocatedIPs[key] = allocated } if ip == nil { return allocated.getNextIP() } return allocated.checkIP(ip) } // ReleaseIP adds the provided ip back into the pool of // available ips to be returned for use. func ReleaseIP(network *net.IPNet, ip net.IP) error { lock.Lock() defer lock.Unlock() if allocated, exists := allocatedIPs[network.String()]; exists { delete(allocated.p, ip.String()) } return nil } func (allocated *allocatedMap) checkIP(ip net.IP) (net.IP, error) { if _, ok := allocated.p[ip.String()]; ok { return nil, ErrIPAlreadyAllocated } pos := ipToBigInt(ip) // Verify that the IP address is within our network range. if pos.Cmp(allocated.begin) == -1 || pos.Cmp(allocated.end) == 1 { return nil, ErrIPOutOfRange } // Register the IP. 
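// (The textual form from ip.String() is used as the map key, so the 4-byte
// and 16-byte encodings of the same IPv4 address collapse to a single entry.)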
allocated.p[ip.String()] = struct{}{} return ip, nil } // return an available ip if one is currently available. If not, // return the next available ip for the nextwork func (allocated *allocatedMap) getNextIP() (net.IP, error) { pos := big.NewInt(0).Set(allocated.last) allRange := big.NewInt(0).Sub(allocated.end, allocated.begin) for i := big.NewInt(0); i.Cmp(allRange) <= 0; i.Add(i, big.NewInt(1)) { pos.Add(pos, big.NewInt(1)) if pos.Cmp(allocated.end) == 1 { pos.Set(allocated.begin) } if _, ok := allocated.p[bigIntToIP(pos).String()]; ok { continue } allocated.p[bigIntToIP(pos).String()] = struct{}{} allocated.last.Set(pos) return bigIntToIP(pos), nil } return nil, ErrNoAvailableIPs } // Converts a 4 bytes IP into a 128 bit integer func ipToBigInt(ip net.IP) *big.Int { x := big.NewInt(0) if ip4 := ip.To4(); ip4 != nil { return x.SetBytes(ip4) } if ip6 := ip.To16(); ip6 != nil { return x.SetBytes(ip6) } log.Errorf("ipToBigInt: Wrong IP length! %s", ip) return nil } // Converts 128 bit integer into a 4 bytes IP address func bigIntToIP(v *big.Int) net.IP { return net.IP(v.Bytes()) } docker-1.6.2/daemon/networkdriver/ipallocator/allocator_test.go0000644000175000017500000003610312524223634024366 0ustar tianontianonpackage ipallocator import ( "fmt" "math/big" "net" "testing" ) func reset() { allocatedIPs = networkSet{} } func TestConversion(t *testing.T) { ip := net.ParseIP("127.0.0.1") i := ipToBigInt(ip) if i.Cmp(big.NewInt(0x7f000001)) != 0 { t.Fatal("incorrect conversion") } conv := bigIntToIP(i) if !ip.Equal(conv) { t.Error(conv.String()) } } func TestConversionIPv6(t *testing.T) { ip := net.ParseIP("2a00:1450::1") ip2 := net.ParseIP("2a00:1450::2") ip3 := net.ParseIP("2a00:1450::1:1") i := ipToBigInt(ip) val, success := big.NewInt(0).SetString("2a001450000000000000000000000001", 16) if !success { t.Fatal("Hex-String to BigInt conversion failed.") } if i.Cmp(val) != 0 { t.Fatal("incorrent conversion") } conv := bigIntToIP(i) conv2 := bigIntToIP(big.NewInt(0).Add(i, big.NewInt(1))) conv3 := bigIntToIP(big.NewInt(0).Add(i, big.NewInt(0x10000))) if !ip.Equal(conv) { t.Error("2a00:1450::1 should be equal to " + conv.String()) } if !ip2.Equal(conv2) { t.Error("2a00:1450::2 should be equal to " + conv2.String()) } if !ip3.Equal(conv3) { t.Error("2a00:1450::1:1 should be equal to " + conv3.String()) } } func TestRequestNewIps(t *testing.T) { defer reset() network := &net.IPNet{ IP: []byte{192, 168, 0, 1}, Mask: []byte{255, 255, 255, 0}, } var ip net.IP var err error for i := 1; i < 10; i++ { ip, err = RequestIP(network, nil) if err != nil { t.Fatal(err) } if expected := fmt.Sprintf("192.168.0.%d", i); ip.String() != expected { t.Fatalf("Expected ip %s got %s", expected, ip.String()) } } value := bigIntToIP(big.NewInt(0).Add(ipToBigInt(ip), big.NewInt(1))).String() if err := ReleaseIP(network, ip); err != nil { t.Fatal(err) } ip, err = RequestIP(network, nil) if err != nil { t.Fatal(err) } if ip.String() != value { t.Fatalf("Expected to receive the next ip %s got %s", value, ip.String()) } } func TestRequestNewIpV6(t *testing.T) { defer reset() network := &net.IPNet{ IP: []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask } var ip net.IP var err error for i := 1; i < 10; i++ { ip, err = RequestIP(network, nil) if err != nil { t.Fatal(err) } if expected := fmt.Sprintf("2a00:1450::%d", i); ip.String() != expected { t.Fatalf("Expected ip %s got %s", expected, ip.String()) } } value := 
bigIntToIP(big.NewInt(0).Add(ipToBigInt(ip), big.NewInt(1))).String() if err := ReleaseIP(network, ip); err != nil { t.Fatal(err) } ip, err = RequestIP(network, nil) if err != nil { t.Fatal(err) } if ip.String() != value { t.Fatalf("Expected to receive the next ip %s got %s", value, ip.String()) } } func TestReleaseIp(t *testing.T) { defer reset() network := &net.IPNet{ IP: []byte{192, 168, 0, 1}, Mask: []byte{255, 255, 255, 0}, } ip, err := RequestIP(network, nil) if err != nil { t.Fatal(err) } if err := ReleaseIP(network, ip); err != nil { t.Fatal(err) } } func TestReleaseIpV6(t *testing.T) { defer reset() network := &net.IPNet{ IP: []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask } ip, err := RequestIP(network, nil) if err != nil { t.Fatal(err) } if err := ReleaseIP(network, ip); err != nil { t.Fatal(err) } } func TestGetReleasedIp(t *testing.T) { defer reset() network := &net.IPNet{ IP: []byte{192, 168, 0, 1}, Mask: []byte{255, 255, 255, 0}, } ip, err := RequestIP(network, nil) if err != nil { t.Fatal(err) } value := ip.String() if err := ReleaseIP(network, ip); err != nil { t.Fatal(err) } for i := 0; i < 253; i++ { _, err = RequestIP(network, nil) if err != nil { t.Fatal(err) } err = ReleaseIP(network, ip) if err != nil { t.Fatal(err) } } ip, err = RequestIP(network, nil) if err != nil { t.Fatal(err) } if ip.String() != value { t.Fatalf("Expected to receive same ip %s got %s", value, ip.String()) } } func TestGetReleasedIpV6(t *testing.T) { defer reset() network := &net.IPNet{ IP: []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0}, } ip, err := RequestIP(network, nil) if err != nil { t.Fatal(err) } value := ip.String() if err := ReleaseIP(network, ip); err != nil { t.Fatal(err) } for i := 0; i < 253; i++ { _, err = RequestIP(network, nil) if err != nil { t.Fatal(err) } err = ReleaseIP(network, ip) if err != nil { t.Fatal(err) } } ip, err = RequestIP(network, nil) if err != nil { t.Fatal(err) } if ip.String() != value { t.Fatalf("Expected to receive same ip %s got %s", value, ip.String()) } } func TestRequestSpecificIp(t *testing.T) { defer reset() network := &net.IPNet{ IP: []byte{192, 168, 0, 1}, Mask: []byte{255, 255, 255, 224}, } ip := net.ParseIP("192.168.0.5") // Request a "good" IP. if _, err := RequestIP(network, ip); err != nil { t.Fatal(err) } // Request the same IP again. if _, err := RequestIP(network, ip); err != ErrIPAlreadyAllocated { t.Fatalf("Got the same IP twice: %#v", err) } // Request an out of range IP. if _, err := RequestIP(network, net.ParseIP("192.168.0.42")); err != ErrIPOutOfRange { t.Fatalf("Got an out of range IP: %#v", err) } } func TestRequestSpecificIpV6(t *testing.T) { defer reset() network := &net.IPNet{ IP: []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask } ip := net.ParseIP("2a00:1450::5") // Request a "good" IP. if _, err := RequestIP(network, ip); err != nil { t.Fatal(err) } // Request the same IP again. if _, err := RequestIP(network, ip); err != ErrIPAlreadyAllocated { t.Fatalf("Got the same IP twice: %#v", err) } // Request an out of range IP. 
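// 2a00:1500::1 lies outside 2a00:1450::/64, so the allocator must reject it
// with ErrIPOutOfRange.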
if _, err := RequestIP(network, net.ParseIP("2a00:1500::1")); err != ErrIPOutOfRange { t.Fatalf("Got an out of range IP: %#v", err) } } func TestIPAllocator(t *testing.T) { expectedIPs := []net.IP{ 0: net.IPv4(127, 0, 0, 1), 1: net.IPv4(127, 0, 0, 2), 2: net.IPv4(127, 0, 0, 3), 3: net.IPv4(127, 0, 0, 4), 4: net.IPv4(127, 0, 0, 5), 5: net.IPv4(127, 0, 0, 6), } gwIP, n, _ := net.ParseCIDR("127.0.0.1/29") network := &net.IPNet{IP: gwIP, Mask: n.Mask} // Pool after initialisation (f = free, u = used) // 1(f) - 2(f) - 3(f) - 4(f) - 5(f) - 6(f) // ↑ // Check that we get 6 IPs, from 127.0.0.1–127.0.0.6, in that // order. for i := 0; i < 6; i++ { ip, err := RequestIP(network, nil) if err != nil { t.Fatal(err) } assertIPEquals(t, expectedIPs[i], ip) } // Before loop begin // 1(f) - 2(f) - 3(f) - 4(f) - 5(f) - 6(f) // ↑ // After i = 0 // 1(u) - 2(f) - 3(f) - 4(f) - 5(f) - 6(f) // ↑ // After i = 1 // 1(u) - 2(u) - 3(f) - 4(f) - 5(f) - 6(f) // ↑ // After i = 2 // 1(u) - 2(u) - 3(u) - 4(f) - 5(f) - 6(f) // ↑ // After i = 3 // 1(u) - 2(u) - 3(u) - 4(u) - 5(f) - 6(f) // ↑ // After i = 4 // 1(u) - 2(u) - 3(u) - 4(u) - 5(u) - 6(f) // ↑ // After i = 5 // 1(u) - 2(u) - 3(u) - 4(u) - 5(u) - 6(u) // ↑ // Check that there are no more IPs ip, err := RequestIP(network, nil) if err == nil { t.Fatalf("There shouldn't be any IP addresses at this point, got %s\n", ip) } // Release some IPs in non-sequential order if err := ReleaseIP(network, expectedIPs[3]); err != nil { t.Fatal(err) } // 1(u) - 2(u) - 3(u) - 4(f) - 5(u) - 6(u) // ↑ if err := ReleaseIP(network, expectedIPs[2]); err != nil { t.Fatal(err) } // 1(u) - 2(u) - 3(f) - 4(f) - 5(u) - 6(u) // ↑ if err := ReleaseIP(network, expectedIPs[4]); err != nil { t.Fatal(err) } // 1(u) - 2(u) - 3(f) - 4(f) - 5(f) - 6(u) // ↑ // Make sure that IPs are reused in sequential order, starting // with the first released IP newIPs := make([]net.IP, 3) for i := 0; i < 3; i++ { ip, err := RequestIP(network, nil) if err != nil { t.Fatal(err) } newIPs[i] = ip } assertIPEquals(t, expectedIPs[2], newIPs[0]) assertIPEquals(t, expectedIPs[3], newIPs[1]) assertIPEquals(t, expectedIPs[4], newIPs[2]) _, err = RequestIP(network, nil) if err == nil { t.Fatal("There shouldn't be any IP addresses at this point") } } func TestAllocateFirstIP(t *testing.T) { defer reset() network := &net.IPNet{ IP: []byte{192, 168, 0, 0}, Mask: []byte{255, 255, 255, 0}, } firstIP := network.IP.To4().Mask(network.Mask) first := big.NewInt(0).Add(ipToBigInt(firstIP), big.NewInt(1)) ip, err := RequestIP(network, nil) if err != nil { t.Fatal(err) } allocated := ipToBigInt(ip) if allocated == first { t.Fatalf("allocated ip should not equal first ip: %d == %d", first, allocated) } } func TestAllocateAllIps(t *testing.T) { defer reset() network := &net.IPNet{ IP: []byte{192, 168, 0, 1}, Mask: []byte{255, 255, 255, 0}, } var ( current, first net.IP err error isFirst = true ) for err == nil { current, err = RequestIP(network, nil) if isFirst { first = current isFirst = false } } if err != ErrNoAvailableIPs { t.Fatal(err) } if _, err := RequestIP(network, nil); err != ErrNoAvailableIPs { t.Fatal(err) } if err := ReleaseIP(network, first); err != nil { t.Fatal(err) } again, err := RequestIP(network, nil) if err != nil { t.Fatal(err) } assertIPEquals(t, first, again) // ensure that alloc.last == alloc.begin won't result in dead loop if _, err := RequestIP(network, nil); err != ErrNoAvailableIPs { t.Fatal(err) } // Test by making alloc.last the only free ip and ensure we get it back // #1. 
first of the range, (alloc.last == ipToBigInt(first) already) if err := ReleaseIP(network, first); err != nil { t.Fatal(err) } ret, err := RequestIP(network, nil) if err != nil { t.Fatal(err) } assertIPEquals(t, first, ret) // #2. last of the range, note that current is the last one last := net.IPv4(192, 168, 0, 254) setLastTo(t, network, last) ret, err = RequestIP(network, nil) if err != nil { t.Fatal(err) } assertIPEquals(t, last, ret) // #3. middle of the range mid := net.IPv4(192, 168, 0, 7) setLastTo(t, network, mid) ret, err = RequestIP(network, nil) if err != nil { t.Fatal(err) } assertIPEquals(t, mid, ret) } // setLastTo must be called with a full pool; it releases ip, requests it // back so that alloc.last points at it, then releases it again func setLastTo(t *testing.T, network *net.IPNet, ip net.IP) { if err := ReleaseIP(network, ip); err != nil { t.Fatal(err) } ret, err := RequestIP(network, nil) if err != nil { t.Fatal(err) } assertIPEquals(t, ip, ret) if err := ReleaseIP(network, ip); err != nil { t.Fatal(err) } } func TestAllocateDifferentSubnets(t *testing.T) { defer reset() network1 := &net.IPNet{ IP: []byte{192, 168, 0, 1}, Mask: []byte{255, 255, 255, 0}, } network2 := &net.IPNet{ IP: []byte{127, 0, 0, 1}, Mask: []byte{255, 255, 255, 0}, } network3 := &net.IPNet{ IP: []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask } network4 := &net.IPNet{ IP: []byte{0x2a, 0x00, 0x16, 0x32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask } expectedIPs := []net.IP{ 0: net.IPv4(192, 168, 0, 1), 1: net.IPv4(192, 168, 0, 2), 2: net.IPv4(127, 0, 0, 1), 3: net.IPv4(127, 0, 0, 2), 4: net.ParseIP("2a00:1450::1"), 5: net.ParseIP("2a00:1450::2"), 6: net.ParseIP("2a00:1450::3"), 7: net.ParseIP("2a00:1632::1"), 8: net.ParseIP("2a00:1632::2"), } ip11, err := RequestIP(network1, nil) if err != nil { t.Fatal(err) } ip12, err := RequestIP(network1, nil) if err != nil { t.Fatal(err) } ip21, err := RequestIP(network2, nil) if err != nil { t.Fatal(err) } ip22, err := RequestIP(network2, nil) if err != nil { t.Fatal(err) } ip31, err := RequestIP(network3, nil) if err != nil { t.Fatal(err) } ip32, err := RequestIP(network3, nil) if err != nil { t.Fatal(err) } ip33, err := RequestIP(network3, nil) if err != nil { t.Fatal(err) } ip41, err := RequestIP(network4, nil) if err != nil { t.Fatal(err) } ip42, err := RequestIP(network4, nil) if err != nil { t.Fatal(err) } assertIPEquals(t, expectedIPs[0], ip11) assertIPEquals(t, expectedIPs[1], ip12) assertIPEquals(t, expectedIPs[2], ip21) assertIPEquals(t, expectedIPs[3], ip22) assertIPEquals(t, expectedIPs[4], ip31) assertIPEquals(t, expectedIPs[5], ip32) assertIPEquals(t, expectedIPs[6], ip33) assertIPEquals(t, expectedIPs[7], ip41) assertIPEquals(t, expectedIPs[8], ip42) } func TestRegisterBadTwice(t *testing.T) { defer reset() network := &net.IPNet{ IP: []byte{192, 168, 1, 1}, Mask: []byte{255, 255, 255, 0}, } subnet := &net.IPNet{ IP: []byte{192, 168, 1, 8}, Mask: []byte{255, 255, 255, 248}, } if err := RegisterSubnet(network, subnet); err != nil { t.Fatal(err) } subnet = &net.IPNet{ IP: []byte{192, 168, 1, 16}, Mask: []byte{255, 255, 255, 248}, } if err := RegisterSubnet(network, subnet); err != ErrNetworkAlreadyRegistered { t.Fatalf("Expected ErrNetworkAlreadyRegistered error, got %v", err) } } func TestRegisterBadRange(t *testing.T) { defer reset() network := &net.IPNet{ IP: []byte{192, 168, 1, 1}, Mask: []byte{255, 255, 255, 0}, 
} subnet := &net.IPNet{ IP: []byte{192, 168, 1, 1}, Mask: []byte{255, 255, 0, 0}, } if err := RegisterSubnet(network, subnet); err != ErrBadSubnet { t.Fatalf("Expected ErrBadSubnet error, got %v", err) } } func TestAllocateFromRange(t *testing.T) { defer reset() network := &net.IPNet{ IP: []byte{192, 168, 0, 1}, Mask: []byte{255, 255, 255, 0}, } // 192.168.0.9 - 192.168.0.14 subnet := &net.IPNet{ IP: []byte{192, 168, 0, 8}, Mask: []byte{255, 255, 255, 248}, } if err := RegisterSubnet(network, subnet); err != nil { t.Fatal(err) } expectedIPs := []net.IP{ 0: net.IPv4(192, 168, 0, 9), 1: net.IPv4(192, 168, 0, 10), 2: net.IPv4(192, 168, 0, 11), 3: net.IPv4(192, 168, 0, 12), 4: net.IPv4(192, 168, 0, 13), 5: net.IPv4(192, 168, 0, 14), } for _, ip := range expectedIPs { rip, err := RequestIP(network, nil) if err != nil { t.Fatal(err) } assertIPEquals(t, ip, rip) } if _, err := RequestIP(network, nil); err != ErrNoAvailableIPs { t.Fatalf("Expected ErrNoAvailableIPs error, got %v", err) } for _, ip := range expectedIPs { ReleaseIP(network, ip) rip, err := RequestIP(network, nil) if err != nil { t.Fatal(err) } assertIPEquals(t, ip, rip) } } func assertIPEquals(t *testing.T, ip1, ip2 net.IP) { if !ip1.Equal(ip2) { t.Fatalf("Expected IP %s, got %s", ip1, ip2) } } func BenchmarkRequestIP(b *testing.B) { network := &net.IPNet{ IP: []byte{192, 168, 0, 1}, Mask: []byte{255, 255, 255, 0}, } b.ResetTimer() for i := 0; i < b.N; i++ { for j := 0; j < 253; j++ { _, err := RequestIP(network, nil) if err != nil { b.Fatal(err) } } reset() } } docker-1.6.2/daemon/networkdriver/utils.go0000644000175000017500000000533712524223634020203 0ustar tianontianonpackage networkdriver import ( "errors" "fmt" "net" "github.com/docker/libcontainer/netlink" ) var ( networkGetRoutesFct = netlink.NetworkGetRoutes ErrNoDefaultRoute = errors.New("no default route") ) func CheckNameserverOverlaps(nameservers []string, toCheck *net.IPNet) error { if len(nameservers) > 0 { for _, ns := range nameservers { _, nsNetwork, err := net.ParseCIDR(ns) if err != nil { return err } if NetworkOverlaps(toCheck, nsNetwork) { return ErrNetworkOverlapsWithNameservers } } } return nil } func CheckRouteOverlaps(toCheck *net.IPNet) error { networks, err := networkGetRoutesFct() if err != nil { return err } for _, network := range networks { if network.IPNet != nil && NetworkOverlaps(toCheck, network.IPNet) { return ErrNetworkOverlaps } } return nil } // Detects overlap between one IPNet and another func NetworkOverlaps(netX *net.IPNet, netY *net.IPNet) bool { if len(netX.IP) == len(netY.IP) { if firstIP, _ := NetworkRange(netX); netY.Contains(firstIP) { return true } if firstIP, _ := NetworkRange(netY); netX.Contains(firstIP) { return true } } return false } // Calculates the first and last IP addresses in an IPNet func NetworkRange(network *net.IPNet) (net.IP, net.IP) { var netIP net.IP if network.IP.To4() != nil { netIP = network.IP.To4() } else if network.IP.To16() != nil { netIP = network.IP.To16() } else { return nil, nil } lastIP := make([]byte, len(netIP), len(netIP)) for i := 0; i < len(netIP); i++ { lastIP[i] = netIP[i] | ^network.Mask[i] } return netIP.Mask(network.Mask), net.IP(lastIP) } // Return the first IPv4 address and slice of IPv6 addresses for the specified network interface func GetIfaceAddr(name string) (net.Addr, []net.Addr, error) { iface, err := net.InterfaceByName(name) if err != nil { return nil, nil, err } addrs, err := iface.Addrs() if err != nil { return nil, nil, err } var addrs4 []net.Addr var addrs6 []net.Addr for _, 
addr := range addrs { ip := (addr.(*net.IPNet)).IP if ip4 := ip.To4(); ip4 != nil { addrs4 = append(addrs4, addr) } else if ip6 := ip.To16(); len(ip6) == net.IPv6len { addrs6 = append(addrs6, addr) } } switch { case len(addrs4) == 0: return nil, nil, fmt.Errorf("Interface %v has no IPv4 addresses", name) case len(addrs4) > 1: fmt.Printf("Interface %v has more than 1 IPv4 address. Defaulting to using %v\n", name, (addrs4[0].(*net.IPNet)).IP) } return addrs4[0], addrs6, nil } func GetDefaultRouteIface() (*net.Interface, error) { rs, err := networkGetRoutesFct() if err != nil { return nil, fmt.Errorf("unable to get routes: %v", err) } for _, r := range rs { if r.Default { return r.Iface, nil } } return nil, ErrNoDefaultRoute } docker-1.6.2/daemon/networkdriver/portmapper/0000755000175000017500000000000012524223634020675 5ustar tianontianondocker-1.6.2/daemon/networkdriver/portmapper/mock_proxy.go0000644000175000017500000000052012524223634023413 0ustar tianontianonpackage portmapper import "net" func NewMockProxyCommand(proto string, hostIP net.IP, hostPort int, containerIP net.IP, containerPort int) UserlandProxy { return &mockProxyCommand{} } type mockProxyCommand struct { } func (p *mockProxyCommand) Start() error { return nil } func (p *mockProxyCommand) Stop() error { return nil } docker-1.6.2/daemon/networkdriver/portmapper/mapper.go0000644000175000017500000001122112524223634022505 0ustar tianontianonpackage portmapper import ( "errors" "fmt" "net" "sync" log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/networkdriver/portallocator" "github.com/docker/docker/pkg/iptables" ) type mapping struct { proto string userlandProxy UserlandProxy host net.Addr container net.Addr } var NewProxy = NewProxyCommand var ( ErrUnknownBackendAddressType = errors.New("unknown container address type not supported") ErrPortMappedForIP = errors.New("port is already mapped to ip") ErrPortNotMapped = errors.New("port is not mapped") ) type PortMapper struct { chain *iptables.Chain // udp:ip:port currentMappings map[string]*mapping lock sync.Mutex Allocator *portallocator.PortAllocator } func New() *PortMapper { return NewWithPortAllocator(portallocator.New()) } func NewWithPortAllocator(allocator *portallocator.PortAllocator) *PortMapper { return &PortMapper{ currentMappings: make(map[string]*mapping), Allocator: allocator, } } func (pm *PortMapper) SetIptablesChain(c *iptables.Chain) { pm.chain = c } func (pm *PortMapper) Map(container net.Addr, hostIP net.IP, hostPort int) (host net.Addr, err error) { pm.lock.Lock() defer pm.lock.Unlock() var ( m *mapping proto string allocatedHostPort int proxy UserlandProxy ) switch container.(type) { case *net.TCPAddr: proto = "tcp" if allocatedHostPort, err = pm.Allocator.RequestPort(hostIP, proto, hostPort); err != nil { return nil, err } m = &mapping{ proto: proto, host: &net.TCPAddr{IP: hostIP, Port: allocatedHostPort}, container: container, } proxy = NewProxy(proto, hostIP, allocatedHostPort, container.(*net.TCPAddr).IP, container.(*net.TCPAddr).Port) case *net.UDPAddr: proto = "udp" if allocatedHostPort, err = pm.Allocator.RequestPort(hostIP, proto, hostPort); err != nil { return nil, err } m = &mapping{ proto: proto, host: &net.UDPAddr{IP: hostIP, Port: allocatedHostPort}, container: container, } proxy = NewProxy(proto, hostIP, allocatedHostPort, container.(*net.UDPAddr).IP, container.(*net.UDPAddr).Port) default: return nil, ErrUnknownBackendAddressType } // release the allocated port on any further error during return. 
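// Map has a named error return, and the deferred closure below re-reads it,
// so any failure between here and the final return automatically releases
// the host port that was just reserved from the allocator.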
defer func() { if err != nil { pm.Allocator.ReleasePort(hostIP, proto, allocatedHostPort) } }() key := getKey(m.host) if _, exists := pm.currentMappings[key]; exists { return nil, ErrPortMappedForIP } containerIP, containerPort := getIPAndPort(m.container) if err := pm.forward(iptables.Append, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort); err != nil { return nil, err } cleanup := func() error { // need to undo the iptables rules before we return proxy.Stop() pm.forward(iptables.Delete, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort) if err := pm.Allocator.ReleasePort(hostIP, m.proto, allocatedHostPort); err != nil { return err } return nil } if err := proxy.Start(); err != nil { if err := cleanup(); err != nil { return nil, fmt.Errorf("Error during port allocation cleanup: %v", err) } return nil, err } m.userlandProxy = proxy pm.currentMappings[key] = m return m.host, nil } func (pm *PortMapper) Unmap(host net.Addr) error { pm.lock.Lock() defer pm.lock.Unlock() key := getKey(host) data, exists := pm.currentMappings[key] if !exists { return ErrPortNotMapped } data.userlandProxy.Stop() delete(pm.currentMappings, key) containerIP, containerPort := getIPAndPort(data.container) hostIP, hostPort := getIPAndPort(data.host) if err := pm.forward(iptables.Delete, data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil { log.Errorf("Error on iptables delete: %s", err) } switch a := host.(type) { case *net.TCPAddr: return pm.Allocator.ReleasePort(a.IP, "tcp", a.Port) case *net.UDPAddr: return pm.Allocator.ReleasePort(a.IP, "udp", a.Port) } return nil } func getKey(a net.Addr) string { switch t := a.(type) { case *net.TCPAddr: return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "tcp") case *net.UDPAddr: return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "udp") } return "" } func getIPAndPort(a net.Addr) (net.IP, int) { switch t := a.(type) { case *net.TCPAddr: return t.IP, t.Port case *net.UDPAddr: return t.IP, t.Port } return nil, 0 } func (pm *PortMapper) forward(action iptables.Action, proto string, sourceIP net.IP, sourcePort int, containerIP string, containerPort int) error { if pm.chain == nil { return nil } return pm.chain.Forward(action, sourceIP, sourcePort, proto, containerIP, containerPort) } docker-1.6.2/daemon/networkdriver/portmapper/mapper_test.go0000644000175000017500000000656312524223634023561 0ustar tianontianonpackage portmapper import ( "net" "testing" "github.com/docker/docker/pkg/iptables" ) func init() { // override this func to mock out the proxy server NewProxy = NewMockProxyCommand } func TestSetIptablesChain(t *testing.T) { pm := New() c := &iptables.Chain{ Name: "TEST", Bridge: "192.168.1.1", } if pm.chain != nil { t.Fatal("chain should be nil at init") } pm.SetIptablesChain(c) if pm.chain == nil { t.Fatal("chain should not be nil after set") } } func TestMapPorts(t *testing.T) { pm := New() dstIp1 := net.ParseIP("192.168.0.1") dstIp2 := net.ParseIP("192.168.0.2") dstAddr1 := &net.TCPAddr{IP: dstIp1, Port: 80} dstAddr2 := &net.TCPAddr{IP: dstIp2, Port: 80} srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")} srcAddr2 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.2")} addrEqual := func(addr1, addr2 net.Addr) bool { return (addr1.Network() == addr2.Network()) && (addr1.String() == addr2.String()) } if host, err := pm.Map(srcAddr1, dstIp1, 80); err != nil { t.Fatalf("Failed to allocate port: %s", err) } else if !addrEqual(dstAddr1, host) { t.Fatalf("Incorrect mapping result: 
expected %s:%s, got %s:%s", dstAddr1.String(), dstAddr1.Network(), host.String(), host.Network()) } if _, err := pm.Map(srcAddr1, dstIp1, 80); err == nil { t.Fatalf("Port is in use - mapping should have failed") } if _, err := pm.Map(srcAddr2, dstIp1, 80); err == nil { t.Fatalf("Port is in use - mapping should have failed") } if _, err := pm.Map(srcAddr2, dstIp2, 80); err != nil { t.Fatalf("Failed to allocate port: %s", err) } if pm.Unmap(dstAddr1) != nil { t.Fatalf("Failed to release port") } if pm.Unmap(dstAddr2) != nil { t.Fatalf("Failed to release port") } if pm.Unmap(dstAddr2) == nil { t.Fatalf("Port already released, but no error reported") } } func TestGetUDPKey(t *testing.T) { addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53} key := getKey(addr) if expected := "192.168.1.5:53/udp"; key != expected { t.Fatalf("expected key %s got %s", expected, key) } } func TestGetTCPKey(t *testing.T) { addr := &net.TCPAddr{IP: net.ParseIP("192.168.1.5"), Port: 80} key := getKey(addr) if expected := "192.168.1.5:80/tcp"; key != expected { t.Fatalf("expected key %s got %s", expected, key) } } func TestGetUDPIPAndPort(t *testing.T) { addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53} ip, port := getIPAndPort(addr) if expected := "192.168.1.5"; ip.String() != expected { t.Fatalf("expected ip %s got %s", expected, ip) } if ep := 53; port != ep { t.Fatalf("expected port %d got %d", ep, port) } } func TestMapAllPortsSingleInterface(t *testing.T) { pm := New() dstIp1 := net.ParseIP("0.0.0.0") srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")} hosts := []net.Addr{} var host net.Addr var err error defer func() { for _, val := range hosts { pm.Unmap(val) } }() for i := 0; i < 10; i++ { start, end := pm.Allocator.Begin, pm.Allocator.End for i := start; i < end; i++ { if host, err = pm.Map(srcAddr1, dstIp1, 0); err != nil { t.Fatal(err) } hosts = append(hosts, host) } if _, err := pm.Map(srcAddr1, dstIp1, start); err == nil { t.Fatalf("Port %d should be bound but is not", start) } for _, val := range hosts { if err := pm.Unmap(val); err != nil { t.Fatal(err) } } hosts = []net.Addr{} } } docker-1.6.2/daemon/networkdriver/portmapper/proxy.go0000644000175000017500000000712012524223634022405 0ustar tianontianonpackage portmapper import ( "flag" "fmt" "io/ioutil" "log" "net" "os" "os/exec" "os/signal" "strconv" "syscall" "time" "github.com/docker/docker/pkg/proxy" "github.com/docker/docker/pkg/reexec" ) const userlandProxyCommandName = "docker-proxy" func init() { reexec.Register(userlandProxyCommandName, execProxy) } type UserlandProxy interface { Start() error Stop() error } // proxyCommand wraps an exec.Cmd to run the userland TCP and UDP // proxies as separate processes. 
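// The child process is spawned through pkg/reexec (see NewProxyCommand
// below) and reports its startup status back to the parent over a pipe
// passed in as fd 3.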
type proxyCommand struct { cmd *exec.Cmd } // execProxy is the reexec function that is registered to start the userland proxies func execProxy() { f := os.NewFile(3, "signal-parent") host, container := parseHostContainerAddrs() p, err := proxy.NewProxy(host, container) if err != nil { fmt.Fprintf(f, "1\n%s", err) f.Close() os.Exit(1) } go handleStopSignals(p) fmt.Fprint(f, "0\n") f.Close() // Run will block until the proxy stops p.Run() } // parseHostContainerAddrs parses the flags passed on reexec to create the TCP or UDP // net.Addrs to map the host and container ports func parseHostContainerAddrs() (host net.Addr, container net.Addr) { var ( proto = flag.String("proto", "tcp", "proxy protocol") hostIP = flag.String("host-ip", "", "host ip") hostPort = flag.Int("host-port", -1, "host port") containerIP = flag.String("container-ip", "", "container ip") containerPort = flag.Int("container-port", -1, "container port") ) flag.Parse() switch *proto { case "tcp": host = &net.TCPAddr{IP: net.ParseIP(*hostIP), Port: *hostPort} container = &net.TCPAddr{IP: net.ParseIP(*containerIP), Port: *containerPort} case "udp": host = &net.UDPAddr{IP: net.ParseIP(*hostIP), Port: *hostPort} container = &net.UDPAddr{IP: net.ParseIP(*containerIP), Port: *containerPort} default: log.Fatalf("unsupported protocol %s", *proto) } return host, container } func handleStopSignals(p proxy.Proxy) { s := make(chan os.Signal, 10) signal.Notify(s, os.Interrupt, syscall.SIGTERM, syscall.SIGSTOP) for _ = range s { p.Close() os.Exit(0) } } func NewProxyCommand(proto string, hostIP net.IP, hostPort int, containerIP net.IP, containerPort int) UserlandProxy { args := []string{ userlandProxyCommandName, "-proto", proto, "-host-ip", hostIP.String(), "-host-port", strconv.Itoa(hostPort), "-container-ip", containerIP.String(), "-container-port", strconv.Itoa(containerPort), } return &proxyCommand{ cmd: &exec.Cmd{ Path: reexec.Self(), Args: args, SysProcAttr: &syscall.SysProcAttr{ Pdeathsig: syscall.SIGTERM, // send a sigterm to the proxy if the daemon process dies }, }, } } func (p *proxyCommand) Start() error { r, w, err := os.Pipe() if err != nil { return fmt.Errorf("proxy unable to open os.Pipe %s", err) } defer r.Close() p.cmd.ExtraFiles = []*os.File{w} if err := p.cmd.Start(); err != nil { return err } w.Close() errchan := make(chan error, 1) go func() { buf := make([]byte, 2) r.Read(buf) if string(buf) != "0\n" { errStr, err := ioutil.ReadAll(r) if err != nil { errchan <- fmt.Errorf("Error reading exit status from userland proxy: %v", err) return } errchan <- fmt.Errorf("Error starting userland proxy: %s", errStr) return } errchan <- nil }() select { case err := <-errchan: return err case <-time.After(16 * time.Second): return fmt.Errorf("Timed out starting the userland proxy") } } func (p *proxyCommand) Stop() error { if p.cmd.Process != nil { if err := p.cmd.Process.Signal(os.Interrupt); err != nil { return err } return p.cmd.Wait() } return nil } docker-1.6.2/daemon/create.go0000644000175000017500000001050312524223634015370 0ustar tianontianonpackage daemon import ( "fmt" "strings" "github.com/docker/docker/engine" "github.com/docker/docker/graph" "github.com/docker/docker/image" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/runconfig" "github.com/docker/libcontainer/label" ) func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status { var name string if len(job.Args) == 1 { name = job.Args[0] } else if len(job.Args) > 1 { return job.Errorf("Usage: %s", job.Name) } config := 
runconfig.ContainerConfigFromJob(job) hostConfig := runconfig.ContainerHostConfigFromJob(job) if len(hostConfig.LxcConf) > 0 && !strings.Contains(daemon.ExecutionDriver().Name(), "lxc") { return job.Errorf("Cannot use --lxc-conf with execdriver: %s", daemon.ExecutionDriver().Name()) } if hostConfig.Memory != 0 && hostConfig.Memory < 4194304 { return job.Errorf("Minimum memory limit allowed is 4MB") } if hostConfig.Memory > 0 && !daemon.SystemConfig().MemoryLimit { job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n") hostConfig.Memory = 0 } if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !daemon.SystemConfig().SwapLimit { job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n") hostConfig.MemorySwap = -1 } if hostConfig.Memory > 0 && hostConfig.MemorySwap > 0 && hostConfig.MemorySwap < hostConfig.Memory { return job.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.\n") } if hostConfig.Memory == 0 && hostConfig.MemorySwap > 0 { return job.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.\n") } container, buildWarnings, err := daemon.Create(config, hostConfig, name) if err != nil { if daemon.Graph().IsNotExist(err) { _, tag := parsers.ParseRepositoryTag(config.Image) if tag == "" { tag = graph.DEFAULTTAG } return job.Errorf("No such image: %s (tag: %s)", config.Image, tag) } return job.Error(err) } if !container.Config.NetworkDisabled && daemon.SystemConfig().IPv4ForwardingDisabled { job.Errorf("IPv4 forwarding is disabled.\n") } container.LogEvent("create") job.Printf("%s\n", container.ID) for _, warning := range buildWarnings { job.Errorf("%s\n", warning) } return engine.StatusOK } // Create creates a new container from the given configuration with a given name. 
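// It resolves the image, merges and verifies the config, registers the
// container, creates its root filesystem, applies the host config, mounts
// the container to prepare its volumes, and finally persists the container
// state to disk.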
func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.HostConfig, name string) (*Container, []string, error) { var ( container *Container warnings []string img *image.Image imgID string err error ) if config.Image != "" { img, err = daemon.repositories.LookupImage(config.Image) if err != nil { return nil, nil, err } if err = img.CheckDepth(); err != nil { return nil, nil, err } imgID = img.ID } if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil { return nil, nil, err } if hostConfig == nil { hostConfig = &runconfig.HostConfig{} } if hostConfig.SecurityOpt == nil { hostConfig.SecurityOpt, err = daemon.GenerateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode) if err != nil { return nil, nil, err } } if container, err = daemon.newContainer(name, config, imgID); err != nil { return nil, nil, err } if err := daemon.Register(container); err != nil { return nil, nil, err } if err := daemon.createRootfs(container); err != nil { return nil, nil, err } if hostConfig != nil { if err := daemon.setHostConfig(container, hostConfig); err != nil { return nil, nil, err } } if err := container.Mount(); err != nil { return nil, nil, err } defer container.Unmount() if err := container.prepareVolumes(); err != nil { return nil, nil, err } if err := container.ToDisk(); err != nil { return nil, nil, err } return container, warnings, nil } func (daemon *Daemon) GenerateSecurityOpt(ipcMode runconfig.IpcMode, pidMode runconfig.PidMode) ([]string, error) { if ipcMode.IsHost() || pidMode.IsHost() { return label.DisableSecOpt(), nil } if ipcContainer := ipcMode.Container(); ipcContainer != "" { c, err := daemon.Get(ipcContainer) if err != nil { return nil, err } if !c.IsRunning() { return nil, fmt.Errorf("cannot join IPC of a non running container: %s", ipcContainer) } return label.DupSecOpt(c.ProcessLabel), nil } return nil, nil } docker-1.6.2/daemon/copy.go0000644000175000017500000000111112524223634015072 0ustar tianontianonpackage daemon import ( "io" "github.com/docker/docker/engine" ) func (daemon *Daemon) ContainerCopy(job *engine.Job) engine.Status { if len(job.Args) != 2 { return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name) } var ( name = job.Args[0] resource = job.Args[1] ) container, err := daemon.Get(name) if err != nil { return job.Error(err) } data, err := container.Copy(resource) if err != nil { return job.Error(err) } defer data.Close() if _, err := io.Copy(job.Stdout, data); err != nil { return job.Error(err) } return engine.StatusOK } docker-1.6.2/daemon/logs.go0000644000175000017500000000663512524223634015104 0ustar tianontianonpackage daemon import ( "bytes" "encoding/json" "fmt" "io" "os" "strconv" "sync" log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/jsonlog" "github.com/docker/docker/pkg/tailfile" "github.com/docker/docker/pkg/timeutils" ) func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("Usage: %s CONTAINER\n", job.Name) } var ( name = job.Args[0] stdout = job.GetenvBool("stdout") stderr = job.GetenvBool("stderr") tail = job.Getenv("tail") follow = job.GetenvBool("follow") times = job.GetenvBool("timestamps") lines = -1 format string ) if !(stdout || stderr) { return job.Errorf("You must choose at least one stream") } if times { format = timeutils.RFC3339NanoFixed } if tail == "" { tail = "all" } container, err := daemon.Get(name) if err != nil { return job.Error(err) } if container.LogDriverType() != "json-file" { return 
job.Errorf("\"logs\" endpoint is supported only for \"json-file\" logging driver") } cLog, err := container.ReadLog("json") if err != nil && os.IsNotExist(err) { // Legacy logs log.Debugf("Old logs format") if stdout { cLog, err := container.ReadLog("stdout") if err != nil { log.Errorf("Error reading logs (stdout): %s", err) } else if _, err := io.Copy(job.Stdout, cLog); err != nil { log.Errorf("Error streaming logs (stdout): %s", err) } } if stderr { cLog, err := container.ReadLog("stderr") if err != nil { log.Errorf("Error reading logs (stderr): %s", err) } else if _, err := io.Copy(job.Stderr, cLog); err != nil { log.Errorf("Error streaming logs (stderr): %s", err) } } } else if err != nil { log.Errorf("Error reading logs (json): %s", err) } else { if tail != "all" { var err error lines, err = strconv.Atoi(tail) if err != nil { log.Errorf("Failed to parse tail %s, error: %v, show all logs", tail, err) lines = -1 } } if lines != 0 { if lines > 0 { f := cLog.(*os.File) ls, err := tailfile.TailFile(f, lines) if err != nil { return job.Error(err) } tmp := bytes.NewBuffer([]byte{}) for _, l := range ls { fmt.Fprintf(tmp, "%s\n", l) } cLog = tmp } dec := json.NewDecoder(cLog) l := &jsonlog.JSONLog{} for { if err := dec.Decode(l); err == io.EOF { break } else if err != nil { log.Errorf("Error streaming logs: %s", err) break } logLine := l.Log if times { // format can be "" or time format, so here can't be error logLine, _ = l.Format(format) } if l.Stream == "stdout" && stdout { io.WriteString(job.Stdout, logLine) } if l.Stream == "stderr" && stderr { io.WriteString(job.Stderr, logLine) } l.Reset() } } } if follow && container.IsRunning() { errors := make(chan error, 2) wg := sync.WaitGroup{} if stdout { wg.Add(1) stdoutPipe := container.StdoutLogPipe() defer stdoutPipe.Close() go func() { errors <- jsonlog.WriteLog(stdoutPipe, job.Stdout, format) wg.Done() }() } if stderr { wg.Add(1) stderrPipe := container.StderrLogPipe() defer stderrPipe.Close() go func() { errors <- jsonlog.WriteLog(stderrPipe, job.Stderr, format) wg.Done() }() } wg.Wait() close(errors) for err := range errors { if err != nil { log.Errorf("%s", err) } } } return engine.StatusOK } docker-1.6.2/daemon/execdriver/0000755000175000017500000000000012524223634015737 5ustar tianontianondocker-1.6.2/daemon/execdriver/native/0000755000175000017500000000000012524223634017225 5ustar tianontianondocker-1.6.2/daemon/execdriver/native/create.go0000644000175000017500000001374012524223634021024 0ustar tianontianon// +build linux,cgo package native import ( "errors" "fmt" "net" "strings" "syscall" "github.com/docker/docker/daemon/execdriver" "github.com/docker/libcontainer/apparmor" "github.com/docker/libcontainer/configs" "github.com/docker/libcontainer/devices" "github.com/docker/libcontainer/utils" ) // createContainer populates and configures the container type with the // data provided by the execdriver.Command func (d *driver) createContainer(c *execdriver.Command) (*configs.Config, error) { container := execdriver.InitContainer(c) if err := d.createIpc(container, c); err != nil { return nil, err } if err := d.createPid(container, c); err != nil { return nil, err } if err := d.createNetwork(container, c); err != nil { return nil, err } if c.ProcessConfig.Privileged { // clear readonly for /sys for i := range container.Mounts { if container.Mounts[i].Destination == "/sys" { container.Mounts[i].Flags &= ^syscall.MS_RDONLY } } container.ReadonlyPaths = nil container.MaskPaths = nil if err := d.setPrivileged(container); err != nil { return 
nil, err } } else { if err := d.setCapabilities(container, c); err != nil { return nil, err } } if c.AppArmorProfile != "" { container.AppArmorProfile = c.AppArmorProfile } if err := execdriver.SetupCgroups(container, c); err != nil { return nil, err } if err := d.setupMounts(container, c); err != nil { return nil, err } if err := d.setupLabels(container, c); err != nil { return nil, err } d.setupRlimits(container, c) return container, nil } func generateIfaceName() (string, error) { for i := 0; i < 10; i++ { name, err := utils.GenerateRandomName("veth", 7) if err != nil { continue } if _, err := net.InterfaceByName(name); err != nil { if strings.Contains(err.Error(), "no such") { return name, nil } return "", err } } return "", errors.New("Failed to find name for new interface") } func (d *driver) createNetwork(container *configs.Config, c *execdriver.Command) error { if c.Network.HostNetworking { container.Namespaces.Remove(configs.NEWNET) return nil } container.Networks = []*configs.Network{ { Type: "loopback", }, } iName, err := generateIfaceName() if err != nil { return err } if c.Network.Interface != nil { vethNetwork := configs.Network{ Name: "eth0", HostInterfaceName: iName, Mtu: c.Network.Mtu, Address: fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen), MacAddress: c.Network.Interface.MacAddress, Gateway: c.Network.Interface.Gateway, Type: "veth", Bridge: c.Network.Interface.Bridge, } if c.Network.Interface.GlobalIPv6Address != "" { vethNetwork.IPv6Address = fmt.Sprintf("%s/%d", c.Network.Interface.GlobalIPv6Address, c.Network.Interface.GlobalIPv6PrefixLen) vethNetwork.IPv6Gateway = c.Network.Interface.IPv6Gateway } container.Networks = append(container.Networks, &vethNetwork) } if c.Network.ContainerID != "" { d.Lock() active := d.activeContainers[c.Network.ContainerID] d.Unlock() if active == nil { return fmt.Errorf("%s is not a valid running container to join", c.Network.ContainerID) } state, err := active.State() if err != nil { return err } container.Namespaces.Add(configs.NEWNET, state.NamespacePaths[configs.NEWNET]) } return nil } func (d *driver) createIpc(container *configs.Config, c *execdriver.Command) error { if c.Ipc.HostIpc { container.Namespaces.Remove(configs.NEWIPC) return nil } if c.Ipc.ContainerID != "" { d.Lock() active := d.activeContainers[c.Ipc.ContainerID] d.Unlock() if active == nil { return fmt.Errorf("%s is not a valid running container to join", c.Ipc.ContainerID) } state, err := active.State() if err != nil { return err } container.Namespaces.Add(configs.NEWIPC, state.NamespacePaths[configs.NEWIPC]) } return nil } func (d *driver) createPid(container *configs.Config, c *execdriver.Command) error { if c.Pid.HostPid { container.Namespaces.Remove(configs.NEWPID) return nil } return nil } func (d *driver) setPrivileged(container *configs.Config) (err error) { container.Capabilities = execdriver.GetAllCapabilities() container.Cgroups.AllowAllDevices = true hostDevices, err := devices.HostDevices() if err != nil { return err } container.Devices = hostDevices if apparmor.IsEnabled() { container.AppArmorProfile = "unconfined" } return nil } func (d *driver) setCapabilities(container *configs.Config, c *execdriver.Command) (err error) { container.Capabilities, err = execdriver.TweakCapabilities(container.Capabilities, c.CapAdd, c.CapDrop) return err } func (d *driver) setupRlimits(container *configs.Config, c *execdriver.Command) { if c.Resources == nil { return } for _, rlimit := range c.Resources.Rlimits { container.Rlimits = 
append(container.Rlimits, configs.Rlimit{ Type: rlimit.Type, Hard: rlimit.Hard, Soft: rlimit.Soft, }) } } func (d *driver) setupMounts(container *configs.Config, c *execdriver.Command) error { userMounts := make(map[string]struct{}) for _, m := range c.Mounts { userMounts[m.Destination] = struct{}{} } // Filter out mounts that are overridden by user-supplied mounts var defaultMounts []*configs.Mount for _, m := range container.Mounts { if _, ok := userMounts[m.Destination]; !ok { defaultMounts = append(defaultMounts, m) } } container.Mounts = defaultMounts for _, m := range c.Mounts { flags := syscall.MS_BIND | syscall.MS_REC if !m.Writable { flags |= syscall.MS_RDONLY } if m.Slave { flags |= syscall.MS_SLAVE } container.Mounts = append(container.Mounts, &configs.Mount{ Source: m.Source, Destination: m.Destination, Device: "bind", Flags: flags, }) } return nil } func (d *driver) setupLabels(container *configs.Config, c *execdriver.Command) error { container.ProcessLabel = c.ProcessLabel container.MountLabel = c.MountLabel return nil } docker-1.6.2/daemon/execdriver/native/init.go0000644000175000017500000000123412524223634020517 0ustar tianontianon// +build linux package native import ( "fmt" "os" "runtime" "github.com/docker/docker/pkg/reexec" "github.com/docker/libcontainer" ) func init() { reexec.Register(DriverName, initializer) } func fatal(err error) { if lerr, ok := err.(libcontainer.Error); ok { lerr.Detail(os.Stderr) os.Exit(1) } fmt.Fprintln(os.Stderr, err) os.Exit(1) } func initializer() { runtime.GOMAXPROCS(1) runtime.LockOSThread() factory, err := libcontainer.New("") if err != nil { fatal(err) } if err := factory.StartInitialization(3); err != nil { fatal(err) } panic("unreachable") } func writeError(err error) { fmt.Fprint(os.Stderr, err) os.Exit(1) } docker-1.6.2/daemon/execdriver/native/driver.go0000644000175000017500000002300212524223634021046 0ustar tianontianon// +build linux,cgo package native import ( "encoding/json" "fmt" "io" "io/ioutil" "os" "os/exec" "path/filepath" "strings" "sync" "syscall" "time" log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/pkg/reexec" sysinfo "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/term" "github.com/docker/libcontainer" "github.com/docker/libcontainer/apparmor" "github.com/docker/libcontainer/cgroups/systemd" "github.com/docker/libcontainer/configs" "github.com/docker/libcontainer/system" "github.com/docker/libcontainer/utils" ) const ( DriverName = "native" Version = "0.2" ) type driver struct { root string initPath string activeContainers map[string]libcontainer.Container machineMemory int64 factory libcontainer.Factory sync.Mutex } func NewDriver(root, initPath string) (*driver, error) { meminfo, err := sysinfo.ReadMemInfo() if err != nil { return nil, err } if err := os.MkdirAll(root, 0700); err != nil { return nil, err } // native driver root is at docker_root/execdriver/native. 
Put apparmor at docker_root if err := apparmor.InstallDefaultProfile(); err != nil { return nil, err } cgm := libcontainer.Cgroupfs if systemd.UseSystemd() { cgm = libcontainer.SystemdCgroups } f, err := libcontainer.New( root, cgm, libcontainer.InitPath(reexec.Self(), DriverName), ) if err != nil { return nil, err } return &driver{ root: root, initPath: initPath, activeContainers: make(map[string]libcontainer.Container), machineMemory: meminfo.MemTotal, factory: f, }, nil } type execOutput struct { exitCode int err error } func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) { // take the Command and populate the libcontainer.Config from it container, err := d.createContainer(c) if err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } var term execdriver.Terminal p := &libcontainer.Process{ Args: append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...), Env: c.ProcessConfig.Env, Cwd: c.WorkingDir, User: c.ProcessConfig.User, } if c.ProcessConfig.Tty { rootuid, err := container.HostUID() if err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } cons, err := p.NewConsole(rootuid) if err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } term, err = NewTtyConsole(cons, pipes, rootuid) } else { p.Stdout = pipes.Stdout p.Stderr = pipes.Stderr r, w, err := os.Pipe() if err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } if pipes.Stdin != nil { go func() { io.Copy(w, pipes.Stdin) w.Close() }() p.Stdin = r } term = &execdriver.StdConsole{} } if err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } c.ProcessConfig.Terminal = term cont, err := d.factory.Create(c.ID, container) if err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } d.Lock() d.activeContainers[c.ID] = cont d.Unlock() defer func() { cont.Destroy() d.cleanContainer(c.ID) }() if err := cont.Start(p); err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } if startCallback != nil { pid, err := p.Pid() if err != nil { p.Signal(os.Kill) p.Wait() return execdriver.ExitStatus{ExitCode: -1}, err } startCallback(&c.ProcessConfig, pid) } oom := notifyOnOOM(cont) waitF := p.Wait if nss := cont.Config().Namespaces; !nss.Contains(configs.NEWPID) { // we need such a hack for tracking processes with inherited fds, // because cmd.Wait() waits for all streams to be copied waitF = waitInPIDHost(p, cont) } ps, err := waitF() if err != nil { execErr, ok := err.(*exec.ExitError) if !ok { return execdriver.ExitStatus{ExitCode: -1}, err } ps = execErr.ProcessState } cont.Destroy() _, oomKill := <-oom return execdriver.ExitStatus{ExitCode: utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), OOMKilled: oomKill}, nil } // notifyOnOOM returns a channel that signals if the container received an OOM notification // for any process. If it is unable to subscribe to OOM notifications then a closed // channel is returned as it will be non-blocking and return the correct result when read. 
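// This lets Run receive from the channel unconditionally: a receive on a
// closed channel never blocks and yields the zero value, so
// `_, oomKill := <-oom` simply reports false when notifications are
// unavailable.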
func notifyOnOOM(container libcontainer.Container) <-chan struct{} { oom, err := container.NotifyOOM() if err != nil { log.Warnf("Your kernel does not support OOM notifications: %s", err) c := make(chan struct{}) close(c) return c } return oom } func killCgroupProcs(c libcontainer.Container) { var procs []*os.Process if err := c.Pause(); err != nil { log.Warn(err) } pids, err := c.Processes() if err != nil { // don't care about children if we can't get them, this is mostly because the cgroup was already deleted log.Warnf("Failed to get processes from container %s: %v", c.ID(), err) } for _, pid := range pids { if p, err := os.FindProcess(pid); err == nil { procs = append(procs, p) if err := p.Kill(); err != nil { log.Warn(err) } } } if err := c.Resume(); err != nil { log.Warn(err) } for _, p := range procs { if _, err := p.Wait(); err != nil { log.Warn(err) } } } func waitInPIDHost(p *libcontainer.Process, c libcontainer.Container) func() (*os.ProcessState, error) { return func() (*os.ProcessState, error) { pid, err := p.Pid() if err != nil { return nil, err } process, err := os.FindProcess(pid) s, err := process.Wait() if err != nil { execErr, ok := err.(*exec.ExitError) if !ok { return s, err } s = execErr.ProcessState } killCgroupProcs(c) p.Wait() return s, err } } func (d *driver) Kill(c *execdriver.Command, sig int) error { active := d.activeContainers[c.ID] if active == nil { return fmt.Errorf("active container for %s does not exist", c.ID) } state, err := active.State() if err != nil { return err } return syscall.Kill(state.InitProcessPid, syscall.Signal(sig)) } func (d *driver) Pause(c *execdriver.Command) error { active := d.activeContainers[c.ID] if active == nil { return fmt.Errorf("active container for %s does not exist", c.ID) } return active.Pause() } func (d *driver) Unpause(c *execdriver.Command) error { active := d.activeContainers[c.ID] if active == nil { return fmt.Errorf("active container for %s does not exist", c.ID) } return active.Resume() } func (d *driver) Terminate(c *execdriver.Command) error { defer d.cleanContainer(c.ID) container, err := d.factory.Load(c.ID) if err != nil { return err } defer container.Destroy() state, err := container.State() if err != nil { return err } pid := state.InitProcessPid currentStartTime, err := system.GetProcessStartTime(pid) if err != nil { return err } if state.InitProcessStartTime == currentStartTime { err = syscall.Kill(pid, 9) syscall.Wait4(pid, nil, 0, nil) } return err } func (d *driver) Info(id string) execdriver.Info { return &info{ ID: id, driver: d, } } func (d *driver) Name() string { return fmt.Sprintf("%s-%s", DriverName, Version) } func (d *driver) GetPidsForContainer(id string) ([]int, error) { d.Lock() active := d.activeContainers[id] d.Unlock() if active == nil { return nil, fmt.Errorf("active container for %s does not exist", id) } return active.Processes() } func (d *driver) writeContainerFile(container *configs.Config, id string) error { data, err := json.Marshal(container) if err != nil { return err } return ioutil.WriteFile(filepath.Join(d.root, id, "container.json"), data, 0655) } func (d *driver) cleanContainer(id string) error { d.Lock() delete(d.activeContainers, id) d.Unlock() return os.RemoveAll(filepath.Join(d.root, id)) } func (d *driver) createContainerRoot(id string) error { return os.MkdirAll(filepath.Join(d.root, id), 0655) } func (d *driver) Clean(id string) error { return os.RemoveAll(filepath.Join(d.root, id)) } func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) { c := 
d.activeContainers[id] if c == nil { return nil, execdriver.ErrNotRunning } now := time.Now() stats, err := c.Stats() if err != nil { return nil, err } memoryLimit := c.Config().Cgroups.Memory // if the container does not have any memory limit specified set the // limit to the machine's memory if memoryLimit == 0 { memoryLimit = d.machineMemory } return &execdriver.ResourceStats{ Stats: stats, Read: now, MemoryLimit: memoryLimit, }, nil } func getEnv(key string, env []string) string { for _, pair := range env { parts := strings.Split(pair, "=") if parts[0] == key { return parts[1] } } return "" } type TtyConsole struct { console libcontainer.Console } func NewTtyConsole(console libcontainer.Console, pipes *execdriver.Pipes, rootuid int) (*TtyConsole, error) { tty := &TtyConsole{ console: console, } if err := tty.AttachPipes(pipes); err != nil { tty.Close() return nil, err } return tty, nil } func (t *TtyConsole) Master() libcontainer.Console { return t.console } func (t *TtyConsole) Resize(h, w int) error { return term.SetWinsize(t.console.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)}) } func (t *TtyConsole) AttachPipes(pipes *execdriver.Pipes) error { go func() { if wb, ok := pipes.Stdout.(interface { CloseWriters() error }); ok { defer wb.CloseWriters() } io.Copy(pipes.Stdout, t.console) }() if pipes.Stdin != nil { go func() { io.Copy(t.console, pipes.Stdin) pipes.Stdin.Close() }() } return nil } func (t *TtyConsole) Close() error { return t.console.Close() } docker-1.6.2/daemon/execdriver/native/info.go0000644000175000017500000000047512524223634020515 0ustar tianontianon// +build linux,cgo package native type info struct { ID string driver *driver } // IsRunning is determined by checking for the container's ID in the // driver's map of active containers func (i *info) IsRunning() bool { _, ok := i.driver.activeContainers[i.ID] return ok } docker-1.6.2/daemon/execdriver/native/template/0000755000175000017500000000000012524223634021040 5ustar tianontianondocker-1.6.2/daemon/execdriver/native/template/default_template.go0000644000175000017500000000415412524223634024712 0ustar tianontianonpackage template import ( "syscall" "github.com/docker/libcontainer/apparmor" "github.com/docker/libcontainer/configs" ) const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV // New returns the docker default configuration for libcontainer func New() *configs.Config { container := &configs.Config{ Capabilities: []string{ "CHOWN", "DAC_OVERRIDE", "FSETID", "FOWNER", "MKNOD", "NET_RAW", "SETGID", "SETUID", "SETFCAP", "SETPCAP", "NET_BIND_SERVICE", "SYS_CHROOT", "KILL", "AUDIT_WRITE", }, Namespaces: configs.Namespaces([]configs.Namespace{ {Type: "NEWNS"}, {Type: "NEWUTS"}, {Type: "NEWIPC"}, {Type: "NEWPID"}, {Type: "NEWNET"}, }), Cgroups: &configs.Cgroup{ Parent: "docker", AllowAllDevices: false, }, Mounts: []*configs.Mount{ { Source: "proc", Destination: "/proc", Device: "proc", Flags: defaultMountFlags, }, { Source: "tmpfs", Destination: "/dev", Device: "tmpfs", Flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, Data: "mode=755", }, { Source: "devpts", Destination: "/dev/pts", Device: "devpts", Flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, Data: "newinstance,ptmxmode=0666,mode=0620,gid=5", }, { Device: "tmpfs", Source: "shm", Destination: "/dev/shm", Data: "mode=1777,size=65536k", Flags: defaultMountFlags, }, { Source: "mqueue", Destination: "/dev/mqueue", Device: "mqueue", Flags: defaultMountFlags, }, { Source: "sysfs", Destination: 
"/sys", Device: "sysfs", Flags: defaultMountFlags | syscall.MS_RDONLY, }, }, MaskPaths: []string{ "/proc/kcore", "/proc/latency_stats", "/proc/timer_stats", }, ReadonlyPaths: []string{ "/proc/asound", "/proc/bus", "/proc/fs", "/proc/irq", "/proc/sys", "/proc/sysrq-trigger", }, } if apparmor.IsEnabled() { container.AppArmorProfile = "docker-default" } return container } docker-1.6.2/daemon/execdriver/native/exec.go0000644000175000017500000000333112524223634020500 0ustar tianontianon// +build linux package native import ( "fmt" "os" "os/exec" "syscall" "github.com/docker/docker/daemon/execdriver" "github.com/docker/libcontainer" _ "github.com/docker/libcontainer/nsenter" "github.com/docker/libcontainer/utils" ) // TODO(vishh): Add support for running in priviledged mode and running as a different user. func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { active := d.activeContainers[c.ID] if active == nil { return -1, fmt.Errorf("No active container exists with ID %s", c.ID) } var term execdriver.Terminal var err error p := &libcontainer.Process{ Args: append([]string{processConfig.Entrypoint}, processConfig.Arguments...), Env: c.ProcessConfig.Env, Cwd: c.WorkingDir, User: c.ProcessConfig.User, } if processConfig.Tty { config := active.Config() rootuid, err := config.HostUID() if err != nil { return -1, err } cons, err := p.NewConsole(rootuid) if err != nil { return -1, err } term, err = NewTtyConsole(cons, pipes, rootuid) } else { p.Stdout = pipes.Stdout p.Stderr = pipes.Stderr p.Stdin = pipes.Stdin term = &execdriver.StdConsole{} } if err != nil { return -1, err } processConfig.Terminal = term if err := active.Start(p); err != nil { return -1, err } if startCallback != nil { pid, err := p.Pid() if err != nil { p.Signal(os.Kill) p.Wait() return -1, err } startCallback(&c.ProcessConfig, pid) } ps, err := p.Wait() if err != nil { exitErr, ok := err.(*exec.ExitError) if !ok { return -1, err } ps = exitErr.ProcessState } return utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), nil } docker-1.6.2/daemon/execdriver/native/driver_unsupported_nocgo.go0000644000175000017500000000036112524223634024704 0ustar tianontianon// +build linux,!cgo package native import ( "fmt" "github.com/docker/docker/daemon/execdriver" ) func NewDriver(root, initPath string) (execdriver.Driver, error) { return nil, fmt.Errorf("native driver not supported on non-linux") } docker-1.6.2/daemon/execdriver/native/utils.go0000644000175000017500000000077212524223634020722 0ustar tianontianon// +build linux package native //func findUserArgs() []string { //for i, a := range os.Args { //if a == "--" { //return os.Args[i+1:] //} //} //return []string{} //} //// loadConfigFromFd loads a container's config from the sync pipe that is provided by //// fd 3 when running a process //func loadConfigFromFd() (*configs.Config, error) { //var config *libcontainer.Config //if err := json.NewDecoder(os.NewFile(3, "child")).Decode(&config); err != nil { //return nil, err //} //return config, nil //} docker-1.6.2/daemon/execdriver/native/driver_unsupported.go0000644000175000017500000000035512524223634023522 0ustar tianontianon// +build !linux package native import ( "fmt" "github.com/docker/docker/daemon/execdriver" ) func NewDriver(root, initPath string) (execdriver.Driver, error) { return nil, fmt.Errorf("native driver not supported on non-linux") } 
docker-1.6.2/daemon/execdriver/termconsole.go0000644000175000017500000000143712524223634020625 0ustar tianontianonpackage execdriver import ( "io" "os/exec" ) type StdConsole struct { } func NewStdConsole(processConfig *ProcessConfig, pipes *Pipes) (*StdConsole, error) { std := &StdConsole{} if err := std.AttachPipes(&processConfig.Cmd, pipes); err != nil { return nil, err } return std, nil } func (s *StdConsole) AttachPipes(command *exec.Cmd, pipes *Pipes) error { command.Stdout = pipes.Stdout command.Stderr = pipes.Stderr if pipes.Stdin != nil { stdin, err := command.StdinPipe() if err != nil { return err } go func() { defer stdin.Close() io.Copy(stdin, pipes.Stdin) }() } return nil } func (s *StdConsole) Resize(h, w int) error { // we do not need to resize a non-tty return nil } func (s *StdConsole) Close() error { // nothing to close here return nil } docker-1.6.2/daemon/execdriver/lxc/0000755000175000017500000000000012524223634016525 5ustar tianontianondocker-1.6.2/daemon/execdriver/lxc/info_test.go0000644000175000017500000000107212524223634021046 0ustar tianontianonpackage lxc import ( "testing" ) func TestParseRunningInfo(t *testing.T) { raw := ` state: RUNNING pid: 50` info, err := parseLxcInfo(raw) if err != nil { t.Fatal(err) } if !info.Running { t.Fatal("info should return a running state") } if info.Pid != 50 { t.Fatalf("info should have pid 50 got %d", info.Pid) } } func TestEmptyInfo(t *testing.T) { _, err := parseLxcInfo("") if err == nil { t.Fatal("error should not be nil") } } func TestBadInfo(t *testing.T) { _, err := parseLxcInfo("state") if err != nil { t.Fatal(err) } } docker-1.6.2/daemon/execdriver/lxc/lxc_init_unsupported.go0000644000175000017500000000016212524223634023334 0ustar tianontianon// +build !linux package lxc func finalizeNamespace(args *InitArgs) error { panic("Not supported on darwin") } docker-1.6.2/daemon/execdriver/lxc/init.go0000644000175000017500000000636312524223634020027 0ustar tianontianonpackage lxc import ( "encoding/json" "flag" "fmt" "io/ioutil" "log" "os" "os/exec" "runtime" "strings" "syscall" "github.com/docker/docker/pkg/reexec" ) // Args provided to the init function for a driver type InitArgs struct { User string Gateway string Ip string WorkDir string Privileged bool Env []string Args []string Mtu int Console string Pipe int Root string CapAdd string CapDrop string } func init() { // as always, lxc requires a hack to get this to work reexec.Register("/.dockerinit", dockerInitializer) } func dockerInitializer() { initializer() } // initializer is the lxc driver's init function that is run inside the namespace to set up // additional configurations func initializer() { runtime.LockOSThread() args := getArgs() if err := setupNamespace(args); err != nil { log.Fatal(err) } } func setupNamespace(args *InitArgs) error { if err := setupEnv(args); err != nil { return err } if err := finalizeNamespace(args); err != nil { return err } path, err := exec.LookPath(args.Args[0]) if err != nil { log.Printf("Unable to locate %v", args.Args[0]) os.Exit(127) } if err := syscall.Exec(path, args.Args, os.Environ()); err != nil { return fmt.Errorf("dockerinit unable to execute %s - %s", path, err) } return nil } func getArgs() *InitArgs { var ( // Get cmdline arguments user = flag.String("u", "", "username or uid") gateway = flag.String("g", "", "gateway address") ip = flag.String("i", "", "ip address") workDir = flag.String("w", "", "workdir") privileged = flag.Bool("privileged", false, "privileged mode") mtu = flag.Int("mtu", 1500, "interface mtu") 
capAdd = flag.String("cap-add", "", "capabilities to add") capDrop = flag.String("cap-drop", "", "capabilities to drop") ) flag.Parse() return &InitArgs{ User: *user, Gateway: *gateway, Ip: *ip, WorkDir: *workDir, Privileged: *privileged, Args: flag.Args(), Mtu: *mtu, CapAdd: *capAdd, CapDrop: *capDrop, } } // Clear environment pollution introduced by lxc-start func setupEnv(args *InitArgs) error { // Get env var env []string content, err := ioutil.ReadFile(".dockerenv") if err != nil { return fmt.Errorf("Unable to load environment variables: %v", err) } if err := json.Unmarshal(content, &env); err != nil { return fmt.Errorf("Unable to unmarshal environment variables: %v", err) } // Propagate the plugin-specific container env variable env = append(env, "container="+os.Getenv("container")) args.Env = env os.Clearenv() for _, kv := range args.Env { parts := strings.SplitN(kv, "=", 2) if len(parts) == 1 { parts = append(parts, "") } os.Setenv(parts[0], parts[1]) } return nil } // Setup working directory func setupWorkingDirectory(args *InitArgs) error { if args.WorkDir == "" { return nil } if err := syscall.Chdir(args.WorkDir); err != nil { return fmt.Errorf("Unable to change dir to %v: %v", args.WorkDir, err) } return nil } func getEnv(args *InitArgs, key string) string { for _, kv := range args.Env { parts := strings.SplitN(kv, "=", 2) if parts[0] == key && len(parts) == 2 { return parts[1] } } return "" } docker-1.6.2/daemon/execdriver/lxc/driver.go0000644000175000017500000004641612524223634020362 0ustar tianontianonpackage lxc import ( "encoding/json" "errors" "fmt" "io" "io/ioutil" "os" "os/exec" "path" "path/filepath" "strconv" "strings" "sync" "syscall" "time" log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" sysinfo "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/term" "github.com/docker/docker/pkg/version" "github.com/docker/docker/utils" "github.com/docker/libcontainer" "github.com/docker/libcontainer/cgroups" "github.com/docker/libcontainer/configs" "github.com/docker/libcontainer/system" "github.com/docker/libcontainer/user" "github.com/kr/pty" ) const DriverName = "lxc" var ErrExec = errors.New("Unsupported: Exec is not supported by the lxc driver") type driver struct { root string // root path for the driver to use libPath string initPath string apparmor bool sharedRoot bool activeContainers map[string]*activeContainer machineMemory int64 sync.Mutex } type activeContainer struct { container *configs.Config cmd *exec.Cmd } func NewDriver(root, libPath, initPath string, apparmor bool) (*driver, error) { if err := os.MkdirAll(root, 0700); err != nil { return nil, err } // setup unconfined symlink if err := linkLxcStart(root); err != nil { return nil, err } meminfo, err := sysinfo.ReadMemInfo() if err != nil { return nil, err } return &driver{ apparmor: apparmor, root: root, libPath: libPath, initPath: initPath, sharedRoot: rootIsShared(), activeContainers: make(map[string]*activeContainer), machineMemory: meminfo.MemTotal, }, nil } func (d *driver) Name() string { version := d.version() return fmt.Sprintf("%s-%s", DriverName, version) } func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) { var ( term execdriver.Terminal err error dataPath = d.containerDir(c.ID) ) if c.ProcessConfig.Tty { term, err = NewTtyConsole(&c.ProcessConfig, pipes) } else { term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes) } c.ProcessConfig.Terminal = term 
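// Editor's note: from here Run translates the generic execdriver.Command into
// an lxc-start invocation: it records the container as active, bind-mounts
// dockerinit into the rootfs, writes the env file and the generated lxc
// config, and then assembles the lxc-start argument list (adding -F for
// lxc >= 1.1, which otherwise daemonizes, and --share-net/--share-ipc when
// joining another container's namespaces).
// NOTE(editor): the error returned by NewTtyConsole/NewStdConsole just above
// is overwritten unchecked by the createContainer call that follows.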
container, err := d.createContainer(c) if err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } d.Lock() d.activeContainers[c.ID] = &activeContainer{ container: container, cmd: &c.ProcessConfig.Cmd, } d.Unlock() c.Mounts = append(c.Mounts, execdriver.Mount{ Source: d.initPath, Destination: c.InitPath, Writable: false, Private: true, }) if err := d.generateEnvConfig(c); err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } configPath, err := d.generateLXCConfig(c) if err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } params := []string{ "lxc-start", "-n", c.ID, "-f", configPath, } // From lxc>=1.1 the default behavior is to daemonize containers after start lxcVersion := version.Version(d.version()) if lxcVersion.GreaterThanOrEqualTo(version.Version("1.1")) { params = append(params, "-F") } if c.Network.ContainerID != "" { params = append(params, "--share-net", c.Network.ContainerID, ) } if c.Ipc != nil { if c.Ipc.ContainerID != "" { params = append(params, "--share-ipc", c.Ipc.ContainerID, ) } else if c.Ipc.HostIpc { params = append(params, "--share-ipc", "1", ) } } params = append(params, "--", c.InitPath, ) if c.Network.Interface != nil { params = append(params, "-g", c.Network.Interface.Gateway, "-i", fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen), ) } params = append(params, "-mtu", strconv.Itoa(c.Network.Mtu), ) if c.ProcessConfig.User != "" { params = append(params, "-u", c.ProcessConfig.User) } if c.ProcessConfig.Privileged { if d.apparmor { params[0] = path.Join(d.root, "lxc-start-unconfined") } params = append(params, "-privileged") } if c.WorkingDir != "" { params = append(params, "-w", c.WorkingDir) } params = append(params, "--", c.ProcessConfig.Entrypoint) params = append(params, c.ProcessConfig.Arguments...) if d.sharedRoot { // lxc-start really needs / to be non-shared, or all kinds of stuff break // when lxc-start unmount things and those unmounts propagate to the main // mount namespace. // What we really want is to clone into a new namespace and then // mount / MS_REC|MS_SLAVE, but since we can't really clone or fork // without exec in go we have to do this horrible shell hack... shellString := "mount --make-rslave /; exec " + utils.ShellQuoteArguments(params) params = []string{ "unshare", "-m", "--", "/bin/sh", "-c", shellString, } } log.Debugf("lxc params %s", params) var ( name = params[0] arg = params[1:] ) aname, err := exec.LookPath(name) if err != nil { aname = name } c.ProcessConfig.Path = aname c.ProcessConfig.Args = append([]string{name}, arg...) 
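// Editor's note: an assembled command line typically looks like the following
// (all values are illustrative, not upstream defaults):
//   lxc-start -n <id> -f <libPath>/containers/<id>/config.lxc -- /.dockerinit
//       -g 172.17.42.1 -i 172.17.0.2/16 -mtu 1500 -- <entrypoint> <args...>
// and on hosts where / is a shared mount the whole invocation is wrapped as:
//   unshare -m -- /bin/sh -c "mount --make-rslave /; exec lxc-start ..."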
if err := createDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } if err := c.ProcessConfig.Start(); err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } var ( waitErr error waitLock = make(chan struct{}) ) go func() { if err := c.ProcessConfig.Wait(); err != nil { if _, ok := err.(*exec.ExitError); !ok { // Do not propagate the error if it's simply a status code != 0 waitErr = err } } close(waitLock) }() terminate := func(terr error) (execdriver.ExitStatus, error) { if c.ProcessConfig.Process != nil { c.ProcessConfig.Process.Kill() c.ProcessConfig.Wait() } return execdriver.ExitStatus{ExitCode: -1}, terr } // Poll lxc for RUNNING status pid, err := d.waitForStart(c, waitLock) if err != nil { return terminate(err) } cgroupPaths, err := cgroupPaths(c.ID) if err != nil { return terminate(err) } state := &libcontainer.State{ InitProcessPid: pid, CgroupPaths: cgroupPaths, } f, err := os.Create(filepath.Join(dataPath, "state.json")) if err != nil { return terminate(err) } defer f.Close() if err := json.NewEncoder(f).Encode(state); err != nil { return terminate(err) } c.ContainerPid = pid if startCallback != nil { log.Debugf("Invoking startCallback") startCallback(&c.ProcessConfig, pid) } oomKill := false oomKillNotification, err := notifyOnOOM(cgroupPaths) <-waitLock if err == nil { _, oomKill = <-oomKillNotification log.Debugf("oomKill error %s waitErr %s", oomKill, waitErr) } else { log.Warnf("Your kernel does not support OOM notifications: %s", err) } // check oom error exitCode := getExitCode(c) if oomKill { exitCode = 137 } return execdriver.ExitStatus{ExitCode: exitCode, OOMKilled: oomKill}, waitErr } // copy from libcontainer func notifyOnOOM(paths map[string]string) (<-chan struct{}, error) { dir := paths["memory"] if dir == "" { return nil, fmt.Errorf("There is no path for %q in state", "memory") } oomControl, err := os.Open(filepath.Join(dir, "memory.oom_control")) if err != nil { return nil, err } fd, _, syserr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0) if syserr != 0 { oomControl.Close() return nil, syserr } eventfd := os.NewFile(fd, "eventfd") eventControlPath := filepath.Join(dir, "cgroup.event_control") data := fmt.Sprintf("%d %d", eventfd.Fd(), oomControl.Fd()) if err := ioutil.WriteFile(eventControlPath, []byte(data), 0700); err != nil { eventfd.Close() oomControl.Close() return nil, err } ch := make(chan struct{}) go func() { defer func() { close(ch) eventfd.Close() oomControl.Close() }() buf := make([]byte, 8) for { if _, err := eventfd.Read(buf); err != nil { return } // When a cgroup is destroyed, an event is sent to eventfd. // So if the control path is gone, return instead of notifying. 
if _, err := os.Lstat(eventControlPath); os.IsNotExist(err) { return } ch <- struct{}{} } }() return ch, nil } // createContainer populates and configures the container type with the // data provided by the execdriver.Command func (d *driver) createContainer(c *execdriver.Command) (*configs.Config, error) { container := execdriver.InitContainer(c) if err := execdriver.SetupCgroups(container, c); err != nil { return nil, err } return container, nil } // Return an map of susbystem -> container cgroup func cgroupPaths(containerId string) (map[string]string, error) { subsystems, err := cgroups.GetAllSubsystems() if err != nil { return nil, err } log.Debugf("subsystems: %s", subsystems) paths := make(map[string]string) for _, subsystem := range subsystems { cgroupRoot, cgroupDir, err := findCgroupRootAndDir(subsystem) log.Debugf("cgroup path %s %s", cgroupRoot, cgroupDir) if err != nil { //unsupported subystem continue } path := filepath.Join(cgroupRoot, cgroupDir, "lxc", containerId) paths[subsystem] = path } return paths, nil } // this is copy from old libcontainer nodes.go func createDeviceNodes(rootfs string, nodesToCreate []*configs.Device) error { oldMask := syscall.Umask(0000) defer syscall.Umask(oldMask) for _, node := range nodesToCreate { if err := createDeviceNode(rootfs, node); err != nil { return err } } return nil } // Creates the device node in the rootfs of the container. func createDeviceNode(rootfs string, node *configs.Device) error { var ( dest = filepath.Join(rootfs, node.Path) parent = filepath.Dir(dest) ) if err := os.MkdirAll(parent, 0755); err != nil { return err } fileMode := node.FileMode switch node.Type { case 'c': fileMode |= syscall.S_IFCHR case 'b': fileMode |= syscall.S_IFBLK default: return fmt.Errorf("%c is not a valid device type for device %s", node.Type, node.Path) } if err := syscall.Mknod(dest, uint32(fileMode), node.Mkdev()); err != nil && !os.IsExist(err) { return fmt.Errorf("mknod %s %s", node.Path, err) } if err := syscall.Chown(dest, int(node.Uid), int(node.Gid)); err != nil { return fmt.Errorf("chown %s to %d:%d", node.Path, node.Uid, node.Gid) } return nil } // setupUser changes the groups, gid, and uid for the user inside the container // copy from libcontainer, cause not it's private func setupUser(userSpec string) error { // Set up defaults. 
defaultExecUser := user.ExecUser{ Uid: syscall.Getuid(), Gid: syscall.Getgid(), Home: "/", } passwdPath, err := user.GetPasswdPath() if err != nil { return err } groupPath, err := user.GetGroupPath() if err != nil { return err } execUser, err := user.GetExecUserPath(userSpec, &defaultExecUser, passwdPath, groupPath) if err != nil { return err } if err := syscall.Setgroups(execUser.Sgids); err != nil { return err } if err := system.Setgid(execUser.Gid); err != nil { return err } if err := system.Setuid(execUser.Uid); err != nil { return err } // if we didn't get HOME already, set it based on the user's HOME if envHome := os.Getenv("HOME"); envHome == "" { if err := os.Setenv("HOME", execUser.Home); err != nil { return err } } return nil } /// Return the exit code of the process // if the process has not exited -1 will be returned func getExitCode(c *execdriver.Command) int { if c.ProcessConfig.ProcessState == nil { return -1 } return c.ProcessConfig.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() } func (d *driver) Kill(c *execdriver.Command, sig int) error { return KillLxc(c.ID, sig) } func (d *driver) Pause(c *execdriver.Command) error { _, err := exec.LookPath("lxc-freeze") if err == nil { output, errExec := exec.Command("lxc-freeze", "-n", c.ID).CombinedOutput() if errExec != nil { return fmt.Errorf("Err: %s Output: %s", errExec, output) } } return err } func (d *driver) Unpause(c *execdriver.Command) error { _, err := exec.LookPath("lxc-unfreeze") if err == nil { output, errExec := exec.Command("lxc-unfreeze", "-n", c.ID).CombinedOutput() if errExec != nil { return fmt.Errorf("Err: %s Output: %s", errExec, output) } } return err } func (d *driver) Terminate(c *execdriver.Command) error { return KillLxc(c.ID, 9) } func (d *driver) version() string { var ( version string output []byte err error ) if _, errPath := exec.LookPath("lxc-version"); errPath == nil { output, err = exec.Command("lxc-version").CombinedOutput() } else { output, err = exec.Command("lxc-start", "--version").CombinedOutput() } if err == nil { version = strings.TrimSpace(string(output)) if parts := strings.SplitN(version, ":", 2); len(parts) == 2 { version = strings.TrimSpace(parts[1]) } } return version } func KillLxc(id string, sig int) error { var ( err error output []byte ) _, err = exec.LookPath("lxc-kill") if err == nil { output, err = exec.Command("lxc-kill", "-n", id, strconv.Itoa(sig)).CombinedOutput() } else { output, err = exec.Command("lxc-stop", "-k", "-n", id, strconv.Itoa(sig)).CombinedOutput() } if err != nil { return fmt.Errorf("Err: %s Output: %s", err, output) } return nil } // wait for the process to start and return the pid for the process func (d *driver) waitForStart(c *execdriver.Command, waitLock chan struct{}) (int, error) { var ( err error output []byte ) // We wait for the container to be fully running. // Timeout after 5 seconds. In case of broken pipe, just retry. 
// Note: The container can run and finish correctly before // the end of this loop for now := time.Now(); time.Since(now) < 5*time.Second; { select { case <-waitLock: // If the process dies while waiting for it, just return return -1, nil default: } output, err = d.getInfo(c.ID) if err == nil { info, err := parseLxcInfo(string(output)) if err != nil { return -1, err } if info.Running { return info.Pid, nil } } time.Sleep(50 * time.Millisecond) } return -1, execdriver.ErrNotRunning } func (d *driver) getInfo(id string) ([]byte, error) { return exec.Command("lxc-info", "-n", id).CombinedOutput() } type info struct { ID string driver *driver } func (i *info) IsRunning() bool { var running bool output, err := i.driver.getInfo(i.ID) if err != nil { log.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output) return false } if strings.Contains(string(output), "RUNNING") { running = true } return running } func (d *driver) Info(id string) execdriver.Info { return &info{ ID: id, driver: d, } } func findCgroupRootAndDir(subsystem string) (string, string, error) { cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem) if err != nil { return "", "", err } cgroupDir, err := cgroups.GetThisCgroupDir(subsystem) if err != nil { return "", "", err } return cgroupRoot, cgroupDir, nil } func (d *driver) GetPidsForContainer(id string) ([]int, error) { pids := []int{} // cpu is chosen because it is the only non optional subsystem in cgroups subsystem := "cpu" cgroupRoot, cgroupDir, err := findCgroupRootAndDir(subsystem) if err != nil { return pids, err } filename := filepath.Join(cgroupRoot, cgroupDir, id, "tasks") if _, err := os.Stat(filename); os.IsNotExist(err) { // With more recent lxc versions use, cgroup will be in lxc/ filename = filepath.Join(cgroupRoot, cgroupDir, "lxc", id, "tasks") } output, err := ioutil.ReadFile(filename) if err != nil { return pids, err } for _, p := range strings.Split(string(output), "\n") { if len(p) == 0 { continue } pid, err := strconv.Atoi(p) if err != nil { return pids, fmt.Errorf("Invalid pid '%s': %s", p, err) } pids = append(pids, pid) } return pids, nil } func linkLxcStart(root string) error { sourcePath, err := exec.LookPath("lxc-start") if err != nil { return err } targetPath := path.Join(root, "lxc-start-unconfined") if _, err := os.Lstat(targetPath); err != nil && !os.IsNotExist(err) { return err } else if err == nil { if err := os.Remove(targetPath); err != nil { return err } } return os.Symlink(sourcePath, targetPath) } // TODO: This can be moved to the mountinfo reader in the mount pkg func rootIsShared() bool { if data, err := ioutil.ReadFile("/proc/self/mountinfo"); err == nil { for _, line := range strings.Split(string(data), "\n") { cols := strings.Split(line, " ") if len(cols) >= 6 && cols[4] == "/" { return strings.HasPrefix(cols[6], "shared") } } } // No idea, probably safe to assume so return true } func (d *driver) containerDir(containerId string) string { return path.Join(d.libPath, "containers", containerId) } func (d *driver) generateLXCConfig(c *execdriver.Command) (string, error) { root := path.Join(d.containerDir(c.ID), "config.lxc") fo, err := os.Create(root) if err != nil { return "", err } defer fo.Close() if err := LxcTemplateCompiled.Execute(fo, struct { *execdriver.Command AppArmor bool }{ Command: c, AppArmor: d.apparmor, }); err != nil { return "", err } return root, nil } func (d *driver) generateEnvConfig(c *execdriver.Command) error { data, err := json.Marshal(c.ProcessConfig.Env) if err != nil { return err } 
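// Editor's note: the marshalled environment is written below to
// <libPath>/containers/<id>/config.env and bind-mounted read-only into the
// container as /.dockerenv, where setupEnv (lxc/init.go) decodes it again to
// rebuild the process environment that lxc-start would otherwise clobber.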
p := path.Join(d.libPath, "containers", c.ID, "config.env") c.Mounts = append(c.Mounts, execdriver.Mount{ Source: p, Destination: "/.dockerenv", Writable: false, Private: true, }) return ioutil.WriteFile(p, data, 0600) } // Clean not implemented for lxc func (d *driver) Clean(id string) error { return nil } type TtyConsole struct { MasterPty *os.File SlavePty *os.File } func NewTtyConsole(processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes) (*TtyConsole, error) { // lxc is special in that we cannot create the master outside of the container without // opening the slave because we have nothing to provide to the cmd. We have to open both then do // the crazy setup on command right now instead of passing the console path to lxc and telling it // to open up that console. we save a couple of openfiles in the native driver because we can do // this. ptyMaster, ptySlave, err := pty.Open() if err != nil { return nil, err } tty := &TtyConsole{ MasterPty: ptyMaster, SlavePty: ptySlave, } if err := tty.AttachPipes(&processConfig.Cmd, pipes); err != nil { tty.Close() return nil, err } processConfig.Console = tty.SlavePty.Name() return tty, nil } func (t *TtyConsole) Master() *os.File { return t.MasterPty } func (t *TtyConsole) Resize(h, w int) error { return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)}) } func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *execdriver.Pipes) error { command.Stdout = t.SlavePty command.Stderr = t.SlavePty go func() { if wb, ok := pipes.Stdout.(interface { CloseWriters() error }); ok { defer wb.CloseWriters() } io.Copy(pipes.Stdout, t.MasterPty) }() if pipes.Stdin != nil { command.Stdin = t.SlavePty command.SysProcAttr.Setctty = true go func() { io.Copy(t.MasterPty, pipes.Stdin) pipes.Stdin.Close() }() } return nil } func (t *TtyConsole) Close() error { t.SlavePty.Close() return t.MasterPty.Close() } func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { return -1, ErrExec } func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) { if _, ok := d.activeContainers[id]; !ok { return nil, fmt.Errorf("%s is not a key in active containers", id) } return execdriver.Stats(d.containerDir(id), d.activeContainers[id].container.Cgroups.Memory, d.machineMemory) } docker-1.6.2/daemon/execdriver/lxc/info.go0000644000175000017500000000145212524223634020011 0ustar tianontianonpackage lxc import ( "bufio" "errors" "strconv" "strings" ) var ( ErrCannotParse = errors.New("cannot parse raw input") ) type lxcInfo struct { Running bool Pid int } func parseLxcInfo(raw string) (*lxcInfo, error) { if raw == "" { return nil, ErrCannotParse } var ( err error s = bufio.NewScanner(strings.NewReader(raw)) info = &lxcInfo{} ) for s.Scan() { text := s.Text() if s.Err() != nil { return nil, s.Err() } parts := strings.Split(text, ":") if len(parts) < 2 { continue } switch strings.ToLower(strings.TrimSpace(parts[0])) { case "state": info.Running = strings.TrimSpace(parts[1]) == "RUNNING" case "pid": info.Pid, err = strconv.Atoi(strings.TrimSpace(parts[1])) if err != nil { return nil, err } } } return info, nil } docker-1.6.2/daemon/execdriver/lxc/lxc_template_unit_test.go0000644000175000017500000002200112524223634023626 0ustar tianontianon// +build linux package lxc import ( "bufio" "fmt" "io/ioutil" "math/rand" "os" "path" "strings" "testing" "time" "github.com/docker/docker/daemon/execdriver" nativeTemplate 
"github.com/docker/docker/daemon/execdriver/native/template" "github.com/docker/libcontainer/configs" "github.com/syndtr/gocapability/capability" ) func TestLXCConfig(t *testing.T) { root, err := ioutil.TempDir("", "TestLXCConfig") if err != nil { t.Fatal(err) } defer os.RemoveAll(root) os.MkdirAll(path.Join(root, "containers", "1"), 0777) // Memory is allocated randomly for testing rand.Seed(time.Now().UTC().UnixNano()) var ( memMin = 33554432 memMax = 536870912 mem = memMin + rand.Intn(memMax-memMin) cpuMin = 100 cpuMax = 10000 cpu = cpuMin + rand.Intn(cpuMax-cpuMin) ) driver, err := NewDriver(root, root, "", false) if err != nil { t.Fatal(err) } command := &execdriver.Command{ ID: "1", Resources: &execdriver.Resources{ Memory: int64(mem), CpuShares: int64(cpu), }, Network: &execdriver.Network{ Mtu: 1500, Interface: nil, }, AllowedDevices: make([]*configs.Device, 0), ProcessConfig: execdriver.ProcessConfig{}, } p, err := driver.generateLXCConfig(command) if err != nil { t.Fatal(err) } grepFile(t, p, fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem)) grepFile(t, p, fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2)) } func TestCustomLxcConfig(t *testing.T) { root, err := ioutil.TempDir("", "TestCustomLxcConfig") if err != nil { t.Fatal(err) } defer os.RemoveAll(root) os.MkdirAll(path.Join(root, "containers", "1"), 0777) driver, err := NewDriver(root, root, "", false) if err != nil { t.Fatal(err) } processConfig := execdriver.ProcessConfig{ Privileged: false, } command := &execdriver.Command{ ID: "1", LxcConfig: []string{ "lxc.utsname = docker", "lxc.cgroup.cpuset.cpus = 0,1", }, Network: &execdriver.Network{ Mtu: 1500, Interface: nil, }, ProcessConfig: processConfig, } p, err := driver.generateLXCConfig(command) if err != nil { t.Fatal(err) } grepFile(t, p, "lxc.utsname = docker") grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1") } func grepFile(t *testing.T, path string, pattern string) { grepFileWithReverse(t, path, pattern, false) } func grepFileWithReverse(t *testing.T, path string, pattern string, inverseGrep bool) { f, err := os.Open(path) if err != nil { t.Fatal(err) } defer f.Close() r := bufio.NewReader(f) var ( line string ) err = nil for err == nil { line, err = r.ReadString('\n') if strings.Contains(line, pattern) == true { if inverseGrep { t.Fatalf("grepFile: pattern \"%s\" found in \"%s\"", pattern, path) } return } } if inverseGrep { return } t.Fatalf("grepFile: pattern \"%s\" not found in \"%s\"", pattern, path) } func TestEscapeFstabSpaces(t *testing.T) { var testInputs = map[string]string{ " ": "\\040", "": "", "/double space": "/double\\040\\040space", "/some long test string": "/some\\040long\\040test\\040string", "/var/lib/docker": "/var/lib/docker", " leading": "\\040leading", "trailing ": "trailing\\040", } for in, exp := range testInputs { if out := escapeFstabSpaces(in); exp != out { t.Logf("Expected %s got %s", exp, out) t.Fail() } } } func TestIsDirectory(t *testing.T) { tempDir, err := ioutil.TempDir("", "TestIsDir") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempDir) tempFile, err := ioutil.TempFile(tempDir, "TestIsDirFile") if err != nil { t.Fatal(err) } if isDirectory(tempDir) != "dir" { t.Logf("Could not identify %s as a directory", tempDir) t.Fail() } if isDirectory(tempFile.Name()) != "file" { t.Logf("Could not identify %s as a file", tempFile.Name()) t.Fail() } } func TestCustomLxcConfigMounts(t *testing.T) { root, err := ioutil.TempDir("", "TestCustomLxcConfig") if err != nil { t.Fatal(err) } defer os.RemoveAll(root) tempDir, 
err := ioutil.TempDir("", "TestIsDir") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempDir) tempFile, err := ioutil.TempFile(tempDir, "TestIsDirFile") if err != nil { t.Fatal(err) } os.MkdirAll(path.Join(root, "containers", "1"), 0777) driver, err := NewDriver(root, root, "", false) if err != nil { t.Fatal(err) } processConfig := execdriver.ProcessConfig{ Privileged: false, } mounts := []execdriver.Mount{ { Source: tempDir, Destination: tempDir, Writable: false, Private: true, }, { Source: tempFile.Name(), Destination: tempFile.Name(), Writable: true, Private: true, }, } command := &execdriver.Command{ ID: "1", LxcConfig: []string{ "lxc.utsname = docker", "lxc.cgroup.cpuset.cpus = 0,1", }, Network: &execdriver.Network{ Mtu: 1500, Interface: nil, }, Mounts: mounts, ProcessConfig: processConfig, } p, err := driver.generateLXCConfig(command) if err != nil { t.Fatal(err) } grepFile(t, p, "lxc.utsname = docker") grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1") grepFile(t, p, fmt.Sprintf("lxc.mount.entry = %s %s none rbind,ro,create=%s 0 0", tempDir, "/"+tempDir, "dir")) grepFile(t, p, fmt.Sprintf("lxc.mount.entry = %s %s none rbind,rw,create=%s 0 0", tempFile.Name(), "/"+tempFile.Name(), "file")) } func TestCustomLxcConfigMisc(t *testing.T) { root, err := ioutil.TempDir("", "TestCustomLxcConfig") if err != nil { t.Fatal(err) } defer os.RemoveAll(root) os.MkdirAll(path.Join(root, "containers", "1"), 0777) driver, err := NewDriver(root, root, "", true) if err != nil { t.Fatal(err) } processConfig := execdriver.ProcessConfig{ Privileged: false, } processConfig.Env = []string{"HOSTNAME=testhost"} command := &execdriver.Command{ ID: "1", LxcConfig: []string{ "lxc.cgroup.cpuset.cpus = 0,1", }, Network: &execdriver.Network{ Mtu: 1500, Interface: &execdriver.NetworkInterface{ Gateway: "10.10.10.1", IPAddress: "10.10.10.10", IPPrefixLen: 24, Bridge: "docker0", }, }, ProcessConfig: processConfig, CapAdd: []string{"net_admin", "syslog"}, CapDrop: []string{"kill", "mknod"}, AppArmorProfile: "lxc-container-default-with-nesting", } p, err := driver.generateLXCConfig(command) if err != nil { t.Fatal(err) } // network grepFile(t, p, "lxc.network.type = veth") grepFile(t, p, "lxc.network.link = docker0") grepFile(t, p, "lxc.network.name = eth0") grepFile(t, p, "lxc.network.ipv4 = 10.10.10.10/24") grepFile(t, p, "lxc.network.ipv4.gateway = 10.10.10.1") grepFile(t, p, "lxc.network.flags = up") grepFile(t, p, "lxc.aa_profile = lxc-container-default-with-nesting") // hostname grepFile(t, p, "lxc.utsname = testhost") grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1") container := nativeTemplate.New() for _, cap := range container.Capabilities { realCap := execdriver.GetCapability(cap) numCap := fmt.Sprintf("%d", realCap.Value) if cap != "MKNOD" && cap != "KILL" { grepFile(t, p, fmt.Sprintf("lxc.cap.keep = %s", numCap)) } } grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = %d", capability.CAP_KILL), true) grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = %d", capability.CAP_MKNOD), true) } func TestCustomLxcConfigMiscOverride(t *testing.T) { root, err := ioutil.TempDir("", "TestCustomLxcConfig") if err != nil { t.Fatal(err) } defer os.RemoveAll(root) os.MkdirAll(path.Join(root, "containers", "1"), 0777) driver, err := NewDriver(root, root, "", false) if err != nil { t.Fatal(err) } processConfig := execdriver.ProcessConfig{ Privileged: false, } processConfig.Env = []string{"HOSTNAME=testhost"} command := &execdriver.Command{ ID: "1", LxcConfig: []string{ "lxc.cgroup.cpuset.cpus = 0,1", "lxc.network.ipv4 = 
172.0.0.1", }, Network: &execdriver.Network{ Mtu: 1500, Interface: &execdriver.NetworkInterface{ Gateway: "10.10.10.1", IPAddress: "10.10.10.10", IPPrefixLen: 24, Bridge: "docker0", }, }, ProcessConfig: processConfig, CapAdd: []string{"NET_ADMIN", "SYSLOG"}, CapDrop: []string{"KILL", "MKNOD"}, } p, err := driver.generateLXCConfig(command) if err != nil { t.Fatal(err) } // network grepFile(t, p, "lxc.network.type = veth") grepFile(t, p, "lxc.network.link = docker0") grepFile(t, p, "lxc.network.name = eth0") grepFile(t, p, "lxc.network.ipv4 = 172.0.0.1") grepFile(t, p, "lxc.network.ipv4.gateway = 10.10.10.1") grepFile(t, p, "lxc.network.flags = up") // hostname grepFile(t, p, "lxc.utsname = testhost") grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1") container := nativeTemplate.New() for _, cap := range container.Capabilities { realCap := execdriver.GetCapability(cap) numCap := fmt.Sprintf("%d", realCap.Value) if cap != "MKNOD" && cap != "KILL" { grepFile(t, p, fmt.Sprintf("lxc.cap.keep = %s", numCap)) } } grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = %d", capability.CAP_KILL), true) grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = %d", capability.CAP_MKNOD), true) } docker-1.6.2/daemon/execdriver/lxc/lxc_init_linux.go0000644000175000017500000000054612524223634022111 0ustar tianontianonpackage lxc import ( "fmt" "github.com/docker/libcontainer/utils" ) func finalizeNamespace(args *InitArgs) error { if err := utils.CloseExecFrom(3); err != nil { return err } if err := setupUser(args.User); err != nil { return fmt.Errorf("setup user %s", err) } if err := setupWorkingDirectory(args); err != nil { return err } return nil } docker-1.6.2/daemon/execdriver/lxc/lxc_template.go0000644000175000017500000001564212524223634021545 0ustar tianontianonpackage lxc import ( "fmt" "os" "strings" "text/template" log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" nativeTemplate "github.com/docker/docker/daemon/execdriver/native/template" "github.com/docker/docker/utils" "github.com/docker/libcontainer/label" ) const LxcTemplate = ` {{if .Network.Interface}} # network configuration lxc.network.type = veth lxc.network.link = {{.Network.Interface.Bridge}} lxc.network.name = eth0 lxc.network.mtu = {{.Network.Mtu}} lxc.network.flags = up {{else if .Network.HostNetworking}} lxc.network.type = none {{else}} # network is disabled (-n=false) lxc.network.type = empty lxc.network.flags = up lxc.network.mtu = {{.Network.Mtu}} {{end}} # root filesystem {{$ROOTFS := .Rootfs}} lxc.rootfs = {{$ROOTFS}} # use a dedicated pts for the container (and limit the number of pseudo terminal # available) lxc.pts = 1024 # disable the main console lxc.console = none # no controlling tty at all lxc.tty = 1 {{if .ProcessConfig.Privileged}} lxc.cgroup.devices.allow = a {{else}} # no implicit access to devices lxc.cgroup.devices.deny = a #Allow the devices passed to us in the AllowedDevices list. {{range $allowedDevice := .AllowedDevices}} lxc.cgroup.devices.allow = {{$allowedDevice.CgroupString}} {{end}} {{end}} # standard mount point # Use mnt.putold as per https://bugs.launchpad.net/ubuntu/+source/lxc/+bug/986385 lxc.pivotdir = lxc_putold # NOTICE: These mounts must be applied within the namespace {{if .ProcessConfig.Privileged}} # WARNING: mounting procfs and/or sysfs read-write is a known attack vector. # See e.g. http://blog.zx2c4.com/749 and http://bit.ly/T9CkqJ # We mount them read-write here, but later, dockerinit will call the Restrict() function to remount them read-only. 
# We cannot mount them directly read-only, because that would prevent loading AppArmor profiles. lxc.mount.entry = proc {{escapeFstabSpaces $ROOTFS}}/proc proc nosuid,nodev,noexec 0 0 lxc.mount.entry = sysfs {{escapeFstabSpaces $ROOTFS}}/sys sysfs nosuid,nodev,noexec 0 0 {{if .AppArmor}} lxc.aa_profile = unconfined {{end}} {{else}} # In non-privileged mode, lxc will automatically mount /proc and /sys in readonly mode # for security. See: http://man7.org/linux/man-pages/man5/lxc.container.conf.5.html lxc.mount.auto = proc sys {{if .AppArmorProfile}} lxc.aa_profile = {{.AppArmorProfile}} {{end}} {{end}} {{if .ProcessConfig.Tty}} lxc.mount.entry = {{.ProcessConfig.Console}} {{escapeFstabSpaces $ROOTFS}}/dev/console none bind,rw 0 0 {{end}} lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts {{formatMountLabel "newinstance,ptmxmode=0666,nosuid,noexec" ""}} 0 0 lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs {{formatMountLabel "size=65536k,nosuid,nodev,noexec" ""}} 0 0 {{range $value := .Mounts}} {{$createVal := isDirectory $value.Source}} {{if $value.Writable}} lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,rw,create={{$createVal}} 0 0 {{else}} lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none rbind,ro,create={{$createVal}} 0 0 {{end}} {{end}} # limits {{if .Resources}} {{if .Resources.Memory}} lxc.cgroup.memory.limit_in_bytes = {{.Resources.Memory}} lxc.cgroup.memory.soft_limit_in_bytes = {{.Resources.Memory}} {{with $memSwap := getMemorySwap .Resources}} lxc.cgroup.memory.memsw.limit_in_bytes = {{$memSwap}} {{end}} {{end}} {{if .Resources.CpuShares}} lxc.cgroup.cpu.shares = {{.Resources.CpuShares}} {{end}} {{if .Resources.CpusetCpus}} lxc.cgroup.cpuset.cpus = {{.Resources.CpusetCpus}} {{end}} {{end}} {{if .LxcConfig}} {{range $value := .LxcConfig}} lxc.{{$value}} {{end}} {{end}} {{if .Network.Interface}} {{if .Network.Interface.IPAddress}} lxc.network.ipv4 = {{.Network.Interface.IPAddress}}/{{.Network.Interface.IPPrefixLen}} {{end}} {{if .Network.Interface.Gateway}} lxc.network.ipv4.gateway = {{.Network.Interface.Gateway}} {{end}} {{if .Network.Interface.MacAddress}} lxc.network.hwaddr = {{.Network.Interface.MacAddress}} {{end}} {{if .ProcessConfig.Env}} lxc.utsname = {{getHostname .ProcessConfig.Env}} {{end}} {{if .ProcessConfig.Privileged}} # No cap values are needed, as lxc is starting in privileged mode {{else}} {{ with keepCapabilities .CapAdd .CapDrop }} {{range .}} lxc.cap.keep = {{.}} {{end}} {{else}} {{ with dropList .CapDrop }} {{range .}} lxc.cap.drop = {{.}} {{end}} {{end}} {{end}} {{end}} {{end}} ` var LxcTemplateCompiled *template.Template // Escape spaces in strings according to the fstab documentation, which is the // format for "lxc.mount.entry" lines in lxc.conf. See also "man 5 fstab". 
func escapeFstabSpaces(field string) string { return strings.Replace(field, " ", "\\040", -1) } func keepCapabilities(adds []string, drops []string) ([]string, error) { container := nativeTemplate.New() log.Debugf("adds %s drops %s\n", adds, drops) caps, err := execdriver.TweakCapabilities(container.Capabilities, adds, drops) if err != nil { return nil, err } var newCaps []string for _, cap := range caps { log.Debugf("cap %s\n", cap) realCap := execdriver.GetCapability(cap) numCap := fmt.Sprintf("%d", realCap.Value) newCaps = append(newCaps, numCap) } return newCaps, nil } func dropList(drops []string) ([]string, error) { if utils.StringsContainsNoCase(drops, "all") { var newCaps []string for _, capName := range execdriver.GetAllCapabilities() { cap := execdriver.GetCapability(capName) log.Debugf("drop cap %s\n", cap.Key) numCap := fmt.Sprintf("%d", cap.Value) newCaps = append(newCaps, numCap) } return newCaps, nil } return []string{}, nil } func isDirectory(source string) string { f, err := os.Stat(source) log.Debugf("dir: %s\n", source) if err != nil { if os.IsNotExist(err) { return "dir" } return "" } if f.IsDir() { return "dir" } return "file" } func getMemorySwap(v *execdriver.Resources) int64 { // By default, MemorySwap is set to twice the size of RAM. // If you want to omit MemorySwap, set it to `-1'. if v.MemorySwap < 0 { return 0 } return v.Memory * 2 } func getLabel(c map[string][]string, name string) string { label := c["label"] for _, l := range label { parts := strings.SplitN(l, "=", 2) if strings.TrimSpace(parts[0]) == name { return strings.TrimSpace(parts[1]) } } return "" } func getHostname(env []string) string { for _, kv := range env { parts := strings.SplitN(kv, "=", 2) if parts[0] == "HOSTNAME" && len(parts) == 2 { return parts[1] } } return "" } func init() { var err error funcMap := template.FuncMap{ "getMemorySwap": getMemorySwap, "escapeFstabSpaces": escapeFstabSpaces, "formatMountLabel": label.FormatMountLabel, "isDirectory": isDirectory, "keepCapabilities": keepCapabilities, "dropList": dropList, "getHostname": getHostname, } LxcTemplateCompiled, err = template.New("lxc").Funcs(funcMap).Parse(LxcTemplate) if err != nil { panic(err) } } docker-1.6.2/daemon/execdriver/driver.go0000644000175000017500000002415412524223634017567 0ustar tianontianonpackage execdriver import ( "encoding/json" "errors" "io" "io/ioutil" "os" "os/exec" "path/filepath" "strconv" "strings" "time" "github.com/docker/docker/daemon/execdriver/native/template" "github.com/docker/docker/pkg/ulimit" "github.com/docker/libcontainer" "github.com/docker/libcontainer/cgroups/fs" "github.com/docker/libcontainer/configs" ) // Context is a generic key value pair that allows // arbatrary data to be sent type Context map[string]string var ( ErrNotRunning = errors.New("Container is not running") ErrWaitTimeoutReached = errors.New("Wait timeout reached") ErrDriverAlreadyRegistered = errors.New("A driver already registered this docker init function") ErrDriverNotFound = errors.New("The requested docker init has not been found") ) type StartCallback func(*ProcessConfig, int) // Driver specific information based on // processes registered with the driver type Info interface { IsRunning() bool } // Terminal in an interface for drivers to implement // if they want to support Close and Resize calls from // the core type Terminal interface { io.Closer Resize(height, width int) error } type TtyTerminal interface { Master() libcontainer.Console } // ExitStatus provides exit reasons for a container. 
type ExitStatus struct { // The exit code with which the container exited. ExitCode int // Whether the container encountered an OOM. OOMKilled bool } type Driver interface { Run(c *Command, pipes *Pipes, startCallback StartCallback) (ExitStatus, error) // Run executes the process and blocks until the process exits and returns the exit code // Exec executes the process in an existing container, blocks until the process exits and returns the exit code Exec(c *Command, processConfig *ProcessConfig, pipes *Pipes, startCallback StartCallback) (int, error) Kill(c *Command, sig int) error Pause(c *Command) error Unpause(c *Command) error Name() string // Driver name Info(id string) Info // "temporary" hack (until we move state from core to plugins) GetPidsForContainer(id string) ([]int, error) // Returns a list of pids for the given container. Terminate(c *Command) error // kill it with fire Clean(id string) error // clean all traces of container exec Stats(id string) (*ResourceStats, error) // Get resource stats for a running container } // Network settings of the container type Network struct { Interface *NetworkInterface `json:"interface"` // if interface is nil then networking is disabled Mtu int `json:"mtu"` ContainerID string `json:"container_id"` // id of the container to join network. HostNetworking bool `json:"host_networking"` } // IPC settings of the container type Ipc struct { ContainerID string `json:"container_id"` // id of the container to join ipc. HostIpc bool `json:"host_ipc"` } // PID settings of the container type Pid struct { HostPid bool `json:"host_pid"` } type NetworkInterface struct { Gateway string `json:"gateway"` IPAddress string `json:"ip"` IPPrefixLen int `json:"ip_prefix_len"` MacAddress string `json:"mac"` Bridge string `json:"bridge"` GlobalIPv6Address string `json:"global_ipv6"` LinkLocalIPv6Address string `json:"link_local_ipv6"` GlobalIPv6PrefixLen int `json:"global_ipv6_prefix_len"` IPv6Gateway string `json:"ipv6_gateway"` } type Resources struct { Memory int64 `json:"memory"` MemorySwap int64 `json:"memory_swap"` CpuShares int64 `json:"cpu_shares"` CpusetCpus string `json:"cpuset_cpus"` Rlimits []*ulimit.Rlimit `json:"rlimits"` } type ResourceStats struct { *libcontainer.Stats Read time.Time `json:"read"` MemoryLimit int64 `json:"memory_limit"` SystemUsage uint64 `json:"system_usage"` } type Mount struct { Source string `json:"source"` Destination string `json:"destination"` Writable bool `json:"writable"` Private bool `json:"private"` Slave bool `json:"slave"` } // Describes a process that will be run inside a container. 
type ProcessConfig struct { exec.Cmd `json:"-"` Privileged bool `json:"privileged"` User string `json:"user"` Tty bool `json:"tty"` Entrypoint string `json:"entrypoint"` Arguments []string `json:"arguments"` Terminal Terminal `json:"-"` // standard or tty terminal Console string `json:"-"` // dev/console path } // Process wrapps an os/exec.Cmd to add more metadata type Command struct { ID string `json:"id"` Rootfs string `json:"rootfs"` // root fs of the container ReadonlyRootfs bool `json:"readonly_rootfs"` InitPath string `json:"initpath"` // dockerinit WorkingDir string `json:"working_dir"` ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver Network *Network `json:"network"` Ipc *Ipc `json:"ipc"` Pid *Pid `json:"pid"` Resources *Resources `json:"resources"` Mounts []Mount `json:"mounts"` AllowedDevices []*configs.Device `json:"allowed_devices"` AutoCreatedDevices []*configs.Device `json:"autocreated_devices"` CapAdd []string `json:"cap_add"` CapDrop []string `json:"cap_drop"` ContainerPid int `json:"container_pid"` // the pid for the process inside a container ProcessConfig ProcessConfig `json:"process_config"` // Describes the init process of the container. ProcessLabel string `json:"process_label"` MountLabel string `json:"mount_label"` LxcConfig []string `json:"lxc_config"` AppArmorProfile string `json:"apparmor_profile"` CgroupParent string `json:"cgroup_parent"` // The parent cgroup for this command. } func InitContainer(c *Command) *configs.Config { container := template.New() container.Hostname = getEnv("HOSTNAME", c.ProcessConfig.Env) container.Cgroups.Name = c.ID container.Cgroups.AllowedDevices = c.AllowedDevices container.Readonlyfs = c.ReadonlyRootfs container.Devices = c.AutoCreatedDevices container.Rootfs = c.Rootfs container.Readonlyfs = c.ReadonlyRootfs // check to see if we are running in ramdisk to disable pivot root container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != "" // Default parent cgroup is "docker". Override if required. if c.CgroupParent != "" { container.Cgroups.Parent = c.CgroupParent } return container } func getEnv(key string, env []string) string { for _, pair := range env { parts := strings.Split(pair, "=") if parts[0] == key { return parts[1] } } return "" } func SetupCgroups(container *configs.Config, c *Command) error { if c.Resources != nil { container.Cgroups.CpuShares = c.Resources.CpuShares container.Cgroups.Memory = c.Resources.Memory container.Cgroups.MemoryReservation = c.Resources.Memory container.Cgroups.MemorySwap = c.Resources.MemorySwap container.Cgroups.CpusetCpus = c.Resources.CpusetCpus } return nil } // Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo. func getNetworkInterfaceStats(interfaceName string) (*libcontainer.NetworkInterface, error) { out := &libcontainer.NetworkInterface{Name: interfaceName} // This can happen if the network runtime information is missing - possible if the // container was created by an old version of libcontainer. if interfaceName == "" { return out, nil } type netStatsPair struct { // Where to write the output. Out *uint64 // The network stats file to read. File string } // Ingress for host veth is from the container. Hence tx_bytes stat on the host veth is actually number of bytes received by the container. 
netStats := []netStatsPair{ {Out: &out.RxBytes, File: "tx_bytes"}, {Out: &out.RxPackets, File: "tx_packets"}, {Out: &out.RxErrors, File: "tx_errors"}, {Out: &out.RxDropped, File: "tx_dropped"}, {Out: &out.TxBytes, File: "rx_bytes"}, {Out: &out.TxPackets, File: "rx_packets"}, {Out: &out.TxErrors, File: "rx_errors"}, {Out: &out.TxDropped, File: "rx_dropped"}, } for _, netStat := range netStats { data, err := readSysfsNetworkStats(interfaceName, netStat.File) if err != nil { return nil, err } *(netStat.Out) = data } return out, nil } // Reads the specified statistics available under /sys/class/net//statistics func readSysfsNetworkStats(ethInterface, statsFile string) (uint64, error) { data, err := ioutil.ReadFile(filepath.Join("/sys/class/net", ethInterface, "statistics", statsFile)) if err != nil { return 0, err } return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) } func Stats(containerDir string, containerMemoryLimit int64, machineMemory int64) (*ResourceStats, error) { f, err := os.Open(filepath.Join(containerDir, "state.json")) if err != nil { return nil, err } defer f.Close() type network struct { Type string HostInterfaceName string } state := struct { CgroupPaths map[string]string `json:"cgroup_paths"` Networks []network }{} if err := json.NewDecoder(f).Decode(&state); err != nil { return nil, err } now := time.Now() mgr := fs.Manager{Paths: state.CgroupPaths} cstats, err := mgr.GetStats() if err != nil { return nil, err } stats := &libcontainer.Stats{CgroupStats: cstats} // if the container does not have any memory limit specified set the // limit to the machines memory memoryLimit := containerMemoryLimit if memoryLimit == 0 { memoryLimit = machineMemory } for _, iface := range state.Networks { switch iface.Type { case "veth": istats, err := getNetworkInterfaceStats(iface.HostInterfaceName) if err != nil { return nil, err } stats.Interfaces = append(stats.Interfaces, istats) } } return &ResourceStats{ Stats: stats, Read: now, MemoryLimit: memoryLimit, }, nil } docker-1.6.2/daemon/execdriver/pipes.go0000644000175000017500000000057512524223634017415 0ustar tianontianonpackage execdriver import ( "io" ) // Pipes is a wrapper around a containers output for // stdin, stdout, stderr type Pipes struct { Stdin io.ReadCloser Stdout, Stderr io.Writer } func NewPipes(stdin io.ReadCloser, stdout, stderr io.Writer, useStdin bool) *Pipes { p := &Pipes{ Stdout: stdout, Stderr: stderr, } if useStdin { p.Stdin = stdin } return p } docker-1.6.2/daemon/execdriver/execdrivers/0000755000175000017500000000000012524223634020262 5ustar tianontianondocker-1.6.2/daemon/execdriver/execdrivers/execdrivers.go0000644000175000017500000000141712524223634023137 0ustar tianontianonpackage execdrivers import ( "fmt" "path" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/daemon/execdriver/lxc" "github.com/docker/docker/daemon/execdriver/native" "github.com/docker/docker/pkg/sysinfo" ) func NewDriver(name, root, libPath, initPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) { switch name { case "lxc": // we want to give the lxc driver the full docker root because it needs // to access and write config and template files in /var/lib/docker/containers/* // to be backwards compatible return lxc.NewDriver(root, libPath, initPath, sysInfo.AppArmor) case "native": return native.NewDriver(path.Join(root, "execdriver", "native"), initPath) } return nil, fmt.Errorf("unknown exec driver %s", name) } 
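// Editor's sketch (hypothetical wiring, not upstream code): how a daemon
// might select an exec driver through the NewDriver factory above. The root
// path and dockerinit location are illustrative assumptions; sysinfo.New is
// the probe used elsewhere in this tree.
package execdriversexample

import (
	"log"
	"path"

	"github.com/docker/docker/daemon/execdriver/execdrivers"
	"github.com/docker/docker/pkg/sysinfo"
)

func newDefaultDriver(root string) {
	sysInfo := sysinfo.New(false) // probes AppArmor, cgroup capabilities, etc.
	driver, err := execdrivers.NewDriver(
		"native", // or "lxc"
		root,     // e.g. /var/lib/docker (assumed)
		root,     // libPath; only meaningful for the lxc driver
		path.Join(root, "init", "dockerinit"), // illustrative dockerinit path
		sysInfo,
	)
	if err != nil {
		log.Fatalf("unable to create exec driver: %s", err)
	}
	log.Printf("using exec driver %s", driver.Name())
}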
docker-1.6.2/daemon/execdriver/utils.go0000644000175000017500000000764612524223634017443 0ustar tianontianonpackage execdriver import ( "fmt" "strings" "github.com/docker/docker/utils" "github.com/syndtr/gocapability/capability" ) var capabilityList = Capabilities{ {Key: "SETPCAP", Value: capability.CAP_SETPCAP}, {Key: "SYS_MODULE", Value: capability.CAP_SYS_MODULE}, {Key: "SYS_RAWIO", Value: capability.CAP_SYS_RAWIO}, {Key: "SYS_PACCT", Value: capability.CAP_SYS_PACCT}, {Key: "SYS_ADMIN", Value: capability.CAP_SYS_ADMIN}, {Key: "SYS_NICE", Value: capability.CAP_SYS_NICE}, {Key: "SYS_RESOURCE", Value: capability.CAP_SYS_RESOURCE}, {Key: "SYS_TIME", Value: capability.CAP_SYS_TIME}, {Key: "SYS_TTY_CONFIG", Value: capability.CAP_SYS_TTY_CONFIG}, {Key: "MKNOD", Value: capability.CAP_MKNOD}, {Key: "AUDIT_WRITE", Value: capability.CAP_AUDIT_WRITE}, {Key: "AUDIT_CONTROL", Value: capability.CAP_AUDIT_CONTROL}, {Key: "MAC_OVERRIDE", Value: capability.CAP_MAC_OVERRIDE}, {Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN}, {Key: "NET_ADMIN", Value: capability.CAP_NET_ADMIN}, {Key: "SYSLOG", Value: capability.CAP_SYSLOG}, {Key: "CHOWN", Value: capability.CAP_CHOWN}, {Key: "NET_RAW", Value: capability.CAP_NET_RAW}, {Key: "DAC_OVERRIDE", Value: capability.CAP_DAC_OVERRIDE}, {Key: "FOWNER", Value: capability.CAP_FOWNER}, {Key: "DAC_READ_SEARCH", Value: capability.CAP_DAC_READ_SEARCH}, {Key: "FSETID", Value: capability.CAP_FSETID}, {Key: "KILL", Value: capability.CAP_KILL}, {Key: "SETGID", Value: capability.CAP_SETGID}, {Key: "SETUID", Value: capability.CAP_SETUID}, {Key: "LINUX_IMMUTABLE", Value: capability.CAP_LINUX_IMMUTABLE}, {Key: "NET_BIND_SERVICE", Value: capability.CAP_NET_BIND_SERVICE}, {Key: "NET_BROADCAST", Value: capability.CAP_NET_BROADCAST}, {Key: "IPC_LOCK", Value: capability.CAP_IPC_LOCK}, {Key: "IPC_OWNER", Value: capability.CAP_IPC_OWNER}, {Key: "SYS_CHROOT", Value: capability.CAP_SYS_CHROOT}, {Key: "SYS_PTRACE", Value: capability.CAP_SYS_PTRACE}, {Key: "SYS_BOOT", Value: capability.CAP_SYS_BOOT}, {Key: "LEASE", Value: capability.CAP_LEASE}, {Key: "SETFCAP", Value: capability.CAP_SETFCAP}, {Key: "WAKE_ALARM", Value: capability.CAP_WAKE_ALARM}, {Key: "BLOCK_SUSPEND", Value: capability.CAP_BLOCK_SUSPEND}, } type ( CapabilityMapping struct { Key string `json:"key,omitempty"` Value capability.Cap `json:"value,omitempty"` } Capabilities []*CapabilityMapping ) func (c *CapabilityMapping) String() string { return c.Key } func GetCapability(key string) *CapabilityMapping { for _, capp := range capabilityList { if capp.Key == key { cpy := *capp return &cpy } } return nil } func GetAllCapabilities() []string { output := make([]string, len(capabilityList)) for i, capability := range capabilityList { output[i] = capability.String() } return output } func TweakCapabilities(basics, adds, drops []string) ([]string, error) { var ( newCaps []string allCaps = GetAllCapabilities() ) // look for invalid cap in the drop list for _, cap := range drops { if strings.ToLower(cap) == "all" { continue } if !utils.StringsContainsNoCase(allCaps, cap) { return nil, fmt.Errorf("Unknown capability drop: %q", cap) } } // handle --cap-add=all if utils.StringsContainsNoCase(adds, "all") { basics = allCaps } if !utils.StringsContainsNoCase(drops, "all") { for _, cap := range basics { // skip `all` aready handled above if strings.ToLower(cap) == "all" { continue } // if we don't drop `all`, add back all the non-dropped caps if !utils.StringsContainsNoCase(drops, cap) { newCaps = append(newCaps, strings.ToUpper(cap)) } } } 
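// Editor's note: at this point newCaps holds every basic capability that
// survived the drop list (or is empty when "all" was dropped); the loop below
// layers the explicit adds on top, validating each name and de-duplicating
// case-insensitively before returning the upper-cased result.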
for _, cap := range adds { // skip `all` aready handled above if strings.ToLower(cap) == "all" { continue } if !utils.StringsContainsNoCase(allCaps, cap) { return nil, fmt.Errorf("Unknown capability to add: %q", cap) } // add cap if not already in the list if !utils.StringsContainsNoCase(newCaps, cap) { newCaps = append(newCaps, strings.ToUpper(cap)) } } return newCaps, nil } docker-1.6.2/daemon/resize.go0000644000175000017500000000231512524223634015430 0ustar tianontianonpackage daemon import ( "strconv" "github.com/docker/docker/engine" ) func (daemon *Daemon) ContainerResize(job *engine.Job) engine.Status { if len(job.Args) != 3 { return job.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name) } name := job.Args[0] height, err := strconv.Atoi(job.Args[1]) if err != nil { return job.Error(err) } width, err := strconv.Atoi(job.Args[2]) if err != nil { return job.Error(err) } container, err := daemon.Get(name) if err != nil { return job.Error(err) } if err := container.Resize(height, width); err != nil { return job.Error(err) } return engine.StatusOK } func (daemon *Daemon) ContainerExecResize(job *engine.Job) engine.Status { if len(job.Args) != 3 { return job.Errorf("Not enough arguments. Usage: %s EXEC HEIGHT WIDTH\n", job.Name) } name := job.Args[0] height, err := strconv.Atoi(job.Args[1]) if err != nil { return job.Error(err) } width, err := strconv.Atoi(job.Args[2]) if err != nil { return job.Error(err) } execConfig, err := daemon.getExecConfig(name) if err != nil { return job.Error(err) } if err := execConfig.Resize(height, width); err != nil { return job.Error(err) } return engine.StatusOK } docker-1.6.2/daemon/monitor.go0000644000175000017500000002302012524223634015612 0ustar tianontianonpackage daemon import ( "io" "os/exec" "sync" "time" log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/pkg/common" "github.com/docker/docker/runconfig" ) const defaultTimeIncrement = 100 // containerMonitor monitors the execution of a container's main process. // If a restart policy is specified for the container the monitor will ensure that the // process is restarted based on the rules of the policy. 
When the container is finally stopped // the monitor will reset and cleanup any of the container resources such as networking allocations // and the rootfs type containerMonitor struct { mux sync.Mutex // container is the container being monitored container *Container // restartPolicy is the current policy being applied to the container monitor restartPolicy runconfig.RestartPolicy // failureCount is the number of times the container has failed to // start in a row failureCount int // shouldStop signals the monitor that the next time the container exits it is // either because docker or the user asked for the container to be stopped shouldStop bool // startSignal is a channel that is closes after the container initially starts startSignal chan struct{} // stopChan is used to signal to the monitor whenever there is a wait for the // next restart so that the timeIncrement is not honored and the user is not // left waiting for nothing to happen during this time stopChan chan struct{} // timeIncrement is the amount of time to wait between restarts // this is in milliseconds timeIncrement int // lastStartTime is the time which the monitor last exec'd the container's process lastStartTime time.Time } // newContainerMonitor returns an initialized containerMonitor for the provided container // honoring the provided restart policy func newContainerMonitor(container *Container, policy runconfig.RestartPolicy) *containerMonitor { return &containerMonitor{ container: container, restartPolicy: policy, timeIncrement: defaultTimeIncrement, stopChan: make(chan struct{}), startSignal: make(chan struct{}), } } // Stop signals to the container monitor that it should stop monitoring the container // for exits the next time the process dies func (m *containerMonitor) ExitOnNext() { m.mux.Lock() // we need to protect having a double close of the channel when stop is called // twice or else we will get a panic if !m.shouldStop { m.shouldStop = true close(m.stopChan) } m.mux.Unlock() } // Close closes the container's resources such as networking allocations and // unmounts the contatiner's root filesystem func (m *containerMonitor) Close() error { // Cleanup networking and mounts m.container.cleanup() // FIXME: here is race condition between two RUN instructions in Dockerfile // because they share same runconfig and change image. 
Must be fixed // in builder/builder.go if err := m.container.toDisk(); err != nil { log.Errorf("Error dumping container %s state to disk: %s", m.container.ID, err) return err } return nil } // Start starts the containers process and monitors it according to the restart policy func (m *containerMonitor) Start() error { var ( err error exitStatus execdriver.ExitStatus // this variable indicates where we in execution flow: // before Run or after afterRun bool ) // ensure that when the monitor finally exits we release the networking and unmount the rootfs defer func() { if afterRun { m.container.Lock() m.container.setStopped(&exitStatus) defer m.container.Unlock() } m.Close() }() // reset the restart count m.container.RestartCount = -1 for { m.container.RestartCount++ if err := m.container.startLogging(); err != nil { m.resetContainer(false) return err } pipes := execdriver.NewPipes(m.container.stdin, m.container.stdout, m.container.stderr, m.container.Config.OpenStdin) m.container.LogEvent("start") m.lastStartTime = time.Now() if exitStatus, err = m.container.daemon.Run(m.container, pipes, m.callback); err != nil { // if we receive an internal error from the initial start of a container then lets // return it instead of entering the restart loop if m.container.RestartCount == 0 { m.container.ExitCode = -1 m.resetContainer(false) return err } log.Errorf("Error running container: %s", err) } // here container.Lock is already lost afterRun = true m.resetMonitor(err == nil && exitStatus.ExitCode == 0) if m.shouldRestart(exitStatus.ExitCode) { m.container.SetRestarting(&exitStatus) if exitStatus.OOMKilled { m.container.LogEvent("oom") } m.container.LogEvent("die") m.resetContainer(true) // sleep with a small time increment between each restart to help avoid issues cased by quickly // restarting the container because of some types of errors ( networking cut out, etc... ) m.waitForNextRestart() // we need to check this before reentering the loop because the waitForNextRestart could have // been terminated by a request from a user if m.shouldStop { return err } continue } if exitStatus.OOMKilled { m.container.LogEvent("oom") } m.container.LogEvent("die") m.resetContainer(true) return err } } // resetMonitor resets the stateful fields on the containerMonitor based on the // previous runs success or failure. Regardless of success, if the container had // an execution time of more than 10s then reset the timer back to the default func (m *containerMonitor) resetMonitor(successful bool) { executionTime := time.Now().Sub(m.lastStartTime).Seconds() if executionTime > 10 { m.timeIncrement = defaultTimeIncrement } else { // otherwise we need to increment the amount of time we wait before restarting // the process. 
// resetMonitor resets the stateful fields on the containerMonitor based on the // previous run's success or failure. Regardless of success, if the container had // an execution time of more than 10s then reset the timer back to the default func (m *containerMonitor) resetMonitor(successful bool) { executionTime := time.Now().Sub(m.lastStartTime).Seconds() if executionTime > 10 { m.timeIncrement = defaultTimeIncrement } else { // otherwise we need to increment the amount of time we wait before restarting // the process. We will build it up by multiplying the increment by 2 m.timeIncrement *= 2 } // if the container exited successfully, reset the failure counter if successful { m.failureCount = 0 } else { m.failureCount++ } } // waitForNextRestart waits with the current time increment before restarting the container, unless // a user or docker asks for the container to be stopped func (m *containerMonitor) waitForNextRestart() { select { case <-time.After(time.Duration(m.timeIncrement) * time.Millisecond): case <-m.stopChan: } } // shouldRestart checks the restart policy and applies the rules to determine if // the container's process should be restarted func (m *containerMonitor) shouldRestart(exitCode int) bool { m.mux.Lock() defer m.mux.Unlock() // do not restart if the user or docker has requested that this container be stopped if m.shouldStop { return false } switch m.restartPolicy.Name { case "always": return true case "on-failure": // the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count if max := m.restartPolicy.MaximumRetryCount; max != 0 && m.failureCount > max { log.Debugf("stopping restart of container %s because maximum failure count of %d has been reached", common.TruncateID(m.container.ID), max) return false } return exitCode != 0 } return false } // callback ensures that the container's state is properly updated after we // receive an ack from the execution drivers func (m *containerMonitor) callback(processConfig *execdriver.ProcessConfig, pid int) { if processConfig.Tty { // The callback is called after the process Start() // so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave // which we close here. if c, ok := processConfig.Stdout.(io.Closer); ok { c.Close() } } m.container.setRunning(pid) // signal that the process has started // close the channel only if it is not already closed select { case <-m.startSignal: default: close(m.startSignal) } if err := m.container.ToDisk(); err != nil { log.Debugf("%s", err) } } // resetContainer resets the container's IO and ensures that the command is able to be executed again // by copying the data into a new struct // if lock is true, then the container is locked during the reset func (m *containerMonitor) resetContainer(lock bool) { container := m.container if lock { container.Lock() defer container.Unlock() } if container.Config.OpenStdin { if err := container.stdin.Close(); err != nil { log.Errorf("%s: Error closing stdin: %s", container.ID, err) } } if err := container.stdout.Clean(); err != nil { log.Errorf("%s: Error closing stdout: %s", container.ID, err) } if err := container.stderr.Clean(); err != nil { log.Errorf("%s: Error closing stderr: %s", container.ID, err) } if container.command != nil && container.command.ProcessConfig.Terminal != nil { if err := container.command.ProcessConfig.Terminal.Close(); err != nil { log.Errorf("%s: Error closing terminal: %s", container.ID, err) } } // Re-create a brand new stdin pipe once the container exited if container.Config.OpenStdin { container.stdin, container.stdinPipe = io.Pipe() } if container.logDriver != nil { if container.logCopier != nil { exit := make(chan struct{}) go func() { container.logCopier.Wait() close(exit) }() select { case <-time.After(1 * time.Second): log.Warnf("Logger didn't exit in time: logs may be truncated") case <-exit: } } container.logDriver.Close() container.logCopier = nil container.logDriver = nil } c := container.command.ProcessConfig.Cmd container.command.ProcessConfig.Cmd = exec.Cmd{ Stdin: c.Stdin, Stdout: c.Stdout, Stderr: c.Stderr, Path: 
c.Path, Env: c.Env, ExtraFiles: c.ExtraFiles, Args: c.Args, Dir: c.Dir, SysProcAttr: c.SysProcAttr, } } docker-1.6.2/daemon/README.md0000644000175000017500000000031512524223634015055 0ustar tianontianonThis directory contains code pertaining to running containers and storing images Code pertaining to running containers: - execdriver - networkdriver Code pertaining to storing images: - graphdriver docker-1.6.2/daemon/list.go0000644000175000017500000001014412524223634015101 0ustar tianontianonpackage daemon import ( "errors" "fmt" "strconv" "strings" "github.com/docker/docker/graph" "github.com/docker/docker/pkg/graphdb" "github.com/docker/docker/utils" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/filters" ) // List returns an array of all containers registered in the daemon. func (daemon *Daemon) List() []*Container { return daemon.containers.List() } func (daemon *Daemon) Containers(job *engine.Job) engine.Status { var ( foundBefore bool displayed int all = job.GetenvBool("all") since = job.Getenv("since") before = job.Getenv("before") n = job.GetenvInt("limit") size = job.GetenvBool("size") psFilters filters.Args filt_exited []int ) outs := engine.NewTable("Created", 0) psFilters, err := filters.FromParam(job.Getenv("filters")) if err != nil { return job.Error(err) } if i, ok := psFilters["exited"]; ok { for _, value := range i { code, err := strconv.Atoi(value) if err != nil { return job.Error(err) } filt_exited = append(filt_exited, code) } } if i, ok := psFilters["status"]; ok { for _, value := range i { if value == "exited" { all = true } } } names := map[string][]string{} daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error { names[e.ID()] = append(names[e.ID()], p) return nil }, 1) var beforeCont, sinceCont *Container if before != "" { beforeCont, err = daemon.Get(before) if err != nil { return job.Error(err) } } if since != "" { sinceCont, err = daemon.Get(since) if err != nil { return job.Error(err) } } errLast := errors.New("last container") writeCont := func(container *Container) error { container.Lock() defer container.Unlock() if !container.Running && !all && n <= 0 && since == "" && before == "" { return nil } if !psFilters.Match("name", container.Name) { return nil } if !psFilters.Match("id", container.ID) { return nil } if !psFilters.MatchKVList("label", container.Config.Labels) { return nil } if before != "" && !foundBefore { if container.ID == beforeCont.ID { foundBefore = true } return nil } if n > 0 && displayed == n { return errLast } if since != "" { if container.ID == sinceCont.ID { return errLast } } if len(filt_exited) > 0 { should_skip := true for _, code := range filt_exited { if code == container.ExitCode && !container.Running { should_skip = false break } } if should_skip { return nil } } if !psFilters.Match("status", container.State.StateString()) { return nil } displayed++ out := &engine.Env{} out.SetJson("Id", container.ID) out.SetList("Names", names[container.ID]) img := container.Config.Image _, tag := parsers.ParseRepositoryTag(container.Config.Image) if tag == "" { img = utils.ImageReference(img, graph.DEFAULTTAG) } out.SetJson("Image", img) if len(container.Args) > 0 { args := []string{} for _, arg := range container.Args { if strings.Contains(arg, " ") { args = append(args, fmt.Sprintf("'%s'", arg)) } else { args = append(args, arg) } } argsAsString := strings.Join(args, " ") out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, argsAsString)) } else { 
out.Set("Command", fmt.Sprintf("\"%s\"", container.Path)) } out.SetInt64("Created", container.Created.Unix()) out.Set("Status", container.State.String()) str, err := container.NetworkSettings.PortMappingAPI().ToListString() if err != nil { return err } out.Set("Ports", str) if size { sizeRw, sizeRootFs := container.GetSize() out.SetInt64("SizeRw", sizeRw) out.SetInt64("SizeRootFs", sizeRootFs) } out.SetJson("Labels", container.Config.Labels) outs.Add(out) return nil } for _, container := range daemon.List() { if err := writeCont(container); err != nil { if err != errLast { return job.Error(err) } break } } outs.ReverseSort() if _, err := outs.WriteListTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK } docker-1.6.2/daemon/export.go0000644000175000017500000000134112524223634015446 0ustar tianontianonpackage daemon import ( "io" "github.com/docker/docker/engine" ) func (daemon *Daemon) ContainerExport(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("Usage: %s container_id", job.Name) } name := job.Args[0] container, err := daemon.Get(name) if err != nil { return job.Error(err) } data, err := container.Export() if err != nil { return job.Errorf("%s: %s", name, err) } defer data.Close() // Stream the entire contents of the container (basically a volatile snapshot) if _, err := io.Copy(job.Stdout, data); err != nil { return job.Errorf("%s: %s", name, err) } // FIXME: factor job-specific LogEvent to engine.Job.Run() container.LogEvent("export") return engine.StatusOK } docker-1.6.2/daemon/daemon_aufs.go0000644000175000017500000000113112524223634016403 0ustar tianontianon// +build !exclude_graphdriver_aufs package daemon import ( log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/aufs" "github.com/docker/docker/graph" ) // Given the graphdriver ad, if it is aufs, then migrate it. // If aufs driver is not built, this func is a noop. func migrateIfAufs(driver graphdriver.Driver, root string) error { if ad, ok := driver.(*aufs.Driver); ok { log.Debugf("Migrating existing containers") if err := ad.Migrate(root, graph.SetupInitLayer); err != nil { return err } } return nil } docker-1.6.2/daemon/stats.go0000644000175000017500000000623712524223634015274 0ustar tianontianonpackage daemon import ( "encoding/json" "github.com/docker/docker/api/types" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/engine" "github.com/docker/libcontainer" "github.com/docker/libcontainer/cgroups" ) func (daemon *Daemon) ContainerStats(job *engine.Job) engine.Status { updates, err := daemon.SubscribeToContainerStats(job.Args[0]) if err != nil { return job.Error(err) } enc := json.NewEncoder(job.Stdout) for v := range updates { update := v.(*execdriver.ResourceStats) ss := convertToAPITypes(update.Stats) ss.MemoryStats.Limit = uint64(update.MemoryLimit) ss.Read = update.Read ss.CpuStats.SystemUsage = update.SystemUsage if err := enc.Encode(ss); err != nil { // TODO: handle the specific broken pipe daemon.UnsubscribeToContainerStats(job.Args[0], updates) return job.Error(err) } } return engine.StatusOK } // convertToAPITypes converts the libcontainer.Stats to the api specific // structs. This is done to preserve API compatibility and versioning. 
func convertToAPITypes(ls *libcontainer.Stats) *types.Stats { s := &types.Stats{} if ls.Interfaces != nil { s.Network = types.Network{} for _, iface := range ls.Interfaces { s.Network.RxBytes += iface.RxBytes s.Network.RxPackets += iface.RxPackets s.Network.RxErrors += iface.RxErrors s.Network.RxDropped += iface.RxDropped s.Network.TxBytes += iface.TxBytes s.Network.TxPackets += iface.TxPackets s.Network.TxErrors += iface.TxErrors s.Network.TxDropped += iface.TxDropped } } cs := ls.CgroupStats if cs != nil { s.BlkioStats = types.BlkioStats{ IoServiceBytesRecursive: copyBlkioEntry(cs.BlkioStats.IoServiceBytesRecursive), IoServicedRecursive: copyBlkioEntry(cs.BlkioStats.IoServicedRecursive), IoQueuedRecursive: copyBlkioEntry(cs.BlkioStats.IoQueuedRecursive), IoServiceTimeRecursive: copyBlkioEntry(cs.BlkioStats.IoServiceTimeRecursive), IoWaitTimeRecursive: copyBlkioEntry(cs.BlkioStats.IoWaitTimeRecursive), IoMergedRecursive: copyBlkioEntry(cs.BlkioStats.IoMergedRecursive), IoTimeRecursive: copyBlkioEntry(cs.BlkioStats.IoTimeRecursive), SectorsRecursive: copyBlkioEntry(cs.BlkioStats.SectorsRecursive), } cpu := cs.CpuStats s.CpuStats = types.CpuStats{ CpuUsage: types.CpuUsage{ TotalUsage: cpu.CpuUsage.TotalUsage, PercpuUsage: cpu.CpuUsage.PercpuUsage, UsageInKernelmode: cpu.CpuUsage.UsageInKernelmode, UsageInUsermode: cpu.CpuUsage.UsageInUsermode, }, ThrottlingData: types.ThrottlingData{ Periods: cpu.ThrottlingData.Periods, ThrottledPeriods: cpu.ThrottlingData.ThrottledPeriods, ThrottledTime: cpu.ThrottlingData.ThrottledTime, }, } mem := cs.MemoryStats s.MemoryStats = types.MemoryStats{ Usage: mem.Usage, MaxUsage: mem.MaxUsage, Stats: mem.Stats, Failcnt: mem.Failcnt, } } return s } func copyBlkioEntry(entries []cgroups.BlkioStatEntry) []types.BlkioStatEntry { out := make([]types.BlkioStatEntry, len(entries)) for i, re := range entries { out[i] = types.BlkioStatEntry{ Major: re.Major, Minor: re.Minor, Op: re.Op, Value: re.Value, } } return out } docker-1.6.2/daemon/graphdriver/0000755000175000017500000000000012524223634016114 5ustar tianontianondocker-1.6.2/daemon/graphdriver/graphtest/0000755000175000017500000000000012524223634020115 5ustar tianontianondocker-1.6.2/daemon/graphdriver/graphtest/graphtest.go0000644000175000017500000001350412524223634022450 0ustar tianontianonpackage graphtest import ( "fmt" "io/ioutil" "os" "path" "syscall" "testing" "github.com/docker/docker/daemon/graphdriver" ) var ( drv *Driver ) type Driver struct { graphdriver.Driver root string refCount int } // InitLoopbacks ensures that the loopback devices are properly created within // the system running the device mapper tests. func InitLoopbacks() error { stat_t, err := getBaseLoopStats() if err != nil { return err } // create atleast 8 loopback files, ya, that is a good number for i := 0; i < 8; i++ { loopPath := fmt.Sprintf("/dev/loop%d", i) // only create new loopback files if they don't exist if _, err := os.Stat(loopPath); err != nil { if mkerr := syscall.Mknod(loopPath, uint32(stat_t.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil { return mkerr } os.Chown(loopPath, int(stat_t.Uid), int(stat_t.Gid)) } } return nil } // getBaseLoopStats inspects /dev/loop0 to collect uid,gid, and mode for the // loop0 device on the system. 
If it does not exist we assume 0,0,0660 for the // stat data func getBaseLoopStats() (*syscall.Stat_t, error) { loop0, err := os.Stat("/dev/loop0") if err != nil { if os.IsNotExist(err) { return &syscall.Stat_t{ Uid: 0, Gid: 0, Mode: 0660, }, nil } return nil, err } return loop0.Sys().(*syscall.Stat_t), nil } func newDriver(t *testing.T, name string) *Driver { root, err := ioutil.TempDir("/var/tmp", "docker-graphtest-") if err != nil { t.Fatal(err) } if err := os.MkdirAll(root, 0755); err != nil { t.Fatal(err) } d, err := graphdriver.GetDriver(name, root, nil) if err != nil { t.Logf("graphdriver: %v\n", err) if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites || err == graphdriver.ErrIncompatibleFS { t.Skipf("Driver %s not supported", name) } t.Fatal(err) } return &Driver{d, root, 1} } func cleanup(t *testing.T, d *Driver) { if err := drv.Cleanup(); err != nil { t.Fatal(err) } os.RemoveAll(d.root) } func GetDriver(t *testing.T, name string) graphdriver.Driver { if drv == nil { drv = newDriver(t, name) } else { drv.refCount++ } return drv } func PutDriver(t *testing.T) { if drv == nil { t.Skip("No driver to put!") } drv.refCount-- if drv.refCount == 0 { cleanup(t, drv) drv = nil } } func verifyFile(t *testing.T, path string, mode os.FileMode, uid, gid uint32) { fi, err := os.Stat(path) if err != nil { t.Fatal(err) } if fi.Mode()&os.ModeType != mode&os.ModeType { t.Fatalf("Expected %s type 0x%x, got 0x%x", path, mode&os.ModeType, fi.Mode()&os.ModeType) } if fi.Mode()&os.ModePerm != mode&os.ModePerm { t.Fatalf("Expected %s mode %o, got %o", path, mode&os.ModePerm, fi.Mode()&os.ModePerm) } if fi.Mode()&os.ModeSticky != mode&os.ModeSticky { t.Fatalf("Expected %s sticky 0x%x, got 0x%x", path, mode&os.ModeSticky, fi.Mode()&os.ModeSticky) } if fi.Mode()&os.ModeSetuid != mode&os.ModeSetuid { t.Fatalf("Expected %s setuid 0x%x, got 0x%x", path, mode&os.ModeSetuid, fi.Mode()&os.ModeSetuid) } if fi.Mode()&os.ModeSetgid != mode&os.ModeSetgid { t.Fatalf("Expected %s setgid 0x%x, got 0x%x", path, mode&os.ModeSetgid, fi.Mode()&os.ModeSetgid) } if stat, ok := fi.Sys().(*syscall.Stat_t); ok { if stat.Uid != uid { t.Fatalf("%s not owned by uid %d", path, uid) } if stat.Gid != gid { t.Fatalf("%s not owned by gid %d", path, gid) } } } // Creates a new image and verifies that it is empty and has the right metadata func DriverTestCreateEmpty(t *testing.T, drivername string) { driver := GetDriver(t, drivername) defer PutDriver(t) if err := driver.Create("empty", ""); err != nil { t.Fatal(err) } if !driver.Exists("empty") { t.Fatal("Newly created image doesn't exist") } dir, err := driver.Get("empty", "") if err != nil { t.Fatal(err) } verifyFile(t, dir, 0755|os.ModeDir, 0, 0) // Verify that the directory is empty fis, err := ioutil.ReadDir(dir) if err != nil { t.Fatal(err) } if len(fis) != 0 { t.Fatal("New directory not empty") } driver.Put("empty") if err := driver.Remove("empty"); err != nil { t.Fatal(err) } } func createBase(t *testing.T, driver graphdriver.Driver, name string) { // We need to be able to set any perms oldmask := syscall.Umask(0) defer syscall.Umask(oldmask) if err := driver.Create(name, ""); err != nil { t.Fatal(err) } dir, err := driver.Get(name, "") if err != nil { t.Fatal(err) } defer driver.Put(name) subdir := path.Join(dir, "a subdir") if err := os.Mkdir(subdir, 0705|os.ModeSticky); err != nil { t.Fatal(err) } if err := os.Chown(subdir, 1, 2); err != nil { t.Fatal(err) } file := path.Join(dir, "a file") if err := ioutil.WriteFile(file, []byte("Some data"), 
0222|os.ModeSetuid); err != nil { t.Fatal(err) } } func verifyBase(t *testing.T, driver graphdriver.Driver, name string) { dir, err := driver.Get(name, "") if err != nil { t.Fatal(err) } defer driver.Put(name) subdir := path.Join(dir, "a subdir") verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2) file := path.Join(dir, "a file") verifyFile(t, file, 0222|os.ModeSetuid, 0, 0) fis, err := ioutil.ReadDir(dir) if err != nil { t.Fatal(err) } if len(fis) != 2 { t.Fatal("Unexpected files in base image") } } func DriverTestCreateBase(t *testing.T, drivername string) { driver := GetDriver(t, drivername) defer PutDriver(t) createBase(t, driver, "Base") verifyBase(t, driver, "Base") if err := driver.Remove("Base"); err != nil { t.Fatal(err) } } func DriverTestCreateSnap(t *testing.T, drivername string) { driver := GetDriver(t, drivername) defer PutDriver(t) createBase(t, driver, "Base") if err := driver.Create("Snap", "Base"); err != nil { t.Fatal(err) } verifyBase(t, driver, "Snap") if err := driver.Remove("Snap"); err != nil { t.Fatal(err) } if err := driver.Remove("Base"); err != nil { t.Fatal(err) } } docker-1.6.2/daemon/graphdriver/overlay/0000755000175000017500000000000012524223634017575 5ustar tianontianondocker-1.6.2/daemon/graphdriver/overlay/copy.go0000644000175000017500000000643612524223634021107 0ustar tianontianon// +build linux package overlay import ( "fmt" "io" "os" "path/filepath" "syscall" "github.com/docker/docker/pkg/system" ) type CopyFlags int const ( CopyHardlink CopyFlags = 1 << iota ) func copyRegular(srcPath, dstPath string, mode os.FileMode) error { srcFile, err := os.Open(srcPath) if err != nil { return err } defer srcFile.Close() dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, mode) if err != nil { return err } defer dstFile.Close() _, err = io.Copy(dstFile, srcFile) return err } func copyXattr(srcPath, dstPath, attr string) error { data, err := system.Lgetxattr(srcPath, attr) if err != nil { return err } if data != nil { if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil { return err } } return nil } func copyDir(srcDir, dstDir string, flags CopyFlags) error { err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { if err != nil { return err } // Rebase path relPath, err := filepath.Rel(srcDir, srcPath) if err != nil { return err } dstPath := filepath.Join(dstDir, relPath) if err != nil { return err } stat, ok := f.Sys().(*syscall.Stat_t) if !ok { return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath) } switch f.Mode() & os.ModeType { case 0: // Regular file if flags&CopyHardlink != 0 { if err := os.Link(srcPath, dstPath); err != nil { return err } } else { if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil { return err } } case os.ModeDir: if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) { return err } case os.ModeSymlink: link, err := os.Readlink(srcPath) if err != nil { return err } if err := os.Symlink(link, dstPath); err != nil { return err } case os.ModeNamedPipe: fallthrough case os.ModeSocket: if err := syscall.Mkfifo(dstPath, stat.Mode); err != nil { return err } case os.ModeDevice: if err := syscall.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil { return err } default: return fmt.Errorf("Unknown file type for %s\n", srcPath) } if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { return err } if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil { return err } // We need to copy this attribute if it 
appears in an overlay upper layer, as // this function is used to copy those. It is set by overlay if a directory // is removed and then re-created and should not inherit anything from the // same dir in the lower dir. if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil { return err } isSymlink := f.Mode()&os.ModeSymlink != 0 // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if !isSymlink { if err := os.Chmod(dstPath, f.Mode()); err != nil { return err } } ts := []syscall.Timespec{stat.Atim, stat.Mtim} // syscall.UtimesNano doesn't support a NOFOLLOW flag atm, and if !isSymlink { if err := system.UtimesNano(dstPath, ts); err != nil { return err } } else { if err := system.LUtimesNano(dstPath, ts); err != nil { return err } } return nil }) return err } docker-1.6.2/daemon/graphdriver/overlay/overlay_test.go0000644000175000017500000000123512524223634022645 0ustar tianontianonpackage overlay import ( "github.com/docker/docker/daemon/graphdriver/graphtest" "testing" ) // This avoids creating a new driver for each test if all tests are run // Make sure to put new tests between TestOverlaySetup and TestOverlayTeardown func TestOverlaySetup(t *testing.T) { graphtest.GetDriver(t, "overlay") } func TestOverlayCreateEmpty(t *testing.T) { graphtest.DriverTestCreateEmpty(t, "overlay") } func TestOverlayCreateBase(t *testing.T) { graphtest.DriverTestCreateBase(t, "overlay") } func TestOverlayCreateSnap(t *testing.T) { graphtest.DriverTestCreateSnap(t, "overlay") } func TestOverlayTeardown(t *testing.T) { graphtest.PutDriver(t) } docker-1.6.2/daemon/graphdriver/overlay/overlay.go0000644000175000017500000002332112524223634021606 0ustar tianontianon// +build linux package overlay import ( "bufio" "fmt" "io/ioutil" "os" "os/exec" "path" "sync" "syscall" log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/libcontainer/label" ) // This is a small wrapper over the NaiveDiffWriter that lets us have a custom // implementation of ApplyDiff() var ( ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff") ) type ApplyDiffProtoDriver interface { graphdriver.ProtoDriver ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error) } type naiveDiffDriverWithApply struct { graphdriver.Driver applyDiff ApplyDiffProtoDriver } func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver) graphdriver.Driver { return &naiveDiffDriverWithApply{ Driver: graphdriver.NaiveDiffDriver(driver), applyDiff: driver, } } func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.ArchiveReader) (int64, error) { b, err := d.applyDiff.ApplyDiff(id, parent, diff) if err == ErrApplyDiffFallback { return d.Driver.ApplyDiff(id, parent, diff) } return b, err } // This backend uses the overlay union filesystem for containers // plus hard link file sharing for images. // Each container/image can have a "root" subdirectory which is a plain // filesystem hierarchy, or they can use overlay. // If they use overlay there is a "upper" directory and a "lower-id" // file, as well as "merged" and "work" directories. The "upper" // directory has the upper layer of the overlay, and "lower-id" contains // the id of the parent whose "root" directory shall be used as the lower // layer in the overlay. 
The overlay itself is mounted in the "merged" // directory, and the "work" dir is needed for overlay to work. // When an overlay layer is created there are two cases: either the // parent has a "root" dir, in which case we start out with an empty "upper" // directory overlaid on the parent's root. This is typically the // case with the init layer of a container which is based on an image. // If there is no "root" in the parent, we inherit the lower-id from // the parent and start by making a copy of the parent's "upper" dir. // This is typically the case for a container layer which copies // its parent -init upper layer. // Additionally we also have a custom implementation of ApplyLayer // which makes a recursive copy of the parent "root" layer using // hardlinks to share file data, and then applies the layer on top // of that. This means all child images share file (but not directory) // data with the parent. type ActiveMount struct { count int path string mounted bool } type Driver struct { home string sync.Mutex // Protects concurrent modification to active active map[string]*ActiveMount } var backingFs = "" func init() { graphdriver.Register("overlay", Init) } func Init(home string, options []string) (graphdriver.Driver, error) { if err := supportsOverlay(); err != nil { return nil, graphdriver.ErrNotSupported } fsMagic, err := graphdriver.GetFSMagic(home) if err != nil { return nil, err } if fsName, ok := graphdriver.FsNames[fsMagic]; ok { backingFs = fsName } // check if they are running over btrfs or aufs switch fsMagic { case graphdriver.FsMagicBtrfs: log.Error("'overlay' is not supported over btrfs.") return nil, graphdriver.ErrIncompatibleFS case graphdriver.FsMagicAufs: log.Error("'overlay' is not supported over aufs.") return nil, graphdriver.ErrIncompatibleFS case graphdriver.FsMagicZfs: log.Error("'overlay' is not supported over zfs.") return nil, graphdriver.ErrIncompatibleFS } // Create the driver home dir if err := os.MkdirAll(home, 0755); err != nil && !os.IsExist(err) { return nil, err } d := &Driver{ home: home, active: make(map[string]*ActiveMount), } return NaiveDiffDriverWithApply(d), nil } func supportsOverlay() error { // We can try to modprobe overlay first before looking at // proc/filesystems for when overlay is supported exec.Command("modprobe", "overlay").Run() f, err := os.Open("/proc/filesystems") if err != nil { return err } defer f.Close() s := bufio.NewScanner(f) for s.Scan() { if s.Text() == "nodev\toverlay" { return nil } } log.Error("'overlay' not found as a supported filesystem on this host. 
Please ensure kernel is new enough and has overlay support loaded.") return graphdriver.ErrNotSupported } func (d *Driver) String() string { return "overlay" } func (d *Driver) Status() [][2]string { return [][2]string{ {"Backing Filesystem", backingFs}, } } func (d *Driver) Cleanup() error { return nil } func (d *Driver) Create(id string, parent string) (retErr error) { dir := d.dir(id) if err := os.MkdirAll(path.Dir(dir), 0700); err != nil { return err } if err := os.Mkdir(dir, 0700); err != nil { return err } defer func() { // Clean up on failure if retErr != nil { os.RemoveAll(dir) } }() // Toplevel images are just a "root" dir if parent == "" { if err := os.Mkdir(path.Join(dir, "root"), 0755); err != nil { return err } return nil } parentDir := d.dir(parent) // Ensure parent exists if _, err := os.Lstat(parentDir); err != nil { return err } // If parent has a root, just do a overlay to it parentRoot := path.Join(parentDir, "root") if s, err := os.Lstat(parentRoot); err == nil { if err := os.Mkdir(path.Join(dir, "upper"), s.Mode()); err != nil { return err } if err := os.Mkdir(path.Join(dir, "work"), 0700); err != nil { return err } if err := os.Mkdir(path.Join(dir, "merged"), 0700); err != nil { return err } if err := ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666); err != nil { return err } return nil } // Otherwise, copy the upper and the lower-id from the parent lowerId, err := ioutil.ReadFile(path.Join(parentDir, "lower-id")) if err != nil { return err } if err := ioutil.WriteFile(path.Join(dir, "lower-id"), lowerId, 0666); err != nil { return err } parentUpperDir := path.Join(parentDir, "upper") s, err := os.Lstat(parentUpperDir) if err != nil { return err } upperDir := path.Join(dir, "upper") if err := os.Mkdir(upperDir, s.Mode()); err != nil { return err } if err := os.Mkdir(path.Join(dir, "work"), 0700); err != nil { return err } if err := os.Mkdir(path.Join(dir, "merged"), 0700); err != nil { return err } return copyDir(parentUpperDir, upperDir, 0) } func (d *Driver) dir(id string) string { return path.Join(d.home, id) } func (d *Driver) Remove(id string) error { dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return err } return os.RemoveAll(dir) } func (d *Driver) Get(id string, mountLabel string) (string, error) { // Protect the d.active from concurrent access d.Lock() defer d.Unlock() mount := d.active[id] if mount != nil { mount.count++ return mount.path, nil } else { mount = &ActiveMount{count: 1} } dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return "", err } // If id has a root, just return it rootDir := path.Join(dir, "root") if _, err := os.Stat(rootDir); err == nil { mount.path = rootDir d.active[id] = mount return mount.path, nil } lowerId, err := ioutil.ReadFile(path.Join(dir, "lower-id")) if err != nil { return "", err } lowerDir := path.Join(d.dir(string(lowerId)), "root") upperDir := path.Join(dir, "upper") workDir := path.Join(dir, "work") mergedDir := path.Join(dir, "merged") opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir) if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil { return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) } mount.path = mergedDir mount.mounted = true d.active[id] = mount return mount.path, nil } func (d *Driver) Put(id string) error { // Protect the d.active from concurrent access d.Lock() defer d.Unlock() mount := d.active[id] if mount == nil { log.Debugf("Put on a 
non-mounted device %s", id) return nil } mount.count-- if mount.count > 0 { return nil } defer delete(d.active, id) if mount.mounted { err := syscall.Unmount(mount.path, 0) if err != nil { log.Debugf("Failed to unmount %s overlay: %v", id, err) } return err } return nil } func (d *Driver) ApplyDiff(id string, parent string, diff archive.ArchiveReader) (size int64, err error) { dir := d.dir(id) if parent == "" { return 0, ErrApplyDiffFallback } parentRootDir := path.Join(d.dir(parent), "root") if _, err := os.Stat(parentRootDir); err != nil { return 0, ErrApplyDiffFallback } // We now know there is a parent, and it has a "root" directory containing // the full root filesystem. We can just hardlink it and apply the // layer. This relies on two things: // 1) ApplyDiff is only run once on a clean (no writes to upper layer) container // 2) ApplyDiff doesn't do any in-place writes to files (would break hardlinks) // These are all currently true and are not expected to break tmpRootDir, err := ioutil.TempDir(dir, "tmproot") if err != nil { return 0, err } defer func() { if err != nil { os.RemoveAll(tmpRootDir) } else { os.RemoveAll(path.Join(dir, "upper")) os.RemoveAll(path.Join(dir, "work")) os.RemoveAll(path.Join(dir, "merged")) os.RemoveAll(path.Join(dir, "lower-id")) } }() if err = copyDir(parentRootDir, tmpRootDir, CopyHardlink); err != nil { return 0, err } if size, err = chrootarchive.ApplyLayer(tmpRootDir, diff); err != nil { return 0, err } rootDir := path.Join(dir, "root") if err := os.Rename(tmpRootDir, rootDir); err != nil { return 0, err } return } func (d *Driver) Exists(id string) bool { _, err := os.Stat(d.dir(id)) return err == nil } docker-1.6.2/daemon/graphdriver/vfs/0000755000175000017500000000000012524223634016712 5ustar tianontianondocker-1.6.2/daemon/graphdriver/vfs/driver.go0000644000175000017500000000352112524223634020535 0ustar tianontianonpackage vfs import ( "fmt" "os" "path" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/libcontainer/label" ) func init() { graphdriver.Register("vfs", Init) } func Init(home string, options []string) (graphdriver.Driver, error) { d := &Driver{ home: home, } return graphdriver.NaiveDiffDriver(d), nil } type Driver struct { home string } func (d *Driver) String() string { return "vfs" } func (d *Driver) Status() [][2]string { return nil } func (d *Driver) Cleanup() error { return nil } func (d *Driver) Create(id, parent string) error { dir := d.dir(id) if err := os.MkdirAll(path.Dir(dir), 0700); err != nil { return err } if err := os.Mkdir(dir, 0755); err != nil { return err } opts := []string{"level:s0"} if _, mountLabel, err := label.InitLabels(opts); err == nil { label.SetFileLabel(dir, mountLabel) } if parent == "" { return nil } parentDir, err := d.Get(parent, "") if err != nil { return fmt.Errorf("%s: %s", parent, err) } if err := chrootarchive.CopyWithTar(parentDir, dir); err != nil { return err } return nil } func (d *Driver) dir(id string) string { return path.Join(d.home, "dir", path.Base(id)) } func (d *Driver) Remove(id string) error { if _, err := os.Stat(d.dir(id)); err != nil { return err } return os.RemoveAll(d.dir(id)) } func (d *Driver) Get(id, mountLabel string) (string, error) { dir := d.dir(id) if st, err := os.Stat(dir); err != nil { return "", err } else if !st.IsDir() { return "", fmt.Errorf("%s: not a directory", dir) } return dir, nil } func (d *Driver) Put(id string) error { // The vfs driver has no runtime resources (e.g. 
mounts) // to clean up, so we don't need anything here return nil } func (d *Driver) Exists(id string) bool { _, err := os.Stat(d.dir(id)) return err == nil } docker-1.6.2/daemon/graphdriver/vfs/vfs_test.go0000644000175000017500000000126612524223634021103 0ustar tianontianonpackage vfs import ( "testing" "github.com/docker/docker/daemon/graphdriver/graphtest" "github.com/docker/docker/pkg/reexec" ) func init() { reexec.Init() } // This avoids creating a new driver for each test if all tests are run // Make sure to put new tests between TestVfsSetup and TestVfsTeardown func TestVfsSetup(t *testing.T) { graphtest.GetDriver(t, "vfs") } func TestVfsCreateEmpty(t *testing.T) { graphtest.DriverTestCreateEmpty(t, "vfs") } func TestVfsCreateBase(t *testing.T) { graphtest.DriverTestCreateBase(t, "vfs") } func TestVfsCreateSnap(t *testing.T) { graphtest.DriverTestCreateSnap(t, "vfs") } func TestVfsTeardown(t *testing.T) { graphtest.PutDriver(t) } docker-1.6.2/daemon/graphdriver/driver.go0000644000175000017500000001360112524223634017737 0ustar tianontianonpackage graphdriver import ( "errors" "fmt" "os" "path" "strings" log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/archive" ) type FsMagic uint32 const ( FsMagicBtrfs = FsMagic(0x9123683E) FsMagicAufs = FsMagic(0x61756673) FsMagicExtfs = FsMagic(0x0000EF53) FsMagicCramfs = FsMagic(0x28cd3d45) FsMagicRamFs = FsMagic(0x858458f6) FsMagicTmpFs = FsMagic(0x01021994) FsMagicSquashFs = FsMagic(0x73717368) FsMagicNfsFs = FsMagic(0x00006969) FsMagicReiserFs = FsMagic(0x52654973) FsMagicSmbFs = FsMagic(0x0000517B) FsMagicJffs2Fs = FsMagic(0x000072b6) FsMagicZfs = FsMagic(0x2fc12fc1) FsMagicXfs = FsMagic(0x58465342) FsMagicUnsupported = FsMagic(0x00000000) ) var ( DefaultDriver string // All registred drivers drivers map[string]InitFunc // Slice of drivers that should be used in an order priority = []string{ "aufs", "btrfs", "devicemapper", "overlay", "vfs", } ErrNotSupported = errors.New("driver not supported") ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)") ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver") FsNames = map[FsMagic]string{ FsMagicAufs: "aufs", FsMagicBtrfs: "btrfs", FsMagicExtfs: "extfs", FsMagicCramfs: "cramfs", FsMagicRamFs: "ramfs", FsMagicTmpFs: "tmpfs", FsMagicSquashFs: "squashfs", FsMagicNfsFs: "nfs", FsMagicReiserFs: "reiserfs", FsMagicSmbFs: "smb", FsMagicJffs2Fs: "jffs2", FsMagicZfs: "zfs", FsMagicXfs: "xfs", FsMagicUnsupported: "unsupported", } ) type InitFunc func(root string, options []string) (Driver, error) // ProtoDriver defines the basic capabilities of a driver. // This interface exists solely to be a minimum set of methods // for client code which choose not to implement the entire Driver // interface and use the NaiveDiffDriver wrapper constructor. // // Use of ProtoDriver directly by client code is not recommended. type ProtoDriver interface { // String returns a string representation of this driver. String() string // Create creates a new, empty, filesystem layer with the // specified id and parent. Parent may be "". Create(id, parent string) error // Remove attempts to remove the filesystem layer with this id. Remove(id string) error // Get returns the mountpoint for the layered filesystem referred // to by this id. You can optionally specify a mountLabel or "". // Returns the absolute path to the mounted layered filesystem. 
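//
// Callers are expected to release the mount again with Put once they are
// done with it, as the diff code later in this package does (a sketch):
//
//	dir, err := driver.Get(id, "")
//	if err != nil {
//		return err
//	}
//	defer driver.Put(id) // release the reference when finished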
Get(id, mountLabel string) (dir string, err error) // Put releases the system resources for the specified id, // e.g, unmounting layered filesystem. Put(id string) error // Exists returns whether a filesystem layer with the specified // ID exists on this driver. Exists(id string) bool // Status returns a set of key-value pairs which give low // level diagnostic status about this driver. Status() [][2]string // Cleanup performs necessary tasks to release resources // held by the driver, e.g., unmounting all layered filesystems // known to this driver. Cleanup() error } // Driver is the interface for layered/snapshot file system drivers. type Driver interface { ProtoDriver // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". Diff(id, parent string) (archive.Archive, error) // Changes produces a list of changes between the specified layer // and its parent layer. If parent is "", then all changes will be ADD changes. Changes(id, parent string) ([]archive.Change, error) // ApplyDiff extracts the changeset from the given diff into the // layer with the specified id and parent, returning the size of the // new layer in bytes. ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error) // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. DiffSize(id, parent string) (size int64, err error) } func init() { drivers = make(map[string]InitFunc) } func Register(name string, initFunc InitFunc) error { if _, exists := drivers[name]; exists { return fmt.Errorf("Name already registered %s", name) } drivers[name] = initFunc return nil } func GetDriver(name, home string, options []string) (Driver, error) { if initFunc, exists := drivers[name]; exists { return initFunc(path.Join(home, name), options) } return nil, ErrNotSupported } func New(root string, options []string) (driver Driver, err error) { for _, name := range []string{os.Getenv("DOCKER_DRIVER"), DefaultDriver} { if name != "" { return GetDriver(name, root, options) } } // Check for priority drivers first for _, name := range priority { driver, err = GetDriver(name, root, options) if err != nil { if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS { continue } return nil, err } checkPriorDriver(name, root) return driver, nil } // Check all registered drivers if no priority driver is found for name, initFunc := range drivers { if driver, err = initFunc(root, options); err != nil { if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS { continue } return nil, err } checkPriorDriver(name, root) return driver, nil } return nil, fmt.Errorf("No supported storage backend found") } func checkPriorDriver(name, root string) { priorDrivers := []string{} for prior := range drivers { if prior != name && prior != "vfs" { if _, err := os.Stat(path.Join(root, prior)); err == nil { priorDrivers = append(priorDrivers, prior) } } } if len(priorDrivers) > 0 { log.Warnf("Graphdriver %s selected. 
Your graphdriver directory %s already contains data managed by other graphdrivers: %s", name, root, strings.Join(priorDrivers, ",")) } } docker-1.6.2/daemon/graphdriver/driver_linux.go0000644000175000017500000000037012524223634021155 0ustar tianontianonpackage graphdriver import ( "path" "syscall" ) func GetFSMagic(rootpath string) (FsMagic, error) { var buf syscall.Statfs_t if err := syscall.Statfs(path.Dir(rootpath), &buf); err != nil { return 0, err } return FsMagic(buf.Type), nil } docker-1.6.2/daemon/graphdriver/fsdiff.go0000644000175000017500000000740412524223634017711 0ustar tianontianon// +build daemon package graphdriver import ( "time" log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/ioutils" ) // naiveDiffDriver takes a ProtoDriver and adds the // capability of the Diffing methods which it may or may not // support on its own. See the comment on the exported // NaiveDiffDriver function below. // Notably, the AUFS driver doesn't need to be wrapped like this. type naiveDiffDriver struct { ProtoDriver } // NaiveDiffDriver returns a fully functional driver that wraps the // given ProtoDriver and adds the capability of the following methods which // it may or may not support on its own: // Diff(id, parent string) (archive.Archive, error) // Changes(id, parent string) ([]archive.Change, error) // ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error) // DiffSize(id, parent string) (size int64, err error) func NaiveDiffDriver(driver ProtoDriver) Driver { return &naiveDiffDriver{ProtoDriver: driver} } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". func (gdw *naiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err error) { driver := gdw.ProtoDriver layerFs, err := driver.Get(id, "") if err != nil { return nil, err } defer func() { if err != nil { driver.Put(id) } }() if parent == "" { archive, err := archive.Tar(layerFs, archive.Uncompressed) if err != nil { return nil, err } return ioutils.NewReadCloserWrapper(archive, func() error { err := archive.Close() driver.Put(id) return err }), nil } parentFs, err := driver.Get(parent, "") if err != nil { return nil, err } defer driver.Put(parent) changes, err := archive.ChangesDirs(layerFs, parentFs) if err != nil { return nil, err } archive, err := archive.ExportChanges(layerFs, changes) if err != nil { return nil, err } return ioutils.NewReadCloserWrapper(archive, func() error { err := archive.Close() driver.Put(id) return err }), nil } // Changes produces a list of changes between the specified layer // and its parent layer. If parent is "", then all changes will be ADD changes. func (gdw *naiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { driver := gdw.ProtoDriver layerFs, err := driver.Get(id, "") if err != nil { return nil, err } defer driver.Put(id) parentFs := "" if parent != "" { parentFs, err = driver.Get(parent, "") if err != nil { return nil, err } defer driver.Put(parent) } return archive.ChangesDirs(layerFs, parentFs) } // ApplyDiff extracts the changeset from the given diff into the // layer with the specified id and parent, returning the size of the // new layer in bytes. func (gdw *naiveDiffDriver) ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error) { driver := gdw.ProtoDriver // Mount the root filesystem so we can apply the diff/layer. 
layerFs, err := driver.Get(id, "") if err != nil { return } defer driver.Put(id) start := time.Now().UTC() log.Debugf("Start untar layer") if size, err = chrootarchive.ApplyLayer(layerFs, diff); err != nil { return } log.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) return } // DiffSize calculates the changes between the specified layer // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (gdw *naiveDiffDriver) DiffSize(id, parent string) (size int64, err error) { driver := gdw.ProtoDriver changes, err := gdw.Changes(id, parent) if err != nil { return } layerFs, err := driver.Get(id, "") if err != nil { return } defer driver.Put(id) return archive.ChangesSize(layerFs, changes), nil } docker-1.6.2/daemon/graphdriver/btrfs/0000755000175000017500000000000012524223634017234 5ustar tianontianondocker-1.6.2/daemon/graphdriver/btrfs/btrfs_test.go0000644000175000017500000000120512524223634021740 0ustar tianontianonpackage btrfs import ( "github.com/docker/docker/daemon/graphdriver/graphtest" "testing" ) // This avoids creating a new driver for each test if all tests are run // Make sure to put new tests between TestBtrfsSetup and TestBtrfsTeardown func TestBtrfsSetup(t *testing.T) { graphtest.GetDriver(t, "btrfs") } func TestBtrfsCreateEmpty(t *testing.T) { graphtest.DriverTestCreateEmpty(t, "btrfs") } func TestBtrfsCreateBase(t *testing.T) { graphtest.DriverTestCreateBase(t, "btrfs") } func TestBtrfsCreateSnap(t *testing.T) { graphtest.DriverTestCreateSnap(t, "btrfs") } func TestBtrfsTeardown(t *testing.T) { graphtest.PutDriver(t) } docker-1.6.2/daemon/graphdriver/btrfs/btrfs.go0000644000175000017500000001106112524223634020702 0ustar tianontianon// +build linux package btrfs /* #include #include #include */ import "C" import ( "fmt" "os" "path" "syscall" "unsafe" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/mount" ) func init() { graphdriver.Register("btrfs", Init) } func Init(home string, options []string) (graphdriver.Driver, error) { rootdir := path.Dir(home) var buf syscall.Statfs_t if err := syscall.Statfs(rootdir, &buf); err != nil { return nil, err } if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicBtrfs { return nil, graphdriver.ErrPrerequisites } if err := os.MkdirAll(home, 0700); err != nil { return nil, err } if err := mount.MakePrivate(home); err != nil { return nil, err } driver := &Driver{ home: home, } return graphdriver.NaiveDiffDriver(driver), nil } type Driver struct { home string } func (d *Driver) String() string { return "btrfs" } func (d *Driver) Status() [][2]string { status := [][2]string{} if bv := BtrfsBuildVersion(); bv != "-" { status = append(status, [2]string{"Build Version", bv}) } if lv := BtrfsLibVersion(); lv != -1 { status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)}) } return status } func (d *Driver) Cleanup() error { return mount.Unmount(d.home) } func free(p *C.char) { C.free(unsafe.Pointer(p)) } func openDir(path string) (*C.DIR, error) { Cpath := C.CString(path) defer free(Cpath) dir := C.opendir(Cpath) if dir == nil { return nil, fmt.Errorf("Can't open dir") } return dir, nil } func closeDir(dir *C.DIR) { if dir != nil { C.closedir(dir) } } func getDirFd(dir *C.DIR) uintptr { return uintptr(C.dirfd(dir)) } func subvolCreate(path, name string) error { dir, err := openDir(path) if err != nil { return err } defer closeDir(dir) var args C.struct_btrfs_ioctl_vol_args for i, c := range []byte(name) { args.name[i] = 
C.char(c) } _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) } return nil } func subvolSnapshot(src, dest, name string) error { srcDir, err := openDir(src) if err != nil { return err } defer closeDir(srcDir) destDir, err := openDir(dest) if err != nil { return err } defer closeDir(destDir) var args C.struct_btrfs_ioctl_vol_args_v2 args.fd = C.__s64(getDirFd(srcDir)) for i, c := range []byte(name) { args.name[i] = C.char(c) } _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) } return nil } func subvolDelete(path, name string) error { dir, err := openDir(path) if err != nil { return err } defer closeDir(dir) var args C.struct_btrfs_ioctl_vol_args for i, c := range []byte(name) { args.name[i] = C.char(c) } _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to destroy btrfs snapshot: %v", errno.Error()) } return nil } func (d *Driver) subvolumesDir() string { return path.Join(d.home, "subvolumes") } func (d *Driver) subvolumesDirId(id string) string { return path.Join(d.subvolumesDir(), id) } func (d *Driver) Create(id string, parent string) error { subvolumes := path.Join(d.home, "subvolumes") if err := os.MkdirAll(subvolumes, 0700); err != nil { return err } if parent == "" { if err := subvolCreate(subvolumes, id); err != nil { return err } } else { parentDir, err := d.Get(parent, "") if err != nil { return err } if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { return err } } return nil } func (d *Driver) Remove(id string) error { dir := d.subvolumesDirId(id) if _, err := os.Stat(dir); err != nil { return err } if err := subvolDelete(d.subvolumesDir(), id); err != nil { return err } return os.RemoveAll(dir) } func (d *Driver) Get(id, mountLabel string) (string, error) { dir := d.subvolumesDirId(id) st, err := os.Stat(dir) if err != nil { return "", err } if !st.IsDir() { return "", fmt.Errorf("%s: not a directory", dir) } return dir, nil } func (d *Driver) Put(id string) error { // Get() creates no runtime resources (like e.g. mounts) // so this doesn't need to do anything. 
return nil } func (d *Driver) Exists(id string) bool { dir := d.subvolumesDirId(id) _, err := os.Stat(dir) return err == nil } docker-1.6.2/daemon/graphdriver/btrfs/dummy_unsupported.go0000644000175000017500000000004512524223634023365 0ustar tianontianon// +build !linux !cgo package btrfs docker-1.6.2/daemon/graphdriver/btrfs/version.go0000644000175000017500000000074312524223634021254 0ustar tianontianon// +build linux,!btrfs_noversion package btrfs /* #include // around version 3.16, they did not define lib version yet #ifndef BTRFS_LIB_VERSION #define BTRFS_LIB_VERSION -1 #endif // upstream had removed it, but now it will be coming back #ifndef BTRFS_BUILD_VERSION #define BTRFS_BUILD_VERSION "-" #endif */ import "C" func BtrfsBuildVersion() string { return string(C.BTRFS_BUILD_VERSION) } func BtrfsLibVersion() int { return int(C.BTRFS_LIB_VERSION) } docker-1.6.2/daemon/graphdriver/btrfs/version_test.go0000644000175000017500000000030712524223634022307 0ustar tianontianon// +build linux,!btrfs_noversion package btrfs import ( "testing" ) func TestLibVersion(t *testing.T) { if BtrfsLibVersion() <= 0 { t.Errorf("expected output from btrfs lib version > 0") } } docker-1.6.2/daemon/graphdriver/btrfs/version_none.go0000644000175000017500000000037112524223634022270 0ustar tianontianon// +build linux,btrfs_noversion package btrfs // TODO(vbatts) remove this work-around once supported linux distros are on // btrfs utililties of >= 3.16.1 func BtrfsBuildVersion() string { return "-" } func BtrfsLibVersion() int { return -1 } docker-1.6.2/daemon/graphdriver/devmapper/0000755000175000017500000000000012524223634020077 5ustar tianontianondocker-1.6.2/daemon/graphdriver/devmapper/mount.go0000644000175000017500000000310212524223634021564 0ustar tianontianon// +build linux package devmapper import ( "bytes" "fmt" "os" "path/filepath" "syscall" ) // FIXME: this is copy-pasted from the aufs driver. // It should be moved into the core. 
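//
// Mounted reports whether something is mounted at mountpoint by comparing
// device numbers: a directory and its parent end up with different st_dev
// values exactly when a filesystem is mounted at that directory. (Note
// that this heuristic can miss bind mounts backed by the same underlying
// device.)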
func Mounted(mountpoint string) (bool, error) { mntpoint, err := os.Stat(mountpoint) if err != nil { if os.IsNotExist(err) { return false, nil } return false, err } parent, err := os.Stat(filepath.Join(mountpoint, "..")) if err != nil { return false, err } mntpointSt := mntpoint.Sys().(*syscall.Stat_t) parentSt := parent.Sys().(*syscall.Stat_t) return mntpointSt.Dev != parentSt.Dev, nil } type probeData struct { fsName string magic string offset uint64 } func ProbeFsType(device string) (string, error) { probes := []probeData{ {"btrfs", "_BHRfS_M", 0x10040}, {"ext4", "\123\357", 0x438}, {"xfs", "XFSB", 0}, } maxLen := uint64(0) for _, p := range probes { l := p.offset + uint64(len(p.magic)) if l > maxLen { maxLen = l } } file, err := os.Open(device) if err != nil { return "", err } defer file.Close() buffer := make([]byte, maxLen) l, err := file.Read(buffer) if err != nil { return "", err } if uint64(l) != maxLen { return "", fmt.Errorf("unable to detect filesystem type of %s, short read", device) } for _, p := range probes { if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) { return p.fsName, nil } } return "", fmt.Errorf("Unknown filesystem type on %s", device) } func joinMountOptions(a, b string) string { if a == "" { return b } if b == "" { return a } return a + "," + b } docker-1.6.2/daemon/graphdriver/devmapper/README.md0000644000175000017500000002023612524223634021361 0ustar tianontianon## devicemapper - a storage backend based on Device Mapper ### Theory of operation The device mapper graphdriver uses the device mapper thin provisioning module (dm-thinp) to implement CoW snapshots. For each devicemapper graph location (typically `/var/lib/docker/devicemapper`, $graph below) a thin pool is created based on two block devices, one for data and one for metadata. By default these block devices are created automatically by using loopback mounts of automatically created sparse files. The default loopback files used are `$graph/devicemapper/data` and `$graph/devicemapper/metadata`. Additional metadata required to map from docker entities to the corresponding devicemapper volumes is stored in the `$graph/devicemapper/json` file (encoded as Json). In order to support multiple devicemapper graphs on a system, the thin pool will be named something like: `docker-0:33-19478248-pool`, where the `0:33` part is the minor/major device nr and `19478248` is the inode number of the $graph directory. On the thin pool, docker automatically creates a base thin device, called something like `docker-0:33-19478248-base` of a fixed size. This is automatically formatted with an empty filesystem on creation. This device is the base of all docker images and containers. All base images are snapshots of this device and those images are then in turn used as snapshots for other images and eventually containers. ### Information on `docker info` As of docker-1.4.1, `docker info` when using the `devicemapper` storage driver will display something like: $ sudo docker info [...] Storage Driver: devicemapper Pool Name: docker-253:1-17538953-pool Pool Blocksize: 65.54 kB Data file: /dev/loop4 Metadata file: /dev/loop4 Data Space Used: 2.536 GB Data Space Total: 107.4 GB Data Space Available: 104.8 GB Metadata Space Used: 7.93 MB Metadata Space Total: 2.147 GB Metadata Space Available: 2.14 GB Udev Sync Supported: true Data loop file: /home/docker/devicemapper/devicemapper/data Metadata loop file: /home/docker/devicemapper/devicemapper/metadata Library Version: 1.02.82-git (2013-10-04) [...] 
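The pool name shown above follows the `docker-<device nr>-<inode>-pool` scheme described earlier. A minimal Go sketch of how such a name can be derived from the graph directory (illustrative only, not the daemon's exact code; the `major`/`minor` helpers use the common Linux device-number layout, which may differ on other kernels):

    package main

    import (
        "fmt"
        "syscall"
    )

    func major(dev uint64) uint64 { return (dev >> 8) & 0xfff }
    func minor(dev uint64) uint64 { return (dev & 0xff) | ((dev >> 12) & 0xfff00) }

    // poolName derives a thin pool name from the device number and inode
    // of the devicemapper graph directory.
    func poolName(graphDir string) (string, error) {
        var st syscall.Stat_t
        if err := syscall.Stat(graphDir, &st); err != nil {
            return "", err
        }
        // e.g. "docker-253:1-17538953-pool" for the example output above
        return fmt.Sprintf("docker-%d:%d-%d-pool", major(st.Dev), minor(st.Dev), st.Ino), nil
    }

    func main() {
        name, err := poolName("/var/lib/docker/devicemapper")
        if err != nil {
            panic(err)
        }
        fmt.Println(name)
    }
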
#### status items Each item in the indented section under `Storage Driver: devicemapper` is a piece of status information about the driver. * `Pool Name` name of the devicemapper pool for this driver. * `Pool Blocksize` tells the blocksize the thin pool was initialized with. This only changes on creation. * `Data file` blockdevice file used for the devicemapper data * `Metadata file` blockdevice file used for the devicemapper metadata * `Data Space Used` tells how much of `Data file` is currently used * `Data Space Total` tells the max size of the `Data file` * `Data Space Available` tells how much free space there is in the `Data file`. If you are using a loop device this will report the actual space available to the loop device on the underlying filesystem. * `Metadata Space Used` tells how much of `Metadata file` is currently used * `Metadata Space Total` tells the max size of the `Metadata file` * `Metadata Space Available` tells how much free space there is in the `Metadata file`. If you are using a loop device this will report the actual space available to the loop device on the underlying filesystem. * `Udev Sync Supported` tells whether devicemapper is able to sync with Udev. Should be `true`. * `Data loop file` file attached to `Data file`, if a loopback device is used * `Metadata loop file` file attached to `Metadata file`, if a loopback device is used * `Library Version` from the libdevmapper used ### options The devicemapper backend supports some options that you can specify when starting the docker daemon using the `--storage-opt` flags. These use the `dm` prefix and would be used something like `docker -d --storage-opt dm.foo=bar`. Here is the list of supported options: * `dm.basesize` Specifies the size to use when creating the base device, which limits the size of images and containers. The default value is 10G. Note that thin devices are inherently "sparse", so a 10G device which is mostly empty doesn't use 10 GB of space on the pool. However, the larger the device is, the more space the filesystem will use even for the empty case. **Warning**: This value affects the system-wide "base" empty filesystem that may already be initialized and inherited by pulled images. Typically, a change to this value will require additional steps to take effect: 1) stop `docker -d`, 2) `rm -rf /var/lib/docker`, 3) start `docker -d`. Example use: ``docker -d --storage-opt dm.basesize=20G`` * `dm.loopdatasize` Specifies the size to use when creating the loopback file for the "data" device which is used for the thin pool. The default size is 100G. Note that the file is sparse, so it will not initially take up this much space. Example use: ``docker -d --storage-opt dm.loopdatasize=200G`` * `dm.loopmetadatasize` Specifies the size to use when creating the loopback file for the "metadata" device which is used for the thin pool. The default size is 2G. Note that the file is sparse, so it will not initially take up this much space. Example use: ``docker -d --storage-opt dm.loopmetadatasize=4G`` * `dm.fs` Specifies the filesystem type to use for the base device. The supported options are "ext4" and "xfs". The default is "ext4". Example use: ``docker -d --storage-opt dm.fs=xfs`` * `dm.mkfsarg` Specifies extra mkfs arguments to be used when creating the base device. Example use: ``docker -d --storage-opt "dm.mkfsarg=-O ^has_journal"`` * `dm.mountopt` Specifies extra mount options used when mounting the thin devices. 
    Example use: ``docker -d --storage-opt dm.mountopt=nodiscard``

 * `dm.thinpooldev`

    Specifies a custom blockdevice to use for the thin pool.

    If using a block device for device mapper storage, ideally lvm2 would be used to create/manage the thin-pool volume that is then handed to docker to exclusively create/manage the thin and thin snapshot volumes needed for its containers. Managing the thin-pool outside of docker makes for the most feature-rich method of having docker utilize device mapper thin provisioning as the backing storage for docker's containers. lvm2-based thin-pool management feature highlights include: automatic or interactive thin-pool resize support, dynamically changing thin-pool features, and automatic thinp metadata checking when lvm2 activates the thin-pool.

    Example use: ``docker -d --storage-opt dm.thinpooldev=/dev/mapper/thin-pool``

 * `dm.datadev`

    Specifies a custom blockdevice to use for data for the thin pool.

    If using a block device for device mapper storage, ideally both datadev and metadatadev should be specified to completely avoid using the loopback device.

    Example use: ``docker -d --storage-opt dm.datadev=/dev/sdb1 --storage-opt dm.metadatadev=/dev/sdc1``

 * `dm.metadatadev`

    Specifies a custom blockdevice to use for metadata for the thin pool.

    For best performance the metadata should be on a different spindle than the data, or even better on an SSD.

    If setting up a new metadata pool it is required to be valid. This can be achieved by zeroing the first 4k to indicate empty metadata, like this:

    ``dd if=/dev/zero of=$metadata_dev bs=4096 count=1``

    Example use: ``docker -d --storage-opt dm.datadev=/dev/sdb1 --storage-opt dm.metadatadev=/dev/sdc1``

 * `dm.blocksize`

    Specifies a custom blocksize to use for the thin pool. The default blocksize is 64K.

    Example use: ``docker -d --storage-opt dm.blocksize=512K``

 * `dm.blkdiscard`

    Enables or disables the use of blkdiscard when removing devicemapper devices. This is enabled by default (only) if using loopback devices and is required to resparsify the loopback file on image/container removal.

    Disabling this on loopback can lead to *much* faster container removal times, but it means that space used in the /var/lib/docker directory is not returned to the system for other use when containers are removed.

    Example use: ``docker -d --storage-opt dm.blkdiscard=false``

docker-1.6.2/daemon/graphdriver/devmapper/devmapper_doc.go0000644000175000017500000000534512524223634023245 0ustar tianontianonpackage devmapper // Definition of struct dm_task and sub structures (from lvm2) // // struct dm_ioctl { // /* // * The version number is made up of three parts: // * major - no backward or forward compatibility, // * minor - only backwards compatible, // * patch - both backwards and forwards compatible. // * // * All clients of the ioctl interface should fill in the // * version number of the interface that they were // * compiled with. // * // * All recognised ioctl commands (ie. those that don't // * return -ENOTTY) fill out this field, even if the // * command failed. // */ // uint32_t version[3]; /* in/out */ // uint32_t data_size; /* total size of data passed in // * including this struct */ // uint32_t data_start; /* offset to start of data // * relative to start of this struct */ // uint32_t target_count; /* in/out */ // int32_t open_count; /* out */ // uint32_t flags; /* in/out */ // /* // * event_nr holds either the event number (input and output) or the // * udev cookie value (input only). 
// * The DM_DEV_WAIT ioctl takes an event number as input. // * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls // * use the field as a cookie to return in the DM_COOKIE // * variable with the uevents they issue. // * For output, the ioctls return the event number, not the cookie. // */ // uint32_t event_nr; /* in/out */ // uint32_t padding; // uint64_t dev; /* in/out */ // char name[DM_NAME_LEN]; /* device name */ // char uuid[DM_UUID_LEN]; /* unique identifier for // * the block device */ // char data[7]; /* padding or data */ // }; // struct target { // uint64_t start; // uint64_t length; // char *type; // char *params; // struct target *next; // }; // typedef enum { // DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ // DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ // } dm_add_node_t; // struct dm_task { // int type; // char *dev_name; // char *mangled_dev_name; // struct target *head, *tail; // int read_only; // uint32_t event_nr; // int major; // int minor; // int allow_default_major_fallback; // uid_t uid; // gid_t gid; // mode_t mode; // uint32_t read_ahead; // uint32_t read_ahead_flags; // union { // struct dm_ioctl *v4; // } dmi; // char *newname; // char *message; // char *geometry; // uint64_t sector; // int no_flush; // int no_open_count; // int skip_lockfs; // int query_inactive_table; // int suppress_identical_reload; // dm_add_node_t add_node; // uint64_t existing_table_size; // int cookie_set; // int new_uuid; // int secure_data; // int retry_remove; // int enable_checks; // int expected_errno; // char *uuid; // char *mangled_uuid; // }; // docker-1.6.2/daemon/graphdriver/devmapper/devmapper_test.go0000644000175000017500000000175312524223634023456 0ustar tianontianon// +build linux package devmapper import ( "testing" "github.com/docker/docker/daemon/graphdriver/graphtest" ) func init() { // Reduce the size the the base fs and loopback for the tests DefaultDataLoopbackSize = 300 * 1024 * 1024 DefaultMetaDataLoopbackSize = 200 * 1024 * 1024 DefaultBaseFsSize = 300 * 1024 * 1024 if err := graphtest.InitLoopbacks(); err != nil { panic(err) } } // This avoids creating a new driver for each test if all tests are run // Make sure to put new tests between TestDevmapperSetup and TestDevmapperTeardown func TestDevmapperSetup(t *testing.T) { graphtest.GetDriver(t, "devicemapper") } func TestDevmapperCreateEmpty(t *testing.T) { graphtest.DriverTestCreateEmpty(t, "devicemapper") } func TestDevmapperCreateBase(t *testing.T) { graphtest.DriverTestCreateBase(t, "devicemapper") } func TestDevmapperCreateSnap(t *testing.T) { graphtest.DriverTestCreateSnap(t, "devicemapper") } func TestDevmapperTeardown(t *testing.T) { graphtest.PutDriver(t) } docker-1.6.2/daemon/graphdriver/devmapper/driver.go0000644000175000017500000001035412524223634021724 0ustar tianontianon// +build linux package devmapper import ( "fmt" "io/ioutil" "os" "path" log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/devicemapper" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/units" ) func init() { graphdriver.Register("devicemapper", Init) } // Placeholder interfaces, to be replaced // at integration. // End of placeholder interfaces. 
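// A minimal sketch of the lifecycle a graphdriver consumer goes through with
// this backend, using only functions and methods defined in this file; the
// graph root and layer id are illustrative values, not taken from the
// original code:
//
//	drv, err := Init("/var/lib/docker/devicemapper", nil)
//	if err != nil {
//		// devicemapper is not usable on this host
//	}
//	_ = drv.Create("layer1", "")       // allocate a new thin device
//	rootfs, _ := drv.Get("layer1", "") // activate + mount; returns .../mnt/layer1/rootfs
//	_ = rootfs                         // ... populate the filesystem ...
//	_ = drv.Put("layer1")              // unmount when done
//	_ = drv.Remove("layer1")           // discard + delete the thin device
//	_ = drv.Cleanup()                  // shut down the DeviceSet and unmount home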
type Driver struct { *DeviceSet home string } var backingFs = "" func Init(home string, options []string) (graphdriver.Driver, error) { fsMagic, err := graphdriver.GetFSMagic(home) if err != nil { return nil, err } if fsName, ok := graphdriver.FsNames[fsMagic]; ok { backingFs = fsName } deviceSet, err := NewDeviceSet(home, true, options) if err != nil { return nil, err } if err := mount.MakePrivate(home); err != nil { return nil, err } d := &Driver{ DeviceSet: deviceSet, home: home, } return graphdriver.NaiveDiffDriver(d), nil } func (d *Driver) String() string { return "devicemapper" } func (d *Driver) Status() [][2]string { s := d.DeviceSet.Status() status := [][2]string{ {"Pool Name", s.PoolName}, {"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))}, {"Backing Filesystem", backingFs}, {"Data file", s.DataFile}, {"Metadata file", s.MetadataFile}, {"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))}, {"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))}, {"Data Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Available)))}, {"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))}, {"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))}, {"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))}, {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, } if len(s.DataLoopback) > 0 { status = append(status, [2]string{"Data loop file", s.DataLoopback}) } if len(s.MetadataLoopback) > 0 { status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback}) } if vStr, err := devicemapper.GetLibraryVersion(); err == nil { status = append(status, [2]string{"Library Version", vStr}) } return status } func (d *Driver) Cleanup() error { err := d.DeviceSet.Shutdown() if err2 := mount.Unmount(d.home); err == nil { err = err2 } return err } func (d *Driver) Create(id, parent string) error { if err := d.DeviceSet.AddDevice(id, parent); err != nil { return err } return nil } func (d *Driver) Remove(id string) error { if !d.DeviceSet.HasDevice(id) { // Consider removing a non-existing device a no-op // This is useful to be able to progress on container removal // if the underlying device has gone away due to earlier errors return nil } // This assumes the device has been properly Get/Put:ed and thus is unmounted if err := d.DeviceSet.DeleteDevice(id); err != nil { return err } mp := path.Join(d.home, "mnt", id) if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) { return err } return nil } func (d *Driver) Get(id, mountLabel string) (string, error) { mp := path.Join(d.home, "mnt", id) // Create the target directories if they don't exist if err := os.MkdirAll(mp, 0755); err != nil && !os.IsExist(err) { return "", err } // Mount the device if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil { return "", err } rootFs := path.Join(mp, "rootfs") if err := os.MkdirAll(rootFs, 0755); err != nil && !os.IsExist(err) { d.DeviceSet.UnmountDevice(id) return "", err } idFile := path.Join(mp, "id") if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { // Create an "id" file with the container/image id in it to help reconscruct this in case // of later problems if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { d.DeviceSet.UnmountDevice(id) return "", err } } return rootFs, nil } func (d *Driver) Put(id string) error { err := d.DeviceSet.UnmountDevice(id) if err != 
nil { log.Errorf("Error unmounting device %s: %s", id, err) } return err } func (d *Driver) Exists(id string) bool { return d.DeviceSet.HasDevice(id) } docker-1.6.2/daemon/graphdriver/devmapper/deviceset.go0000644000175000017500000013413412524223634022407 0ustar tianontianon// +build linux package devmapper import ( "encoding/json" "errors" "fmt" "io" "io/ioutil" "os" "os/exec" "path" "path/filepath" "strconv" "strings" "sync" "syscall" "time" log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/devicemapper" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/units" "github.com/docker/libcontainer/label" ) var ( DefaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 DefaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 DefaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 DefaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors MaxDeviceId int = 0xffffff // 24 bit, pool limit DeviceIdMapSz int = (MaxDeviceId + 1) / 8 ) const deviceSetMetaFile string = "deviceset-metadata" const transactionMetaFile string = "transaction-metadata" type Transaction struct { OpenTransactionId uint64 `json:"open_transaction_id"` DeviceIdHash string `json:"device_hash"` DeviceId int `json:"device_id"` } type DevInfo struct { Hash string `json:"-"` DeviceId int `json:"device_id"` Size uint64 `json:"size"` TransactionId uint64 `json:"transaction_id"` Initialized bool `json:"initialized"` devices *DeviceSet mountCount int mountPath string // The global DeviceSet lock guarantees that we serialize all // the calls to libdevmapper (which is not threadsafe), but we // sometimes release that lock while sleeping. In that case // this per-device lock is still held, protecting against // other accesses to the device that we're doing the wait on. // // WARNING: In order to avoid AB-BA deadlocks when releasing // the global lock while holding the per-device locks, all // device locks must be acquired *before* the global lock, and // multiple device locks should be acquired parent before child. 
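// For example, the ordering used by AddDevice and MountDevice below is:
//
//	info.lock.Lock()         // per-device lock first (and parent before child)
//	defer info.lock.Unlock()
//	devices.Lock()           // global DeviceSet lock second
//	defer devices.Unlock()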
lock sync.Mutex } type MetaData struct { Devices map[string]*DevInfo `json:"Devices"` devicesLock sync.Mutex // Protects all read/writes to Devices map } type DeviceSet struct { MetaData `json:"-"` sync.Mutex `json:"-"` // Protects Devices map and serializes calls into libdevmapper root string devicePrefix string TransactionId uint64 `json:"-"` NextDeviceId int `json:"next_device_id"` deviceIdMap []byte // Options dataLoopbackSize int64 metaDataLoopbackSize int64 baseFsSize uint64 filesystem string mountOptions string mkfsArgs []string dataDevice string // block or loop dev dataLoopFile string // loopback file, if used metadataDevice string // block or loop dev metadataLoopFile string // loopback file, if used doBlkDiscard bool thinpBlockSize uint32 thinPoolDevice string Transaction `json:"-"` } type DiskUsage struct { Used uint64 Total uint64 Available uint64 } type Status struct { PoolName string DataFile string // actual block device for data DataLoopback string // loopback file, if used MetadataFile string // actual block device for metadata MetadataLoopback string // loopback file, if used Data DiskUsage Metadata DiskUsage SectorSize uint64 UdevSyncSupported bool } type DevStatus struct { DeviceId int Size uint64 TransactionId uint64 SizeInSectors uint64 MappedSectors uint64 HighestMappedSector uint64 } func getDevName(name string) string { return "/dev/mapper/" + name } func (info *DevInfo) Name() string { hash := info.Hash if hash == "" { hash = "base" } return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash) } func (info *DevInfo) DevName() string { return getDevName(info.Name()) } func (devices *DeviceSet) loopbackDir() string { return path.Join(devices.root, "devicemapper") } func (devices *DeviceSet) metadataDir() string { return path.Join(devices.root, "metadata") } func (devices *DeviceSet) metadataFile(info *DevInfo) string { file := info.Hash if file == "" { file = "base" } return path.Join(devices.metadataDir(), file) } func (devices *DeviceSet) transactionMetaFile() string { return path.Join(devices.metadataDir(), transactionMetaFile) } func (devices *DeviceSet) deviceSetMetaFile() string { return path.Join(devices.metadataDir(), deviceSetMetaFile) } func (devices *DeviceSet) oldMetadataFile() string { return path.Join(devices.loopbackDir(), "json") } func (devices *DeviceSet) getPoolName() string { if devices.thinPoolDevice == "" { return devices.devicePrefix + "-pool" } return devices.thinPoolDevice } func (devices *DeviceSet) getPoolDevName() string { return getDevName(devices.getPoolName()) } func (devices *DeviceSet) hasImage(name string) bool { dirname := devices.loopbackDir() filename := path.Join(dirname, name) _, err := os.Stat(filename) return err == nil } // ensureImage creates a sparse file of bytes at the path // /devicemapper/. // If the file already exists, it does nothing. // Either way it returns the full path. 
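// As an illustration of the sparseness (demo path and size below are
// hypothetical, not from the original code): Truncate only sets the
// apparent size of the file,
//
//	f, _ := os.OpenFile("/tmp/sparse-demo", os.O_RDWR|os.O_CREATE, 0600)
//	_ = f.Truncate(100 << 30) // `ls -lh` now shows 100G
//	_ = f.Close()             // ...while `du -h` still shows ~0
//
// so blocks are only allocated as the thin pool actually writes to it.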
func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { dirname := devices.loopbackDir() filename := path.Join(dirname, name) if err := os.MkdirAll(dirname, 0700); err != nil && !os.IsExist(err) { return "", err } if _, err := os.Stat(filename); err != nil { if !os.IsNotExist(err) { return "", err } log.Debugf("Creating loopback file %s for device-manage use", filename) file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) if err != nil { return "", err } defer file.Close() if err = file.Truncate(size); err != nil { return "", err } } return filename, nil } func (devices *DeviceSet) allocateTransactionId() uint64 { devices.OpenTransactionId = devices.TransactionId + 1 return devices.OpenTransactionId } func (devices *DeviceSet) updatePoolTransactionId() error { if err := devicemapper.SetTransactionId(devices.getPoolDevName(), devices.TransactionId, devices.OpenTransactionId); err != nil { return fmt.Errorf("Error setting devmapper transaction ID: %s", err) } devices.TransactionId = devices.OpenTransactionId return nil } func (devices *DeviceSet) removeMetadata(info *DevInfo) error { if err := os.RemoveAll(devices.metadataFile(info)); err != nil { return fmt.Errorf("Error removing metadata file %s: %s", devices.metadataFile(info), err) } return nil } // Given json data and file path, write it to disk func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") if err != nil { return fmt.Errorf("Error creating metadata file: %s", err) } n, err := tmpFile.Write(jsonData) if err != nil { return fmt.Errorf("Error writing metadata to %s: %s", tmpFile.Name(), err) } if n < len(jsonData) { return io.ErrShortWrite } if err := tmpFile.Sync(); err != nil { return fmt.Errorf("Error syncing metadata file %s: %s", tmpFile.Name(), err) } if err := tmpFile.Close(); err != nil { return fmt.Errorf("Error closing metadata file %s: %s", tmpFile.Name(), err) } if err := os.Rename(tmpFile.Name(), filePath); err != nil { return fmt.Errorf("Error committing metadata file %s: %s", tmpFile.Name(), err) } return nil } func (devices *DeviceSet) saveMetadata(info *DevInfo) error { jsonData, err := json.Marshal(info) if err != nil { return fmt.Errorf("Error encoding metadata to json: %s", err) } if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil { return err } return nil } func (devices *DeviceSet) markDeviceIdUsed(deviceId int) { var mask byte i := deviceId % 8 mask = 1 << uint(i) devices.deviceIdMap[deviceId/8] = devices.deviceIdMap[deviceId/8] | mask } func (devices *DeviceSet) markDeviceIdFree(deviceId int) { var mask byte i := deviceId % 8 mask = ^(1 << uint(i)) devices.deviceIdMap[deviceId/8] = devices.deviceIdMap[deviceId/8] & mask } func (devices *DeviceSet) isDeviceIdFree(deviceId int) bool { var mask byte i := deviceId % 8 mask = (1 << uint(i)) if (devices.deviceIdMap[deviceId/8] & mask) != 0 { return false } return true } func (devices *DeviceSet) lookupDevice(hash string) (*DevInfo, error) { devices.devicesLock.Lock() defer devices.devicesLock.Unlock() info := devices.Devices[hash] if info == nil { info = devices.loadMetadata(hash) if info == nil { return nil, fmt.Errorf("Unknown device %s", hash) } devices.Devices[hash] = info } return info, nil } func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { // Skip some of the meta files which are not device files. 
if strings.HasSuffix(finfo.Name(), ".migrated") { log.Debugf("Skipping file %s", path) return nil } if strings.HasPrefix(finfo.Name(), ".") { log.Debugf("Skipping file %s", path) return nil } if finfo.Name() == deviceSetMetaFile { log.Debugf("Skipping file %s", path) return nil } log.Debugf("Loading data for file %s", path) hash := finfo.Name() if hash == "base" { hash = "" } dinfo := devices.loadMetadata(hash) if dinfo == nil { return fmt.Errorf("Error loading device metadata file %s", hash) } if dinfo.DeviceId > MaxDeviceId { log.Errorf("Ignoring Invalid DeviceId=%d", dinfo.DeviceId) return nil } devices.Lock() devices.markDeviceIdUsed(dinfo.DeviceId) devices.Unlock() log.Debugf("Added deviceId=%d to DeviceIdMap", dinfo.DeviceId) return nil } func (devices *DeviceSet) constructDeviceIdMap() error { log.Debugf("[deviceset] constructDeviceIdMap()") defer log.Debugf("[deviceset] constructDeviceIdMap() END") var scan = func(path string, info os.FileInfo, err error) error { if err != nil { log.Debugf("Can't walk the file %s", path) return nil } // Skip any directories if info.IsDir() { return nil } return devices.deviceFileWalkFunction(path, info) } return filepath.Walk(devices.metadataDir(), scan) } func (devices *DeviceSet) unregisterDevice(id int, hash string) error { log.Debugf("unregisterDevice(%v, %v)", id, hash) info := &DevInfo{ Hash: hash, DeviceId: id, } devices.devicesLock.Lock() delete(devices.Devices, hash) devices.devicesLock.Unlock() if err := devices.removeMetadata(info); err != nil { log.Debugf("Error removing metadata: %s", err) return err } return nil } func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionId uint64) (*DevInfo, error) { log.Debugf("registerDevice(%v, %v)", id, hash) info := &DevInfo{ Hash: hash, DeviceId: id, Size: size, TransactionId: transactionId, Initialized: false, devices: devices, } devices.devicesLock.Lock() devices.Devices[hash] = info devices.devicesLock.Unlock() if err := devices.saveMetadata(info); err != nil { // Try to remove unused device devices.devicesLock.Lock() delete(devices.Devices, hash) devices.devicesLock.Unlock() return nil, err } return info, nil } func (devices *DeviceSet) activateDeviceIfNeeded(info *DevInfo) error { log.Debugf("activateDeviceIfNeeded(%v)", info.Hash) if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { return nil } return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceId, info.Size) } func (devices *DeviceSet) createFilesystem(info *DevInfo) error { devname := info.DevName() args := []string{} for _, arg := range devices.mkfsArgs { args = append(args, arg) } args = append(args, devname) var err error switch devices.filesystem { case "xfs": err = exec.Command("mkfs.xfs", args...).Run() case "ext4": err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() if err != nil { err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() } if err != nil { return err } err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run() default: err = fmt.Errorf("Unsupported filesystem type %s", devices.filesystem) } if err != nil { return err } return nil } func (devices *DeviceSet) migrateOldMetaData() error { // Migrate old metadata file jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) if err != nil && !os.IsNotExist(err) { return err } if jsonData != nil { m := 
MetaData{Devices: make(map[string]*DevInfo)} if err := json.Unmarshal(jsonData, &m); err != nil { return err } for hash, info := range m.Devices { info.Hash = hash devices.saveMetadata(info) } if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { return err } } return nil } func (devices *DeviceSet) initMetaData() error { if err := devices.migrateOldMetaData(); err != nil { return err } _, transactionId, _, _, _, _, err := devices.poolStatus() if err != nil { return err } devices.TransactionId = transactionId if err := devices.constructDeviceIdMap(); err != nil { return err } if err := devices.processPendingTransaction(); err != nil { return err } return nil } func (devices *DeviceSet) incNextDeviceId() { // Ids are 24bit, so wrap around devices.NextDeviceId = (devices.NextDeviceId + 1) & MaxDeviceId } func (devices *DeviceSet) getNextFreeDeviceId() (int, error) { devices.incNextDeviceId() for i := 0; i <= MaxDeviceId; i++ { if devices.isDeviceIdFree(devices.NextDeviceId) { devices.markDeviceIdUsed(devices.NextDeviceId) return devices.NextDeviceId, nil } devices.incNextDeviceId() } return 0, fmt.Errorf("Unable to find a free device Id") } func (devices *DeviceSet) createRegisterDevice(hash string) (*DevInfo, error) { deviceId, err := devices.getNextFreeDeviceId() if err != nil { return nil, err } if err := devices.openTransaction(hash, deviceId); err != nil { log.Debugf("Error opening transaction hash = %s deviceId = %d", hash, deviceId) devices.markDeviceIdFree(deviceId) return nil, err } for { if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceId); err != nil { if devicemapper.DeviceIdExists(err) { // Device Id already exists. This should not // happen. Now we have a mechianism to find // a free device Id. So something is not right. // Give a warning and continue. log.Errorf("Device Id %d exists in pool but it is supposed to be unused", deviceId) deviceId, err = devices.getNextFreeDeviceId() if err != nil { return nil, err } // Save new device id into transaction devices.refreshTransaction(deviceId) continue } log.Debugf("Error creating device: %s", err) devices.markDeviceIdFree(deviceId) return nil, err } break } log.Debugf("Registering device (id %v) with FS size %v", deviceId, devices.baseFsSize) info, err := devices.registerDevice(deviceId, hash, devices.baseFsSize, devices.OpenTransactionId) if err != nil { _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) devices.markDeviceIdFree(deviceId) return nil, err } if err := devices.closeTransaction(); err != nil { devices.unregisterDevice(deviceId, hash) devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) devices.markDeviceIdFree(deviceId) return nil, err } return info, nil } func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *DevInfo) error { deviceId, err := devices.getNextFreeDeviceId() if err != nil { return err } if err := devices.openTransaction(hash, deviceId); err != nil { log.Debugf("Error opening transaction hash = %s deviceId = %d", hash, deviceId) devices.markDeviceIdFree(deviceId) return err } for { if err := devicemapper.CreateSnapDevice(devices.getPoolDevName(), deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { if devicemapper.DeviceIdExists(err) { // Device Id already exists. This should not // happen. Now we have a mechianism to find // a free device Id. So something is not right. // Give a warning and continue. 
log.Errorf("Device Id %d exists in pool but it is supposed to be unused", deviceId) deviceId, err = devices.getNextFreeDeviceId() if err != nil { return err } // Save new device id into transaction devices.refreshTransaction(deviceId) continue } log.Debugf("Error creating snap device: %s", err) devices.markDeviceIdFree(deviceId) return err } break } if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size, devices.OpenTransactionId); err != nil { devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) devices.markDeviceIdFree(deviceId) log.Debugf("Error registering device: %s", err) return err } if err := devices.closeTransaction(); err != nil { devices.unregisterDevice(deviceId, hash) devicemapper.DeleteDevice(devices.getPoolDevName(), deviceId) devices.markDeviceIdFree(deviceId) return err } return nil } func (devices *DeviceSet) loadMetadata(hash string) *DevInfo { info := &DevInfo{Hash: hash, devices: devices} jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) if err != nil { return nil } if err := json.Unmarshal(jsonData, &info); err != nil { return nil } return info } func (devices *DeviceSet) setupBaseImage() error { oldInfo, _ := devices.lookupDevice("") if oldInfo != nil && oldInfo.Initialized { return nil } if oldInfo != nil && !oldInfo.Initialized { log.Debugf("Removing uninitialized base image") if err := devices.DeleteDevice(""); err != nil { return err } } if devices.thinPoolDevice != "" && oldInfo == nil { _, transactionId, dataUsed, _, _, _, err := devices.poolStatus() if err != nil { return err } if dataUsed != 0 { return fmt.Errorf("Unable to take ownership of thin-pool (%s) that already has used data blocks", devices.thinPoolDevice) } if transactionId != 0 { return fmt.Errorf("Unable to take ownership of thin-pool (%s) with non-zero transaction Id", devices.thinPoolDevice) } } log.Debugf("Initializing base device-mapper thin volume") // Create initial device info, err := devices.createRegisterDevice("") if err != nil { return err } log.Debugf("Creating filesystem on base device-mapper thin volume") if err = devices.activateDeviceIfNeeded(info); err != nil { return err } if err := devices.createFilesystem(info); err != nil { return err } info.Initialized = true if err = devices.saveMetadata(info); err != nil { info.Initialized = false return err } return nil } func setCloseOnExec(name string) { if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil { for _, i := range fileInfos { link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) if link == name { fd, err := strconv.Atoi(i.Name()) if err == nil { syscall.CloseOnExec(fd) } } } } } func (devices *DeviceSet) DMLog(level int, file string, line int, dmError int, message string) { if level >= devicemapper.LogLevelDebug { // (vbatts) libdm debug is very verbose. 
If you're debugging libdm, you can // comment out this check yourself level = devicemapper.LogLevelInfo } // FIXME(vbatts) push this back into ./pkg/devicemapper/ log.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) } func major(device uint64) uint64 { return (device >> 8) & 0xfff } func minor(device uint64) uint64 { return (device & 0xff) | ((device >> 12) & 0xfff00) } func (devices *DeviceSet) ResizePool(size int64) error { dirname := devices.loopbackDir() datafilename := path.Join(dirname, "data") if len(devices.dataDevice) > 0 { datafilename = devices.dataDevice } metadatafilename := path.Join(dirname, "metadata") if len(devices.metadataDevice) > 0 { metadatafilename = devices.metadataDevice } datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) if datafile == nil { return err } defer datafile.Close() fi, err := datafile.Stat() if fi == nil { return err } if fi.Size() > size { return fmt.Errorf("Can't shrink file") } dataloopback := devicemapper.FindLoopDeviceFor(datafile) if dataloopback == nil { return fmt.Errorf("Unable to find loopback mount for: %s", datafilename) } defer dataloopback.Close() metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) if metadatafile == nil { return err } defer metadatafile.Close() metadataloopback := devicemapper.FindLoopDeviceFor(metadatafile) if metadataloopback == nil { return fmt.Errorf("Unable to find loopback mount for: %s", metadatafilename) } defer metadataloopback.Close() // Grow loopback file if err := datafile.Truncate(size); err != nil { return fmt.Errorf("Unable to grow loopback file: %s", err) } // Reload size for loopback device if err := devicemapper.LoopbackSetCapacity(dataloopback); err != nil { return fmt.Errorf("Unable to update loopback capacity: %s", err) } // Suspend the pool if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil { return fmt.Errorf("Unable to suspend pool: %s", err) } // Reload with the new block sizes if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { return fmt.Errorf("Unable to reload pool: %s", err) } // Resume the pool if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil { return fmt.Errorf("Unable to resume pool: %s", err) } return nil } func (devices *DeviceSet) loadTransactionMetaData() error { jsonData, err := ioutil.ReadFile(devices.transactionMetaFile()) if err != nil { // There is no active transaction. This will be the case // during upgrade. if os.IsNotExist(err) { devices.OpenTransactionId = devices.TransactionId return nil } return err } json.Unmarshal(jsonData, &devices.Transaction) return nil } func (devices *DeviceSet) saveTransactionMetaData() error { jsonData, err := json.Marshal(&devices.Transaction) if err != nil { return fmt.Errorf("Error encoding metadata to json: %s", err) } return devices.writeMetaFile(jsonData, devices.transactionMetaFile()) } func (devices *DeviceSet) removeTransactionMetaData() error { if err := os.RemoveAll(devices.transactionMetaFile()); err != nil { return err } return nil } func (devices *DeviceSet) rollbackTransaction() error { log.Debugf("Rolling back open transaction: TransactionId=%d hash=%s device_id=%d", devices.OpenTransactionId, devices.DeviceIdHash, devices.DeviceId) // A device id might have already been deleted before transaction // closed. In that case this call will fail. Just leave a message // in case of failure. 
if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceId); err != nil { log.Errorf("Unable to delete device: %s", err) } dinfo := &DevInfo{Hash: devices.DeviceIdHash} if err := devices.removeMetadata(dinfo); err != nil { log.Errorf("Unable to remove metadata: %s", err) } else { devices.markDeviceIdFree(devices.DeviceId) } if err := devices.removeTransactionMetaData(); err != nil { log.Errorf("Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err) } return nil } func (devices *DeviceSet) processPendingTransaction() error { if err := devices.loadTransactionMetaData(); err != nil { return err } // If there was open transaction but pool transaction Id is same // as open transaction Id, nothing to roll back. if devices.TransactionId == devices.OpenTransactionId { return nil } // If open transaction Id is less than pool transaction Id, something // is wrong. Bail out. if devices.OpenTransactionId < devices.TransactionId { log.Errorf("Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionId, devices.TransactionId) return nil } // Pool transaction Id is not same as open transaction. There is // a transaction which was not completed. if err := devices.rollbackTransaction(); err != nil { return fmt.Errorf("Rolling back open transaction failed: %s", err) } devices.OpenTransactionId = devices.TransactionId return nil } func (devices *DeviceSet) loadDeviceSetMetaData() error { jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile()) if err != nil { // For backward compatibility return success if file does // not exist. if os.IsNotExist(err) { return nil } return err } return json.Unmarshal(jsonData, devices) } func (devices *DeviceSet) saveDeviceSetMetaData() error { jsonData, err := json.Marshal(devices) if err != nil { return fmt.Errorf("Error encoding metadata to json: %s", err) } return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) } func (devices *DeviceSet) openTransaction(hash string, DeviceId int) error { devices.allocateTransactionId() devices.DeviceIdHash = hash devices.DeviceId = DeviceId if err := devices.saveTransactionMetaData(); err != nil { return fmt.Errorf("Error saving transaction metadata: %s", err) } return nil } func (devices *DeviceSet) refreshTransaction(DeviceId int) error { devices.DeviceId = DeviceId if err := devices.saveTransactionMetaData(); err != nil { return fmt.Errorf("Error saving transaction metadata: %s", err) } return nil } func (devices *DeviceSet) closeTransaction() error { if err := devices.updatePoolTransactionId(); err != nil { log.Debugf("Failed to close Transaction") return err } return nil } func (devices *DeviceSet) initDevmapper(doInit bool) error { if os.Getenv("DEBUG") != "" { devicemapper.LogInitVerbose(devicemapper.LogLevelDebug) } else { devicemapper.LogInitVerbose(devicemapper.LogLevelWarn) } // give ourselves to libdm as a log handler devicemapper.LogInit(devices) _, err := devicemapper.GetDriverVersion() if err != nil { // Can't even get driver version, assume not supported return graphdriver.ErrNotSupported } // https://github.com/docker/docker/issues/4036 if supported := devicemapper.UdevSetSyncSupport(true); !supported { log.Warnf("Udev sync is not supported. 
This will lead to unexpected behavior, data loss and errors") } log.Debugf("devicemapper: udev sync support: %v", devicemapper.UdevSyncSupported()) if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) { return err } // Set the device prefix from the device id and inode of the docker root dir st, err := os.Stat(devices.root) if err != nil { return fmt.Errorf("Error looking up dir %s: %s", devices.root, err) } sysSt := st.Sys().(*syscall.Stat_t) // "reg-" stands for "regular file". // In the future we might use "dev-" for "device file", etc. // docker-maj,min[-inode] stands for: // - Managed by docker // - The target of this device is at major and minor // - If is defined, use that file inside the device as a loopback image. Otherwise use the device itself. devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino) log.Debugf("Generated prefix: %s", devices.devicePrefix) // Check for the existence of the thin-pool device log.Debugf("Checking for existence of the pool '%s'", devices.getPoolName()) info, err := devicemapper.GetInfo(devices.getPoolName()) if info == nil { log.Debugf("Error device devicemapper.GetInfo: %s", err) return err } // It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files // that are not Close-on-exec, and lxc-start will die if it inherits any unexpected files, // so we add this badhack to make sure it closes itself setCloseOnExec("/dev/mapper/control") // Make sure the sparse images exist in /devicemapper/data and // /devicemapper/metadata createdLoopback := false // If the pool doesn't exist, create it if info.Exists == 0 && devices.thinPoolDevice == "" { log.Debugf("Pool doesn't exist. Creating it.") var ( dataFile *os.File metadataFile *os.File ) if devices.dataDevice == "" { // Make sure the sparse images exist in /devicemapper/data hasData := devices.hasImage("data") if !doInit && !hasData { return errors.New("Loopback data file not found") } if !hasData { createdLoopback = true } data, err := devices.ensureImage("data", devices.dataLoopbackSize) if err != nil { log.Debugf("Error device ensureImage (data): %s", err) return err } dataFile, err = devicemapper.AttachLoopDevice(data) if err != nil { return err } devices.dataLoopFile = data devices.dataDevice = dataFile.Name() } else { dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600) if err != nil { return err } } defer dataFile.Close() if devices.metadataDevice == "" { // Make sure the sparse images exist in /devicemapper/metadata hasMetadata := devices.hasImage("metadata") if !doInit && !hasMetadata { return errors.New("Loopback metadata file not found") } if !hasMetadata { createdLoopback = true } metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize) if err != nil { log.Debugf("Error device ensureImage (metadata): %s", err) return err } metadataFile, err = devicemapper.AttachLoopDevice(metadata) if err != nil { return err } devices.metadataLoopFile = metadata devices.metadataDevice = metadataFile.Name() } else { metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600) if err != nil { return err } } defer metadataFile.Close() if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { return err } } // If we didn't just create the data or metadata image, we need to // load the transaction id and migrate old metadata if !createdLoopback { if err = devices.initMetaData(); err != nil { return err } } // 
Right now this loads only NextDeviceId. If there is more metadata // down the line, we might have to move it earlier. if err = devices.loadDeviceSetMetaData(); err != nil { return err } // Setup the base image if doInit { if err := devices.setupBaseImage(); err != nil { log.Debugf("Error device setupBaseImage: %s", err) return err } } return nil } func (devices *DeviceSet) AddDevice(hash, baseHash string) error { log.Debugf("[deviceset] AddDevice(hash=%s basehash=%s)", hash, baseHash) defer log.Debugf("[deviceset] AddDevice(hash=%s basehash=%s) END", hash, baseHash) baseInfo, err := devices.lookupDevice(baseHash) if err != nil { return err } baseInfo.lock.Lock() defer baseInfo.lock.Unlock() devices.Lock() defer devices.Unlock() if info, _ := devices.lookupDevice(hash); info != nil { return fmt.Errorf("device %s already exists", hash) } if err := devices.createRegisterSnapDevice(hash, baseInfo); err != nil { return err } return nil } func (devices *DeviceSet) deleteDevice(info *DevInfo) error { if devices.doBlkDiscard { // This is a workaround for the kernel not discarding block so // on the thin pool when we remove a thinp device, so we do it // manually if err := devices.activateDeviceIfNeeded(info); err == nil { if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil { log.Debugf("Error discarding block on device: %s (ignoring)", err) } } } devinfo, _ := devicemapper.GetInfo(info.Name()) if devinfo != nil && devinfo.Exists != 0 { if err := devices.removeDeviceAndWait(info.Name()); err != nil { log.Debugf("Error removing device: %s", err) return err } } if err := devices.openTransaction(info.Hash, info.DeviceId); err != nil { log.Debugf("Error opening transaction hash = %s deviceId = %d", "", info.DeviceId) return err } if err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceId); err != nil { log.Debugf("Error deleting device: %s", err) return err } if err := devices.unregisterDevice(info.DeviceId, info.Hash); err != nil { return err } if err := devices.closeTransaction(); err != nil { return err } devices.markDeviceIdFree(info.DeviceId) return nil } func (devices *DeviceSet) DeleteDevice(hash string) error { info, err := devices.lookupDevice(hash) if err != nil { return err } info.lock.Lock() defer info.lock.Unlock() devices.Lock() defer devices.Unlock() return devices.deleteDevice(info) } func (devices *DeviceSet) deactivatePool() error { log.Debugf("[devmapper] deactivatePool()") defer log.Debugf("[devmapper] deactivatePool END") devname := devices.getPoolDevName() devinfo, err := devicemapper.GetInfo(devname) if err != nil { return err } if d, err := devicemapper.GetDeps(devname); err == nil { // Access to more Debug output log.Debugf("[devmapper] devicemapper.GetDeps() %s: %#v", devname, d) } if devinfo.Exists != 0 { return devicemapper.RemoveDevice(devname) } return nil } func (devices *DeviceSet) deactivateDevice(info *DevInfo) error { log.Debugf("[devmapper] deactivateDevice(%s)", info.Hash) defer log.Debugf("[devmapper] deactivateDevice END(%s)", info.Hash) // Wait for the unmount to be effective, // by watching the value of Info.OpenCount for the device if err := devices.waitClose(info); err != nil { log.Errorf("Error waiting for device %s to close: %s", info.Hash, err) } devinfo, err := devicemapper.GetInfo(info.Name()) if err != nil { return err } if devinfo.Exists != 0 { if err := devices.removeDeviceAndWait(info.Name()); err != nil { return err } } return nil } // Issues the underlying dm remove operation and then waits // for it to finish. 
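// Note on the bound: the loop retries an EBUSY result up to 1000 times with
// a 10ms sleep between attempts (roughly 1000 * 10ms = 10s), and the
// subsequent waitRemove polls with the same 1000 x 10ms pattern, which is
// where its documented "10 second timeout" comes from.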
func (devices *DeviceSet) removeDeviceAndWait(devname string) error { var err error for i := 0; i < 1000; i++ { err = devicemapper.RemoveDevice(devname) if err == nil { break } if err != devicemapper.ErrBusy { return err } // If we see EBUSY it may be a transient error, // sleep a bit a retry a few times. devices.Unlock() time.Sleep(10 * time.Millisecond) devices.Lock() } if err != nil { return err } if err := devices.waitRemove(devname); err != nil { return err } return nil } // waitRemove blocks until either: // a) the device registered at - is removed, // or b) the 10 second timeout expires. func (devices *DeviceSet) waitRemove(devname string) error { log.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname) defer log.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname) i := 0 for ; i < 1000; i++ { devinfo, err := devicemapper.GetInfo(devname) if err != nil { // If there is an error we assume the device doesn't exist. // The error might actually be something else, but we can't differentiate. return nil } if i%100 == 0 { log.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists) } if devinfo.Exists == 0 { break } devices.Unlock() time.Sleep(10 * time.Millisecond) devices.Lock() } if i == 1000 { return fmt.Errorf("Timeout while waiting for device %s to be removed", devname) } return nil } // waitClose blocks until either: // a) the device registered at - is closed, // or b) the 10 second timeout expires. func (devices *DeviceSet) waitClose(info *DevInfo) error { i := 0 for ; i < 1000; i++ { devinfo, err := devicemapper.GetInfo(info.Name()) if err != nil { return err } if i%100 == 0 { log.Debugf("Waiting for unmount of %s: opencount=%d", info.Hash, devinfo.OpenCount) } if devinfo.OpenCount == 0 { break } devices.Unlock() time.Sleep(10 * time.Millisecond) devices.Lock() } if i == 1000 { return fmt.Errorf("Timeout while waiting for device %s to close", info.Hash) } return nil } func (devices *DeviceSet) Shutdown() error { log.Debugf("[deviceset %s] Shutdown()", devices.devicePrefix) log.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root) defer log.Debugf("[deviceset %s] Shutdown() END", devices.devicePrefix) var devs []*DevInfo devices.devicesLock.Lock() for _, info := range devices.Devices { devs = append(devs, info) } devices.devicesLock.Unlock() for _, info := range devs { info.lock.Lock() if info.mountCount > 0 { // We use MNT_DETACH here in case it is still busy in some running // container. This means it'll go away from the global scope directly, // and the device will be released when that container dies. 
if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil { log.Debugf("Shutdown unmounting %s, error: %s", info.mountPath, err) } devices.Lock() if err := devices.deactivateDevice(info); err != nil { log.Debugf("Shutdown deactivate %s , error: %s", info.Hash, err) } devices.Unlock() } info.lock.Unlock() } info, _ := devices.lookupDevice("") if info != nil { info.lock.Lock() devices.Lock() if err := devices.deactivateDevice(info); err != nil { log.Debugf("Shutdown deactivate base , error: %s", err) } devices.Unlock() info.lock.Unlock() } devices.Lock() if devices.thinPoolDevice == "" { if err := devices.deactivatePool(); err != nil { log.Debugf("Shutdown deactivate pool , error: %s", err) } } devices.saveDeviceSetMetaData() devices.Unlock() return nil } func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { info, err := devices.lookupDevice(hash) if err != nil { return err } info.lock.Lock() defer info.lock.Unlock() devices.Lock() defer devices.Unlock() if info.mountCount > 0 { if path != info.mountPath { return fmt.Errorf("Trying to mount devmapper device in multiple places (%s, %s)", info.mountPath, path) } info.mountCount++ return nil } if err := devices.activateDeviceIfNeeded(info); err != nil { return fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err) } var flags uintptr = syscall.MS_MGC_VAL fstype, err := ProbeFsType(info.DevName()) if err != nil { return err } options := "" if fstype == "xfs" { // XFS needs nouuid or it can't mount filesystems with the same fs options = joinMountOptions(options, "nouuid") } options = joinMountOptions(options, devices.mountOptions) options = joinMountOptions(options, label.FormatMountLabel("", mountLabel)) err = syscall.Mount(info.DevName(), path, fstype, flags, joinMountOptions("discard", options)) if err != nil && err == syscall.EINVAL { err = syscall.Mount(info.DevName(), path, fstype, flags, options) } if err != nil { return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), path, err) } info.mountCount = 1 info.mountPath = path return nil } func (devices *DeviceSet) UnmountDevice(hash string) error { log.Debugf("[devmapper] UnmountDevice(hash=%s)", hash) defer log.Debugf("[devmapper] UnmountDevice(hash=%s) END", hash) info, err := devices.lookupDevice(hash) if err != nil { return err } info.lock.Lock() defer info.lock.Unlock() devices.Lock() defer devices.Unlock() if info.mountCount == 0 { return fmt.Errorf("UnmountDevice: device not-mounted id %s", hash) } info.mountCount-- if info.mountCount > 0 { return nil } log.Debugf("[devmapper] Unmount(%s)", info.mountPath) if err := syscall.Unmount(info.mountPath, syscall.MNT_DETACH); err != nil { return err } log.Debugf("[devmapper] Unmount done") if err := devices.deactivateDevice(info); err != nil { return err } info.mountPath = "" return nil } func (devices *DeviceSet) HasDevice(hash string) bool { devices.Lock() defer devices.Unlock() info, _ := devices.lookupDevice(hash) return info != nil } func (devices *DeviceSet) HasActivatedDevice(hash string) bool { info, _ := devices.lookupDevice(hash) if info == nil { return false } info.lock.Lock() defer info.lock.Unlock() devices.Lock() defer devices.Unlock() devinfo, _ := devicemapper.GetInfo(info.Name()) return devinfo != nil && devinfo.Exists != 0 } func (devices *DeviceSet) List() []string { devices.Lock() defer devices.Unlock() devices.devicesLock.Lock() ids := make([]string, len(devices.Devices)) i := 0 for k := range devices.Devices { ids[i] = k i++ } 
devices.devicesLock.Unlock() return ids } func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { var params string _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) if err != nil { return } if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { return } return } func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { info, err := devices.lookupDevice(hash) if err != nil { return nil, err } info.lock.Lock() defer info.lock.Unlock() devices.Lock() defer devices.Unlock() status := &DevStatus{ DeviceId: info.DeviceId, Size: info.Size, TransactionId: info.TransactionId, } if err := devices.activateDeviceIfNeeded(info); err != nil { return nil, fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err) } if sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()); err != nil { return nil, err } else { status.SizeInSectors = sizeInSectors status.MappedSectors = mappedSectors status.HighestMappedSector = highestMappedSector } return status, nil } func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionId, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { var params string if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil { _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionId, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) } return } // DataDevicePath returns the path to the data storage for this deviceset, // regardless of loopback or block device func (devices *DeviceSet) DataDevicePath() string { return devices.dataDevice } // MetadataDevicePath returns the path to the metadata storage for this deviceset, // regardless of loopback or block device func (devices *DeviceSet) MetadataDevicePath() string { return devices.metadataDevice } func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { buf := new(syscall.Statfs_t) err := syscall.Statfs(loopFile, buf) if err != nil { log.Warnf("Couldn't stat loopfile filesystem %v: %v", loopFile, err) return 0, err } return buf.Bfree * uint64(buf.Bsize), nil } func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) { if loopFile != "" { fi, err := os.Stat(loopFile) if err != nil { log.Warnf("Couldn't stat loopfile %v: %v", loopFile, err) return false, err } return fi.Mode().IsRegular(), nil } return false, nil } // Status returns the current status of this deviceset func (devices *DeviceSet) Status() *Status { devices.Lock() defer devices.Unlock() status := &Status{} status.PoolName = devices.getPoolName() status.DataFile = devices.DataDevicePath() status.DataLoopback = devices.dataLoopFile status.MetadataFile = devices.MetadataDevicePath() status.MetadataLoopback = devices.metadataLoopFile status.UdevSyncSupported = devicemapper.UdevSyncSupported() totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() if err == nil { // Convert from blocks to bytes blockSizeInSectors := totalSizeInSectors / dataTotal status.Data.Used = dataUsed * blockSizeInSectors * 512 status.Data.Total = dataTotal * blockSizeInSectors * 512 status.Data.Available = status.Data.Total - status.Data.Used // metadata blocks are always 4k status.Metadata.Used = metadataUsed * 4096 status.Metadata.Total = metadataTotal * 4096 status.Metadata.Available = status.Metadata.Total - status.Metadata.Used 
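// Worked example of the conversion above (numbers reverse-derived from the
// sample `docker info` in the devicemapper README, so illustrative only):
// a 64K pool blocksize gives blockSizeInSectors = 65536/512 = 128, so about
// 38,700 used data blocks -> 38,700 * 128 * 512 bytes ~= 2.536 GB of
// "Data Space Used", and SectorSize below reports 128 * 512 = 65536 bytes.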
status.SectorSize = blockSizeInSectors * 512 if check, _ := devices.isRealFile(devices.dataLoopFile); check { actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile) if err == nil && actualSpace < status.Data.Available { status.Data.Available = actualSpace } } if check, _ := devices.isRealFile(devices.metadataLoopFile); check { actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile) if err == nil && actualSpace < status.Metadata.Available { status.Metadata.Available = actualSpace } } } return status } func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error) { devicemapper.SetDevDir("/dev") devices := &DeviceSet{ root: root, MetaData: MetaData{Devices: make(map[string]*DevInfo)}, dataLoopbackSize: DefaultDataLoopbackSize, metaDataLoopbackSize: DefaultMetaDataLoopbackSize, baseFsSize: DefaultBaseFsSize, filesystem: "ext4", doBlkDiscard: true, thinpBlockSize: DefaultThinpBlockSize, deviceIdMap: make([]byte, DeviceIdMapSz), } foundBlkDiscard := false for _, option := range options { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err } key = strings.ToLower(key) switch key { case "dm.basesize": size, err := units.RAMInBytes(val) if err != nil { return nil, err } devices.baseFsSize = uint64(size) case "dm.loopdatasize": size, err := units.RAMInBytes(val) if err != nil { return nil, err } devices.dataLoopbackSize = size case "dm.loopmetadatasize": size, err := units.RAMInBytes(val) if err != nil { return nil, err } devices.metaDataLoopbackSize = size case "dm.fs": if val != "ext4" && val != "xfs" { return nil, fmt.Errorf("Unsupported filesystem %s\n", val) } devices.filesystem = val case "dm.mkfsarg": devices.mkfsArgs = append(devices.mkfsArgs, val) case "dm.mountopt": devices.mountOptions = joinMountOptions(devices.mountOptions, val) case "dm.metadatadev": devices.metadataDevice = val case "dm.datadev": devices.dataDevice = val case "dm.thinpooldev": devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/") case "dm.blkdiscard": foundBlkDiscard = true devices.doBlkDiscard, err = strconv.ParseBool(val) if err != nil { return nil, err } case "dm.blocksize": size, err := units.RAMInBytes(val) if err != nil { return nil, err } // convert to 512b sectors devices.thinpBlockSize = uint32(size) >> 9 default: return nil, fmt.Errorf("Unknown option %s\n", key) } } // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { devices.doBlkDiscard = false } if err := devices.initDevmapper(doInit); err != nil { return nil, err } return devices, nil } docker-1.6.2/daemon/graphdriver/aufs/0000755000175000017500000000000012524223634017052 5ustar tianontianondocker-1.6.2/daemon/graphdriver/aufs/mount_unsupported.go0000644000175000017500000000034312524223634023213 0ustar tianontianon// +build !linux package aufs import "errors" const MsRemount = 0 func mount(source string, target string, fstype string, flags uintptr, data string) (err error) { return errors.New("mount is not implemented on darwin") } docker-1.6.2/daemon/graphdriver/aufs/mount.go0000644000175000017500000000052112524223634020541 0ustar tianontianonpackage aufs import ( "os/exec" "syscall" log "github.com/Sirupsen/logrus" ) func Unmount(target string) error { if err := exec.Command("auplink", target, "flush").Run(); err != nil { log.Errorf("Couldn't run auplink before unmount: %s", err) } if err := syscall.Unmount(target, 0); 
err != nil { return err } return nil } docker-1.6.2/daemon/graphdriver/aufs/dirs.go0000644000175000017500000000153012524223634020341 0ustar tianontianonpackage aufs import ( "bufio" "io/ioutil" "os" "path" ) // Return all the directories func loadIds(root string) ([]string, error) { dirs, err := ioutil.ReadDir(root) if err != nil { return nil, err } out := []string{} for _, d := range dirs { if !d.IsDir() { out = append(out, d.Name()) } } return out, nil } // Read the layers file for the current id and return all the // layers represented by new lines in the file // // If there are no lines in the file then the id has no parent // and an empty slice is returned. func getParentIds(root, id string) ([]string, error) { f, err := os.Open(path.Join(root, "layers", id)) if err != nil { return nil, err } defer f.Close() out := []string{} s := bufio.NewScanner(f) for s.Scan() { if t := s.Text(); t != "" { out = append(out, s.Text()) } } return out, s.Err() } docker-1.6.2/daemon/graphdriver/aufs/aufs.go0000644000175000017500000002717312524223634020351 0ustar tianontianon/* aufs driver directory structure . ├── layers // Metadata of layers │   ├── 1 │   ├── 2 │   └── 3 ├── diff // Content of the layer │   ├── 1 // Contains layers that need to be mounted for the id │   ├── 2 │   └── 3 └── mnt // Mount points for the rw layers to be mounted ├── 1 ├── 2 └── 3 */ package aufs import ( "bufio" "fmt" "io/ioutil" "os" "os/exec" "path" "strings" "sync" "syscall" log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/common" "github.com/docker/docker/pkg/directory" mountpk "github.com/docker/docker/pkg/mount" "github.com/docker/libcontainer/label" ) var ( ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") incompatibleFsMagic = []graphdriver.FsMagic{ graphdriver.FsMagicBtrfs, graphdriver.FsMagicAufs, } backingFs = "" enableDirpermLock sync.Once enableDirperm bool ) func init() { graphdriver.Register("aufs", Init) } type Driver struct { root string sync.Mutex // Protects concurrent modification to active active map[string]int } // New returns a new AUFS driver. // An error is returned if AUFS is not supported. 
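// As context for the layout diagram at the top of this file: the "layers"
// metadata that Create (below) writes, and getParentIds (dirs.go) reads
// back, is one ancestor id per line, nearest parent first. For a
// hypothetical layer "3" created on top of "2", which sits on top of "1":
//
//	$ cat /var/lib/docker/aufs/layers/3
//	2
//	1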
func Init(root string, options []string) (graphdriver.Driver, error) { // Try to load the aufs kernel module if err := supportsAufs(); err != nil { return nil, graphdriver.ErrNotSupported } fsMagic, err := graphdriver.GetFSMagic(root) if err != nil { return nil, err } if fsName, ok := graphdriver.FsNames[fsMagic]; ok { backingFs = fsName } for _, magic := range incompatibleFsMagic { if fsMagic == magic { return nil, graphdriver.ErrIncompatibleFS } } paths := []string{ "mnt", "diff", "layers", } a := &Driver{ root: root, active: make(map[string]int), } // Create the root aufs driver dir and return early // if it already exists // If not, populate the dir structure if err := os.MkdirAll(root, 0755); err != nil { if os.IsExist(err) { return a, nil } return nil, err } if err := mountpk.MakePrivate(root); err != nil { return nil, err } for _, p := range paths { if err := os.MkdirAll(path.Join(root, p), 0755); err != nil { return nil, err } } return a, nil } // Return a nil error if the kernel supports aufs // We cannot rely on modprobe alone because inside dind modprobe fails // to run func supportsAufs() error { // We can try to modprobe aufs first before looking at // /proc/filesystems for when aufs is supported exec.Command("modprobe", "aufs").Run() f, err := os.Open("/proc/filesystems") if err != nil { return err } defer f.Close() s := bufio.NewScanner(f) for s.Scan() { if strings.Contains(s.Text(), "aufs") { return nil } } return ErrAufsNotSupported } func (a *Driver) rootPath() string { return a.root } func (*Driver) String() string { return "aufs" } func (a *Driver) Status() [][2]string { ids, _ := loadIds(path.Join(a.rootPath(), "layers")) return [][2]string{ {"Root Dir", a.rootPath()}, {"Backing Filesystem", backingFs}, {"Dirs", fmt.Sprintf("%d", len(ids))}, {"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())}, } } // Exists returns true if the given id is registered with // this driver func (a *Driver) Exists(id string) bool { if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { return false } return true } // Three entries are created for each id: // the mnt and diff directories, and the layers metadata file func (a *Driver) Create(id, parent string) error { if err := a.createDirsFor(id); err != nil { return err } // Write the layers metadata f, err := os.Create(path.Join(a.rootPath(), "layers", id)) if err != nil { return err } defer f.Close() if parent != "" { ids, err := getParentIds(a.rootPath(), parent) if err != nil { return err } if _, err := fmt.Fprintln(f, parent); err != nil { return err } for _, i := range ids { if _, err := fmt.Fprintln(f, i); err != nil { return err } } } return nil } func (a *Driver) createDirsFor(id string) error { paths := []string{ "mnt", "diff", } for _, p := range paths { if err := os.MkdirAll(path.Join(a.rootPath(), p, id), 0755); err != nil { return err } } return nil } // Unmount and remove the dir information func (a *Driver) Remove(id string) error { // Protect the a.active from concurrent access a.Lock() defer a.Unlock() if a.active[id] != 0 { log.Errorf("Removing active id %s", id) } // Make sure the dir is unmounted first if err := a.unmount(id); err != nil { return err } tmpDirs := []string{ "mnt", "diff", } // Atomically remove each directory in turn by first moving it out of the // way (so that docker doesn't find it anymore) before doing removal of // the whole tree. 
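// A note on the rename dance below: mnt/<id> and diff/<id> are first renamed
// to "<id>-removing", so concurrent lookups of <id> stop seeing them
// immediately (the rename is atomic while RemoveAll is not); the deferred
// RemoveAll calls then delete the renamed trees.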
for _, p := range tmpDirs { realPath := path.Join(a.rootPath(), p, id) tmpPath := path.Join(a.rootPath(), p, fmt.Sprintf("%s-removing", id)) if err := os.Rename(realPath, tmpPath); err != nil && !os.IsNotExist(err) { return err } defer os.RemoveAll(tmpPath) } // Remove the layers file for the id if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { return err } return nil } // Return the rootfs path for the id // This will mount the dir at its given path func (a *Driver) Get(id, mountLabel string) (string, error) { ids, err := getParentIds(a.rootPath(), id) if err != nil { if !os.IsNotExist(err) { return "", err } ids = []string{} } // Protect the a.active from concurrent access a.Lock() defer a.Unlock() count := a.active[id] // If a dir does not have a parent (no layers), do not try to mount; // just return the diff path to the data out := path.Join(a.rootPath(), "diff", id) if len(ids) > 0 { out = path.Join(a.rootPath(), "mnt", id) if count == 0 { if err := a.mount(id, mountLabel); err != nil { return "", err } } } a.active[id] = count + 1 return out, nil } func (a *Driver) Put(id string) error { // Protect the a.active from concurrent access a.Lock() defer a.Unlock() if count := a.active[id]; count > 1 { a.active[id] = count - 1 } else { ids, _ := getParentIds(a.rootPath(), id) // We only mounted if there are any parents if len(ids) > 0 { a.unmount(id) } delete(a.active, id) } return nil } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". func (a *Driver) Diff(id, parent string) (archive.Archive, error) { // AUFS doesn't need the parent layer to produce a diff. return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ Compression: archive.Uncompressed, ExcludePatterns: []string{".wh..wh.*"}, }) } func (a *Driver) applyDiff(id string, diff archive.ArchiveReader) error { return chrootarchive.Untar(diff, path.Join(a.rootPath(), "diff", id), nil) } // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (a *Driver) DiffSize(id, parent string) (size int64, err error) { // AUFS doesn't need the parent layer to calculate the diff size. return directory.Size(path.Join(a.rootPath(), "diff", id)) } // ApplyDiff extracts the changeset from the given diff into the // layer with the specified id and parent, returning the size of the // new layer in bytes. func (a *Driver) ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error) { // AUFS doesn't need the parent id to apply the diff. if err = a.applyDiff(id, diff); err != nil { return } return a.DiffSize(id, parent) } // Changes produces a list of changes between the specified layer // and its parent layer. If parent is "", then all changes will be ADD changes. func (a *Driver) Changes(id, parent string) ([]archive.Change, error) { // AUFS doesn't have snapshots, so we need to get changes from all parent // layers. 
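// getParentLayerPaths (below) resolves those parents to their diff
// directories, e.g. [diff/<p1> diff/<p2> ...], and archive.Changes then
// diffs this id's own diff directory against that stack.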
layers, err := a.getParentLayerPaths(id) if err != nil { return nil, err } return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) } func (a *Driver) getParentLayerPaths(id string) ([]string, error) { parentIds, err := getParentIds(a.rootPath(), id) if err != nil { return nil, err } layers := make([]string, len(parentIds)) // Get the diff paths for all the parent ids for i, p := range parentIds { layers[i] = path.Join(a.rootPath(), "diff", p) } return layers, nil } func (a *Driver) mount(id, mountLabel string) error { // If the id is mounted or we get an error return if mounted, err := a.mounted(id); err != nil || mounted { return err } var ( target = path.Join(a.rootPath(), "mnt", id) rw = path.Join(a.rootPath(), "diff", id) ) layers, err := a.getParentLayerPaths(id) if err != nil { return err } if err := a.aufsMount(layers, rw, target, mountLabel); err != nil { return fmt.Errorf("error creating aufs mount to %s: %v", target, err) } return nil } func (a *Driver) unmount(id string) error { if mounted, err := a.mounted(id); err != nil || !mounted { return err } target := path.Join(a.rootPath(), "mnt", id) return Unmount(target) } func (a *Driver) mounted(id string) (bool, error) { target := path.Join(a.rootPath(), "mnt", id) return mountpk.Mounted(target) } // During cleanup aufs needs to unmount all mountpoints func (a *Driver) Cleanup() error { ids, err := loadIds(path.Join(a.rootPath(), "layers")) if err != nil { return err } for _, id := range ids { if err := a.unmount(id); err != nil { log.Errorf("Unmounting %s: %s", common.TruncateID(id), err) } } return mountpk.Unmount(a.root) } func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) { defer func() { if err != nil { Unmount(target) } }() // Mount options are clipped to the page size (4096 bytes). If there are more // layers than fit, the rest are remounted individually using append. offset := 54 if useDirperm() { offset += len("dirperm1") } b := make([]byte, syscall.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) firstMount := true i := 0 for { for ; i < len(ro); i++ { layer := fmt.Sprintf(":%s=ro+wh", ro[i]) if firstMount { if bp+len(layer) > len(b) { break } bp += copy(b[bp:], layer) } else { data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel) if err = mount("none", target, "aufs", MsRemount, data); err != nil { return } } } if firstMount { opts := "dio,xino=/dev/shm/aufs.xino" if useDirperm() { opts += ",dirperm1" } data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel) if err = mount("none", target, "aufs", 0, data); err != nil { return } firstMount = false } if i == len(ro) { break } } return } // useDirperm checks whether the dirperm1 mount option can be used with the current // version of aufs. 
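// The probe below mounts a throwaway single-branch union with dirperm1 in
// its options; if the running aufs rejects the option the mount fails and
// dirperm1 stays disabled. The generated option string has the shape
// (assuming a temp dir /tmp/docker-aufs-baseXXX):
//
//	br:/tmp/docker-aufs-baseXXX,dirperm1,xino=/dev/shm/aufs.xino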
func useDirperm() bool { enableDirpermLock.Do(func() { base, err := ioutil.TempDir("", "docker-aufs-base") if err != nil { log.Errorf("error checking dirperm1: %v", err) return } defer os.RemoveAll(base) union, err := ioutil.TempDir("", "docker-aufs-union") if err != nil { log.Errorf("error checking dirperm1: %v", err) return } defer os.RemoveAll(union) opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base) if err := mount("none", union, "aufs", 0, opts); err != nil { return } enableDirperm = true if err := Unmount(union); err != nil { log.Errorf("error checking dirperm1: failed to unmount %v", err) } }) return enableDirperm } docker-1.6.2/daemon/graphdriver/aufs/migrate.go0000644000175000017500000001107312524223634021033 0ustar tianontianonpackage aufs import ( "encoding/json" "fmt" "io/ioutil" "os" "path" ) type metadata struct { ID string `json:"id"` ParentID string `json:"parent,omitempty"` Image string `json:"Image,omitempty"` parent *metadata } func pathExists(pth string) bool { if _, err := os.Stat(pth); err != nil { return false } return true } // Migrate existing images and containers from docker < 0.7.x // // Pre-0.7, docker stored the metadata and filesystem // content of a layer in the same directory. For the migration to work we need to move the image layer // data from /var/lib/docker/graph/<id>/layer to the diff directory of the registered id. // // Next we need to migrate the container's rw layer to the diff directory of the driver. After the // contents are migrated we need to register the image and container ids with the // driver. // // For the migration we try to move the folder containing the layer files; if that // fails because the data is currently mounted we fall back to creating a // symlink. func (a *Driver) Migrate(pth string, setupInit func(p string) error) error { if pathExists(path.Join(pth, "graph")) { if err := a.migrateRepositories(pth); err != nil { return err } if err := a.migrateImages(path.Join(pth, "graph")); err != nil { return err } return a.migrateContainers(path.Join(pth, "containers"), setupInit) } return nil } func (a *Driver) migrateRepositories(pth string) error { name := path.Join(pth, "repositories") if err := os.Rename(name, name+"-aufs"); err != nil && !os.IsNotExist(err) { return err } return nil } func (a *Driver) migrateContainers(pth string, setupInit func(p string) error) error { fis, err := ioutil.ReadDir(pth) if err != nil { return err } for _, fi := range fis { if id := fi.Name(); fi.IsDir() && pathExists(path.Join(pth, id, "rw")) { if err := tryRelocate(path.Join(pth, id, "rw"), path.Join(a.rootPath(), "diff", id)); err != nil { return err } if !a.Exists(id) { metadata, err := loadMetadata(path.Join(pth, id, "config.json")) if err != nil { return err } initID := fmt.Sprintf("%s-init", id) if err := a.Create(initID, metadata.Image); err != nil { return err } initPath, err := a.Get(initID, "") if err != nil { return err } // setup init layer if err := setupInit(initPath); err != nil { return err } if err := a.Create(id, initID); err != nil { return err } } } } return nil } func (a *Driver) migrateImages(pth string) error { fis, err := ioutil.ReadDir(pth) if err != nil { return err } var ( m = make(map[string]*metadata) current *metadata exists bool ) for _, fi := range fis { if id := fi.Name(); fi.IsDir() && pathExists(path.Join(pth, id, "layer")) { if current, exists = m[id]; !exists { current, err = loadMetadata(path.Join(pth, id, "json")) if err != nil { return err } m[id] = current } } } for _, v := range m { v.parent = m[v.ParentID] 
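// (a missing ParentID is just a nil map lookup, leaving parent == nil for base images)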
} migrated := make(map[string]bool) for _, v := range m { if err := a.migrateImage(v, pth, migrated); err != nil { return err } } return nil } func (a *Driver) migrateImage(m *metadata, pth string, migrated map[string]bool) error { if !migrated[m.ID] { if m.parent != nil { a.migrateImage(m.parent, pth, migrated) } if err := tryRelocate(path.Join(pth, m.ID, "layer"), path.Join(a.rootPath(), "diff", m.ID)); err != nil { return err } if !a.Exists(m.ID) { if err := a.Create(m.ID, m.ParentID); err != nil { return err } } migrated[m.ID] = true } return nil } // tryRelocate will try to rename the old path to the new path and if // the operation fails, it will fall back to a symlink func tryRelocate(oldPath, newPath string) error { s, err := os.Lstat(newPath) if err != nil && !os.IsNotExist(err) { return err } // If the destination is a symlink then we already tried to relocate once before // and it failed, so we delete it and try the rename again if s != nil && s.Mode()&os.ModeSymlink == os.ModeSymlink { if err := os.RemoveAll(newPath); err != nil { return err } } if err := os.Rename(oldPath, newPath); err != nil { if sErr := os.Symlink(oldPath, newPath); sErr != nil { return fmt.Errorf("Unable to relocate %s to %s: Rename err %s Symlink err %s", oldPath, newPath, err, sErr) } } return nil } func loadMetadata(pth string) (*metadata, error) { f, err := os.Open(pth) if err != nil { return nil, err } defer f.Close() var ( out = &metadata{} dec = json.NewDecoder(f) ) if err := dec.Decode(out); err != nil { return nil, err } return out, nil } docker-1.6.2/daemon/graphdriver/aufs/mount_linux.go0000644000175000017500000000033712524223634021765 0ustar tianontianonpackage aufs import "syscall" const MsRemount = syscall.MS_REMOUNT func mount(source string, target string, fstype string, flags uintptr, data string) error { return syscall.Mount(source, target, fstype, flags, data) } docker-1.6.2/daemon/graphdriver/aufs/aufs_test.go0000644000175000017500000003230412524223634021400 0ustar tianontianonpackage aufs import ( "crypto/sha256" "encoding/hex" "fmt" "io/ioutil" "os" "path" "testing" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/reexec" ) var ( tmpOuter = path.Join(os.TempDir(), "aufs-tests") tmp = path.Join(tmpOuter, "aufs") ) func init() { reexec.Init() } func testInit(dir string, t *testing.T) graphdriver.Driver { d, err := Init(dir, nil) if err != nil { if err == graphdriver.ErrNotSupported { t.Skip(err) } else { t.Fatal(err) } } return d } func newDriver(t *testing.T) *Driver { if err := os.MkdirAll(tmp, 0755); err != nil { t.Fatal(err) } d := testInit(tmp, t) return d.(*Driver) } func TestNewDriver(t *testing.T) { if err := os.MkdirAll(tmp, 0755); err != nil { t.Fatal(err) } d := testInit(tmp, t) defer os.RemoveAll(tmp) if d == nil { t.Fatalf("Driver should not be nil") } } func TestAufsString(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if d.String() != "aufs" { t.Fatalf("Expected aufs got %s", d.String()) } } func TestCreateDirStructure(t *testing.T) { newDriver(t) defer os.RemoveAll(tmp) paths := []string{ "mnt", "layers", "diff", } for _, p := range paths { if _, err := os.Stat(path.Join(tmp, p)); err != nil { t.Fatal(err) } } } // We should be able to create two drivers with the same dir structure func TestNewDriverFromExistingDir(t *testing.T) { if err := os.MkdirAll(tmp, 0755); err != nil { t.Fatal(err) } testInit(tmp, t) testInit(tmp, t) os.RemoveAll(tmp) } func TestCreateNewDir(t *testing.T) { d := newDriver(t) defer 
os.RemoveAll(tmp) if err := d.Create("1", ""); err != nil { t.Fatal(err) } } func TestCreateNewDirStructure(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", ""); err != nil { t.Fatal(err) } paths := []string{ "mnt", "diff", "layers", } for _, p := range paths { if _, err := os.Stat(path.Join(tmp, p, "1")); err != nil { t.Fatal(err) } } } func TestRemoveImage(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", ""); err != nil { t.Fatal(err) } if err := d.Remove("1"); err != nil { t.Fatal(err) } paths := []string{ "mnt", "diff", "layers", } for _, p := range paths { if _, err := os.Stat(path.Join(tmp, p, "1")); err == nil { t.Fatalf("Error should not be nil because dirs with id 1 should be deleted: %s", p) } } } func TestGetWithoutParent(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", ""); err != nil { t.Fatal(err) } diffPath, err := d.Get("1", "") if err != nil { t.Fatal(err) } expected := path.Join(tmp, "diff", "1") if diffPath != expected { t.Fatalf("Expected path %s got %s", expected, diffPath) } } func TestCleanupWithNoDirs(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Cleanup(); err != nil { t.Fatal(err) } } func TestCleanupWithDir(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", ""); err != nil { t.Fatal(err) } if err := d.Cleanup(); err != nil { t.Fatal(err) } } func TestMountedFalseResponse(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", ""); err != nil { t.Fatal(err) } response, err := d.mounted("1") if err != nil { t.Fatal(err) } if response { t.Fatalf("Response for whether dir id 1 is mounted should be false") } } func TestMountedTrueResponse(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) defer d.Cleanup() if err := d.Create("1", ""); err != nil { t.Fatal(err) } if err := d.Create("2", "1"); err != nil { t.Fatal(err) } _, err := d.Get("2", "") if err != nil { t.Fatal(err) } response, err := d.mounted("2") if err != nil { t.Fatal(err) } if !response { t.Fatalf("Response for whether dir id 2 is mounted should be true") } } func TestMountWithParent(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", ""); err != nil { t.Fatal(err) } if err := d.Create("2", "1"); err != nil { t.Fatal(err) } defer func() { if err := d.Cleanup(); err != nil { t.Fatal(err) } }() mntPath, err := d.Get("2", "") if err != nil { t.Fatal(err) } if mntPath == "" { t.Fatal("mntPath should not be empty string") } expected := path.Join(tmp, "mnt", "2") if mntPath != expected { t.Fatalf("Expected %s got %s", expected, mntPath) } } func TestRemoveMountedDir(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", ""); err != nil { t.Fatal(err) } if err := d.Create("2", "1"); err != nil { t.Fatal(err) } defer func() { if err := d.Cleanup(); err != nil { t.Fatal(err) } }() mntPath, err := d.Get("2", "") if err != nil { t.Fatal(err) } if mntPath == "" { t.Fatal("mntPath should not be empty string") } mounted, err := d.mounted("2") if err != nil { t.Fatal(err) } if !mounted { t.Fatalf("Dir id 2 should be mounted") } if err := d.Remove("2"); err != nil { t.Fatal(err) } } func TestCreateWithInvalidParent(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", "docker"); err == nil { t.Fatalf("Error should not be nil when the parent does not exist") } } func TestGetDiff(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", ""); 
err != nil { t.Fatal(err) } diffPath, err := d.Get("1", "") if err != nil { t.Fatal(err) } // Add a file to the diff path with a fixed size size := int64(1024) f, err := os.Create(path.Join(diffPath, "test_file")) if err != nil { t.Fatal(err) } if err := f.Truncate(size); err != nil { t.Fatal(err) } f.Close() a, err := d.Diff("1", "") if err != nil { t.Fatal(err) } if a == nil { t.Fatalf("Archive should not be nil") } } func TestChanges(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", ""); err != nil { t.Fatal(err) } if err := d.Create("2", "1"); err != nil { t.Fatal(err) } defer func() { if err := d.Cleanup(); err != nil { t.Fatal(err) } }() mntPoint, err := d.Get("2", "") if err != nil { t.Fatal(err) } // Create a file to save in the mountpoint f, err := os.Create(path.Join(mntPoint, "test.txt")) if err != nil { t.Fatal(err) } if _, err := f.WriteString("testline"); err != nil { t.Fatal(err) } if err := f.Close(); err != nil { t.Fatal(err) } changes, err := d.Changes("2", "") if err != nil { t.Fatal(err) } if len(changes) != 1 { t.Fatalf("Dir 2 should have one change from parent, got %d", len(changes)) } change := changes[0] expectedPath := "/test.txt" if change.Path != expectedPath { t.Fatalf("Expected path %s got %s", expectedPath, change.Path) } if change.Kind != archive.ChangeAdd { t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) } if err := d.Create("3", "2"); err != nil { t.Fatal(err) } mntPoint, err = d.Get("3", "") if err != nil { t.Fatal(err) } // Create a file to save in the mountpoint f, err = os.Create(path.Join(mntPoint, "test2.txt")) if err != nil { t.Fatal(err) } if _, err := f.WriteString("testline"); err != nil { t.Fatal(err) } if err := f.Close(); err != nil { t.Fatal(err) } changes, err = d.Changes("3", "") if err != nil { t.Fatal(err) } if len(changes) != 1 { t.Fatalf("Dir 3 should have one change from parent, got %d", len(changes)) } change = changes[0] expectedPath = "/test2.txt" if change.Path != expectedPath { t.Fatalf("Expected path %s got %s", expectedPath, change.Path) } if change.Kind != archive.ChangeAdd { t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) } } func TestDiffSize(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) if err := d.Create("1", ""); err != nil { t.Fatal(err) } diffPath, err := d.Get("1", "") if err != nil { t.Fatal(err) } // Add a file to the diff path with a fixed size size := int64(1024) f, err := os.Create(path.Join(diffPath, "test_file")) if err != nil { t.Fatal(err) } if err := f.Truncate(size); err != nil { t.Fatal(err) } s, err := f.Stat() if err != nil { t.Fatal(err) } size = s.Size() if err := f.Close(); err != nil { t.Fatal(err) } diffSize, err := d.DiffSize("1", "") if err != nil { t.Fatal(err) } if diffSize != size { t.Fatalf("Expected size to be %d got %d", size, diffSize) } } func TestChildDiffSize(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) defer d.Cleanup() if err := d.Create("1", ""); err != nil { t.Fatal(err) } diffPath, err := d.Get("1", "") if err != nil { t.Fatal(err) } // Add a file to the diff path with a fixed size size := int64(1024) f, err := os.Create(path.Join(diffPath, "test_file")) if err != nil { t.Fatal(err) } if err := f.Truncate(size); err != nil { t.Fatal(err) } s, err := f.Stat() if err != nil { t.Fatal(err) } size = s.Size() if err := f.Close(); err != nil { t.Fatal(err) } diffSize, err := d.DiffSize("1", "") if err != nil { t.Fatal(err) } if diffSize != size { t.Fatalf("Expected size to be %d got %d", size, diffSize) } 
if err := d.Create("2", "1"); err != nil { t.Fatal(err) } diffSize, err = d.DiffSize("2", "") if err != nil { t.Fatal(err) } // The diff size for the child should be zero if diffSize != 0 { t.Fatalf("Expected size to be %d got %d", 0, diffSize) } } func TestExists(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) defer d.Cleanup() if err := d.Create("1", ""); err != nil { t.Fatal(err) } if d.Exists("none") { t.Fatal("id name should not exist in the driver") } if !d.Exists("1") { t.Fatal("id 1 should exist in the driver") } } func TestStatus(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) defer d.Cleanup() if err := d.Create("1", ""); err != nil { t.Fatal(err) } status := d.Status() if status == nil || len(status) == 0 { t.Fatal("Status should not be nil or empty") } rootDir := status[0] dirs := status[2] if rootDir[0] != "Root Dir" { t.Fatalf("Expected Root Dir got %s", rootDir[0]) } if rootDir[1] != d.rootPath() { t.Fatalf("Expected %s got %s", d.rootPath(), rootDir[1]) } if dirs[0] != "Dirs" { t.Fatalf("Expected Dirs got %s", dirs[0]) } if dirs[1] != "1" { t.Fatalf("Expected 1 got %s", dirs[1]) } } func TestApplyDiff(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) defer d.Cleanup() if err := d.Create("1", ""); err != nil { t.Fatal(err) } diffPath, err := d.Get("1", "") if err != nil { t.Fatal(err) } // Add a file to the diff path with a fixed size size := int64(1024) f, err := os.Create(path.Join(diffPath, "test_file")) if err != nil { t.Fatal(err) } if err := f.Truncate(size); err != nil { t.Fatal(err) } f.Close() diff, err := d.Diff("1", "") if err != nil { t.Fatal(err) } if err := d.Create("2", ""); err != nil { t.Fatal(err) } if err := d.Create("3", "2"); err != nil { t.Fatal(err) } if err := d.applyDiff("3", diff); err != nil { t.Fatal(err) } // Ensure that the file is in the mount point for id 3 mountPoint, err := d.Get("3", "") if err != nil { t.Fatal(err) } if _, err := os.Stat(path.Join(mountPoint, "test_file")); err != nil { t.Fatal(err) } } func hash(c string) string { h := sha256.New() fmt.Fprint(h, c) return hex.EncodeToString(h.Sum(nil)) } func testMountMoreThan42Layers(t *testing.T, mountPath string) { if err := os.MkdirAll(mountPath, 0755); err != nil { t.Fatal(err) } defer os.RemoveAll(mountPath) d := testInit(mountPath, t).(*Driver) defer d.Cleanup() var last string var expected int for i := 1; i < 127; i++ { expected++ var ( parent = fmt.Sprintf("%d", i-1) current = fmt.Sprintf("%d", i) ) if parent == "0" { parent = "" } else { parent = hash(parent) } current = hash(current) if err := d.Create(current, parent); err != nil { t.Logf("Current layer %d", i) t.Error(err) } point, err := d.Get(current, "") if err != nil { t.Logf("Current layer %d", i) t.Error(err) } f, err := os.Create(path.Join(point, current)) if err != nil { t.Logf("Current layer %d", i) t.Error(err) } f.Close() if i%10 == 0 { if err := os.Remove(path.Join(point, parent)); err != nil { t.Logf("Current layer %d", i) t.Error(err) } expected-- } last = current } // Perform the actual mount for the top most image point, err := d.Get(last, "") if err != nil { t.Error(err) } files, err := ioutil.ReadDir(point) if err != nil { t.Error(err) } if len(files) != expected { t.Errorf("Expected %d got %d", expected, len(files)) } } func TestMountMoreThan42Layers(t *testing.T) { os.RemoveAll(tmpOuter) testMountMoreThan42Layers(t, tmp) } func TestMountMoreThan42LayersMatchingPathLength(t *testing.T) { defer os.RemoveAll(tmpOuter) zeroes := "0" for { // This finds a mount path so that when 
combined into aufs mount options, the // 4096 byte boundary falls either between the paths or within the permission // section. For '/tmp' it will use '/tmp/aufs-tests/00000000/aufs' mountPath := path.Join(tmpOuter, zeroes, "aufs") pathLength := 77 + len(mountPath) if mod := 4095 % pathLength; mod == 0 || mod > pathLength-2 { t.Logf("Using path: %s", mountPath) testMountMoreThan42Layers(t, mountPath) return } zeroes += "0" } } docker-1.6.2/daemon/graphdriver/driver_unsupported.go0000644000175000017500000000017512524223634022411 0ustar tianontianon// +build !linux package graphdriver func GetFSMagic(rootpath string) (FsMagic, error) { return FsMagicUnsupported, nil } docker-1.6.2/daemon/rename.go0000644000175000017500000000172212524223634015377 0ustar tianontianonpackage daemon import "github.com/docker/docker/engine" func (daemon *Daemon) ContainerRename(job *engine.Job) engine.Status { if len(job.Args) != 2 { return job.Errorf("Usage: %s OLD_NAME NEW_NAME", job.Name) } oldName := job.Args[0] newName := job.Args[1] container, err := daemon.Get(oldName) if err != nil { return job.Error(err) } oldName = container.Name container.Lock() defer container.Unlock() if newName, err = daemon.reserveName(container.ID, newName); err != nil { return job.Errorf("Error when allocating new name: %s", err) } container.Name = newName undo := func() { container.Name = oldName daemon.reserveName(container.ID, oldName) daemon.containerGraph.Delete(newName) } if err := daemon.containerGraph.Delete(oldName); err != nil { undo() return job.Errorf("Failed to delete container %q: %v", oldName, err) } if err := container.toDisk(); err != nil { undo() return job.Error(err) } return engine.StatusOK } docker-1.6.2/daemon/start.go0000644000175000017500000000404712524223634015270 0ustar tianontianonpackage daemon import ( "fmt" "os" "strings" "github.com/docker/docker/engine" "github.com/docker/docker/runconfig" ) func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status { if len(job.Args) < 1 { return job.Errorf("Usage: %s container_id", job.Name) } var ( name = job.Args[0] ) container, err := daemon.Get(name) if err != nil { return job.Error(err) } if container.IsPaused() { return job.Errorf("Cannot start a paused container, try unpause instead.") } if container.IsRunning() { return job.Errorf("Container already started") } // If no environment was set, then no hostconfig was passed. // This is kept for backward compatibility - hostconfig should be passed when // creating a container, not during start. if len(job.Environ()) > 0 { hostConfig := runconfig.ContainerHostConfigFromJob(job) if err := daemon.setHostConfig(container, hostConfig); err != nil { return job.Error(err) } } if err := container.Start(); err != nil { container.LogEvent("die") return job.Errorf("Cannot start container %s: %s", name, err) } return engine.StatusOK } func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error { container.Lock() defer container.Unlock() if err := parseSecurityOpt(container, hostConfig); err != nil { return err } // FIXME: this should be handled by the volume subsystem // Validate the HostConfig binds. 
Make sure that: // the source exists for _, bind := range hostConfig.Binds { splitBind := strings.Split(bind, ":") source := splitBind[0] // ensure the source exists on the host _, err := os.Stat(source) if err != nil && os.IsNotExist(err) { err = os.MkdirAll(source, 0755) if err != nil { return fmt.Errorf("Could not create local directory '%s' for bind mount: %v!", source, err) } } } // Register any links from the host config before starting the container if err := daemon.RegisterLinks(container, hostConfig); err != nil { return err } container.hostConfig = hostConfig container.toDisk() return nil } docker-1.6.2/daemon/history.go0000644000175000017500000000120512524223634015625 0ustar tianontianonpackage daemon import ( "sort" ) // History is a convenience type for storing a list of containers, // ordered by creation date. type History []*Container func (history *History) Len() int { return len(*history) } func (history *History) Less(i, j int) bool { containers := *history return containers[j].Created.Before(containers[i].Created) } func (history *History) Swap(i, j int) { containers := *history tmp := containers[i] containers[i] = containers[j] containers[j] = tmp } func (history *History) Add(container *Container) { *history = append(*history, container) } func (history *History) Sort() { sort.Sort(history) } docker-1.6.2/daemon/stop.go0000644000175000017500000000120212524223634015106 0ustar tianontianonpackage daemon import ( "github.com/docker/docker/engine" ) func (daemon *Daemon) ContainerStop(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("Usage: %s CONTAINER\n", job.Name) } var ( name = job.Args[0] t = 10 ) if job.EnvExists("t") { t = job.GetenvInt("t") } container, err := daemon.Get(name) if err != nil { return job.Error(err) } if !container.IsRunning() { return job.Errorf("Container already stopped") } if err := container.Stop(int(t)); err != nil { return job.Errorf("Cannot stop container %s: %s\n", name, err) } container.LogEvent("stop") return engine.StatusOK } docker-1.6.2/daemon/daemon_test.go0000644000175000017500000000530212524223634016430 0ustar tianontianonpackage daemon import ( "github.com/docker/docker/pkg/graphdb" "github.com/docker/docker/pkg/truncindex" "os" "path" "testing" ) // // https://github.com/docker/docker/issues/8069 // func TestGet(t *testing.T) { c1 := &Container{ ID: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", Name: "tender_bardeen", } c2 := &Container{ ID: "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de", Name: "drunk_hawking", } c3 := &Container{ ID: "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57", Name: "3cdbd1aa", } c4 := &Container{ ID: "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5", Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", } c5 := &Container{ ID: "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b", Name: "d22d69a2b896", } store := &contStore{ s: map[string]*Container{ c1.ID: c1, c2.ID: c2, c3.ID: c3, c4.ID: c4, c5.ID: c5, }, } index := truncindex.NewTruncIndex([]string{}) index.Add(c1.ID) index.Add(c2.ID) index.Add(c3.ID) index.Add(c4.ID) index.Add(c5.ID) daemonTestDbPath := path.Join(os.TempDir(), "daemon_test.db") graph, err := graphdb.NewSqliteConn(daemonTestDbPath) if err != nil { t.Fatalf("Failed to create daemon test sqlite database at %s", daemonTestDbPath) } graph.Set(c1.Name, c1.ID) graph.Set(c2.Name, c2.ID) graph.Set(c3.Name, c3.ID) graph.Set(c4.Name, c4.ID) graph.Set(c5.Name, 
c5.ID) daemon := &Daemon{ containers: store, idIndex: index, containerGraph: graph, } if container, _ := daemon.Get("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 { t.Fatal("Should explicitly match full container IDs") } if container, _ := daemon.Get("75fb0b8009"); container != c4 { t.Fatal("Should match a partial ID") } if container, _ := daemon.Get("drunk_hawking"); container != c2 { t.Fatal("Should match a full name") } // c3.Name is a partial match for both c3.ID and c2.ID if c, _ := daemon.Get("3cdbd1aa"); c != c3 { t.Fatal("Should match a full name even though it collides with another container's ID") } if container, _ := daemon.Get("d22d69a2b896"); container != c5 { t.Fatal("Should match a container where the provided prefix is an exact match to its name, and is also a prefix for its ID") } if _, err := daemon.Get("3cdbd1"); err == nil { t.Fatal("Should return an error when provided a prefix that partially matches multiple container IDs") } if _, err := daemon.Get("nothing"); err == nil { t.Fatal("Should return an error when provided a prefix that is neither a name nor a partial match to an ID") } os.Remove(daemonTestDbPath) } docker-1.6.2/daemon/daemon_unit_test.go0000644000175000017500000000210312524223634017463 0ustar tianontianonpackage daemon import ( "testing" "github.com/docker/docker/runconfig" ) func TestParseSecurityOpt(t *testing.T) { container := &Container{} config := &runconfig.HostConfig{} // test apparmor config.SecurityOpt = []string{"apparmor:test_profile"} if err := parseSecurityOpt(container, config); err != nil { t.Fatalf("Unexpected parseSecurityOpt error: %v", err) } if container.AppArmorProfile != "test_profile" { t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile) } // test valid label config.SecurityOpt = []string{"label:user:USER"} if err := parseSecurityOpt(container, config); err != nil { t.Fatalf("Unexpected parseSecurityOpt error: %v", err) } // test invalid label config.SecurityOpt = []string{"label"} if err := parseSecurityOpt(container, config); err == nil { t.Fatal("Expected parseSecurityOpt error, got nil") } // test invalid opt config.SecurityOpt = []string{"test"} if err := parseSecurityOpt(container, config); err == nil { t.Fatal("Expected parseSecurityOpt error, got nil") } } docker-1.6.2/daemon/top.go0000644000175000017500000000345712524223634014741 0ustar tianontianonpackage daemon import ( "os/exec" "strconv" "strings" "github.com/docker/docker/engine" ) func (daemon *Daemon) ContainerTop(job *engine.Job) engine.Status { if len(job.Args) != 1 && len(job.Args) != 2 { return job.Errorf("Not enough arguments. 
Usage: %s CONTAINER [PS_ARGS]\n", job.Name) } var ( name = job.Args[0] psArgs = "-ef" ) if len(job.Args) == 2 && job.Args[1] != "" { psArgs = job.Args[1] } container, err := daemon.Get(name) if err != nil { return job.Error(err) } if !container.IsRunning() { return job.Errorf("Container %s is not running", name) } pids, err := daemon.ExecutionDriver().GetPidsForContainer(container.ID) if err != nil { return job.Error(err) } output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output() if err != nil { return job.Errorf("Error running ps: %s", err) } lines := strings.Split(string(output), "\n") header := strings.Fields(lines[0]) out := &engine.Env{} out.SetList("Titles", header) pidIndex := -1 for i, name := range header { if name == "PID" { pidIndex = i } } if pidIndex == -1 { return job.Errorf("Couldn't find PID field in ps output") } processes := [][]string{} for _, line := range lines[1:] { if len(line) == 0 { continue } fields := strings.Fields(line) p, err := strconv.Atoi(fields[pidIndex]) if err != nil { return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) } for _, pid := range pids { if pid == p { // Make sure number of fields equals number of header titles // merging "overhanging" fields process := fields[:len(header)-1] process = append(process, strings.Join(fields[len(header)-1:], " ")) processes = append(processes, process) } } } out.SetJson("Processes", processes) out.WriteTo(job.Stdout) return engine.StatusOK } docker-1.6.2/daemon/config.go0000644000175000017500000001150112524223634015371 0ustar tianontianonpackage daemon import ( "net" "github.com/docker/docker/daemon/networkdriver" "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/ulimit" "github.com/docker/docker/runconfig" ) const ( defaultNetworkMtu = 1500 disableNetworkBridge = "none" ) // Config define the configuration of a docker daemon // These are the configuration settings that you pass // to the docker daemon when you launch it with say: `docker -d -e lxc` // FIXME: separate runtime configuration from http api configuration type Config struct { Pidfile string Root string AutoRestart bool Dns []string DnsSearch []string EnableIPv6 bool EnableIptables bool EnableIpForward bool EnableIpMasq bool DefaultIp net.IP BridgeIface string BridgeIP string FixedCIDR string FixedCIDRv6 string InterContainerCommunication bool GraphDriver string GraphOptions []string ExecDriver string Mtu int SocketGroup string EnableCors bool CorsHeaders string DisableNetwork bool EnableSelinuxSupport bool Context map[string][]string TrustKeyPath string Labels []string Ulimits map[string]*ulimit.Ulimit LogConfig runconfig.LogConfig } // InstallFlags adds command-line options to the top-level flag parser for // the current process. // Subsequent calls to `flag.Parse` will populate config with values parsed // from the command-line. 
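// An illustrative invocation exercising a few of the flags registered below
// (the flag names are real; the values are examples only):
//
//	docker -d --pidfile /var/run/docker.pid --graph /var/lib/docker \
//	    --storage-driver vfs --dns 8.8.8.8 --label env=test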
func (config *Config) InstallFlags() { flag.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file") flag.StringVar(&config.Root, []string{"g", "-graph"}, "/var/lib/docker", "Root of the Docker runtime") flag.BoolVar(&config.AutoRestart, []string{"#r", "#-restart"}, true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run") flag.BoolVar(&config.EnableIptables, []string{"#iptables", "-iptables"}, true, "Enable addition of iptables rules") flag.BoolVar(&config.EnableIpForward, []string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward") flag.BoolVar(&config.EnableIpMasq, []string{"-ip-masq"}, true, "Enable IP masquerading") flag.BoolVar(&config.EnableIPv6, []string{"-ipv6"}, false, "Enable IPv6 networking") flag.StringVar(&config.BridgeIP, []string{"#bip", "-bip"}, "", "Specify network bridge IP") flag.StringVar(&config.BridgeIface, []string{"b", "-bridge"}, "", "Attach containers to a network bridge") flag.StringVar(&config.FixedCIDR, []string{"-fixed-cidr"}, "", "IPv4 subnet for fixed IPs") flag.StringVar(&config.FixedCIDRv6, []string{"-fixed-cidr-v6"}, "", "IPv6 subnet for fixed IPs") flag.BoolVar(&config.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Enable inter-container communication") flag.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", "Storage driver to use") flag.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, "native", "Exec driver to use") flag.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, "Enable selinux support") flag.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, "Set the containers network MTU") flag.StringVar(&config.SocketGroup, []string{"G", "-group"}, "docker", "Group for the unix socket") flag.BoolVar(&config.EnableCors, []string{"#api-enable-cors", "#-api-enable-cors"}, false, "Enable CORS headers in the remote API, this is deprecated by --api-cors-header") flag.StringVar(&config.CorsHeaders, []string{"-api-cors-header"}, "", "Set CORS headers in the remote API") opts.IPVar(&config.DefaultIp, []string{"#ip", "-ip"}, "0.0.0.0", "Default IP when binding container ports") opts.ListVar(&config.GraphOptions, []string{"-storage-opt"}, "Set storage driver options") // FIXME: why the inconsistency between "hosts" and "sockets"? 
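// The list-valued options below (--dns, --dns-search, --label,
// --default-ulimit, like --storage-opt above) may be repeated on the
// command line; each occurrence appends to the list.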
opts.IPListVar(&config.Dns, []string{"#dns", "-dns"}, "DNS server to use") opts.DnsSearchListVar(&config.DnsSearch, []string{"-dns-search"}, "DNS search domains to use") opts.LabelListVar(&config.Labels, []string{"-label"}, "Set key=value labels to the daemon") config.Ulimits = make(map[string]*ulimit.Ulimit) opts.UlimitMapVar(config.Ulimits, []string{"-default-ulimit"}, "Set default ulimits for containers") flag.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", "Containers logging driver") } func getDefaultNetworkMtu() int { if iface, err := networkdriver.GetDefaultRouteIface(); err == nil { return iface.MTU } return defaultNetworkMtu } docker-1.6.2/daemon/changes.go0000644000175000017500000000130512524223634015535 0ustar tianontianonpackage daemon import ( "github.com/docker/docker/engine" ) func (daemon *Daemon) ContainerChanges(job *engine.Job) engine.Status { if n := len(job.Args); n != 1 { return job.Errorf("Usage: %s CONTAINER", job.Name) } name := job.Args[0] container, err := daemon.Get(name) if err != nil { return job.Error(err) } outs := engine.NewTable("", 0) changes, err := container.Changes() if err != nil { return job.Error(err) } for _, change := range changes { out := &engine.Env{} if err := out.Import(change); err != nil { return job.Error(err) } outs.Add(out) } if _, err := outs.WriteListTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK } docker-1.6.2/daemon/image_delete.go0000644000175000017500000001075012524223634016535 0ustar tianontianonpackage daemon import ( "fmt" "strings" "github.com/docker/docker/engine" "github.com/docker/docker/graph" "github.com/docker/docker/image" "github.com/docker/docker/pkg/common" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/utils" ) func (daemon *Daemon) ImageDelete(job *engine.Job) engine.Status { if n := len(job.Args); n != 1 { return job.Errorf("Usage: %s IMAGE", job.Name) } imgs := engine.NewTable("", 0) if err := daemon.DeleteImage(job.Eng, job.Args[0], imgs, true, job.GetenvBool("force"), job.GetenvBool("noprune")); err != nil { return job.Error(err) } if len(imgs.Data) == 0 { return job.Errorf("Conflict, %s wasn't deleted", job.Args[0]) } if _, err := imgs.WriteListTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK } // FIXME: make this private and use the job instead func (daemon *Daemon) DeleteImage(eng *engine.Engine, name string, imgs *engine.Table, first, force, noprune bool) error { var ( repoName, tag string tags = []string{} ) // FIXME: please respect DRY and centralize repo+tag parsing in a single central place! 
-- shykes repoName, tag = parsers.ParseRepositoryTag(name) if tag == "" { tag = graph.DEFAULTTAG } if name == "" { return fmt.Errorf("Image name cannot be blank") } img, err := daemon.Repositories().LookupImage(name) if err != nil { if r, _ := daemon.Repositories().Get(repoName); r != nil { return fmt.Errorf("No such image: %s", utils.ImageReference(repoName, tag)) } return fmt.Errorf("No such image: %s", name) } if strings.Contains(img.ID, name) { repoName = "" tag = "" } byParents, err := daemon.Graph().ByParent() if err != nil { return err } repos := daemon.Repositories().ByID()[img.ID] // If deleting by id, see if the id belongs to only one repository if repoName == "" { for _, repoAndTag := range repos { parsedRepo, parsedTag := parsers.ParseRepositoryTag(repoAndTag) if repoName == "" || repoName == parsedRepo { repoName = parsedRepo if parsedTag != "" { tags = append(tags, parsedTag) } } else if repoName != parsedRepo && !force && first { // the id belongs to multiple repos, like base:latest and user:test, // in that case return conflict return fmt.Errorf("Conflict, cannot delete image %s because it is tagged in multiple repositories, use -f to force", name) } } } else { tags = append(tags, tag) } if !first && len(tags) > 0 { return nil } if len(repos) <= 1 { if err := daemon.canDeleteImage(img.ID, force); err != nil { return err } } // Untag the current image for _, tag := range tags { tagDeleted, err := daemon.Repositories().Delete(repoName, tag) if err != nil { return err } if tagDeleted { out := &engine.Env{} out.Set("Untagged", utils.ImageReference(repoName, tag)) imgs.Add(out) eng.Job("log", "untag", img.ID, "").Run() } } tags = daemon.Repositories().ByID()[img.ID] if (len(tags) <= 1 && repoName == "") || len(tags) == 0 { if len(byParents[img.ID]) == 0 { if err := daemon.Repositories().DeleteAll(img.ID); err != nil { return err } if err := daemon.Graph().Delete(img.ID); err != nil { return err } out := &engine.Env{} out.SetJson("Deleted", img.ID) imgs.Add(out) eng.Job("log", "delete", img.ID, "").Run() if img.Parent != "" && !noprune { err := daemon.DeleteImage(eng, img.Parent, imgs, false, force, noprune) if first { return err } } } } return nil } func (daemon *Daemon) canDeleteImage(imgID string, force bool) error { for _, container := range daemon.List() { parent, err := daemon.Repositories().LookupImage(container.ImageID) if err != nil { if daemon.Graph().IsNotExist(err) { return nil } return err } if err := parent.WalkHistory(func(p *image.Image) error { if imgID == p.ID { if container.IsRunning() { if force { return fmt.Errorf("Conflict, cannot force delete %s because the running container %s is using it, stop it and retry", common.TruncateID(imgID), common.TruncateID(container.ID)) } return fmt.Errorf("Conflict, cannot delete %s because the running container %s is using it, stop it and use -f to force", common.TruncateID(imgID), common.TruncateID(container.ID)) } else if !force { return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it, use -f to force", common.TruncateID(imgID), common.TruncateID(container.ID)) } } return nil }); err != nil { return err } } return nil } docker-1.6.2/daemon/info.go0000644000175000017500000000710212524223634015061 0ustar tianontianonpackage daemon import ( "os" "runtime" "time" log "github.com/Sirupsen/logrus" "github.com/docker/docker/autogen/dockerversion" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/parsers/operatingsystem" 
"github.com/docker/docker/pkg/system" "github.com/docker/docker/registry" "github.com/docker/docker/utils" ) func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status { images, _ := daemon.Graph().Map() var imgcount int if images == nil { imgcount = 0 } else { imgcount = len(images) } kernelVersion := "" if kv, err := kernel.GetKernelVersion(); err == nil { kernelVersion = kv.String() } operatingSystem := "" if s, err := operatingsystem.GetOperatingSystem(); err == nil { operatingSystem = s } if inContainer, err := operatingsystem.IsContainerized(); err != nil { log.Errorf("Could not determine if daemon is containerized: %v", err) operatingSystem += " (error determining if containerized)" } else if inContainer { operatingSystem += " (containerized)" } meminfo, err := system.ReadMemInfo() if err != nil { log.Errorf("Could not read system memory info: %v", err) } // if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION) initPath := utils.DockerInitPath("") if initPath == "" { // if that fails, we'll just return the path from the daemon initPath = daemon.SystemInitPath() } cjob := job.Eng.Job("subscribers_count") env, _ := cjob.Stdout.AddEnv() if err := cjob.Run(); err != nil { return job.Error(err) } registryJob := job.Eng.Job("registry_config") registryEnv, _ := registryJob.Stdout.AddEnv() if err := registryJob.Run(); err != nil { return job.Error(err) } registryConfig := registry.ServiceConfig{} if err := registryEnv.GetJson("config", ®istryConfig); err != nil { return job.Error(err) } v := &engine.Env{} v.SetJson("ID", daemon.ID) v.SetInt("Containers", len(daemon.List())) v.SetInt("Images", imgcount) v.Set("Driver", daemon.GraphDriver().String()) v.SetJson("DriverStatus", daemon.GraphDriver().Status()) v.SetBool("MemoryLimit", daemon.SystemConfig().MemoryLimit) v.SetBool("SwapLimit", daemon.SystemConfig().SwapLimit) v.SetBool("IPv4Forwarding", !daemon.SystemConfig().IPv4ForwardingDisabled) v.SetBool("Debug", os.Getenv("DEBUG") != "") v.SetInt("NFd", utils.GetTotalUsedFds()) v.SetInt("NGoroutines", runtime.NumGoroutine()) v.Set("SystemTime", time.Now().Format(time.RFC3339Nano)) v.Set("ExecutionDriver", daemon.ExecutionDriver().Name()) v.SetInt("NEventsListener", env.GetInt("count")) v.Set("KernelVersion", kernelVersion) v.Set("OperatingSystem", operatingSystem) v.Set("IndexServerAddress", registry.IndexServerAddress()) v.SetJson("RegistryConfig", registryConfig) v.Set("InitSha1", dockerversion.INITSHA1) v.Set("InitPath", initPath) v.SetInt("NCPU", runtime.NumCPU()) v.SetInt64("MemTotal", meminfo.MemTotal) v.Set("DockerRootDir", daemon.Config().Root) if http_proxy := os.Getenv("http_proxy"); http_proxy != "" { v.Set("HttpProxy", http_proxy) } if https_proxy := os.Getenv("https_proxy"); https_proxy != "" { v.Set("HttpsProxy", https_proxy) } if no_proxy := os.Getenv("no_proxy"); no_proxy != "" { v.Set("NoProxy", no_proxy) } if hostname, err := os.Hostname(); err == nil { v.SetJson("Name", hostname) } v.SetList("Labels", daemon.Config().Labels) if _, err := v.WriteTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK } docker-1.6.2/daemon/volumes.go0000644000175000017500000002547012524223634015630 0ustar tianontianonpackage daemon import ( "fmt" "io" "io/ioutil" "os" "path/filepath" "sort" "strings" log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/pkg/chrootarchive" 
"github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/system" "github.com/docker/docker/volumes" ) type Mount struct { MountToPath string container *Container volume *volumes.Volume Writable bool copyData bool from *Container isBind bool } func (mnt *Mount) Export(resource string) (io.ReadCloser, error) { var name string if resource == mnt.MountToPath[1:] { name = filepath.Base(resource) } path, err := filepath.Rel(mnt.MountToPath[1:], resource) if err != nil { return nil, err } return mnt.volume.Export(path, name) } func (container *Container) prepareVolumes() error { if container.Volumes == nil || len(container.Volumes) == 0 { container.Volumes = make(map[string]string) container.VolumesRW = make(map[string]bool) } return container.createVolumes() } // sortedVolumeMounts returns the list of container volume mount points sorted in lexicographic order func (container *Container) sortedVolumeMounts() []string { var mountPaths []string for path := range container.Volumes { mountPaths = append(mountPaths, path) } sort.Strings(mountPaths) return mountPaths } func (container *Container) createVolumes() error { mounts, err := container.parseVolumeMountConfig() if err != nil { return err } for _, mnt := range mounts { if err := mnt.initialize(); err != nil { return err } } // On every start, this will apply any new `VolumesFrom` entries passed in via HostConfig, which may override volumes set in `create` return container.applyVolumesFrom() } func (m *Mount) initialize() error { // No need to initialize anything since it's already been initialized if hostPath, exists := m.container.Volumes[m.MountToPath]; exists { // If this is a bind-mount/volumes-from, maybe it was passed in at start instead of create // We need to make sure bind-mounts/volumes-from passed on start can override existing ones. if (!m.volume.IsBindMount && !m.isBind) && m.from == nil { return nil } if m.volume.Path == hostPath { return nil } // Make sure we remove these old volumes we don't actually want now. 
// Ignore any errors here since this is just cleanup, maybe someone volumes-from'd this volume if v := m.container.daemon.volumes.Get(hostPath); v != nil { v.RemoveContainer(m.container.ID) m.container.daemon.volumes.Delete(v.Path) } } // This is the full path to container fs + mntToPath containerMntPath, err := symlink.FollowSymlinkInScope(filepath.Join(m.container.basefs, m.MountToPath), m.container.basefs) if err != nil { return err } m.container.VolumesRW[m.MountToPath] = m.Writable m.container.Volumes[m.MountToPath] = m.volume.Path m.volume.AddContainer(m.container.ID) if m.Writable && m.copyData { // Copy whatever is in the container at the mntToPath to the volume copyExistingContents(containerMntPath, m.volume.Path) } return nil } func (container *Container) VolumePaths() map[string]struct{} { var paths = make(map[string]struct{}) for _, path := range container.Volumes { paths[path] = struct{}{} } return paths } func (container *Container) registerVolumes() { for path := range container.VolumePaths() { if v := container.daemon.volumes.Get(path); v != nil { v.AddContainer(container.ID) continue } // if container was created with an old daemon, this volume may not be registered so we need to make sure it gets registered writable := true if rw, exists := container.VolumesRW[path]; exists { writable = rw } v, err := container.daemon.volumes.FindOrCreateVolume(path, writable) if err != nil { log.Debugf("error registering volume %s: %v", path, err) continue } v.AddContainer(container.ID) } } func (container *Container) derefVolumes() { for path := range container.VolumePaths() { vol := container.daemon.volumes.Get(path) if vol == nil { log.Debugf("Volume %s was not found and could not be dereferenced", path) continue } vol.RemoveContainer(container.ID) } } func (container *Container) parseVolumeMountConfig() (map[string]*Mount, error) { var mounts = make(map[string]*Mount) // Get all the bind mounts for _, spec := range container.hostConfig.Binds { path, mountToPath, writable, err := parseBindMountSpec(spec) if err != nil { return nil, err } // Check if a bind mount has already been specified for the same container path if m, exists := mounts[mountToPath]; exists { return nil, fmt.Errorf("Duplicate volume %q: %q already in use, mounted from %q", path, mountToPath, m.volume.Path) } // Check if a volume already exists for this and use it vol, err := container.daemon.volumes.FindOrCreateVolume(path, writable) if err != nil { return nil, err } mounts[mountToPath] = &Mount{ container: container, volume: vol, MountToPath: mountToPath, Writable: writable, isBind: true, // in case the volume itself is a normal volume, but is being mounted in as a bindmount here } } // Get the rest of the volumes for path := range container.Config.Volumes { // Check if this is already added as a bind-mount path = filepath.Clean(path) if _, exists := mounts[path]; exists { continue } // Check if this has already been created if _, exists := container.Volumes[path]; exists { continue } realPath, err := container.getResourcePath(path) if err != nil { return nil, fmt.Errorf("failed to evaluate the absolute path of symlink") } if stat, err := os.Stat(realPath); err == nil { if !stat.IsDir() { return nil, fmt.Errorf("file exists at %s, can't create volume there", realPath) } } vol, err := container.daemon.volumes.FindOrCreateVolume("", true) if err != nil { return nil, err } mounts[path] = &Mount{ container: container, MountToPath: path, volume: vol, Writable: true, copyData: true, } } return mounts, nil } func 
parseBindMountSpec(spec string) (string, string, bool, error) { var ( path, mountToPath string writable bool arr = strings.Split(spec, ":") ) switch len(arr) { case 2: path = arr[0] mountToPath = arr[1] writable = true case 3: path = arr[0] mountToPath = arr[1] writable = validMountMode(arr[2]) && arr[2] == "rw" default: return "", "", false, fmt.Errorf("Invalid volume specification: %s", spec) } if !filepath.IsAbs(path) { return "", "", false, fmt.Errorf("cannot bind mount volume: %s volume paths must be absolute.", path) } path = filepath.Clean(path) mountToPath = filepath.Clean(mountToPath) return path, mountToPath, writable, nil } func parseVolumesFromSpec(spec string) (string, string, error) { specParts := strings.SplitN(spec, ":", 2) if len(specParts) == 0 { return "", "", fmt.Errorf("malformed volumes-from specification: %s", spec) } var ( id = specParts[0] mode = "rw" ) if len(specParts) == 2 { mode = specParts[1] if !validMountMode(mode) { return "", "", fmt.Errorf("invalid mode for volumes-from: %s", mode) } } return id, mode, nil } func (container *Container) applyVolumesFrom() error { volumesFrom := container.hostConfig.VolumesFrom if len(volumesFrom) > 0 && container.AppliedVolumesFrom == nil { container.AppliedVolumesFrom = make(map[string]struct{}) } mountGroups := make(map[string][]*Mount) for _, spec := range volumesFrom { id, mode, err := parseVolumesFromSpec(spec) if err != nil { return err } if _, exists := container.AppliedVolumesFrom[id]; exists { // Don't try to apply these since they've already been applied continue } c, err := container.daemon.Get(id) if err != nil { return fmt.Errorf("Could not apply volumes of non-existent container %q.", id) } var ( fromMounts = c.VolumeMounts() mounts []*Mount ) for _, mnt := range fromMounts { mnt.Writable = mnt.Writable && (mode == "rw") mounts = append(mounts, mnt) } mountGroups[id] = mounts } for id, mounts := range mountGroups { for _, mnt := range mounts { mnt.from = mnt.container mnt.container = container if err := mnt.initialize(); err != nil { return err } } container.AppliedVolumesFrom[id] = struct{}{} } return nil } func validMountMode(mode string) bool { validModes := map[string]bool{ "rw": true, "ro": true, } return validModes[mode] } func (container *Container) setupMounts() error { mounts := []execdriver.Mount{} // Mount user specified volumes // Note, these are not private because you may want propagation of (un)mounts from host // volumes. 
For instance if you use -v /usr:/usr and the host later mounts /usr/share you // want this new mount in the container // These mounts must be ordered by the path they are being mounted to (lexicographic order), so that parent paths are always mounted before their children for _, path := range container.sortedVolumeMounts() { mounts = append(mounts, execdriver.Mount{ Source: container.Volumes[path], Destination: path, Writable: container.VolumesRW[path], }) } if container.ResolvConfPath != "" { mounts = append(mounts, execdriver.Mount{Source: container.ResolvConfPath, Destination: "/etc/resolv.conf", Writable: true, Private: true}) } if container.HostnamePath != "" { mounts = append(mounts, execdriver.Mount{Source: container.HostnamePath, Destination: "/etc/hostname", Writable: true, Private: true}) } if container.HostsPath != "" { mounts = append(mounts, execdriver.Mount{Source: container.HostsPath, Destination: "/etc/hosts", Writable: true, Private: true}) } container.command.Mounts = mounts return nil } func (container *Container) VolumeMounts() map[string]*Mount { mounts := make(map[string]*Mount) for mountToPath, path := range container.Volumes { if v := container.daemon.volumes.Get(path); v != nil { mounts[mountToPath] = &Mount{volume: v, container: container, MountToPath: mountToPath, Writable: container.VolumesRW[mountToPath]} } } return mounts } func copyExistingContents(source, destination string) error { volList, err := ioutil.ReadDir(source) if err != nil { return err } if len(volList) > 0 { srcList, err := ioutil.ReadDir(destination) if err != nil { return err } if len(srcList) == 0 { // If the destination volume is empty, copy the files from the container's path into the volume if err := chrootarchive.CopyWithTar(source, destination); err != nil { return err } } } return copyOwnership(source, destination) } // copyOwnership copies the permissions and uid:gid of the source file // into the destination file func copyOwnership(source, destination string) error { stat, err := system.Stat(source) if err != nil { return err } if err := os.Chown(destination, int(stat.Uid()), int(stat.Gid())); err != nil { return err } return os.Chmod(destination, os.FileMode(stat.Mode())) } docker-1.6.2/daemon/kill.go0000644000175000017500000000327112524223634015064 0ustar tianontianonpackage daemon import ( "strconv" "strings" "syscall" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/signal" ) // ContainerKill sends a signal to the container // If no signal is given (sig 0), then Kill with SIGKILL and wait // for the container to exit. // If a signal is given, then just send it to the container and return. func (daemon *Daemon) ContainerKill(job *engine.Job) engine.Status { if n := len(job.Args); n < 1 || n > 2 { return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name) } var ( name = job.Args[0] sig uint64 err error ) // If we have a signal, look at it.
Otherwise, do nothing if len(job.Args) == 2 && job.Args[1] != "" { // Check if we passed the signal as a number: // The largest legal signal is 31, so let's parse on 5 bits sig, err = strconv.ParseUint(job.Args[1], 10, 5) if err != nil { // The signal is not a number, treat it as a string (either like "KILL" or like "SIGKILL") sig = uint64(signal.SignalMap[strings.TrimPrefix(job.Args[1], "SIG")]) } if sig == 0 { return job.Errorf("Invalid signal: %s", job.Args[1]) } } container, err := daemon.Get(name) if err != nil { return job.Error(err) } // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { if err := container.Kill(); err != nil { return job.Errorf("Cannot kill container %s: %s", name, err) } container.LogEvent("kill") } else { // Otherwise, just send the requested signal if err := container.KillSig(int(sig)); err != nil { return job.Errorf("Cannot kill container %s: %s", name, err) } // FIXME: Add event for signals } return engine.StatusOK } docker-1.6.2/daemon/container.go0000644000175000017500000012364412524223634016122 0ustar tianontianonpackage daemon import ( "bytes" "encoding/json" "errors" "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "strings" "syscall" "time" "github.com/docker/libcontainer" "github.com/docker/libcontainer/configs" "github.com/docker/libcontainer/devices" "github.com/docker/libcontainer/label" log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/jsonfilelog" "github.com/docker/docker/daemon/logger/syslog" "github.com/docker/docker/engine" "github.com/docker/docker/image" "github.com/docker/docker/links" "github.com/docker/docker/nat" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/broadcastwriter" "github.com/docker/docker/pkg/common" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/networkfs/etchosts" "github.com/docker/docker/pkg/networkfs/resolvconf" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/ulimit" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" var ( ErrNotATTY = errors.New("The PTY is not a file") ErrNoTTY = errors.New("No PTY found") ErrContainerStart = errors.New("The container failed to start. Unknown error") ErrContainerStartTimeout = errors.New("The container failed to start due to timed out.") ) type StreamConfig struct { stdout *broadcastwriter.BroadcastWriter stderr *broadcastwriter.BroadcastWriter stdin io.ReadCloser stdinPipe io.WriteCloser } type Container struct { *State `json:"State"` // Needed for remote api version <= 1.11 root string // Path to the "home" of the container, including metadata. basefs string // Path to the graphdriver mountpoint ID string Created time.Time Path string Args []string Config *runconfig.Config ImageID string `json:"Image"` NetworkSettings *NetworkSettings ResolvConfPath string HostnamePath string HostsPath string LogPath string Name string Driver string ExecDriver string command *execdriver.Command StreamConfig daemon *Daemon MountLabel, ProcessLabel string AppArmorProfile string RestartCount int UpdateDns bool // Maps container paths to volume paths. The key in this is the path to which // the volume is being mounted inside the container. 
Value is the path of the // volume on disk Volumes map[string]string // Store rw/ro in a separate structure to preserve reverse-compatibility on-disk. // Easier than migrating older container configs :) VolumesRW map[string]bool hostConfig *runconfig.HostConfig activeLinks map[string]*links.Link monitor *containerMonitor execCommands *execStore // logDriver for closing logDriver logger.Logger logCopier *logger.Copier AppliedVolumesFrom map[string]struct{} } func (container *Container) FromDisk() error { pth, err := container.jsonPath() if err != nil { return err } jsonSource, err := os.Open(pth) if err != nil { return err } defer jsonSource.Close() dec := json.NewDecoder(jsonSource) // Load container settings // udp broke compat of docker.PortMapping, but it's not used when loading a container, we can skip it if err := dec.Decode(container); err != nil && !strings.Contains(err.Error(), "docker.PortMapping") { return err } if err := label.ReserveLabel(container.ProcessLabel); err != nil { return err } return container.readHostConfig() } func (container *Container) toDisk() error { data, err := json.Marshal(container) if err != nil { return err } pth, err := container.jsonPath() if err != nil { return err } err = ioutil.WriteFile(pth, data, 0666) if err != nil { return err } return container.WriteHostConfig() } func (container *Container) ToDisk() error { container.Lock() err := container.toDisk() container.Unlock() return err } func (container *Container) readHostConfig() error { container.hostConfig = &runconfig.HostConfig{} // If the hostconfig file does not exist, do not read it. // (We still have to initialize container.hostConfig, // but that's OK, since we just did that above.) pth, err := container.hostConfigPath() if err != nil { return err } _, err = os.Stat(pth) if os.IsNotExist(err) { return nil } data, err := ioutil.ReadFile(pth) if err != nil { return err } return json.Unmarshal(data, container.hostConfig) } func (container *Container) WriteHostConfig() error { data, err := json.Marshal(container.hostConfig) if err != nil { return err } pth, err := container.hostConfigPath() if err != nil { return err } return ioutil.WriteFile(pth, data, 0666) } func (container *Container) LogEvent(action string) { d := container.daemon if err := d.eng.Job("log", action, container.ID, d.Repositories().ImageName(container.ImageID)).Run(); err != nil { log.Errorf("Error logging event %s for %s: %s", action, container.ID, err) } } func (container *Container) getResourcePath(path string) (string, error) { cleanPath := filepath.Join("/", path) return symlink.FollowSymlinkInScope(filepath.Join(container.basefs, cleanPath), container.basefs) } func (container *Container) getRootResourcePath(path string) (string, error) { cleanPath := filepath.Join("/", path) return symlink.FollowSymlinkInScope(filepath.Join(container.root, cleanPath), container.root) } func populateCommand(c *Container, env []string) error { en := &execdriver.Network{ Mtu: c.daemon.config.Mtu, Interface: nil, } parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2) switch parts[0] { case "none": case "host": en.HostNetworking = true case "bridge", "": // empty string to support existing containers if !c.Config.NetworkDisabled { network := c.NetworkSettings en.Interface = &execdriver.NetworkInterface{ Gateway: network.Gateway, Bridge: network.Bridge, IPAddress: network.IPAddress, IPPrefixLen: network.IPPrefixLen, MacAddress: network.MacAddress, LinkLocalIPv6Address: network.LinkLocalIPv6Address, GlobalIPv6Address: 
network.GlobalIPv6Address, GlobalIPv6PrefixLen: network.GlobalIPv6PrefixLen, IPv6Gateway: network.IPv6Gateway, } } case "container": nc, err := c.getNetworkedContainer() if err != nil { return err } en.ContainerID = nc.ID default: return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode) } ipc := &execdriver.Ipc{} if c.hostConfig.IpcMode.IsContainer() { ic, err := c.getIpcContainer() if err != nil { return err } ipc.ContainerID = ic.ID } else { ipc.HostIpc = c.hostConfig.IpcMode.IsHost() } pid := &execdriver.Pid{} pid.HostPid = c.hostConfig.PidMode.IsHost() // Build lists of devices allowed and created within the container. userSpecifiedDevices := make([]*configs.Device, len(c.hostConfig.Devices)) for i, deviceMapping := range c.hostConfig.Devices { device, err := devices.DeviceFromPath(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions) if err != nil { return fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err) } device.Path = deviceMapping.PathInContainer userSpecifiedDevices[i] = device } allowedDevices := append(configs.DefaultAllowedDevices, userSpecifiedDevices...) autoCreatedDevices := append(configs.DefaultAutoCreatedDevices, userSpecifiedDevices...) // TODO: this can be removed after lxc-conf is fully deprecated lxcConfig, err := mergeLxcConfIntoOptions(c.hostConfig) if err != nil { return err } var rlimits []*ulimit.Rlimit ulimits := c.hostConfig.Ulimits // Merge ulimits with daemon defaults ulIdx := make(map[string]*ulimit.Ulimit) for _, ul := range ulimits { ulIdx[ul.Name] = ul } for name, ul := range c.daemon.config.Ulimits { if _, exists := ulIdx[name]; !exists { ulimits = append(ulimits, ul) } } for _, limit := range ulimits { rl, err := limit.GetRlimit() if err != nil { return err } rlimits = append(rlimits, rl) } resources := &execdriver.Resources{ Memory: c.hostConfig.Memory, MemorySwap: c.hostConfig.MemorySwap, CpuShares: c.hostConfig.CpuShares, CpusetCpus: c.hostConfig.CpusetCpus, Rlimits: rlimits, } processConfig := execdriver.ProcessConfig{ Privileged: c.hostConfig.Privileged, Entrypoint: c.Path, Arguments: c.Args, Tty: c.Config.Tty, User: c.Config.User, } processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true} processConfig.Env = env c.command = &execdriver.Command{ ID: c.ID, Rootfs: c.RootfsPath(), ReadonlyRootfs: c.hostConfig.ReadonlyRootfs, InitPath: "/.dockerinit", WorkingDir: c.Config.WorkingDir, Network: en, Ipc: ipc, Pid: pid, Resources: resources, AllowedDevices: allowedDevices, AutoCreatedDevices: autoCreatedDevices, CapAdd: c.hostConfig.CapAdd, CapDrop: c.hostConfig.CapDrop, ProcessConfig: processConfig, ProcessLabel: c.GetProcessLabel(), MountLabel: c.GetMountLabel(), LxcConfig: lxcConfig, AppArmorProfile: c.AppArmorProfile, CgroupParent: c.hostConfig.CgroupParent, } return nil } func (container *Container) Start() (err error) { container.Lock() defer container.Unlock() if container.Running { return nil } if container.removalInProgress || container.Dead { return fmt.Errorf("Container is marked for removal and cannot be started.") } // if we encounter an error during start we need to ensure that any other // setup has been cleaned up properly defer func() { if err != nil { container.setError(err) // if no one else has set it, make sure we don't leave it at zero if container.ExitCode == 0 { container.ExitCode = 128 } container.toDisk() container.cleanup() } }() if err := container.setupContainerDns(); err != nil { return err } if err := container.Mount(); err != nil { 
return err } if err := container.initializeNetworking(); err != nil { return err } if err := container.updateParentsHosts(); err != nil { return err } container.verifyDaemonSettings() if err := container.prepareVolumes(); err != nil { return err } linkedEnv, err := container.setupLinkedContainers() if err != nil { return err } if err := container.setupWorkingDirectory(); err != nil { return err } env := container.createDaemonEnvironment(linkedEnv) if err := populateCommand(container, env); err != nil { return err } if err := container.setupMounts(); err != nil { return err } return container.waitForStart() } func (container *Container) Run() error { if err := container.Start(); err != nil { return err } container.WaitStop(-1 * time.Second) return nil } func (container *Container) Output() (output []byte, err error) { pipe := container.StdoutPipe() defer pipe.Close() if err := container.Start(); err != nil { return nil, err } output, err = ioutil.ReadAll(pipe) container.WaitStop(-1 * time.Second) return output, err } // StreamConfig.StdinPipe returns a WriteCloser which can be used to feed data // to the standard input of the container's active process. // Container.StdoutPipe and Container.StderrPipe each return a ReadCloser // which can be used to retrieve the standard output (and error) generated // by the container's active process. The output (and error) are actually // copied and delivered to all StdoutPipe and StderrPipe consumers, using // a kind of "broadcaster". func (streamConfig *StreamConfig) StdinPipe() io.WriteCloser { return streamConfig.stdinPipe } func (streamConfig *StreamConfig) StdoutPipe() io.ReadCloser { reader, writer := io.Pipe() streamConfig.stdout.AddWriter(writer, "") return ioutils.NewBufReader(reader) } func (streamConfig *StreamConfig) StderrPipe() io.ReadCloser { reader, writer := io.Pipe() streamConfig.stderr.AddWriter(writer, "") return ioutils.NewBufReader(reader) } func (streamConfig *StreamConfig) StdoutLogPipe() io.ReadCloser { reader, writer := io.Pipe() streamConfig.stdout.AddWriter(writer, "stdout") return ioutils.NewBufReader(reader) } func (streamConfig *StreamConfig) StderrLogPipe() io.ReadCloser { reader, writer := io.Pipe() streamConfig.stderr.AddWriter(writer, "stderr") return ioutils.NewBufReader(reader) } func (container *Container) buildHostnameFile() error { hostnamePath, err := container.getRootResourcePath("hostname") if err != nil { return err } container.HostnamePath = hostnamePath if container.Config.Domainname != "" { return ioutil.WriteFile(container.HostnamePath, []byte(fmt.Sprintf("%s.%s\n", container.Config.Hostname, container.Config.Domainname)), 0644) } return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) } func (container *Container) buildHostsFiles(IP string) error { hostsPath, err := container.getRootResourcePath("hosts") if err != nil { return err } container.HostsPath = hostsPath var extraContent []etchosts.Record children, err := container.daemon.Children(container.Name) if err != nil { return err } for linkAlias, child := range children { _, alias := path.Split(linkAlias) // allow access to the linked container via the alias, real name, and container hostname aliasList := alias + " " + child.Config.Hostname // only add the name if alias isn't equal to the name if alias != child.Name[1:] { aliasList = aliasList + " " + child.Name[1:] } extraContent = append(extraContent, etchosts.Record{Hosts: aliasList, IP: child.NetworkSettings.IPAddress}) } for _, extraHost := range 
container.hostConfig.ExtraHosts { // allow IPv6 addresses in extra hosts; only split on first ":" parts := strings.SplitN(extraHost, ":", 2) extraContent = append(extraContent, etchosts.Record{Hosts: parts[0], IP: parts[1]}) } return etchosts.Build(container.HostsPath, IP, container.Config.Hostname, container.Config.Domainname, extraContent) } func (container *Container) buildHostnameAndHostsFiles(IP string) error { if err := container.buildHostnameFile(); err != nil { return err } return container.buildHostsFiles(IP) } func (container *Container) AllocateNetwork() error { mode := container.hostConfig.NetworkMode if container.Config.NetworkDisabled || !mode.IsPrivate() { return nil } var ( env *engine.Env err error eng = container.daemon.eng ) job := eng.Job("allocate_interface", container.ID) job.Setenv("RequestedMac", container.Config.MacAddress) if env, err = job.Stdout.AddEnv(); err != nil { return err } if err = job.Run(); err != nil { return err } // Error handling: At this point, the interface is allocated so we have to // make sure that it is always released in case of error, otherwise we // might leak resources. if container.Config.PortSpecs != nil { if err = migratePortMappings(container.Config, container.hostConfig); err != nil { eng.Job("release_interface", container.ID).Run() return err } container.Config.PortSpecs = nil if err = container.WriteHostConfig(); err != nil { eng.Job("release_interface", container.ID).Run() return err } } var ( portSpecs = make(nat.PortSet) bindings = make(nat.PortMap) ) if container.Config.ExposedPorts != nil { portSpecs = container.Config.ExposedPorts } if container.hostConfig.PortBindings != nil { for p, b := range container.hostConfig.PortBindings { bindings[p] = []nat.PortBinding{} for _, bb := range b { bindings[p] = append(bindings[p], nat.PortBinding{ HostIp: bb.HostIp, HostPort: bb.HostPort, }) } } } container.NetworkSettings.PortMapping = nil for port := range portSpecs { if err = container.allocatePort(eng, port, bindings); err != nil { eng.Job("release_interface", container.ID).Run() return err } } container.WriteHostConfig() container.NetworkSettings.Ports = bindings container.NetworkSettings.Bridge = env.Get("Bridge") container.NetworkSettings.IPAddress = env.Get("IP") container.NetworkSettings.IPPrefixLen = env.GetInt("IPPrefixLen") container.NetworkSettings.MacAddress = env.Get("MacAddress") container.NetworkSettings.Gateway = env.Get("Gateway") container.NetworkSettings.LinkLocalIPv6Address = env.Get("LinkLocalIPv6") container.NetworkSettings.LinkLocalIPv6PrefixLen = 64 container.NetworkSettings.GlobalIPv6Address = env.Get("GlobalIPv6") container.NetworkSettings.GlobalIPv6PrefixLen = env.GetInt("GlobalIPv6PrefixLen") container.NetworkSettings.IPv6Gateway = env.Get("IPv6Gateway") return nil } func (container *Container) ReleaseNetwork() { if container.Config.NetworkDisabled || !container.hostConfig.NetworkMode.IsPrivate() { return } eng := container.daemon.eng job := eng.Job("release_interface", container.ID) job.SetenvBool("overrideShutdown", true) job.Run() container.NetworkSettings = &NetworkSettings{} } func (container *Container) isNetworkAllocated() bool { return container.NetworkSettings.IPAddress != "" } func (container *Container) RestoreNetwork() error { mode := container.hostConfig.NetworkMode // Don't attempt a restore if we previously didn't allocate networking. // This might be a legacy container with no network allocated, in which case the // allocation will happen once and for all at start. 
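// A summary sketch of the restore flow below (illustrative, not part of the
// original source):
//
//	job := eng.Job("allocate_interface", container.ID)
//	job.Setenv("RequestedIP", ...)  // ask for the IP we had before
//	job.Setenv("RequestedMac", ...) // and the same MAC address
//	container.allocatePort(eng, port, ports) // re-claim every saved binding
//
// i.e. the same engine jobs used for a fresh allocation are re-run with the
// previously recorded values, so the container keeps its network identity.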
if !container.isNetworkAllocated() || container.Config.NetworkDisabled || !mode.IsPrivate() { return nil } eng := container.daemon.eng // Re-allocate the interface with the same IP and MAC address. job := eng.Job("allocate_interface", container.ID) job.Setenv("RequestedIP", container.NetworkSettings.IPAddress) job.Setenv("RequestedMac", container.NetworkSettings.MacAddress) if err := job.Run(); err != nil { return err } // Re-allocate any previously allocated ports. for port := range container.NetworkSettings.Ports { if err := container.allocatePort(eng, port, container.NetworkSettings.Ports); err != nil { return err } } return nil } // cleanup releases any network resources allocated to the container along with any rules // around how containers are linked together. It also unmounts the container's root filesystem. func (container *Container) cleanup() { container.ReleaseNetwork() // Disable all active links if container.activeLinks != nil { for _, link := range container.activeLinks { link.Disable() } } if err := container.Unmount(); err != nil { log.Errorf("%v: Failed to umount filesystem: %v", container.ID, err) } for _, eConfig := range container.execCommands.s { container.daemon.unregisterExecCommand(eConfig) } } func (container *Container) KillSig(sig int) error { log.Debugf("Sending %d to %s", sig, container.ID) container.Lock() defer container.Unlock() // We could unpause the container for them rather than returning this error if container.Paused { return fmt.Errorf("Container %s is paused. Unpause the container before stopping", container.ID) } if !container.Running { return nil } // signal to the monitor that it should not restart the container // after we send the kill signal container.monitor.ExitOnNext() // if the container is currently restarting we do not need to send the signal // to the process. Telling the monitor that it should exit on its next event // loop is enough if container.Restarting { return nil } return container.daemon.Kill(container, sig) } // Wrapper around KillSig() suppressing "no such process" error. func (container *Container) killPossiblyDeadProcess(sig int) error { err := container.KillSig(sig) if err == syscall.ESRCH { log.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.GetPid(), sig) return nil } return err } func (container *Container) Pause() error { if container.IsPaused() { return fmt.Errorf("Container %s is already paused", container.ID) } if !container.IsRunning() { return fmt.Errorf("Container %s is not running", container.ID) } return container.daemon.Pause(container) } func (container *Container) Unpause() error { if !container.IsPaused() { return fmt.Errorf("Container %s is not paused", container.ID) } if !container.IsRunning() { return fmt.Errorf("Container %s is not running", container.ID) } return container.daemon.Unpause(container) } func (container *Container) Kill() error { if !container.IsRunning() { return nil } // 1. Send SIGKILL if err := container.killPossiblyDeadProcess(9); err != nil { return err } // 2.
Wait for the process to die, in last resort, try to kill the process directly if _, err := container.WaitStop(10 * time.Second); err != nil { // Ensure that we don't kill ourselves if pid := container.GetPid(); pid != 0 { log.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", common.TruncateID(container.ID)) if err := syscall.Kill(pid, 9); err != nil { if err != syscall.ESRCH { return err } log.Debugf("Cannot kill process (pid=%d) with signal 9: no such process.", pid) } } } container.WaitStop(-1 * time.Second) return nil } func (container *Container) Stop(seconds int) error { if !container.IsRunning() { return nil } // 1. Send a SIGTERM if err := container.killPossiblyDeadProcess(15); err != nil { log.Infof("Failed to send SIGTERM to the process, force killing") if err := container.killPossiblyDeadProcess(9); err != nil { return err } } // 2. Wait for the process to exit on its own if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil { log.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds) // 3. If it doesn't, then send SIGKILL if err := container.Kill(); err != nil { container.WaitStop(-1 * time.Second) return err } } return nil } func (container *Container) Restart(seconds int) error { // Avoid unnecessarily unmounting and then directly mounting // the container when the container stops and then starts // again if err := container.Mount(); err == nil { defer container.Unmount() } if err := container.Stop(seconds); err != nil { return err } return container.Start() } func (container *Container) Resize(h, w int) error { if !container.IsRunning() { return fmt.Errorf("Cannot resize container %s, container is not running", container.ID) } return container.command.ProcessConfig.Terminal.Resize(h, w) } func (container *Container) ExportRw() (archive.Archive, error) { if err := container.Mount(); err != nil { return nil, err } if container.daemon == nil { return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID) } archive, err := container.daemon.Diff(container) if err != nil { container.Unmount() return nil, err } return ioutils.NewReadCloserWrapper(archive, func() error { err := archive.Close() container.Unmount() return err }), nil } func (container *Container) Export() (archive.Archive, error) { if err := container.Mount(); err != nil { return nil, err } archive, err := archive.Tar(container.basefs, archive.Uncompressed) if err != nil { container.Unmount() return nil, err } return ioutils.NewReadCloserWrapper(archive, func() error { err := archive.Close() container.Unmount() return err }), nil } func (container *Container) Mount() error { return container.daemon.Mount(container) } func (container *Container) changes() ([]archive.Change, error) { return container.daemon.Changes(container) } func (container *Container) Changes() ([]archive.Change, error) { container.Lock() defer container.Unlock() return container.changes() } func (container *Container) GetImage() (*image.Image, error) { if container.daemon == nil { return nil, fmt.Errorf("Can't get image of unregistered container") } return container.daemon.graph.Get(container.ImageID) } func (container *Container) Unmount() error { return container.daemon.Unmount(container) } func (container *Container) logPath(name string) (string, error) { return container.getRootResourcePath(fmt.Sprintf("%s-%s.log", container.ID, name)) } func (container *Container) ReadLog(name string) (io.Reader, 
error) { pth, err := container.logPath(name) if err != nil { return nil, err } return os.Open(pth) } func (container *Container) hostConfigPath() (string, error) { return container.getRootResourcePath("hostconfig.json") } func (container *Container) jsonPath() (string, error) { return container.getRootResourcePath("config.json") } // This method must be exported to be used from the lxc template // This directory is only usable when the container is running func (container *Container) RootfsPath() string { return container.basefs } func validateID(id string) error { if id == "" { return fmt.Errorf("Invalid empty id") } return nil } // GetSize, return real size, virtual size func (container *Container) GetSize() (int64, int64) { var ( sizeRw, sizeRootfs int64 err error driver = container.daemon.driver ) if err := container.Mount(); err != nil { log.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err) return sizeRw, sizeRootfs } defer container.Unmount() initID := fmt.Sprintf("%s-init", container.ID) sizeRw, err = driver.DiffSize(container.ID, initID) if err != nil { log.Errorf("Driver %s couldn't return diff size of container %s: %s", driver, container.ID, err) // FIXME: GetSize should return an error. Not changing it now in case // there is a side-effect. sizeRw = -1 } if _, err = os.Stat(container.basefs); err != nil { if sizeRootfs, err = directory.Size(container.basefs); err != nil { sizeRootfs = -1 } } return sizeRw, sizeRootfs } func (container *Container) Copy(resource string) (io.ReadCloser, error) { if err := container.Mount(); err != nil { return nil, err } basePath, err := container.getResourcePath(resource) if err != nil { container.Unmount() return nil, err } // Check if this is actually in a volume for _, mnt := range container.VolumeMounts() { if len(mnt.MountToPath) > 0 && strings.HasPrefix(resource, mnt.MountToPath[1:]) { return mnt.Export(resource) } } stat, err := os.Stat(basePath) if err != nil { container.Unmount() return nil, err } var filter []string if !stat.IsDir() { d, f := path.Split(basePath) basePath = d filter = []string{f} } else { filter = []string{path.Base(basePath)} basePath = path.Dir(basePath) } archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{ Compression: archive.Uncompressed, IncludeFiles: filter, }) if err != nil { container.Unmount() return nil, err } return ioutils.NewReadCloserWrapper(archive, func() error { err := archive.Close() container.Unmount() return err }), nil } // Returns true if the container exposes a certain port func (container *Container) Exposes(p nat.Port) bool { _, exists := container.Config.ExposedPorts[p] return exists } func (container *Container) GetPtyMaster() (libcontainer.Console, error) { ttyConsole, ok := container.command.ProcessConfig.Terminal.(execdriver.TtyTerminal) if !ok { return nil, ErrNoTTY } return ttyConsole.Master(), nil } func (container *Container) HostConfig() *runconfig.HostConfig { container.Lock() res := container.hostConfig container.Unlock() return res } func (container *Container) SetHostConfig(hostConfig *runconfig.HostConfig) { container.Lock() container.hostConfig = hostConfig container.Unlock() } func (container *Container) DisableLink(name string) { if container.activeLinks != nil { if link, exists := container.activeLinks[name]; exists { link.Disable() } else { log.Debugf("Could not find active link for %s", name) } } } func (container *Container) setupContainerDns() error { if container.ResolvConfPath != "" { // check if this is an existing 
container that needs DNS update: if container.UpdateDns { // read the host's resolv.conf, get the hash and call updateResolvConf log.Debugf("Check container (%s) for update to resolv.conf - UpdateDns flag was set", container.ID) latestResolvConf, latestHash := resolvconf.GetLastModified() // clean container resolv.conf re: localhost nameservers and IPv6 NS (if IPv6 disabled) updatedResolvConf, modified := resolvconf.FilterResolvDns(latestResolvConf, container.daemon.config.EnableIPv6) if modified { // changes have occurred during resolv.conf localhost cleanup: generate an updated hash newHash, err := utils.HashData(bytes.NewReader(updatedResolvConf)) if err != nil { return err } latestHash = newHash } if err := container.updateResolvConf(updatedResolvConf, latestHash); err != nil { return err } // successful update of the restarting container; set the flag off container.UpdateDns = false } return nil } var ( config = container.hostConfig daemon = container.daemon ) resolvConf, err := resolvconf.Get() if err != nil { return err } container.ResolvConfPath, err = container.getRootResourcePath("resolv.conf") if err != nil { return err } if config.NetworkMode != "host" { // check configurations for any container/daemon dns settings if len(config.Dns) > 0 || len(daemon.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(daemon.config.DnsSearch) > 0 { var ( dns = resolvconf.GetNameservers(resolvConf) dnsSearch = resolvconf.GetSearchDomains(resolvConf) ) if len(config.Dns) > 0 { dns = config.Dns } else if len(daemon.config.Dns) > 0 { dns = daemon.config.Dns } if len(config.DnsSearch) > 0 { dnsSearch = config.DnsSearch } else if len(daemon.config.DnsSearch) > 0 { dnsSearch = daemon.config.DnsSearch } return resolvconf.Build(container.ResolvConfPath, dns, dnsSearch) } // replace any localhost/127.*, and remove IPv6 nameservers if IPv6 disabled in daemon resolvConf, _ = resolvconf.FilterResolvDns(resolvConf, daemon.config.EnableIPv6) } //get a sha256 hash of the resolv conf at this point so we can check //for changes when the host resolv.conf changes (e.g. network update) resolvHash, err := utils.HashData(bytes.NewReader(resolvConf)) if err != nil { return err } resolvHashFile := container.ResolvConfPath + ".hash" if err = ioutil.WriteFile(resolvHashFile, []byte(resolvHash), 0644); err != nil { return err } return ioutil.WriteFile(container.ResolvConfPath, resolvConf, 0644) } // called when the host's resolv.conf changes to check whether container's resolv.conf // is unchanged by the container "user" since container start: if unchanged, the // container's resolv.conf will be updated to match the host's new resolv.conf func (container *Container) updateResolvConf(updatedResolvConf []byte, newResolvHash string) error { if container.ResolvConfPath == "" { return nil } if container.Running { //set a marker in the hostConfig to update on next start/restart container.UpdateDns = true return nil } resolvHashFile := container.ResolvConfPath + ".hash" //read the container's current resolv.conf and compute the hash resolvBytes, err := ioutil.ReadFile(container.ResolvConfPath) if err != nil { return err } curHash, err := utils.HashData(bytes.NewReader(resolvBytes)) if err != nil { return err } //read the hash from the last time we wrote resolv.conf in the container hashBytes, err := ioutil.ReadFile(resolvHashFile) if err != nil { if !os.IsNotExist(err) { return err } // backwards compat: if no hash file exists, this container pre-existed from // a Docker daemon that didn't contain this update feature. 
Given we can't know // if the user has modified the resolv.conf since container start time, safer // to just never update the container's resolv.conf during its lifetime which // we can control by setting hashBytes to an empty string hashBytes = []byte("") } //if the user has not modified the resolv.conf of the container since we wrote it last //we will replace it with the updated resolv.conf from the host if string(hashBytes) == curHash { log.Debugf("replacing %q with updated host resolv.conf", container.ResolvConfPath) // for atomic updates to these files, use temporary files with os.Rename: dir := path.Dir(container.ResolvConfPath) tmpHashFile, err := ioutil.TempFile(dir, "hash") if err != nil { return err } tmpResolvFile, err := ioutil.TempFile(dir, "resolv") if err != nil { return err } // write the updates to the temp files if err = ioutil.WriteFile(tmpHashFile.Name(), []byte(newResolvHash), 0644); err != nil { return err } if err = ioutil.WriteFile(tmpResolvFile.Name(), updatedResolvConf, 0644); err != nil { return err } // rename the temp files for atomic replace if err = os.Rename(tmpHashFile.Name(), resolvHashFile); err != nil { return err } return os.Rename(tmpResolvFile.Name(), container.ResolvConfPath) } return nil } func (container *Container) updateParentsHosts() error { refs := container.daemon.ContainerGraph().RefPaths(container.ID) for _, ref := range refs { if ref.ParentID == "0" { continue } c, err := container.daemon.Get(ref.ParentID) if err != nil { log.Error(err) } if c != nil && !container.daemon.config.DisableNetwork && container.hostConfig.NetworkMode.IsPrivate() { log.Debugf("Update /etc/hosts of %s for alias %s with ip %s", c.ID, ref.Name, container.NetworkSettings.IPAddress) if err := etchosts.Update(c.HostsPath, container.NetworkSettings.IPAddress, ref.Name); err != nil { log.Errorf("Failed to update /etc/hosts in parent container %s for alias %s: %v", c.ID, ref.Name, err) } } } return nil } func (container *Container) initializeNetworking() error { var err error if container.hostConfig.NetworkMode.IsHost() { container.Config.Hostname, err = os.Hostname() if err != nil { return err } parts := strings.SplitN(container.Config.Hostname, ".", 2) if len(parts) > 1 { container.Config.Hostname = parts[0] container.Config.Domainname = parts[1] } content, err := ioutil.ReadFile("/etc/hosts") if os.IsNotExist(err) { return container.buildHostnameAndHostsFiles("") } else if err != nil { return err } if err := container.buildHostnameFile(); err != nil { return err } hostsPath, err := container.getRootResourcePath("hosts") if err != nil { return err } container.HostsPath = hostsPath return ioutil.WriteFile(container.HostsPath, content, 0644) } if container.hostConfig.NetworkMode.IsContainer() { // we need to get the hosts files from the container to join nc, err := container.getNetworkedContainer() if err != nil { return err } container.HostnamePath = nc.HostnamePath container.HostsPath = nc.HostsPath container.ResolvConfPath = nc.ResolvConfPath container.Config.Hostname = nc.Config.Hostname container.Config.Domainname = nc.Config.Domainname return nil } if container.daemon.config.DisableNetwork { container.Config.NetworkDisabled = true return container.buildHostnameAndHostsFiles("127.0.1.1") } if err := container.AllocateNetwork(); err != nil { return err } return container.buildHostnameAndHostsFiles(container.NetworkSettings.IPAddress) } // Make sure the config is compatible with the current kernel func (container *Container) verifyDaemonSettings() { if
container.Config.Memory > 0 && !container.daemon.sysInfo.MemoryLimit { log.Warnf("Your kernel does not support memory limit capabilities. Limitation discarded.") container.Config.Memory = 0 } if container.Config.Memory > 0 && !container.daemon.sysInfo.SwapLimit { log.Warnf("Your kernel does not support swap limit capabilities. Limitation discarded.") container.Config.MemorySwap = -1 } if container.daemon.sysInfo.IPv4ForwardingDisabled { log.Warnf("IPv4 forwarding is disabled. Networking will not work") } } func (container *Container) setupLinkedContainers() ([]string, error) { var ( env []string daemon = container.daemon ) children, err := daemon.Children(container.Name) if err != nil { return nil, err } if len(children) > 0 { container.activeLinks = make(map[string]*links.Link, len(children)) // If we encounter an error make sure that we rollback any network // config and iptables changes rollback := func() { for _, link := range container.activeLinks { link.Disable() } container.activeLinks = nil } for linkAlias, child := range children { if !child.IsRunning() { return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias) } link, err := links.NewLink( container.NetworkSettings.IPAddress, child.NetworkSettings.IPAddress, linkAlias, child.Config.Env, child.Config.ExposedPorts, daemon.eng) if err != nil { rollback() return nil, err } container.activeLinks[link.Alias()] = link if err := link.Enable(); err != nil { rollback() return nil, err } for _, envVar := range link.ToEnv() { env = append(env, envVar) } } } return env, nil } func (container *Container) createDaemonEnvironment(linkedEnv []string) []string { // if a domain name was specified, append it to the hostname (see #7851) fullHostname := container.Config.Hostname if container.Config.Domainname != "" { fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname) } // Setup environment env := []string{ "PATH=" + DefaultPathEnv, "HOSTNAME=" + fullHostname, // Note: we don't set HOME here because it'll get autoset intelligently // based on the value of USER inside dockerinit, but only if it isn't // set already (ie, that can be overridden by setting HOME via -e or ENV // in a Dockerfile). } if container.Config.Tty { env = append(env, "TERM=xterm") } env = append(env, linkedEnv...) // because the env on the container can override certain default values // we need to replace the 'env' keys where they match and append anything // else. 
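// For example (illustrative values, not part of the original source):
//
//	env                  = []string{"PATH=...", "HOSTNAME=web", "TERM=xterm"}
//	container.Config.Env = []string{"PATH=/custom/bin", "FOO=bar"}
//	// after ReplaceOrAppendEnvValues:
//	//   []string{"PATH=/custom/bin", "HOSTNAME=web", "TERM=xterm", "FOO=bar"}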
env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env) return env } func (container *Container) setupWorkingDirectory() error { if container.Config.WorkingDir != "" { container.Config.WorkingDir = path.Clean(container.Config.WorkingDir) pth, err := container.getResourcePath(container.Config.WorkingDir) if err != nil { return err } pthInfo, err := os.Stat(pth) if err != nil { if !os.IsNotExist(err) { return err } if err := os.MkdirAll(pth, 0755); err != nil { return err } } if pthInfo != nil && !pthInfo.IsDir() { return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir) } } return nil } func (container *Container) startLogging() error { cfg := container.hostConfig.LogConfig if cfg.Type == "" { cfg = container.daemon.defaultLogConfig } var l logger.Logger switch cfg.Type { case "json-file": pth, err := container.logPath("json") if err != nil { return err } container.LogPath = pth dl, err := jsonfilelog.New(pth) if err != nil { return err } l = dl case "syslog": dl, err := syslog.New(container.ID[:12]) if err != nil { return err } l = dl case "none": return nil default: return fmt.Errorf("Unknown logging driver: %s", cfg.Type) } copier, err := logger.NewCopier(container.ID, map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l) if err != nil { return err } container.logCopier = copier copier.Run() container.logDriver = l return nil } func (container *Container) waitForStart() error { container.monitor = newContainerMonitor(container, container.hostConfig.RestartPolicy) // block until we either receive an error from the initial start of the container's // process or until the process is running in the container select { case <-container.monitor.startSignal: case err := <-promise.Go(container.monitor.Start): return err } return nil } func (container *Container) allocatePort(eng *engine.Engine, port nat.Port, bindings nat.PortMap) error { binding := bindings[port] if container.hostConfig.PublishAllPorts && len(binding) == 0 { binding = append(binding, nat.PortBinding{}) } for i := 0; i < len(binding); i++ { b := binding[i] job := eng.Job("allocate_port", container.ID) job.Setenv("HostIP", b.HostIp) job.Setenv("HostPort", b.HostPort) job.Setenv("Proto", port.Proto()) job.Setenv("ContainerPort", port.Port()) portEnv, err := job.Stdout.AddEnv() if err != nil { return err } if err := job.Run(); err != nil { return err } b.HostIp = portEnv.Get("HostIP") b.HostPort = portEnv.Get("HostPort") binding[i] = b } bindings[port] = binding return nil } func (container *Container) GetProcessLabel() string { // even if we have a process label return "" if we are running // in privileged mode if container.hostConfig.Privileged { return "" } return container.ProcessLabel } func (container *Container) GetMountLabel() string { if container.hostConfig.Privileged { return "" } return container.MountLabel } func (container *Container) getIpcContainer() (*Container, error) { containerID := container.hostConfig.IpcMode.Container() c, err := container.daemon.Get(containerID) if err != nil { return nil, err } if !c.IsRunning() { return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID) } return c, nil } func (container *Container) getNetworkedContainer() (*Container, error) { parts := strings.SplitN(string(container.hostConfig.NetworkMode), ":", 2) switch parts[0] { case "container": if len(parts) != 2 { return nil, fmt.Errorf("no container specified to join network") } nc, err := container.daemon.Get(parts[1]) if err != 
nil { return nil, err } if !nc.IsRunning() { return nil, fmt.Errorf("cannot join network of a non running container: %s", parts[1]) } return nc, nil default: return nil, fmt.Errorf("network mode not set to container") } } func (container *Container) Stats() (*execdriver.ResourceStats, error) { return container.daemon.Stats(container) } func (c *Container) LogDriverType() string { c.Lock() defer c.Unlock() if c.hostConfig.LogConfig.Type == "" { return c.daemon.defaultLogConfig.Type } return c.hostConfig.LogConfig.Type } docker-1.6.2/daemon/daemon_btrfs.go0000644000175000017500000000017012524223634016567 0ustar tianontianon// +build !exclude_graphdriver_btrfs package daemon import ( _ "github.com/docker/docker/daemon/graphdriver/btrfs" ) docker-1.6.2/daemon/container_unit_test.go0000644000175000017500000001011112524223634020200 0ustar tianontianonpackage daemon import ( "github.com/docker/docker/nat" "testing" ) func TestParseNetworkOptsPrivateOnly(t *testing.T) { ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100::80"}) if err != nil { t.Fatal(err) } if len(ports) != 1 { t.Logf("Expected 1 got %d", len(ports)) t.FailNow() } if len(bindings) != 1 { t.Logf("Expected 1 got %d", len(bindings)) t.FailNow() } for k := range ports { if k.Proto() != "tcp" { t.Logf("Expected tcp got %s", k.Proto()) t.Fail() } if k.Port() != "80" { t.Logf("Expected 80 got %s", k.Port()) t.Fail() } b, exists := bindings[k] if !exists { t.Log("Binding does not exist") t.FailNow() } if len(b) != 1 { t.Logf("Expected 1 got %d", len(b)) t.FailNow() } s := b[0] if s.HostPort != "" { t.Logf("Expected \"\" got %s", s.HostPort) t.Fail() } if s.HostIp != "192.168.1.100" { t.Fail() } } } func TestParseNetworkOptsPublic(t *testing.T) { ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100:8080:80"}) if err != nil { t.Fatal(err) } if len(ports) != 1 { t.Logf("Expected 1 got %d", len(ports)) t.FailNow() } if len(bindings) != 1 { t.Logf("Expected 1 got %d", len(bindings)) t.FailNow() } for k := range ports { if k.Proto() != "tcp" { t.Logf("Expected tcp got %s", k.Proto()) t.Fail() } if k.Port() != "80" { t.Logf("Expected 80 got %s", k.Port()) t.Fail() } b, exists := bindings[k] if !exists { t.Log("Binding does not exist") t.FailNow() } if len(b) != 1 { t.Logf("Expected 1 got %d", len(b)) t.FailNow() } s := b[0] if s.HostPort != "8080" { t.Logf("Expected 8080 got %s", s.HostPort) t.Fail() } if s.HostIp != "192.168.1.100" { t.Fail() } } } func TestParseNetworkOptsPublicNoPort(t *testing.T) { ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100"}) if err == nil { t.Logf("Expected error Invalid containerPort") t.Fail() } if ports != nil { t.Logf("Expected nil got %s", ports) t.Fail() } if bindings != nil { t.Logf("Expected nil got %s", bindings) t.Fail() } } func TestParseNetworkOptsNegativePorts(t *testing.T) { ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100:-1:-1"}) if err == nil { t.Fail() } t.Logf("%v", len(ports)) t.Logf("%v", bindings) if len(ports) != 0 { t.Logf("Expected nil got %s", len(ports)) t.Fail() } if len(bindings) != 0 { t.Logf("Expected 0 got %s", len(bindings)) t.Fail() } } func TestParseNetworkOptsUdp(t *testing.T) { ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100::6000/udp"}) if err != nil { t.Fatal(err) } if len(ports) != 1 { t.Logf("Expected 1 got %d", len(ports)) t.FailNow() } if len(bindings) != 1 { t.Logf("Expected 1 got %d", len(bindings)) t.FailNow() } for k := range ports { if k.Proto() != "udp" { t.Logf("Expected udp got %s", 
k.Proto()) t.Fail() } if k.Port() != "6000" { t.Logf("Expected 6000 got %s", k.Port()) t.Fail() } b, exists := bindings[k] if !exists { t.Log("Binding does not exist") t.FailNow() } if len(b) != 1 { t.Logf("Expected 1 got %d", len(b)) t.FailNow() } s := b[0] if s.HostPort != "" { t.Logf("Expected \"\" got %s", s.HostPort) t.Fail() } if s.HostIp != "192.168.1.100" { t.Fail() } } } func TestGetFullName(t *testing.T) { name, err := GetFullContainerName("testing") if err != nil { t.Fatal(err) } if name != "/testing" { t.Fatalf("Expected /testing got %s", name) } if _, err := GetFullContainerName(""); err == nil { t.Fatal("Error should not be nil") } } func TestValidContainerNames(t *testing.T) { invalidNames := []string{"-rm", "&sdfsfd", "safd%sd"} validNames := []string{"word-word", "word_word", "1weoid"} for _, name := range invalidNames { if validContainerNamePattern.MatchString(name) { t.Fatalf("%q is not a valid container name and was returned as valid.", name) } } for _, name := range validNames { if !validContainerNamePattern.MatchString(name) { t.Fatalf("%q is a valid container name and was returned as invalid.", name) } } } docker-1.6.2/daemon/restart.go0000644000175000017500000000107512524223634015615 0ustar tianontianonpackage daemon import ( "github.com/docker/docker/engine" ) func (daemon *Daemon) ContainerRestart(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("Usage: %s CONTAINER\n", job.Name) } var ( name = job.Args[0] t = 10 ) if job.EnvExists("t") { t = job.GetenvInt("t") } container, err := daemon.Get(name) if err != nil { return job.Error(err) } if err := container.Restart(int(t)); err != nil { return job.Errorf("Cannot restart container %s: %s\n", name, err) } container.LogEvent("restart") return engine.StatusOK } docker-1.6.2/daemon/delete.go0000644000175000017500000001117712524223634015377 0ustar tianontianonpackage daemon import ( "fmt" "os" "path" log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" ) func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name) } name := job.Args[0] removeVolume := job.GetenvBool("removeVolume") removeLink := job.GetenvBool("removeLink") forceRemove := job.GetenvBool("forceRemove") container, err := daemon.Get(name) if err != nil { return job.Error(err) } if removeLink { name, err := GetFullContainerName(name) if err != nil { job.Error(err) } parent, n := path.Split(name) if parent == "/" { return job.Errorf("Conflict, cannot remove the default name of the container") } pe := daemon.ContainerGraph().Get(parent) if pe == nil { return job.Errorf("Cannot get parent %s for name %s", parent, name) } parentContainer, _ := daemon.Get(pe.ID()) if parentContainer != nil { parentContainer.DisableLink(n) } if err := daemon.ContainerGraph().Delete(name); err != nil { return job.Error(err) } return engine.StatusOK } if container != nil { // stop collection of stats for the container regardless // if stats are currently getting collected. daemon.statsCollector.stopCollection(container) if container.IsRunning() { if forceRemove { if err := container.Kill(); err != nil { return job.Errorf("Could not kill running container, cannot remove - %v", err) } } else { return job.Errorf("Conflict, You cannot remove a running container. 
Stop the container before attempting removal or use -f") } } if forceRemove { if err := daemon.ForceRm(container); err != nil { log.Errorf("Cannot destroy container %s: %v", name, err) } } else { if err := daemon.Rm(container); err != nil { return job.Errorf("Cannot destroy container %s: %v", name, err) } } container.LogEvent("destroy") if removeVolume { daemon.DeleteVolumes(container.VolumePaths()) } } return engine.StatusOK } func (daemon *Daemon) DeleteVolumes(volumeIDs map[string]struct{}) { for id := range volumeIDs { if err := daemon.volumes.Delete(id); err != nil { log.Infof("%s", err) continue } } } func (daemon *Daemon) Rm(container *Container) (err error) { return daemon.commonRm(container, false) } func (daemon *Daemon) ForceRm(container *Container) (err error) { return daemon.commonRm(container, true) } // Destroy unregisters a container from the daemon and cleanly removes its contents from the filesystem. func (daemon *Daemon) commonRm(container *Container, forceRemove bool) (err error) { if container == nil { return fmt.Errorf("The given container is <nil>") } element := daemon.containers.Get(container.ID) if element == nil { return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.ID) } // Container state RemovalInProgress should be used to avoid races. if err = container.SetRemovalInProgress(); err != nil { return fmt.Errorf("Failed to set container state to RemovalInProgress: %s", err) } defer container.ResetRemovalInProgress() if err = container.Stop(3); err != nil { return err } // Mark container dead. We don't want anybody to be restarting it. container.SetDead() // Save container state to disk. So that if an error happens before // the container meta file got removed from disk, then a restart of // docker should not make a dead container alive. container.ToDisk() // If force removal is required, delete container from various // indexes even if removal failed.
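// (A note on the pattern below, not part of the original source: err is a
// named return value, so the deferred closure observes whatever error
// commonRm ultimately returns. A minimal sketch of the same idiom:
//
//	func remove() (err error) {
//		defer func() {
//			if err != nil {
//				// cleanup that must happen even on failure
//			}
//		}()
//		return errors.New("driver failed")
//	}
//
// which is what lets the index deletion run even when a later step fails.)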
defer func() { if err != nil && forceRemove { daemon.idIndex.Delete(container.ID) daemon.containers.Delete(container.ID) } }() container.derefVolumes() if _, err := daemon.containerGraph.Purge(container.ID); err != nil { log.Debugf("Unable to remove container from link graph: %s", err) } if err = daemon.driver.Remove(container.ID); err != nil { return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.driver, container.ID, err) } initID := fmt.Sprintf("%s-init", container.ID) if err := daemon.driver.Remove(initID); err != nil { return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", daemon.driver, initID, err) } if err = os.RemoveAll(container.root); err != nil { return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err) } if err = daemon.execDriver.Clean(container.ID); err != nil { return fmt.Errorf("Unable to remove execdriver data for %s: %s", container.ID, err) } selinuxFreeLxcContexts(container.ProcessLabel) daemon.idIndex.Delete(container.ID) daemon.containers.Delete(container.ID) return nil } docker-1.6.2/daemon/exec.go0000644000175000017500000002103512524223634015053 0ustar tianontianonpackage daemon import ( "fmt" "io" "io/ioutil" "strings" "sync" log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/daemon/execdriver/lxc" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/broadcastwriter" "github.com/docker/docker/pkg/common" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/runconfig" ) type execConfig struct { sync.Mutex ID string Running bool ExitCode int ProcessConfig execdriver.ProcessConfig StreamConfig OpenStdin bool OpenStderr bool OpenStdout bool Container *Container } type execStore struct { s map[string]*execConfig sync.RWMutex } func newExecStore() *execStore { return &execStore{s: make(map[string]*execConfig, 0)} } func (e *execStore) Add(id string, execConfig *execConfig) { e.Lock() e.s[id] = execConfig e.Unlock() } func (e *execStore) Get(id string) *execConfig { e.RLock() res := e.s[id] e.RUnlock() return res } func (e *execStore) Delete(id string) { e.Lock() delete(e.s, id) e.Unlock() } func (e *execStore) List() []string { var IDs []string e.RLock() for id := range e.s { IDs = append(IDs, id) } e.RUnlock() return IDs } func (execConfig *execConfig) Resize(h, w int) error { return execConfig.ProcessConfig.Terminal.Resize(h, w) } func (d *Daemon) registerExecCommand(execConfig *execConfig) { // Storing execs in container in order to kill them gracefully whenever the container is stopped or removed. execConfig.Container.execCommands.Add(execConfig.ID, execConfig) // Storing execs in daemon for easy access via remote API. 
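// A usage sketch of the exec store (illustrative, not part of the original
// source):
//
//	s := newExecStore()
//	s.Add(cfg.ID, cfg)
//	if c := s.Get(cfg.ID); c != nil {
//		// the remote API can resolve an exec by its ID alone
//	}
//	s.Delete(cfg.ID)
//
// Registering in both stores lets a stopping container kill its own execs
// while the daemon can still look up any exec by ID.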
d.execCommands.Add(execConfig.ID, execConfig) } func (d *Daemon) getExecConfig(name string) (*execConfig, error) { if execConfig := d.execCommands.Get(name); execConfig != nil { if !execConfig.Container.IsRunning() { return nil, fmt.Errorf("Container %s is not running", execConfig.Container.ID) } return execConfig, nil } return nil, fmt.Errorf("No such exec instance '%s' found in daemon", name) } func (d *Daemon) unregisterExecCommand(execConfig *execConfig) { execConfig.Container.execCommands.Delete(execConfig.ID) d.execCommands.Delete(execConfig.ID) } func (d *Daemon) getActiveContainer(name string) (*Container, error) { container, err := d.Get(name) if err != nil { return nil, err } if !container.IsRunning() { return nil, fmt.Errorf("Container %s is not running", name) } if container.IsPaused() { return nil, fmt.Errorf("Container %s is paused, unpause the container before exec", name) } return container, nil } func (d *Daemon) ContainerExecCreate(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("Usage: %s [options] container command [args]", job.Name) } if strings.HasPrefix(d.execDriver.Name(), lxc.DriverName) { return job.Error(lxc.ErrExec) } var name = job.Args[0] container, err := d.getActiveContainer(name) if err != nil { return job.Error(err) } config, err := runconfig.ExecConfigFromJob(job) if err != nil { return job.Error(err) } entrypoint, args := d.getEntrypointAndArgs(nil, config.Cmd) processConfig := execdriver.ProcessConfig{ Tty: config.Tty, Entrypoint: entrypoint, Arguments: args, } execConfig := &execConfig{ ID: common.GenerateRandomID(), OpenStdin: config.AttachStdin, OpenStdout: config.AttachStdout, OpenStderr: config.AttachStderr, StreamConfig: StreamConfig{}, ProcessConfig: processConfig, Container: container, Running: false, } container.LogEvent("exec_create: " + execConfig.ProcessConfig.Entrypoint + " " + strings.Join(execConfig.ProcessConfig.Arguments, " ")) d.registerExecCommand(execConfig) job.Printf("%s\n", execConfig.ID) return engine.StatusOK } func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("Usage: %s [options] exec", job.Name) } var ( cStdin io.ReadCloser cStdout, cStderr io.Writer execName = job.Args[0] ) execConfig, err := d.getExecConfig(execName) if err != nil { return job.Error(err) } func() { execConfig.Lock() defer execConfig.Unlock() if execConfig.Running { err = fmt.Errorf("Error: Exec command %s is already running", execName) } execConfig.Running = true }() if err != nil { return job.Error(err) } log.Debugf("starting exec command %s in container %s", execConfig.ID, execConfig.Container.ID) container := execConfig.Container container.LogEvent("exec_start: " + execConfig.ProcessConfig.Entrypoint + " " + strings.Join(execConfig.ProcessConfig.Arguments, " ")) if execConfig.OpenStdin { r, w := io.Pipe() go func() { defer w.Close() defer log.Debugf("Closing buffered stdin pipe") io.Copy(w, job.Stdin) }() cStdin = r } if execConfig.OpenStdout { cStdout = job.Stdout } if execConfig.OpenStderr { cStderr = job.Stderr } execConfig.StreamConfig.stderr = broadcastwriter.New() execConfig.StreamConfig.stdout = broadcastwriter.New() // Attach to stdin if execConfig.OpenStdin { execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdinPipe = io.Pipe() } else { execConfig.StreamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin } attachErr := d.Attach(&execConfig.StreamConfig, execConfig.OpenStdin, true, execConfig.ProcessConfig.Tty, cStdin, 
cStdout, cStderr) execErr := make(chan error) // Note, the execConfig data will be removed when the container // itself is deleted. This allows us to query it (for things like // the exitStatus) even after the cmd is done running. go func() { err := container.Exec(execConfig) if err != nil { execErr <- fmt.Errorf("Cannot run exec command %s in container %s: %s", execName, container.ID, err) } }() select { case err := <-attachErr: if err != nil { return job.Errorf("attach failed with error: %s", err) } break case err := <-execErr: return job.Error(err) } return engine.StatusOK } func (d *Daemon) Exec(c *Container, execConfig *execConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { exitStatus, err := d.execDriver.Exec(c.command, &execConfig.ProcessConfig, pipes, startCallback) // On err, make sure we don't leave ExitCode at zero if err != nil && exitStatus == 0 { exitStatus = 128 } execConfig.ExitCode = exitStatus execConfig.Running = false return exitStatus, err } func (container *Container) GetExecIDs() []string { return container.execCommands.List() } func (container *Container) Exec(execConfig *execConfig) error { container.Lock() defer container.Unlock() waitStart := make(chan struct{}) callback := func(processConfig *execdriver.ProcessConfig, pid int) { if processConfig.Tty { // The callback is called after the process Start() // so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave // which we close here. if c, ok := processConfig.Stdout.(io.Closer); ok { c.Close() } } close(waitStart) } // We use a callback here instead of a goroutine and a chan for // synchronization purposes cErr := promise.Go(func() error { return container.monitorExec(execConfig, callback) }) // Exec should not return until the process is actually running select { case <-waitStart: case err := <-cErr: return err } return nil } func (container *Container) monitorExec(execConfig *execConfig, callback execdriver.StartCallback) error { var ( err error exitCode int ) pipes := execdriver.NewPipes(execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdout, execConfig.StreamConfig.stderr, execConfig.OpenStdin) exitCode, err = container.daemon.Exec(container, execConfig, pipes, callback) if err != nil { log.Errorf("Error running command in existing container %s: %s", container.ID, err) } log.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode) if execConfig.OpenStdin { if err := execConfig.StreamConfig.stdin.Close(); err != nil { log.Errorf("Error closing stdin while running in %s: %s", container.ID, err) } } if err := execConfig.StreamConfig.stdout.Clean(); err != nil { log.Errorf("Error closing stdout while running in %s: %s", container.ID, err) } if err := execConfig.StreamConfig.stderr.Clean(); err != nil { log.Errorf("Error closing stderr while running in %s: %s", container.ID, err) } if execConfig.ProcessConfig.Terminal != nil { if err := execConfig.ProcessConfig.Terminal.Close(); err != nil { log.Errorf("Error closing terminal while running in container %s: %s", container.ID, err) } } return err } docker-1.6.2/daemon/logger/0000755000175000017500000000000012524223634015056 5ustar tianontianondocker-1.6.2/daemon/logger/syslog/0000755000175000017500000000000012524223634016376 5ustar tianontianondocker-1.6.2/daemon/logger/syslog/syslog.go0000644000175000017500000000133312524223634020245 0ustar tianontianonpackage syslog import ( "fmt" "log/syslog" "os" "path" "sync" "github.com/docker/docker/daemon/logger" ) type Syslog struct { 
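// writer is the connection to the local syslog daemon opened by New below;
// Log routes "stderr" lines to it at Err priority and everything else at Info.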
writer *syslog.Writer tag string mu sync.Mutex } func New(tag string) (logger.Logger, error) { log, err := syslog.New(syslog.LOG_DAEMON, fmt.Sprintf("%s/%s", path.Base(os.Args[0]), tag)) if err != nil { return nil, err } return &Syslog{ writer: log, }, nil } func (s *Syslog) Log(msg *logger.Message) error { if msg.Source == "stderr" { return s.writer.Err(string(msg.Line)) } return s.writer.Info(string(msg.Line)) } func (s *Syslog) Close() error { if s.writer != nil { return s.writer.Close() } return nil } func (s *Syslog) Name() string { return "Syslog" } docker-1.6.2/daemon/logger/copier.go0000644000175000017500000000252412524223634016671 0ustar tianontianonpackage logger import ( "bufio" "io" "sync" "time" "github.com/Sirupsen/logrus" ) // Copier can copy logs from specified sources to Logger and attach // ContainerID and Timestamp. // Writes are concurrent, so you need to implement some synchronization in your logger type Copier struct { // cid is the container id for which we are copying logs cid string // srcs is a map of name -> reader pairs, for example "stdout", "stderr" srcs map[string]io.Reader dst Logger copyJobs sync.WaitGroup } // NewCopier creates a new Copier func NewCopier(cid string, srcs map[string]io.Reader, dst Logger) (*Copier, error) { return &Copier{ cid: cid, srcs: srcs, dst: dst, }, nil } // Run starts copying logs func (c *Copier) Run() { for src, w := range c.srcs { c.copyJobs.Add(1) go c.copySrc(src, w) } } func (c *Copier) copySrc(name string, src io.Reader) { defer c.copyJobs.Done() scanner := bufio.NewScanner(src) for scanner.Scan() { if err := c.dst.Log(&Message{ContainerID: c.cid, Line: scanner.Bytes(), Source: name, Timestamp: time.Now().UTC()}); err != nil { logrus.Errorf("Failed to log msg %q for logger %s: %s", scanner.Bytes(), c.dst.Name(), err) } } if err := scanner.Err(); err != nil { logrus.Errorf("Error scanning log stream: %s", err) } } // Wait waits until all copying is done func (c *Copier) Wait() { c.copyJobs.Wait() } docker-1.6.2/daemon/logger/copier_test.go0000644000175000017500000000434012524223634017726 0ustar tianontianonpackage logger import ( "bytes" "encoding/json" "io" "testing" "time" ) type TestLoggerJSON struct { *json.Encoder } func (l *TestLoggerJSON) Log(m *Message) error { return l.Encode(m) } func (l *TestLoggerJSON) Close() error { return nil } func (l *TestLoggerJSON) Name() string { return "json" } type TestLoggerText struct { *bytes.Buffer } func (l *TestLoggerText) Log(m *Message) error { _, err := l.WriteString(m.ContainerID + " " + m.Source + " " + string(m.Line) + "\n") return err } func (l *TestLoggerText) Close() error { return nil } func (l *TestLoggerText) Name() string { return "text" } func TestCopier(t *testing.T) { stdoutLine := "Line that thinks that it is log line from docker stdout" stderrLine := "Line that thinks that it is log line from docker stderr" var stdout bytes.Buffer var stderr bytes.Buffer for i := 0; i < 30; i++ { if _, err := stdout.WriteString(stdoutLine + "\n"); err != nil { t.Fatal(err) } if _, err := stderr.WriteString(stderrLine + "\n"); err != nil { t.Fatal(err) } } var jsonBuf bytes.Buffer jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf)} cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" c, err := NewCopier(cid, map[string]io.Reader{ "stdout": &stdout, "stderr": &stderr, }, jsonLog) if err != nil { t.Fatal(err) } c.Run() wait := make(chan struct{}) go func() { c.Wait() close(wait) }() select { case <-time.After(1 * time.Second): t.Fatal("Copier failed to do its work in 1 
second") case <-wait: } dec := json.NewDecoder(&jsonBuf) for { var msg Message if err := dec.Decode(&msg); err != nil { if err == io.EOF { break } t.Fatal(err) } if msg.Source != "stdout" && msg.Source != "stderr" { t.Fatalf("Wrong Source: %q, should be %q or %q", msg.Source, "stdout", "stderr") } if msg.ContainerID != cid { t.Fatalf("Wrong ContainerID: %q, expected %q", msg.ContainerID, cid) } if msg.Source == "stdout" { if string(msg.Line) != stdoutLine { t.Fatalf("Wrong Line: %q, expected %q", msg.Line, stdoutLine) } } if msg.Source == "stderr" { if string(msg.Line) != stderrLine { t.Fatalf("Wrong Line: %q, expected %q", msg.Line, stderrLine) } } } } docker-1.6.2/daemon/logger/jsonfilelog/0000755000175000017500000000000012524223634017371 5ustar tianontianondocker-1.6.2/daemon/logger/jsonfilelog/jsonfilelog_test.go0000644000175000017500000000414512524223634023276 0ustar tianontianonpackage jsonfilelog import ( "io/ioutil" "os" "path/filepath" "testing" "time" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/pkg/jsonlog" ) func TestJSONFileLogger(t *testing.T) { tmp, err := ioutil.TempDir("", "docker-logger-") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) filename := filepath.Join(tmp, "container.log") l, err := New(filename) if err != nil { t.Fatal(err) } defer l.Close() cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" if err := l.Log(&logger.Message{ContainerID: cid, Line: []byte("line1"), Source: "src1"}); err != nil { t.Fatal(err) } if err := l.Log(&logger.Message{ContainerID: cid, Line: []byte("line2"), Source: "src2"}); err != nil { t.Fatal(err) } if err := l.Log(&logger.Message{ContainerID: cid, Line: []byte("line3"), Source: "src3"}); err != nil { t.Fatal(err) } res, err := ioutil.ReadFile(filename) if err != nil { t.Fatal(err) } expected := `{"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line2\n","stream":"src2","time":"0001-01-01T00:00:00Z"} {"log":"line3\n","stream":"src3","time":"0001-01-01T00:00:00Z"} ` if string(res) != expected { t.Fatalf("Wrong log content: %q, expected %q", res, expected) } } func BenchmarkJSONFileLogger(b *testing.B) { tmp, err := ioutil.TempDir("", "docker-logger-") if err != nil { b.Fatal(err) } defer os.RemoveAll(tmp) filename := filepath.Join(tmp, "container.log") l, err := New(filename) if err != nil { b.Fatal(err) } defer l.Close() cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" testLine := "Line that thinks that it is log line from docker\n" msg := &logger.Message{ContainerID: cid, Line: []byte(testLine), Source: "stderr", Timestamp: time.Now().UTC()} jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON() if err != nil { b.Fatal(err) } b.SetBytes(int64(len(jsonlog)+1) * 30) b.ResetTimer() for i := 0; i < b.N; i++ { for j := 0; j < 30; j++ { if err := l.Log(msg); err != nil { b.Fatal(err) } } } } docker-1.6.2/daemon/logger/jsonfilelog/jsonfilelog.go0000644000175000017500000000253712524223634022242 0ustar tianontianonpackage jsonfilelog import ( "bytes" "os" "sync" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/pkg/jsonlog" ) // JSONFileLogger is Logger implementation for default docker logging: // JSON objects to file type JSONFileLogger struct { buf *bytes.Buffer f *os.File // store for closing mu sync.Mutex // protects buffer } // New creates new JSONFileLogger which writes to filename func New(filename string) (logger.Logger, error) { log, err := 
os.OpenFile(filename, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600) if err != nil { return nil, err } return &JSONFileLogger{ f: log, buf: bytes.NewBuffer(nil), }, nil } // Log converts logger.Message to jsonlog.JSONLog and serializes it to file func (l *JSONFileLogger) Log(msg *logger.Message) error { l.mu.Lock() defer l.mu.Unlock() err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSONBuf(l.buf) if err != nil { return err } l.buf.WriteByte('\n') _, err = l.buf.WriteTo(l.f) if err != nil { // this buffer is screwed, replace it with another to avoid races l.buf = bytes.NewBuffer(nil) return err } return nil } // Close closes the underlying file func (l *JSONFileLogger) Close() error { return l.f.Close() } // Name returns the name of this logger func (l *JSONFileLogger) Name() string { return "JSONFile" } docker-1.6.2/daemon/logger/logger.go0000644000175000017500000000052112524223634016662 0ustar tianontianonpackage logger import "time" // Message is the data structure that represents a record from some container type Message struct { ContainerID string Line []byte Source string Timestamp time.Time } // Logger is the interface for docker logging drivers type Logger interface { Log(*Message) error Name() string Close() error } docker-1.6.2/daemon/wait.go0000644000175000017500000000066712524223634015103 0ustar tianontianonpackage daemon import ( "time" "github.com/docker/docker/engine" ) func (daemon *Daemon) ContainerWait(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("Usage: %s", job.Name) } name := job.Args[0] container, err := daemon.Get(name) if err != nil { return job.Errorf("%s: %v", job.Name, err) } status, _ := container.WaitStop(-1 * time.Second) job.Printf("%d\n", status) return engine.StatusOK } docker-1.6.2/daemon/utils_nolinux.go0000644000175000017500000000023512524223634017042 0ustar tianontianon// +build !linux package daemon func selinuxSetDisabled() { } func selinuxFreeLxcContexts(label string) { } func selinuxEnabled() bool { return false } docker-1.6.2/daemon/attach.go0000644000175000017500000001104212524223634015370 0ustar tianontianonpackage daemon import ( "encoding/json" "io" "os" "sync" "time" log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/jsonlog" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/utils" ) func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("Usage: %s CONTAINER\n", job.Name) } var ( name = job.Args[0] logs = job.GetenvBool("logs") stream = job.GetenvBool("stream") stdin = job.GetenvBool("stdin") stdout = job.GetenvBool("stdout") stderr = job.GetenvBool("stderr") ) container, err := daemon.Get(name) if err != nil { return job.Error(err) } //logs if logs { cLog, err := container.ReadLog("json") if err != nil && os.IsNotExist(err) { // Legacy logs log.Debugf("Old logs format") if stdout { cLog, err := container.ReadLog("stdout") if err != nil { log.Errorf("Error reading logs (stdout): %s", err) } else if _, err := io.Copy(job.Stdout, cLog); err != nil { log.Errorf("Error streaming logs (stdout): %s", err) } } if stderr { cLog, err := container.ReadLog("stderr") if err != nil { log.Errorf("Error reading logs (stderr): %s", err) } else if _, err := io.Copy(job.Stderr, cLog); err != nil { log.Errorf("Error streaming logs (stderr): %s", err) } } } else if err != nil { log.Errorf("Error reading logs (json): %s", err) } else { dec := json.NewDecoder(cLog) for { l := 
&jsonlog.JSONLog{} if err := dec.Decode(l); err == io.EOF { break } else if err != nil { log.Errorf("Error streaming logs: %s", err) break } if l.Stream == "stdout" && stdout { io.WriteString(job.Stdout, l.Log) } if l.Stream == "stderr" && stderr { io.WriteString(job.Stderr, l.Log) } } } } //stream if stream { var ( cStdin io.ReadCloser cStdout, cStderr io.Writer ) if stdin { r, w := io.Pipe() go func() { defer w.Close() defer log.Debugf("Closing buffered stdin pipe") io.Copy(w, job.Stdin) }() cStdin = r } if stdout { cStdout = job.Stdout } if stderr { cStderr = job.Stderr } <-daemon.Attach(&container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, cStdin, cStdout, cStderr) // If we are in stdinonce mode, wait for the process to end // otherwise, simply return if container.Config.StdinOnce && !container.Config.Tty { container.WaitStop(-1 * time.Second) } } return engine.StatusOK } func (daemon *Daemon) Attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error { var ( cStdout, cStderr io.ReadCloser cStdin io.WriteCloser wg sync.WaitGroup errors = make(chan error, 3) ) if stdin != nil && openStdin { cStdin = streamConfig.StdinPipe() wg.Add(1) } if stdout != nil { cStdout = streamConfig.StdoutPipe() wg.Add(1) } if stderr != nil { cStderr = streamConfig.StderrPipe() wg.Add(1) } // Connect stdin of container to the http conn. go func() { if stdin == nil || !openStdin { return } log.Debugf("attach: stdin: begin") defer func() { if stdinOnce && !tty { cStdin.Close() } else { // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr if cStdout != nil { cStdout.Close() } if cStderr != nil { cStderr.Close() } } wg.Done() log.Debugf("attach: stdin: end") }() var err error if tty { _, err = utils.CopyEscapable(cStdin, stdin) } else { _, err = io.Copy(cStdin, stdin) } if err == io.ErrClosedPipe { err = nil } if err != nil { log.Errorf("attach: stdin: %s", err) errors <- err return } }() attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) { if stream == nil { return } defer func() { // Make sure stdin gets closed if stdin != nil { stdin.Close() } streamPipe.Close() wg.Done() log.Debugf("attach: %s: end", name) }() log.Debugf("attach: %s: begin", name) _, err := io.Copy(stream, streamPipe) if err == io.ErrClosedPipe { err = nil } if err != nil { log.Errorf("attach: %s: %v", name, err) errors <- err } } go attachStream("stdout", stdout, cStdout) go attachStream("stderr", stderr, cStderr) return promise.Go(func() error { wg.Wait() close(errors) for err := range errors { if err != nil { return err } } return nil }) } docker-1.6.2/daemon/daemon_overlay.go0000644000175000017500000000017412524223634017134 0ustar tianontianon// +build !exclude_graphdriver_overlay package daemon import ( _ "github.com/docker/docker/daemon/graphdriver/overlay" ) docker-1.6.2/daemon/state_test.go0000644000175000017500000000514512524223634016312 0ustar tianontianonpackage daemon import ( "sync/atomic" "testing" "time" "github.com/docker/docker/daemon/execdriver" ) func TestStateRunStop(t *testing.T) { s := NewState() for i := 1; i < 3; i++ { // full lifecycle two times started := make(chan struct{}) var pid int64 go func() { runPid, _ := s.WaitRunning(-1 * time.Second) atomic.StoreInt64(&pid, int64(runPid)) close(started) }() s.SetRunning(i + 100) if !s.IsRunning() { t.Fatal("State not running") } if s.Pid != i+100 { t.Fatalf("Pid %v, expected %v", s.Pid, i+100) 
} if s.ExitCode != 0 { t.Fatalf("ExitCode %v, expected 0", s.ExitCode) } select { case <-time.After(100 * time.Millisecond): t.Fatal("Start callback doesn't fire in 100 milliseconds") case <-started: t.Log("Start callback fired") } runPid := int(atomic.LoadInt64(&pid)) if runPid != i+100 { t.Fatalf("Pid %v, expected %v", runPid, i+100) } if pid, err := s.WaitRunning(-1 * time.Second); err != nil || pid != i+100 { t.Fatalf("WaitRunning returned pid: %v, err: %v, expected pid: %v, err: %v", pid, err, i+100, nil) } stopped := make(chan struct{}) var exit int64 go func() { exitCode, _ := s.WaitStop(-1 * time.Second) atomic.StoreInt64(&exit, int64(exitCode)) close(stopped) }() s.SetStopped(&execdriver.ExitStatus{ExitCode: i}) if s.IsRunning() { t.Fatal("State is running") } if s.ExitCode != i { t.Fatalf("ExitCode %v, expected %v", s.ExitCode, i) } if s.Pid != 0 { t.Fatalf("Pid %v, expected 0", s.Pid) } select { case <-time.After(100 * time.Millisecond): t.Fatal("Stop callback doesn't fire in 100 milliseconds") case <-stopped: t.Log("Stop callback fired") } exitCode := int(atomic.LoadInt64(&exit)) if exitCode != i { t.Fatalf("ExitCode %v, expected %v", exitCode, i) } if exitCode, err := s.WaitStop(-1 * time.Second); err != nil || exitCode != i { t.Fatalf("WaitStop returned exitCode: %v, err: %v, expected exitCode: %v, err: %v", exitCode, err, i, nil) } } } func TestStateTimeoutWait(t *testing.T) { s := NewState() started := make(chan struct{}) go func() { s.WaitRunning(100 * time.Millisecond) close(started) }() select { case <-time.After(200 * time.Millisecond): t.Fatal("Start callback doesn't fire in 100 milliseconds") case <-started: t.Log("Start callback fired") } s.SetRunning(42) stopped := make(chan struct{}) go func() { s.WaitRunning(100 * time.Millisecond) close(stopped) }() select { case <-time.After(200 * time.Millisecond): t.Fatal("Start callback doesn't fire in 100 milliseconds") case <-stopped: t.Log("Start callback fired") } } docker-1.6.2/daemon/utils_test.go0000644000175000017500000000102312524223634016321 0ustar tianontianonpackage daemon import ( "testing" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) func TestMergeLxcConfig(t *testing.T) { hostConfig := &runconfig.HostConfig{ LxcConf: []utils.KeyValuePair{ {Key: "lxc.cgroups.cpuset", Value: "1,2"}, }, } out, err := mergeLxcConfIntoOptions(hostConfig) if err != nil { t.Fatalf("Failed to merge Lxc Config: %s", err) } cpuset := out[0] if expected := "cgroups.cpuset=1,2"; cpuset != expected { t.Fatalf("expected %s got %s", expected, cpuset) } } docker-1.6.2/daemon/state.go0000644000175000017500000001241712524223634015253 0ustar tianontianonpackage daemon import ( "fmt" "sync" "time" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/pkg/units" ) type State struct { sync.Mutex Running bool Paused bool Restarting bool OOMKilled bool removalInProgress bool // No need for this to be persistent on disk. 
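// Dead marks a container that entered removal; it is persisted via ToDisk so
// that a daemon restart can never bring the container back to life.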
Dead bool Pid int ExitCode int Error string // contains last known error when starting the container StartedAt time.Time FinishedAt time.Time waitChan chan struct{} } func NewState() *State { return &State{ waitChan: make(chan struct{}), } } // String returns a human-readable description of the state func (s *State) String() string { if s.Running { if s.Paused { return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) } if s.Restarting { return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) } return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) } if s.removalInProgress { return "Removal In Progress" } if s.Dead { return "Dead" } if s.FinishedAt.IsZero() { return "" } return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) } // StateString returns a single string to describe state func (s *State) StateString() string { if s.Running { if s.Paused { return "paused" } if s.Restarting { return "restarting" } return "running" } if s.Dead { return "dead" } return "exited" } func wait(waitChan <-chan struct{}, timeout time.Duration) error { if timeout < 0 { <-waitChan return nil } select { case <-time.After(timeout): return fmt.Errorf("Timed out: %v", timeout) case <-waitChan: return nil } } // WaitRunning waits until the state is running. If the state is already running it returns // immediately. To wait forever, supply a negative timeout. // Returns the pid that was passed to SetRunning func (s *State) WaitRunning(timeout time.Duration) (int, error) { s.Lock() if s.Running { pid := s.Pid s.Unlock() return pid, nil } waitChan := s.waitChan s.Unlock() if err := wait(waitChan, timeout); err != nil { return -1, err } return s.GetPid(), nil } // WaitStop waits until the state is stopped. If the state is already stopped it returns // immediately. To wait forever, supply a negative timeout. 
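// For example, a hedged sketch of a bounded wait:
//
//	exitCode, err := s.WaitStop(10 * time.Second)
//	if err != nil {
//		// timed out; the process has not stopped yet
//	}
//	_ = exitCode
//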
// Returns the exit code that was passed to SetStopped func (s *State) WaitStop(timeout time.Duration) (int, error) { s.Lock() if !s.Running { exitCode := s.ExitCode s.Unlock() return exitCode, nil } waitChan := s.waitChan s.Unlock() if err := wait(waitChan, timeout); err != nil { return -1, err } return s.GetExitCode(), nil } func (s *State) IsRunning() bool { s.Lock() res := s.Running s.Unlock() return res } func (s *State) GetPid() int { s.Lock() res := s.Pid s.Unlock() return res } func (s *State) GetExitCode() int { s.Lock() res := s.ExitCode s.Unlock() return res } func (s *State) SetRunning(pid int) { s.Lock() s.setRunning(pid) s.Unlock() } func (s *State) setRunning(pid int) { s.Error = "" s.Running = true s.Paused = false s.Restarting = false s.ExitCode = 0 s.Pid = pid s.StartedAt = time.Now().UTC() close(s.waitChan) // fire waiters for start s.waitChan = make(chan struct{}) } func (s *State) SetStopped(exitStatus *execdriver.ExitStatus) { s.Lock() s.setStopped(exitStatus) s.Unlock() } func (s *State) setStopped(exitStatus *execdriver.ExitStatus) { s.Running = false s.Restarting = false s.Pid = 0 s.FinishedAt = time.Now().UTC() s.ExitCode = exitStatus.ExitCode s.OOMKilled = exitStatus.OOMKilled close(s.waitChan) // fire waiters for stop s.waitChan = make(chan struct{}) } // SetRestarting is used when docker handles the auto restart of containers when they are // in the middle of a stop and being restarted again func (s *State) SetRestarting(exitStatus *execdriver.ExitStatus) { s.Lock() // we should consider the container running when it is restarting because of // all the checks in docker around rm/stop/etc s.Running = true s.Restarting = true s.Pid = 0 s.FinishedAt = time.Now().UTC() s.ExitCode = exitStatus.ExitCode s.OOMKilled = exitStatus.OOMKilled close(s.waitChan) // fire waiters for stop s.waitChan = make(chan struct{}) s.Unlock() } // setError sets the container's error state. 
This is useful when we want to // know the error that occurred when the container transitions to another state // while inspecting it func (s *State) setError(err error) { s.Error = err.Error() } func (s *State) IsRestarting() bool { s.Lock() res := s.Restarting s.Unlock() return res } func (s *State) SetPaused() { s.Lock() s.Paused = true s.Unlock() } func (s *State) SetUnpaused() { s.Lock() s.Paused = false s.Unlock() } func (s *State) IsPaused() bool { s.Lock() res := s.Paused s.Unlock() return res } func (s *State) SetRemovalInProgress() error { s.Lock() defer s.Unlock() if s.removalInProgress { return fmt.Errorf("Status is already RemovalInProgress") } s.removalInProgress = true return nil } func (s *State) ResetRemovalInProgress() { s.Lock() s.removalInProgress = false s.Unlock() } func (s *State) SetDead() { s.Lock() s.Dead = true s.Unlock() } docker-1.6.2/daemon/utils.go0000644000175000017500000000253412524223634015272 0ustar tianontianonpackage daemon import ( "errors" "fmt" "strings" "github.com/docker/docker/nat" "github.com/docker/docker/runconfig" ) func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostConfig) error { if config.PortSpecs != nil { ports, bindings, err := nat.ParsePortSpecs(config.PortSpecs) if err != nil { return err } config.PortSpecs = nil if len(bindings) > 0 { if hostConfig == nil { hostConfig = &runconfig.HostConfig{} } hostConfig.PortBindings = bindings } if config.ExposedPorts == nil { config.ExposedPorts = make(nat.PortSet, len(ports)) } for k, v := range ports { config.ExposedPorts[k] = v } } return nil } func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig) ([]string, error) { if hostConfig == nil { return nil, nil } out := []string{} // merge in the lxc conf options into the generic config map if lxcConf := hostConfig.LxcConf; lxcConf != nil { for _, pair := range lxcConf { // because lxc conf gets the driver name lxc.XXXX we need to trim it off // and let the lxc driver add it back later if needed if !strings.Contains(pair.Key, ".") { return nil, errors.New("Illegal Key passed into LXC Configurations") } parts := strings.SplitN(pair.Key, ".", 2) out = append(out, fmt.Sprintf("%s=%s", parts[1], pair.Value)) } } return out, nil } docker-1.6.2/daemon/inspect.go0000644000175000017500000000533312524223634015577 0ustar tianontianonpackage daemon import ( "encoding/json" "fmt" "github.com/docker/docker/engine" "github.com/docker/docker/runconfig" ) func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("usage: %s NAME", job.Name) } name := job.Args[0] container, err := daemon.Get(name) if err != nil { return job.Error(err) } container.Lock() defer container.Unlock() if job.GetenvBool("raw") { b, err := json.Marshal(&struct { *Container HostConfig *runconfig.HostConfig }{container, container.hostConfig}) if err != nil { return job.Error(err) } job.Stdout.Write(b) return engine.StatusOK } out := &engine.Env{} out.SetJson("Id", container.ID) out.SetAuto("Created", container.Created) out.SetJson("Path", container.Path) out.SetList("Args", container.Args) out.SetJson("Config", container.Config) out.SetJson("State", container.State) out.Set("Image", container.ImageID) out.SetJson("NetworkSettings", container.NetworkSettings) out.Set("ResolvConfPath", container.ResolvConfPath) out.Set("HostnamePath", container.HostnamePath) out.Set("HostsPath", container.HostsPath) out.Set("LogPath", container.LogPath) out.SetJson("Name", container.Name) out.SetInt("RestartCount", 
container.RestartCount) out.Set("Driver", container.Driver) out.Set("ExecDriver", container.ExecDriver) out.Set("MountLabel", container.MountLabel) out.Set("ProcessLabel", container.ProcessLabel) out.SetJson("Volumes", container.Volumes) out.SetJson("VolumesRW", container.VolumesRW) out.SetJson("AppArmorProfile", container.AppArmorProfile) out.SetList("ExecIDs", container.GetExecIDs()) if children, err := daemon.Children(container.Name); err == nil { for linkAlias, child := range children { container.hostConfig.Links = append(container.hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) } } // we need this trick to preserve an empty log driver, so // the container will use the daemon defaults even if the daemon changes them if container.hostConfig.LogConfig.Type == "" { container.hostConfig.LogConfig = daemon.defaultLogConfig defer func() { container.hostConfig.LogConfig = runconfig.LogConfig{} }() } out.SetJson("HostConfig", container.hostConfig) container.hostConfig.Links = nil if _, err := out.WriteTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK } func (daemon *Daemon) ContainerExecInspect(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("usage: %s ID", job.Name) } id := job.Args[0] eConfig, err := daemon.getExecConfig(id) if err != nil { return job.Error(err) } b, err := json.Marshal(*eConfig) if err != nil { return job.Error(err) } job.Stdout.Write(b) return engine.StatusOK } docker-1.6.2/daemon/utils_linux.go0000644000175000017500000000042712524223634016510 0ustar tianontianon// +build linux package daemon import "github.com/docker/libcontainer/selinux" func selinuxSetDisabled() { selinux.SetDisabled() } func selinuxFreeLxcContexts(label string) { selinux.FreeLxcContexts(label) } func selinuxEnabled() bool { return selinux.SelinuxEnabled() } docker-1.6.2/daemon/daemon.go0000644000175000017500000010741412524223634015400 0ustar tianontianonpackage daemon import ( "bytes" "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "regexp" "runtime" "strings" "sync" "time" "github.com/docker/libcontainer/label" log "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/autogen/dockerversion" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/daemon/execdriver/execdrivers" "github.com/docker/docker/daemon/execdriver/lxc" "github.com/docker/docker/daemon/graphdriver" _ "github.com/docker/docker/daemon/graphdriver/vfs" _ "github.com/docker/docker/daemon/networkdriver/bridge" "github.com/docker/docker/engine" "github.com/docker/docker/graph" "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/broadcastwriter" "github.com/docker/docker/pkg/common" "github.com/docker/docker/pkg/graphdb" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/namesgenerator" "github.com/docker/docker/pkg/networkfs/resolvconf" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/truncindex" "github.com/docker/docker/runconfig" "github.com/docker/docker/trust" "github.com/docker/docker/utils" "github.com/docker/docker/volumes" "github.com/go-fsnotify/fsnotify" ) var ( validContainerNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]` validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`) ) type contStore struct { s map[string]*Container sync.Mutex } func (c *contStore) Add(id string, cont *Container) { c.Lock() c.s[id] = cont c.Unlock() } func 
(c *contStore) Get(id string) *Container { c.Lock() res := c.s[id] c.Unlock() return res } func (c *contStore) Delete(id string) { c.Lock() delete(c.s, id) c.Unlock() } func (c *contStore) List() []*Container { containers := new(History) c.Lock() for _, cont := range c.s { containers.Add(cont) } c.Unlock() containers.Sort() return *containers } type Daemon struct { ID string repository string sysInitPath string containers *contStore execCommands *execStore graph *graph.Graph repositories *graph.TagStore idIndex *truncindex.TruncIndex sysInfo *sysinfo.SysInfo volumes *volumes.Repository eng *engine.Engine config *Config containerGraph *graphdb.Database driver graphdriver.Driver execDriver execdriver.Driver trustStore *trust.TrustStore statsCollector *statsCollector defaultLogConfig runconfig.LogConfig } // Install installs daemon capabilities to eng. func (daemon *Daemon) Install(eng *engine.Engine) error { // FIXME: remove ImageDelete's dependency on Daemon, then move to graph/ for name, method := range map[string]engine.Handler{ "attach": daemon.ContainerAttach, "commit": daemon.ContainerCommit, "container_changes": daemon.ContainerChanges, "container_copy": daemon.ContainerCopy, "container_rename": daemon.ContainerRename, "container_inspect": daemon.ContainerInspect, "container_stats": daemon.ContainerStats, "containers": daemon.Containers, "create": daemon.ContainerCreate, "rm": daemon.ContainerRm, "export": daemon.ContainerExport, "info": daemon.CmdInfo, "kill": daemon.ContainerKill, "logs": daemon.ContainerLogs, "pause": daemon.ContainerPause, "resize": daemon.ContainerResize, "restart": daemon.ContainerRestart, "start": daemon.ContainerStart, "stop": daemon.ContainerStop, "top": daemon.ContainerTop, "unpause": daemon.ContainerUnpause, "wait": daemon.ContainerWait, "image_delete": daemon.ImageDelete, // FIXME: see above "execCreate": daemon.ContainerExecCreate, "execStart": daemon.ContainerExecStart, "execResize": daemon.ContainerExecResize, "execInspect": daemon.ContainerExecInspect, } { if err := eng.Register(name, method); err != nil { return err } } if err := daemon.Repositories().Install(eng); err != nil { return err } if err := daemon.trustStore.Install(eng); err != nil { return err } // FIXME: this hack is necessary for legacy integration tests to access // the daemon object. eng.Hack_SetGlobalVar("httpapi.daemon", daemon) return nil } // Get looks for a container using the provided information, which could be // one of the following inputs from the caller: // - A full container ID, which will exact match a container in daemon's list // - A container name, which will only exact match via the GetByName() function // - A partial container ID prefix (e.g. 
short ID) of any length that is // unique enough to only return a single container object // If none of these searches succeed, an error is returned func (daemon *Daemon) Get(prefixOrName string) (*Container, error) { if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil { // prefix is an exact match to a full container ID return containerByID, nil } // GetByName will match only an exact name provided; we ignore errors containerByName, _ := daemon.GetByName(prefixOrName) containerId, indexError := daemon.idIndex.Get(prefixOrName) if containerByName != nil { // prefix is an exact match to a full container Name return containerByName, nil } if containerId != "" { // prefix is a fuzzy match to a container ID return daemon.containers.Get(containerId), nil } return nil, indexError } // Exists returns true if a container of the specified ID or name exists, // false otherwise. func (daemon *Daemon) Exists(id string) bool { c, _ := daemon.Get(id) return c != nil } func (daemon *Daemon) containerRoot(id string) string { return path.Join(daemon.repository, id) } // Load reads the contents of a container from disk // This is typically done at startup. func (daemon *Daemon) load(id string) (*Container, error) { container := &Container{ root: daemon.containerRoot(id), State: NewState(), execCommands: newExecStore(), } if err := container.FromDisk(); err != nil { return nil, err } if container.ID != id { return container, fmt.Errorf("Container %s is stored at %s", container.ID, id) } container.readHostConfig() return container, nil } // Register makes a container object usable by the daemon as <container.ID>. // This is a wrapper for register func (daemon *Daemon) Register(container *Container) error { return daemon.register(container, true) } // register makes a container object usable by the daemon as <container.ID> func (daemon *Daemon) register(container *Container, updateSuffixarray bool) error { if container.daemon != nil || daemon.Exists(container.ID) { return fmt.Errorf("Container is already loaded") } if err := validateID(container.ID); err != nil { return err } if err := daemon.ensureName(container); err != nil { return err } container.daemon = daemon // Attach to stdout and stderr container.stderr = broadcastwriter.New() container.stdout = broadcastwriter.New() // Attach to stdin if container.Config.OpenStdin { container.stdin, container.stdinPipe = io.Pipe() } else { container.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin } // done daemon.containers.Add(container.ID, container) // don't update the Suffixarray if we're starting up // we'll waste time if we update it for every container daemon.idIndex.Add(container.ID) container.registerVolumes() // FIXME: if the container is supposed to be running but is not, auto restart it? 
// if so, then we need to restart monitor and init a new lock // If the container is supposed to be running, make sure of it if container.IsRunning() { log.Debugf("killing old running container %s", container.ID) existingPid := container.Pid container.SetStopped(&execdriver.ExitStatus{ExitCode: 0}) // We only have to handle this for lxc because the other drivers will ensure that // no processes are left when docker dies if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") { lxc.KillLxc(container.ID, 9) } else { // use the current driver and ensure that the container is dead x.x cmd := &execdriver.Command{ ID: container.ID, } var err error cmd.ProcessConfig.Process, err = os.FindProcess(existingPid) if err != nil { log.Debugf("cannot find existing process for %d", existingPid) } daemon.execDriver.Terminate(cmd) } if err := container.Unmount(); err != nil { log.Debugf("unmount error %s", err) } if err := container.ToDisk(); err != nil { log.Debugf("saving stopped state to disk %s", err) } } return nil } func (daemon *Daemon) ensureName(container *Container) error { if container.Name == "" { name, err := daemon.generateNewName(container.ID) if err != nil { return err } container.Name = name if err := container.ToDisk(); err != nil { log.Debugf("Error saving container name %s", err) } } return nil } func (daemon *Daemon) LogToDisk(src *broadcastwriter.BroadcastWriter, dst, stream string) error { log, err := os.OpenFile(dst, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600) if err != nil { return err } src.AddWriter(log, stream) return nil } func (daemon *Daemon) restore() error { var ( debug = (os.Getenv("DEBUG") != "" || os.Getenv("TEST") != "") containers = make(map[string]*Container) currentDriver = daemon.driver.String() ) if !debug { log.Infof("Loading containers: start.") } dir, err := ioutil.ReadDir(daemon.repository) if err != nil { return err } for _, v := range dir { id := v.Name() container, err := daemon.load(id) if !debug && log.GetLevel() == log.InfoLevel { fmt.Print(".") } if err != nil { log.Errorf("Failed to load container %v: %v", id, err) continue } // Ignore the container if it does not support the current driver being used by the graph if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver { log.Debugf("Loaded container %v", container.ID) containers[container.ID] = container } else { log.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID) } } registeredContainers := []*Container{} if entities := daemon.containerGraph.List("/", -1); entities != nil { for _, p := range entities.Paths() { if !debug && log.GetLevel() == log.InfoLevel { fmt.Print(".") } e := entities[p] if container, ok := containers[e.ID()]; ok { if err := daemon.register(container, false); err != nil { log.Debugf("Failed to register container %s: %s", container.ID, err) } registeredContainers = append(registeredContainers, container) // delete from the map so that a new name is not automatically generated delete(containers, e.ID()) } } } // Any containers that are left over do not exist in the graph for _, container := range containers { // Try to set the default name for a container if it exists prior to links container.Name, err = daemon.generateNewName(container.ID) if err != nil { log.Debugf("Setting default id - %s", err) } if err := daemon.register(container, false); err != nil { log.Debugf("Failed to register container %s: %s", container.ID, err) } registeredContainers = append(registeredContainers, 
container) } // check the restart policy on the containers and restart any container with // the restart policy of "always" if daemon.config.AutoRestart { log.Debugf("Restarting containers...") for _, container := range registeredContainers { if container.hostConfig.RestartPolicy.Name == "always" || (container.hostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0) { log.Debugf("Starting container %s", container.ID) if err := container.Start(); err != nil { log.Debugf("Failed to start container %s: %s", container.ID, err) } } } } if !debug { if log.GetLevel() == log.InfoLevel { fmt.Println() } log.Infof("Loading containers: done.") } return nil } // set up the watch on the host's /etc/resolv.conf so that we can update each container's // live resolv.conf when the network changes on the host func (daemon *Daemon) setupResolvconfWatcher() error { watcher, err := fsnotify.NewWatcher() if err != nil { return err } // this goroutine listens for the events on the watch we add // on the resolv.conf file on the host go func() { for { select { case event := <-watcher.Events: if event.Name == "/etc/resolv.conf" && (event.Op&fsnotify.Write == fsnotify.Write || event.Op&fsnotify.Create == fsnotify.Create) { // verify a real change happened before we go further--a file write may have happened // without an actual change to the file updatedResolvConf, newResolvConfHash, err := resolvconf.GetIfChanged() if err != nil { log.Debugf("Error retrieving updated host resolv.conf: %v", err) } else if updatedResolvConf != nil { // because the new host resolv.conf might have localhost nameservers.. updatedResolvConf, modified := resolvconf.FilterResolvDns(updatedResolvConf, daemon.config.EnableIPv6) if modified { // changes have occurred during localhost cleanup: generate an updated hash newHash, err := utils.HashData(bytes.NewReader(updatedResolvConf)) if err != nil { log.Debugf("Error generating hash of new resolv.conf: %v", err) } else { newResolvConfHash = newHash } } log.Debugf("host network resolv.conf changed--walking container list for updates") contList := daemon.containers.List() for _, container := range contList { if err := container.updateResolvConf(updatedResolvConf, newResolvConfHash); err != nil { log.Debugf("Error on resolv.conf update check for container ID: %s: %v", container.ID, err) } } } } case err := <-watcher.Errors: log.Debugf("host resolv.conf notify error: %v", err) } } }() if err := watcher.Add("/etc"); err != nil { return err } return nil } func (daemon *Daemon) checkDeprecatedExpose(config *runconfig.Config) bool { if config != nil { if config.PortSpecs != nil { for _, p := range config.PortSpecs { if strings.Contains(p, ":") { return true } } } } return false } func (daemon *Daemon) mergeAndVerifyConfig(config *runconfig.Config, img *image.Image) ([]string, error) { warnings := []string{} if (img != nil && daemon.checkDeprecatedExpose(img.Config)) || daemon.checkDeprecatedExpose(config) { warnings = append(warnings, "The mapping to public ports on your host via Dockerfile EXPOSE (host:port:port) has been deprecated. 
Use -p to publish the ports.") } if img != nil && img.Config != nil { if err := runconfig.Merge(config, img.Config); err != nil { return nil, err } } if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 { return nil, fmt.Errorf("No command specified") } return warnings, nil } func (daemon *Daemon) generateIdAndName(name string) (string, string, error) { var ( err error id = common.GenerateRandomID() ) if name == "" { if name, err = daemon.generateNewName(id); err != nil { return "", "", err } return id, name, nil } if name, err = daemon.reserveName(id, name); err != nil { return "", "", err } return id, name, nil } func (daemon *Daemon) reserveName(id, name string) (string, error) { if !validContainerNamePattern.MatchString(name) { return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars) } if name[0] != '/' { name = "/" + name } if _, err := daemon.containerGraph.Set(name, id); err != nil { if !graphdb.IsNonUniqueNameError(err) { return "", err } conflictingContainer, err := daemon.GetByName(name) if err != nil { if strings.Contains(err.Error(), "Could not find entity") { return "", err } // Remove name and continue starting the container if err := daemon.containerGraph.Delete(name); err != nil { return "", err } } else { nameAsKnownByUser := strings.TrimPrefix(name, "/") return "", fmt.Errorf( "Conflict. The name %q is already in use by container %s. You have to delete (or rename) that container to be able to reuse that name.", nameAsKnownByUser, common.TruncateID(conflictingContainer.ID)) } } return name, nil } func (daemon *Daemon) generateNewName(id string) (string, error) { var name string for i := 0; i < 6; i++ { name = namesgenerator.GetRandomName(i) if name[0] != '/' { name = "/" + name } if _, err := daemon.containerGraph.Set(name, id); err != nil { if !graphdb.IsNonUniqueNameError(err) { return "", err } continue } return name, nil } name = "/" + common.TruncateID(id) if _, err := daemon.containerGraph.Set(name, id); err != nil { return "", err } return name, nil } func (daemon *Daemon) generateHostname(id string, config *runconfig.Config) { // Generate default hostname // FIXME: the lxc template no longer needs to set a default hostname if config.Hostname == "" { config.Hostname = id[:12] } } func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint, configCmd []string) (string, []string) { var ( entrypoint string args []string ) if len(configEntrypoint) != 0 { entrypoint = configEntrypoint[0] args = append(configEntrypoint[1:], configCmd...) 
} else { entrypoint = configCmd[0] args = configCmd[1:] } return entrypoint, args } func parseSecurityOpt(container *Container, config *runconfig.HostConfig) error { var ( labelOpts []string err error ) for _, opt := range config.SecurityOpt { con := strings.SplitN(opt, ":", 2) if len(con) == 1 { return fmt.Errorf("Invalid --security-opt: %q", opt) } switch con[0] { case "label": labelOpts = append(labelOpts, con[1]) case "apparmor": container.AppArmorProfile = con[1] default: return fmt.Errorf("Invalid --security-opt: %q", opt) } } container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) return err } func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID string) (*Container, error) { var ( id string err error ) id, name, err = daemon.generateIdAndName(name) if err != nil { return nil, err } daemon.generateHostname(id, config) entrypoint, args := daemon.getEntrypointAndArgs(config.Entrypoint, config.Cmd) container := &Container{ // FIXME: we should generate the ID here instead of receiving it as an argument ID: id, Created: time.Now().UTC(), Path: entrypoint, Args: args, //FIXME: de-duplicate from config Config: config, hostConfig: &runconfig.HostConfig{}, ImageID: imgID, NetworkSettings: &NetworkSettings{}, Name: name, Driver: daemon.driver.String(), ExecDriver: daemon.execDriver.Name(), State: NewState(), execCommands: newExecStore(), } container.root = daemon.containerRoot(container.ID) return container, err } func (daemon *Daemon) createRootfs(container *Container) error { // Step 1: create the container directory. // This doubles as a barrier to avoid race conditions. if err := os.Mkdir(container.root, 0700); err != nil { return err } initID := fmt.Sprintf("%s-init", container.ID) if err := daemon.driver.Create(initID, container.ImageID); err != nil { return err } initPath, err := daemon.driver.Get(initID, "") if err != nil { return err } defer daemon.driver.Put(initID) if err := graph.SetupInitLayer(initPath); err != nil { return err } if err := daemon.driver.Create(container.ID, initID); err != nil { return err } return nil } func GetFullContainerName(name string) (string, error) { if name == "" { return "", fmt.Errorf("Container name cannot be empty") } if name[0] != '/' { name = "/" + name } return name, nil } func (daemon *Daemon) GetByName(name string) (*Container, error) { fullName, err := GetFullContainerName(name) if err != nil { return nil, err } entity := daemon.containerGraph.Get(fullName) if entity == nil { return nil, fmt.Errorf("Could not find entity for %s", name) } e := daemon.containers.Get(entity.ID()) if e == nil { return nil, fmt.Errorf("Could not find container for entity id %s", entity.ID()) } return e, nil } func (daemon *Daemon) Children(name string) (map[string]*Container, error) { name, err := GetFullContainerName(name) if err != nil { return nil, err } children := make(map[string]*Container) err = daemon.containerGraph.Walk(name, func(p string, e *graphdb.Entity) error { c, err := daemon.Get(e.ID()) if err != nil { return err } children[p] = c return nil }, 0) if err != nil { return nil, err } return children, nil } func (daemon *Daemon) Parents(name string) ([]string, error) { name, err := GetFullContainerName(name) if err != nil { return nil, err } return daemon.containerGraph.Parents(name) } func (daemon *Daemon) RegisterLink(parent, child *Container, alias string) error { fullName := path.Join(parent.Name, alias) if !daemon.containerGraph.Exists(fullName) { _, err := daemon.containerGraph.Set(fullName, 
child.ID) return err } return nil } func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error { if hostConfig != nil && hostConfig.Links != nil { for _, l := range hostConfig.Links { parts, err := parsers.PartParser("name:alias", l) if err != nil { return err } child, err := daemon.Get(parts["name"]) if err != nil { //An error from daemon.Get() means this name could not be found return fmt.Errorf("Could not get container for %s", parts["name"]) } for child.hostConfig.NetworkMode.IsContainer() { parts := strings.SplitN(string(child.hostConfig.NetworkMode), ":", 2) child, err = daemon.Get(parts[1]) if err != nil { return fmt.Errorf("Could not get container for %s", parts[1]) } } if child.hostConfig.NetworkMode.IsHost() { return runconfig.ErrConflictHostNetworkAndLinks } if err := daemon.RegisterLink(container, child, parts["alias"]); err != nil { return err } } // After we load all the links into the daemon // set them to nil on the hostconfig hostConfig.Links = nil if err := container.WriteHostConfig(); err != nil { return err } } return nil } // FIXME: harmonize with NewGraph() func NewDaemon(config *Config, eng *engine.Engine) (*Daemon, error) { daemon, err := NewDaemonFromDirectory(config, eng) if err != nil { return nil, err } return daemon, nil } func NewDaemonFromDirectory(config *Config, eng *engine.Engine) (*Daemon, error) { if config.Mtu == 0 { config.Mtu = getDefaultNetworkMtu() } // Check for mutually incompatible config options if config.BridgeIface != "" && config.BridgeIP != "" { return nil, fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one.") } if !config.EnableIptables && !config.InterContainerCommunication { return nil, fmt.Errorf("You specified --iptables=false with --icc=false. ICC uses iptables to function. Please set --icc or --iptables to true.") } if !config.EnableIptables && config.EnableIpMasq { config.EnableIpMasq = false } config.DisableNetwork = config.BridgeIface == disableNetworkBridge // Claim the pidfile first, to avoid any and all unexpected race conditions. // Some of the init doesn't need a pidfile lock - but let's not try to be smart. 
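// A hedged configuration sketch (illustrative paths; only fields that this
// function actually reads are shown):
//
//	cfg := &Config{
//		Pidfile: "/var/run/docker.pid",
//		Root:    "/var/lib/docker",
//	}
//	d, err := NewDaemon(cfg, eng) // eng is the *engine.Engine running the jobs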
if config.Pidfile != "" { if err := utils.CreatePidFile(config.Pidfile); err != nil { return nil, err } eng.OnShutdown(func() { // Always release the pidfile last, just in case utils.RemovePidFile(config.Pidfile) }) } // Check that the system is supported and we have sufficient privileges if runtime.GOOS != "linux" { return nil, fmt.Errorf("The Docker daemon is only supported on linux") } if os.Geteuid() != 0 { return nil, fmt.Errorf("The Docker daemon needs to be run as root") } if err := checkKernel(); err != nil { return nil, err } // set up the TempDir to use a canonical path tmp, err := utils.TempDir(config.Root) if err != nil { return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err) } realTmp, err := utils.ReadSymlinkedDirectory(tmp) if err != nil { return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err) } os.Setenv("TMPDIR", realTmp) // get the canonical path to the Docker root directory var realRoot string if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) { realRoot = config.Root } else { realRoot, err = utils.ReadSymlinkedDirectory(config.Root) if err != nil { return nil, fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err) } } config.Root = realRoot // Create the root directory if it doesn't exists if err := os.MkdirAll(config.Root, 0700); err != nil && !os.IsExist(err) { return nil, err } // Set the default driver graphdriver.DefaultDriver = config.GraphDriver // Load storage driver driver, err := graphdriver.New(config.Root, config.GraphOptions) if err != nil { return nil, fmt.Errorf("error intializing graphdriver: %v", err) } log.Debugf("Using graph driver %s", driver) // register cleanup for graph driver eng.OnShutdown(func() { if err := driver.Cleanup(); err != nil { log.Errorf("Error during graph storage driver.Cleanup(): %v", err) } }) if config.EnableSelinuxSupport { if selinuxEnabled() { // As Docker on btrfs and SELinux are incompatible at present, error on both being enabled if driver.String() == "btrfs" { return nil, fmt.Errorf("SELinux is not supported with the BTRFS graph driver") } log.Debug("SELinux enabled successfully") } else { log.Warn("Docker could not enable SELinux on the host system") } } else { selinuxSetDisabled() } daemonRepo := path.Join(config.Root, "containers") if err := os.MkdirAll(daemonRepo, 0700); err != nil && !os.IsExist(err) { return nil, err } // Migrate the container if it is aufs and aufs is enabled if err = migrateIfAufs(driver, config.Root); err != nil { return nil, err } log.Debugf("Creating images graph") g, err := graph.NewGraph(path.Join(config.Root, "graph"), driver) if err != nil { return nil, err } volumesDriver, err := graphdriver.GetDriver("vfs", config.Root, config.GraphOptions) if err != nil { return nil, err } volumes, err := volumes.NewRepository(filepath.Join(config.Root, "volumes"), volumesDriver) if err != nil { return nil, err } trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath) if err != nil { return nil, err } log.Debugf("Creating repository list") repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), g, trustKey) if err != nil { return nil, fmt.Errorf("Couldn't create Tag store: %s", err) } trustDir := path.Join(config.Root, "trust") if err := os.MkdirAll(trustDir, 0700); err != nil && !os.IsExist(err) { return nil, err } t, err := trust.NewTrustStore(trustDir) if err != nil { return nil, fmt.Errorf("could not create trust store: %s", err) } if 
!config.DisableNetwork { job := eng.Job("init_networkdriver") job.SetenvBool("EnableIptables", config.EnableIptables) job.SetenvBool("InterContainerCommunication", config.InterContainerCommunication) job.SetenvBool("EnableIpForward", config.EnableIpForward) job.SetenvBool("EnableIpMasq", config.EnableIpMasq) job.SetenvBool("EnableIPv6", config.EnableIPv6) job.Setenv("BridgeIface", config.BridgeIface) job.Setenv("BridgeIP", config.BridgeIP) job.Setenv("FixedCIDR", config.FixedCIDR) job.Setenv("FixedCIDRv6", config.FixedCIDRv6) job.Setenv("DefaultBindingIP", config.DefaultIp.String()) if err := job.Run(); err != nil { return nil, err } } graphdbPath := path.Join(config.Root, "linkgraph.db") graph, err := graphdb.NewSqliteConn(graphdbPath) if err != nil { return nil, err } // register graph close on shutdown eng.OnShutdown(func() { if err := graph.Close(); err != nil { log.Errorf("Error during container graph.Close(): %v", err) } }) localCopy := path.Join(config.Root, "init", fmt.Sprintf("dockerinit-%s", dockerversion.VERSION)) sysInitPath := utils.DockerInitPath(localCopy) if sysInitPath == "" { return nil, fmt.Errorf("Could not locate dockerinit: This usually means docker was built incorrectly. See http://docs.docker.com/contributing/devenvironment for official build instructions.") } if sysInitPath != localCopy { // When we find a suitable dockerinit binary (even if it's our local binary), we copy it into config.Root at localCopy for future use (so that the original can go away without that being a problem, for example during a package upgrade). if err := os.Mkdir(path.Dir(localCopy), 0700); err != nil && !os.IsExist(err) { return nil, err } if _, err := utils.CopyFile(sysInitPath, localCopy); err != nil { return nil, err } if err := os.Chmod(localCopy, 0700); err != nil { return nil, err } sysInitPath = localCopy } sysInfo := sysinfo.New(false) const runDir = "/var/run/docker" ed, err := execdrivers.NewDriver(config.ExecDriver, runDir, config.Root, sysInitPath, sysInfo) if err != nil { return nil, err } daemon := &Daemon{ ID: trustKey.PublicKey().KeyID(), repository: daemonRepo, containers: &contStore{s: make(map[string]*Container)}, execCommands: newExecStore(), graph: g, repositories: repositories, idIndex: truncindex.NewTruncIndex([]string{}), sysInfo: sysInfo, volumes: volumes, config: config, containerGraph: graph, driver: driver, sysInitPath: sysInitPath, execDriver: ed, eng: eng, trustStore: t, statsCollector: newStatsCollector(1 * time.Second), defaultLogConfig: config.LogConfig, } eng.OnShutdown(func() { if err := daemon.shutdown(); err != nil { log.Errorf("Error during daemon.shutdown(): %v", err) } }) if err := daemon.restore(); err != nil { return nil, err } // set up filesystem watch on resolv.conf for network changes if err := daemon.setupResolvconfWatcher(); err != nil { return nil, err } return daemon, nil } func (daemon *Daemon) shutdown() error { group := sync.WaitGroup{} log.Debugf("starting clean shutdown of all containers...") for _, container := range daemon.List() { c := container if c.IsRunning() { log.Debugf("stopping %s", c.ID) group.Add(1) go func() { defer group.Done() if err := c.KillSig(15); err != nil { log.Debugf("kill 15 error for %s - %s", c.ID, err) } c.WaitStop(-1 * time.Second) log.Debugf("container stopped %s", c.ID) }() } } group.Wait() return nil } func (daemon *Daemon) Mount(container *Container) error { dir, err := daemon.driver.Get(container.ID, container.GetMountLabel()) if err != nil { return fmt.Errorf("Error getting container %s from driver 
%s: %s", container.ID, daemon.driver, err) } if container.basefs == "" { container.basefs = dir } else if container.basefs != dir { daemon.driver.Put(container.ID) return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", daemon.driver, container.ID, container.basefs, dir) } return nil } func (daemon *Daemon) Unmount(container *Container) error { daemon.driver.Put(container.ID) return nil } func (daemon *Daemon) Changes(container *Container) ([]archive.Change, error) { initID := fmt.Sprintf("%s-init", container.ID) return daemon.driver.Changes(container.ID, initID) } func (daemon *Daemon) Diff(container *Container) (archive.Archive, error) { initID := fmt.Sprintf("%s-init", container.ID) return daemon.driver.Diff(container.ID, initID) } func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) { return daemon.execDriver.Run(c.command, pipes, startCallback) } func (daemon *Daemon) Pause(c *Container) error { if err := daemon.execDriver.Pause(c.command); err != nil { return err } c.SetPaused() return nil } func (daemon *Daemon) Unpause(c *Container) error { if err := daemon.execDriver.Unpause(c.command); err != nil { return err } c.SetUnpaused() return nil } func (daemon *Daemon) Kill(c *Container, sig int) error { return daemon.execDriver.Kill(c.command, sig) } func (daemon *Daemon) Stats(c *Container) (*execdriver.ResourceStats, error) { return daemon.execDriver.Stats(c.ID) } func (daemon *Daemon) SubscribeToContainerStats(name string) (chan interface{}, error) { c, err := daemon.Get(name) if err != nil { return nil, err } ch := daemon.statsCollector.collect(c) return ch, nil } func (daemon *Daemon) UnsubscribeToContainerStats(name string, ch chan interface{}) error { c, err := daemon.Get(name) if err != nil { return err } daemon.statsCollector.unsubscribe(c, ch) return nil } // Nuke kills all containers then removes all content // from the content root, including images, volumes and // container filesystems. // Again: this will remove your entire docker daemon! // FIXME: this is deprecated, and only used in legacy // tests. Please remove. func (daemon *Daemon) Nuke() error { var wg sync.WaitGroup for _, container := range daemon.List() { wg.Add(1) go func(c *Container) { c.Kill() wg.Done() }(container) } wg.Wait() return os.RemoveAll(daemon.config.Root) } // FIXME: this is a convenience function for integration tests // which need direct access to daemon.graph. // Once the tests switch to using engine and jobs, this method // can go away. 
func (daemon *Daemon) Graph() *graph.Graph { return daemon.graph } func (daemon *Daemon) Repositories() *graph.TagStore { return daemon.repositories } func (daemon *Daemon) Config() *Config { return daemon.config } func (daemon *Daemon) SystemConfig() *sysinfo.SysInfo { return daemon.sysInfo } func (daemon *Daemon) SystemInitPath() string { return daemon.sysInitPath } func (daemon *Daemon) GraphDriver() graphdriver.Driver { return daemon.driver } func (daemon *Daemon) ExecutionDriver() execdriver.Driver { return daemon.execDriver } func (daemon *Daemon) ContainerGraph() *graphdb.Database { return daemon.containerGraph } func (daemon *Daemon) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) { // Retrieve all images images, err := daemon.Graph().Map() if err != nil { return nil, err } // Store the tree in a map of map (map[parentId][childId]) imageMap := make(map[string]map[string]struct{}) for _, img := range images { if _, exists := imageMap[img.Parent]; !exists { imageMap[img.Parent] = make(map[string]struct{}) } imageMap[img.Parent][img.ID] = struct{}{} } // Loop on the children of the given image and check the config var match *image.Image for elem := range imageMap[imgID] { img, ok := images[elem] if !ok { return nil, fmt.Errorf("unable to find image %q", elem) } if runconfig.Compare(&img.ContainerConfig, config) { if match == nil || match.Created.Before(img.Created) { match = img } } } return match, nil } func checkKernel() error { // Check for unsupported kernel versions // FIXME: it would be cleaner to not test for specific versions, but rather // test for specific functionalities. // Unfortunately we can't test for the feature "does not cause a kernel panic" // without actually causing a kernel panic, so we need this workaround until // the circumstances of pre-3.8 crashes are clearer. // For details see http://github.com/docker/docker/issues/407 if k, err := kernel.GetKernelVersion(); err != nil { log.Warnf("%s", err) } else { if kernel.CompareKernelVersion(k, &kernel.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 { if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { log.Warnf("You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String()) } } } return nil } docker-1.6.2/daemon/commit.go0000644000175000017500000000500712524223634015420 0ustar tianontianonpackage daemon import ( "bytes" "encoding/json" "github.com/docker/docker/engine" "github.com/docker/docker/image" "github.com/docker/docker/runconfig" ) func (daemon *Daemon) ContainerCommit(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("Not enough arguments. 
Usage: %s CONTAINER\n", job.Name) } name := job.Args[0] container, err := daemon.Get(name) if err != nil { return job.Error(err) } var ( config = container.Config stdoutBuffer = bytes.NewBuffer(nil) newConfig runconfig.Config ) buildConfigJob := daemon.eng.Job("build_config") buildConfigJob.Stdout.Add(stdoutBuffer) buildConfigJob.Setenv("changes", job.Getenv("changes")) // FIXME: this should be removed when we remove the deprecated config param buildConfigJob.Setenv("config", job.Getenv("config")) if err := buildConfigJob.Run(); err != nil { return job.Error(err) } if err := json.NewDecoder(stdoutBuffer).Decode(&newConfig); err != nil { return job.Error(err) } if err := runconfig.Merge(&newConfig, config); err != nil { return job.Error(err) } img, err := daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), job.GetenvBool("pause"), &newConfig) if err != nil { return job.Error(err) } job.Printf("%s\n", img.ID) return engine.StatusOK } // Commit creates a new filesystem image from the current state of a container. // The image can optionally be tagged into a repository func (daemon *Daemon) Commit(container *Container, repository, tag, comment, author string, pause bool, config *runconfig.Config) (*image.Image, error) { if pause && !container.IsPaused() { container.Pause() defer container.Unpause() } if err := container.Mount(); err != nil { return nil, err } defer container.Unmount() rwTar, err := container.ExportRw() if err != nil { return nil, err } defer rwTar.Close() // Create a new image from the container's base layers + a new layer from container changes var ( containerID, parentImageID string containerConfig *runconfig.Config ) if container != nil { containerID = container.ID parentImageID = container.ImageID containerConfig = container.Config } img, err := daemon.graph.Create(rwTar, containerID, parentImageID, comment, author, containerConfig, config) if err != nil { return nil, err } // Register the image if needed if repository != "" { if err := daemon.repositories.Set(repository, tag, img.ID, true); err != nil { return img, err } } return img, nil } docker-1.6.2/volumes/0000755000175000017500000000000012524223634014026 5ustar tianontianondocker-1.6.2/volumes/repository_test.go0000644000175000017500000000662712524223634017646 0ustar tianontianonpackage volumes import ( "io/ioutil" "os" "path/filepath" "testing" "github.com/docker/docker/daemon/graphdriver" _ "github.com/docker/docker/daemon/graphdriver/vfs" ) func TestRepositoryFindOrCreate(t *testing.T) { root, err := ioutil.TempDir(os.TempDir(), "volumes") if err != nil { t.Fatal(err) } defer os.RemoveAll(root) repo, err := newRepo(root) if err != nil { t.Fatal(err) } // no path v, err := repo.FindOrCreateVolume("", true) if err != nil { t.Fatal(err) } // FIXME: volumes are heavily dependent on the vfs driver, but this should not be so! 
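// The expected path below mirrors the vfs graphdriver's on-disk layout:
// anonymous volumes are provisioned through the driver, which keeps their
// root filesystems under <graph home>/vfs/dir/<volume ID> ("repo-graph" is
// the graph home in these tests; see newRepo at the bottom of this file).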
expected := filepath.Join(root, "repo-graph", "vfs", "dir", v.ID) if v.Path != expected { t.Fatalf("expected new path to be created in %s, got %s", expected, v.Path) } // with a non-existent path dir := filepath.Join(root, "doesntexist") v, err = repo.FindOrCreateVolume(dir, true) if err != nil { t.Fatal(err) } if v.Path != dir { t.Fatalf("expected new path to be created in %s, got %s", dir, v.Path) } if _, err := os.Stat(v.Path); err != nil { t.Fatal(err) } // with a pre-existing path // can just use the same path from above since it now exists v, err = repo.FindOrCreateVolume(dir, true) if err != nil { t.Fatal(err) } if v.Path != dir { t.Fatalf("expected new path to be created in %s, got %s", dir, v.Path) } } func TestRepositoryGet(t *testing.T) { root, err := ioutil.TempDir(os.TempDir(), "volumes") if err != nil { t.Fatal(err) } defer os.RemoveAll(root) repo, err := newRepo(root) if err != nil { t.Fatal(err) } v, err := repo.FindOrCreateVolume("", true) if err != nil { t.Fatal(err) } v2 := repo.Get(v.Path) if v2 == nil { t.Fatalf("expected to find volume but didn't") } if v2 != v { t.Fatalf("expected get to return same volume") } } func TestRepositoryDelete(t *testing.T) { root, err := ioutil.TempDir(os.TempDir(), "volumes") if err != nil { t.Fatal(err) } defer os.RemoveAll(root) repo, err := newRepo(root) if err != nil { t.Fatal(err) } // with a normal volume v, err := repo.FindOrCreateVolume("", true) if err != nil { t.Fatal(err) } if err := repo.Delete(v.Path); err != nil { t.Fatal(err) } if v := repo.Get(v.Path); v != nil { t.Fatalf("expected volume to not exist") } if _, err := os.Stat(v.Path); err == nil { t.Fatalf("expected volume files to be removed") } // with a bind mount dir := filepath.Join(root, "test") v, err = repo.FindOrCreateVolume(dir, true) if err != nil { t.Fatal(err) } if err := repo.Delete(v.Path); err != nil { t.Fatal(err) } if v := repo.Get(v.Path); v != nil { t.Fatalf("expected volume to not exist") } if _, err := os.Stat(v.Path); err != nil && os.IsNotExist(err) { t.Fatalf("expected bind volume data to persist after destroying volume") } // with container refs dir = filepath.Join(root, "test") v, err = repo.FindOrCreateVolume(dir, true) if err != nil { t.Fatal(err) } v.AddContainer("1234") if err := repo.Delete(v.Path); err == nil { t.Fatalf("expected volume delete to fail due to container refs") } v.RemoveContainer("1234") if err := repo.Delete(v.Path); err != nil { t.Fatal(err) } } func newRepo(root string) (*Repository, error) { configPath := filepath.Join(root, "repo-config") graphDir := filepath.Join(root, "repo-graph") driver, err := graphdriver.GetDriver("vfs", graphDir, []string{}) if err != nil { return nil, err } return NewRepository(configPath, driver) } docker-1.6.2/volumes/repository.go0000644000175000017500000000766612524223634016603 0ustar tianontianonpackage volumes import ( "fmt" "io/ioutil" "os" "path/filepath" "sync" log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/common" ) type Repository struct { configPath string driver graphdriver.Driver volumes map[string]*Volume lock sync.Mutex } func NewRepository(configPath string, driver graphdriver.Driver) (*Repository, error) { abspath, err := filepath.Abs(configPath) if err != nil { return nil, err } // Create the config path if err := os.MkdirAll(abspath, 0700); err != nil && !os.IsExist(err) { return nil, err } repo := &Repository{ driver: driver, configPath: abspath, volumes: make(map[string]*Volume), } return repo, repo.restore() } 
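// Illustrative usage sketch (not part of the original source; the paths are
// hypothetical). A Repository couples a config directory, where per-volume
// metadata is persisted as JSON, with a graphdriver that provisions storage
// for anonymous volumes:
//
//	driver, err := graphdriver.GetDriver("vfs", "/var/lib/docker/vfs-home", []string{})
//	if err != nil {
//		log.Fatal(err)
//	}
//	repo, err := NewRepository("/var/lib/docker/volumes", driver)
//	if err != nil {
//		log.Fatal(err)
//	}
//	v, err := repo.FindOrCreateVolume("", true) // "" allocates a fresh volume path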
func (r *Repository) newVolume(path string, writable bool) (*Volume, error) { var ( isBindMount bool err error id = common.GenerateRandomID() ) if path != "" { isBindMount = true } if path == "" { path, err = r.createNewVolumePath(id) if err != nil { return nil, err } } path = filepath.Clean(path) // Ignore the error here since the path may not exist // Really just want to make sure the path we are using is real (or non-existent) if cleanPath, err := filepath.EvalSymlinks(path); err == nil { path = cleanPath } v := &Volume{ ID: id, Path: path, repository: r, Writable: writable, containers: make(map[string]struct{}), configPath: r.configPath + "/" + id, IsBindMount: isBindMount, } if err := v.initialize(); err != nil { return nil, err } return v, r.add(v) } func (r *Repository) restore() error { dir, err := ioutil.ReadDir(r.configPath) if err != nil { return err } for _, v := range dir { id := v.Name() vol := &Volume{ ID: id, configPath: r.configPath + "/" + id, containers: make(map[string]struct{}), } if err := vol.FromDisk(); err != nil { if !os.IsNotExist(err) { log.Debugf("Error restoring volume: %v", err) continue } if err := vol.initialize(); err != nil { log.Debugf("%s", err) continue } } if err := r.add(vol); err != nil { log.Debugf("Error restoring volume: %v", err) } } return nil } func (r *Repository) Get(path string) *Volume { r.lock.Lock() vol := r.get(path) r.lock.Unlock() return vol } func (r *Repository) get(path string) *Volume { path, err := filepath.EvalSymlinks(path) if err != nil { return nil } return r.volumes[filepath.Clean(path)] } func (r *Repository) add(volume *Volume) error { if vol := r.get(volume.Path); vol != nil { return fmt.Errorf("Volume exists: %s", volume.ID) } r.volumes[volume.Path] = volume return nil } func (r *Repository) Delete(path string) error { r.lock.Lock() defer r.lock.Unlock() path, err := filepath.EvalSymlinks(path) if err != nil { return err } volume := r.get(filepath.Clean(path)) if volume == nil { return fmt.Errorf("Volume %s does not exist", path) } containers := volume.Containers() if len(containers) > 0 { return fmt.Errorf("Volume %s is being used and cannot be removed: used by containers %s", volume.Path, containers) } if err := os.RemoveAll(volume.configPath); err != nil { return err } if !volume.IsBindMount { if err := r.driver.Remove(volume.ID); err != nil { if !os.IsNotExist(err) { return err } } } delete(r.volumes, volume.Path) return nil } func (r *Repository) createNewVolumePath(id string) (string, error) { if err := r.driver.Create(id, ""); err != nil { return "", err } path, err := r.driver.Get(id, "") if err != nil { return "", fmt.Errorf("Driver %s failed to get volume rootfs %s: %v", r.driver, id, err) } return path, nil } func (r *Repository) FindOrCreateVolume(path string, writable bool) (*Volume, error) { r.lock.Lock() defer r.lock.Unlock() if path == "" { return r.newVolume(path, writable) } if v := r.get(path); v != nil { return v, nil } return r.newVolume(path, writable) } docker-1.6.2/volumes/volume.go0000644000175000017500000000610512524223634015666 0ustar tianontianonpackage volumes import ( "encoding/json" "io" "io/ioutil" "os" "path" "path/filepath" "sync" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/symlink" ) type Volume struct { ID string Path string IsBindMount bool Writable bool containers map[string]struct{} configPath string repository *Repository lock sync.Mutex } func (v *Volume) Export(resource, name string) (io.ReadCloser, error) { if v.IsBindMount && filepath.Base(resource) == 
name { name = "" } basePath, err := v.getResourcePath(resource) if err != nil { return nil, err } stat, err := os.Stat(basePath) if err != nil { return nil, err } var filter []string if !stat.IsDir() { d, f := path.Split(basePath) basePath = d filter = []string{f} } else { filter = []string{path.Base(basePath)} basePath = path.Dir(basePath) } return archive.TarWithOptions(basePath, &archive.TarOptions{ Compression: archive.Uncompressed, Name: name, IncludeFiles: filter, }) } func (v *Volume) IsDir() (bool, error) { stat, err := os.Stat(v.Path) if err != nil { return false, err } return stat.IsDir(), nil } func (v *Volume) Containers() []string { v.lock.Lock() var containers []string for c := range v.containers { containers = append(containers, c) } v.lock.Unlock() return containers } func (v *Volume) RemoveContainer(containerId string) { v.lock.Lock() delete(v.containers, containerId) v.lock.Unlock() } func (v *Volume) AddContainer(containerId string) { v.lock.Lock() v.containers[containerId] = struct{}{} v.lock.Unlock() } func (v *Volume) initialize() error { v.lock.Lock() defer v.lock.Unlock() if _, err := os.Stat(v.Path); err != nil && os.IsNotExist(err) { if err := os.MkdirAll(v.Path, 0755); err != nil { return err } } if err := os.MkdirAll(v.configPath, 0755); err != nil { return err } jsonPath, err := v.jsonPath() if err != nil { return err } f, err := os.Create(jsonPath) if err != nil { return err } defer f.Close() return v.toDisk() } func (v *Volume) ToDisk() error { v.lock.Lock() defer v.lock.Unlock() return v.toDisk() } func (v *Volume) toDisk() error { data, err := json.Marshal(v) if err != nil { return err } pth, err := v.jsonPath() if err != nil { return err } return ioutil.WriteFile(pth, data, 0666) } func (v *Volume) FromDisk() error { v.lock.Lock() defer v.lock.Unlock() pth, err := v.jsonPath() if err != nil { return err } jsonSource, err := os.Open(pth) if err != nil { return err } defer jsonSource.Close() dec := json.NewDecoder(jsonSource) return dec.Decode(v) } func (v *Volume) jsonPath() (string, error) { return v.getRootResourcePath("config.json") } func (v *Volume) getRootResourcePath(path string) (string, error) { cleanPath := filepath.Join("/", path) return symlink.FollowSymlinkInScope(filepath.Join(v.configPath, cleanPath), v.configPath) } func (v *Volume) getResourcePath(path string) (string, error) { cleanPath := filepath.Join("/", path) return symlink.FollowSymlinkInScope(filepath.Join(v.Path, cleanPath), v.Path) } docker-1.6.2/volumes/volume_test.go0000644000175000017500000000052112524223634016721 0ustar tianontianonpackage volumes import "testing" func TestContainers(t *testing.T) { v := &Volume{containers: make(map[string]struct{})} id := "1234" v.AddContainer(id) if v.Containers()[0] != id { t.Fatalf("adding a container ref failed") } v.RemoveContainer(id) if len(v.Containers()) != 0 { t.Fatalf("removing container failed") } } docker-1.6.2/Dockerfile0000644000175000017500000001326012524223634014330 0ustar tianontianon# This file describes the standard way to build Docker, using docker # # Usage: # # # Assemble the full dev environment. This is slow the first time. # docker build -t docker . 
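# (Rebuilds after the first one are much faster: each step below becomes a
# cached layer, so only the steps after a changed line are re-executed.)
#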
# # # Mount your source in an interactive container for quick testing: # docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash # # # Run the test suite: # docker run --privileged docker hack/make.sh test # # # Publish a release: # docker run --privileged \ # -e AWS_S3_BUCKET=baz \ # -e AWS_ACCESS_KEY=foo \ # -e AWS_SECRET_KEY=bar \ # -e GPG_PASSPHRASE=gloubiboulga \ # docker hack/release.sh # # Note: Apparmor used to mess with privileged mode, but this is no longer # the case. Therefore, you don't have to disable it anymore. # FROM ubuntu:14.04 MAINTAINER Tianon Gravi (@tianon) # Packaged dependencies RUN apt-get update && apt-get install -y \ apparmor \ aufs-tools \ automake \ btrfs-tools \ build-essential \ curl \ dpkg-sig \ git \ iptables \ libapparmor-dev \ libcap-dev \ libsqlite3-dev \ mercurial \ parallel \ python-mock \ python-pip \ python-websocket \ reprepro \ ruby1.9.1 \ ruby1.9.1-dev \ s3cmd=1.1.0* \ --no-install-recommends # Get lvm2 source for compiling statically RUN git clone -b v2_02_103 https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 # see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags # Compile and install lvm2 RUN cd /usr/local/lvm2 \ && ./configure --enable-static_link \ && make device-mapper \ && make install_device-mapper # see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL # Install lxc ENV LXC_VERSION 1.0.7 RUN mkdir -p /usr/src/lxc \ && curl -sSL https://linuxcontainers.org/downloads/lxc/lxc-${LXC_VERSION}.tar.gz | tar -v -C /usr/src/lxc/ -xz --strip-components=1 RUN cd /usr/src/lxc \ && ./configure \ && make \ && make install \ && ldconfig # Install Go ENV GO_VERSION 1.4.2 RUN curl -sSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/local -xz \ && mkdir -p /go/bin ENV PATH /go/bin:/usr/local/go/bin:$PATH ENV GOPATH /go:/go/src/github.com/docker/docker/vendor RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 # Compile Go for cross compilation ENV DOCKER_CROSSPLATFORMS \ linux/386 linux/arm \ darwin/amd64 darwin/386 \ freebsd/amd64 freebsd/386 freebsd/arm \ windows/amd64 windows/386 # (set an explicit GOARM of 5 for maximum compatibility) ENV GOARM 5 RUN cd /usr/local/go/src \ && set -x \ && for platform in $DOCKER_CROSSPLATFORMS; do \ GOOS=${platform%/*} \ GOARCH=${platform##*/} \ ./make.bash --no-clean 2>&1; \ done # We still support compiling with older Go, so need to grab older "gofmt" ENV GOFMT_VERSION 1.3.3 RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt # Grab Go's cover tool for dead-simple code coverage testing RUN go get golang.org/x/tools/cmd/cover # TODO replace FPM with some very minimal debhelper stuff RUN gem install --no-rdoc --no-ri fpm --version 1.3.2 # Install registry ENV REGISTRY_COMMIT d957768537c5af40e4f4cd96871f7b2bde9e2923 RUN set -x \ && git clone https://github.com/docker/distribution.git /go/src/github.com/docker/distribution \ && (cd /go/src/github.com/docker/distribution && git checkout -q $REGISTRY_COMMIT) \ && GOPATH=/go/src/github.com/docker/distribution/Godeps/_workspace:/go \ go build -o /go/bin/registry-v2 github.com/docker/distribution/cmd/registry # Get the "docker-py" source so we can run their integration tests ENV DOCKER_PY_COMMIT 91985b239764fe54714fa0a93d52aa362357d251 RUN git clone https://github.com/docker/docker-py.git /docker-py \ && cd /docker-py \ && git checkout -q $DOCKER_PY_COMMIT # Setup s3cmd config RUN 
{ \ echo '[default]'; \ echo 'access_key=$AWS_ACCESS_KEY'; \ echo 'secret_key=$AWS_SECRET_KEY'; \ } > ~/.s3cfg # Set user.email so crosbymichael's in-container merge commits go smoothly RUN git config --global user.email 'docker-dummy@example.com' # Add an unprivileged user to be used for tests which need it RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser VOLUME /var/lib/docker WORKDIR /go/src/github.com/docker/docker ENV DOCKER_BUILDTAGS apparmor selinux btrfs_noversion # Let us use a .bashrc file RUN ln -sfv $PWD/.bashrc ~/.bashrc # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image.sh /go/src/github.com/docker/docker/contrib/ RUN ./contrib/download-frozen-image.sh /docker-frozen-images \ busybox:latest@4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125 \ hello-world:frozen@e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5 # see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) # Install man page generator COPY vendor /go/src/github.com/docker/docker/vendor # (copy vendor/ because go-md2man needs golang.org/x/net) RUN set -x \ && git clone -b v1.0.1 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \ && git clone -b v1.2 https://github.com/russross/blackfriday.git /go/src/github.com/russross/blackfriday \ && go install -v github.com/cpuguy83/go-md2man # install toml validator ENV TOMLV_COMMIT 9baf8a8a9f2ed20a8e54160840c492f937eeaf9a RUN set -x \ && git clone https://github.com/BurntSushi/toml.git /go/src/github.com/BurntSushi/toml \ && (cd /go/src/github.com/BurntSushi/toml && git checkout -q $TOMLV_COMMIT) \ && go install -v github.com/BurntSushi/toml/cmd/tomlv # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] # Upload docker source COPY . 
/go/src/github.com/docker/docker docker-1.6.2/graph/0000755000175000017500000000000012524223634013435 5ustar tianontianondocker-1.6.2/graph/viz.go0000644000175000017500000000177012524223634014601 0ustar tianontianonpackage graph import ( "strings" "github.com/docker/docker/engine" "github.com/docker/docker/image" ) func (s *TagStore) CmdViz(job *engine.Job) engine.Status { images, _ := s.graph.Map() if images == nil { return engine.StatusOK } job.Stdout.Write([]byte("digraph docker {\n")) var ( parentImage *image.Image err error ) for _, image := range images { parentImage, err = image.GetParent() if err != nil { return job.Errorf("Error while getting parent image: %v", err) } if parentImage != nil { job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n")) } else { job.Stdout.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n")) } } for id, repos := range s.GetRepoRefs() { job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n")) } job.Stdout.Write([]byte(" base [style=invisible]\n}\n")) return engine.StatusOK } docker-1.6.2/graph/list.go0000644000175000017500000000675712524223634014756 0ustar tianontianonpackage graph import ( "log" "path" "strings" "github.com/docker/docker/engine" "github.com/docker/docker/image" "github.com/docker/docker/pkg/parsers/filters" "github.com/docker/docker/utils" ) var acceptedImageFilterTags = map[string]struct{}{ "dangling": {}, "label": {}, } func (s *TagStore) CmdImages(job *engine.Job) engine.Status { var ( allImages map[string]*image.Image err error filt_tagged = true filt_label = false ) imageFilters, err := filters.FromParam(job.Getenv("filters")) if err != nil { return job.Error(err) } for name := range imageFilters { if _, ok := acceptedImageFilterTags[name]; !ok { return job.Errorf("Invalid filter '%s'", name) } } if i, ok := imageFilters["dangling"]; ok { for _, value := range i { if strings.ToLower(value) == "true" { filt_tagged = false } } } _, filt_label = imageFilters["label"] if job.GetenvBool("all") && filt_tagged { allImages, err = s.graph.Map() } else { allImages, err = s.graph.Heads() } if err != nil { return job.Error(err) } lookup := make(map[string]*engine.Env) s.Lock() for repoName, repository := range s.Repositories { if job.Getenv("filter") != "" { if match, _ := path.Match(job.Getenv("filter"), repoName); !match { continue } } for ref, id := range repository { imgRef := utils.ImageReference(repoName, ref) image, err := s.graph.Get(id) if err != nil { log.Printf("Warning: couldn't load %s from %s: %s", id, imgRef, err) continue } if out, exists := lookup[id]; exists { if filt_tagged { if utils.DigestReference(ref) { out.SetList("RepoDigests", append(out.GetList("RepoDigests"), imgRef)) } else { // Tag Ref. 
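// A single image ID can be referenced by several tags and digests at once,
// so an existing entry just accumulates this reference in the matching list.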
out.SetList("RepoTags", append(out.GetList("RepoTags"), imgRef)) } } } else { // get the boolean list for if only the untagged images are requested delete(allImages, id) if !imageFilters.MatchKVList("label", image.ContainerConfig.Labels) { continue } if filt_tagged { out := &engine.Env{} out.SetJson("ParentId", image.Parent) out.SetJson("Id", image.ID) out.SetInt64("Created", image.Created.Unix()) out.SetInt64("Size", image.Size) out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size) out.SetJson("Labels", image.ContainerConfig.Labels) if utils.DigestReference(ref) { out.SetList("RepoTags", []string{}) out.SetList("RepoDigests", []string{imgRef}) } else { out.SetList("RepoTags", []string{imgRef}) out.SetList("RepoDigests", []string{}) } lookup[id] = out } } } } s.Unlock() outs := engine.NewTable("Created", len(lookup)) for _, value := range lookup { outs.Add(value) } // Display images which aren't part of a repository/tag if job.Getenv("filter") == "" || filt_label { for _, image := range allImages { if !imageFilters.MatchKVList("label", image.ContainerConfig.Labels) { continue } out := &engine.Env{} out.SetJson("ParentId", image.Parent) out.SetList("RepoTags", []string{":"}) out.SetList("RepoDigests", []string{"@"}) out.SetJson("Id", image.ID) out.SetInt64("Created", image.Created.Unix()) out.SetInt64("Size", image.Size) out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size) out.SetJson("Labels", image.ContainerConfig.Labels) outs.Add(out) } } outs.ReverseSort() if _, err := outs.WriteListTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK } docker-1.6.2/graph/export.go0000644000175000017500000000772712524223634015322 0ustar tianontianonpackage graph import ( "encoding/json" "io" "io/ioutil" "os" "path" log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/registry" ) // CmdImageExport exports all images with the given tag. All versions // containing the same tag are exported. The resulting output is an // uncompressed tar ball. // name is the set of tags to export. // out is the writer where the images are written to. 
func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status { if len(job.Args) < 1 { return job.Errorf("Usage: %s IMAGE [IMAGE...]\n", job.Name) } // get image json tempdir, err := ioutil.TempDir("", "docker-export-") if err != nil { return job.Error(err) } defer os.RemoveAll(tempdir) rootRepoMap := map[string]Repository{} addKey := func(name string, tag string, id string) { log.Debugf("add key [%s:%s]", name, tag) if repo, ok := rootRepoMap[name]; !ok { rootRepoMap[name] = Repository{tag: id} } else { repo[tag] = id } } for _, name := range job.Args { name = registry.NormalizeLocalName(name) log.Debugf("Serializing %s", name) rootRepo := s.Repositories[name] if rootRepo != nil { // this is a base repo name, like 'busybox' for tag, id := range rootRepo { addKey(name, tag, id) if err := s.exportImage(job.Eng, id, tempdir); err != nil { return job.Error(err) } } } else { img, err := s.LookupImage(name) if err != nil { return job.Error(err) } if img != nil { // This is a named image like 'busybox:latest' repoName, repoTag := parsers.ParseRepositoryTag(name) // check this length, because a lookup of a truncated hash will not have a tag // and will not need to be added to this map if len(repoTag) > 0 { addKey(repoName, repoTag, img.ID) } if err := s.exportImage(job.Eng, img.ID, tempdir); err != nil { return job.Error(err) } } else { // this must be an ID that didn't get looked up just right? if err := s.exportImage(job.Eng, name, tempdir); err != nil { return job.Error(err) } } } log.Debugf("End Serializing %s", name) } // write repositories, if there is something to write if len(rootRepoMap) > 0 { rootRepoJson, _ := json.Marshal(rootRepoMap) if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.FileMode(0644)); err != nil { return job.Error(err) } } else { log.Debugf("There were no repositories to write") } fs, err := archive.Tar(tempdir, archive.Uncompressed) if err != nil { return job.Error(err) } defer fs.Close() if _, err := io.Copy(job.Stdout, fs); err != nil { return job.Error(err) } log.Debugf("End export job: %s", job.Name) return engine.StatusOK } // FIXME: this should be a top-level function, not a class method func (s *TagStore) exportImage(eng *engine.Engine, name, tempdir string) error { for n := name; n != ""; { // temporary directory tmpImageDir := path.Join(tempdir, n) if err := os.Mkdir(tmpImageDir, os.FileMode(0755)); err != nil { if os.IsExist(err) { return nil } return err } var version = "1.0" var versionBuf = []byte(version) if err := ioutil.WriteFile(path.Join(tmpImageDir, "VERSION"), versionBuf, os.FileMode(0644)); err != nil { return err } // serialize json json, err := os.Create(path.Join(tmpImageDir, "json")) if err != nil { return err } job := eng.Job("image_inspect", n) job.SetenvBool("raw", true) job.Stdout.Add(json) if err := job.Run(); err != nil { return err } // serialize filesystem fsTar, err := os.Create(path.Join(tmpImageDir, "layer.tar")) if err != nil { return err } job = eng.Job("image_tarlayer", n) job.Stdout.Add(fsTar) if err := job.Run(); err != nil { return err } // find parent job = eng.Job("image_get", n) info, _ := job.Stdout.AddEnv() if err := job.Run(); err != nil { return err } n = info.Get("Parent") } return nil } docker-1.6.2/graph/push.go0000644000175000017500000004046012524223634014747 0ustar tianontianonpackage graph import ( "encoding/json" "errors" "fmt" "io" "io/ioutil" "os" "path" "strings" "sync" log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/docker/image" 
"github.com/docker/docker/pkg/common" "github.com/docker/docker/pkg/progressreader" "github.com/docker/docker/registry" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" "github.com/docker/libtrust" ) var ErrV2RegistryUnavailable = errors.New("error v2 registry unavailable") // Retrieve the all the images to be uploaded in the correct order func (s *TagStore) getImageList(localRepo map[string]string, requestedTag string) ([]string, map[string][]string, error) { var ( imageList []string imagesSeen = make(map[string]bool) tagsByImage = make(map[string][]string) ) for tag, id := range localRepo { if requestedTag != "" && requestedTag != tag { // Include only the requested tag. continue } if utils.DigestReference(tag) { // Ignore digest references. continue } var imageListForThisTag []string tagsByImage[id] = append(tagsByImage[id], tag) for img, err := s.graph.Get(id); img != nil; img, err = img.GetParent() { if err != nil { return nil, nil, err } if imagesSeen[img.ID] { // This image is already on the list, we can ignore it and all its parents break } imagesSeen[img.ID] = true imageListForThisTag = append(imageListForThisTag, img.ID) } // reverse the image list for this tag (so the "most"-parent image is first) for i, j := 0, len(imageListForThisTag)-1; i < j; i, j = i+1, j-1 { imageListForThisTag[i], imageListForThisTag[j] = imageListForThisTag[j], imageListForThisTag[i] } // append to main image list imageList = append(imageList, imageListForThisTag...) } if len(imageList) == 0 { return nil, nil, fmt.Errorf("No images found for the requested repository / tag") } log.Debugf("Image list: %v", imageList) log.Debugf("Tags by image: %v", tagsByImage) return imageList, tagsByImage, nil } func (s *TagStore) getImageTags(localRepo map[string]string, askedTag string) ([]string, error) { log.Debugf("Checking %s against %#v", askedTag, localRepo) if len(askedTag) > 0 { if _, ok := localRepo[askedTag]; !ok || utils.DigestReference(askedTag) { return nil, fmt.Errorf("Tag does not exist: %s", askedTag) } return []string{askedTag}, nil } var tags []string for tag := range localRepo { if !utils.DigestReference(tag) { tags = append(tags, tag) } } return tags, nil } // createImageIndex returns an index of an image's layer IDs and tags. func (s *TagStore) createImageIndex(images []string, tags map[string][]string) []*registry.ImgData { var imageIndex []*registry.ImgData for _, id := range images { if tags, hasTags := tags[id]; hasTags { // If an image has tags you must add an entry in the image index // for each tag for _, tag := range tags { imageIndex = append(imageIndex, ®istry.ImgData{ ID: id, Tag: tag, }) } continue } // If the image does not have a tag it still needs to be sent to the // registry with an empty tag so that it is accociated with the repository imageIndex = append(imageIndex, ®istry.ImgData{ ID: id, Tag: "", }) } return imageIndex } type imagePushData struct { id string endpoint string tokens []string } // lookupImageOnEndpoint checks the specified endpoint to see if an image exists // and if it is absent then it sends the image id to the channel to be pushed. 
func lookupImageOnEndpoint(wg *sync.WaitGroup, r *registry.Session, out io.Writer, sf *utils.StreamFormatter, images chan imagePushData, imagesToPush chan string) { defer wg.Done() for image := range images { if err := r.LookupRemoteImage(image.id, image.endpoint, image.tokens); err != nil { log.Errorf("Error in LookupRemoteImage: %s", err) imagesToPush <- image.id continue } out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", common.TruncateID(image.id))) } } func (s *TagStore) pushImageToEndpoint(endpoint string, out io.Writer, remoteName string, imageIDs []string, tags map[string][]string, repo *registry.RepositoryData, sf *utils.StreamFormatter, r *registry.Session) error { workerCount := len(imageIDs) // start a maximum of 5 workers to check if images exist on the specified endpoint. if workerCount > 5 { workerCount = 5 } var ( wg = &sync.WaitGroup{} imageData = make(chan imagePushData, workerCount*2) imagesToPush = make(chan string, workerCount*2) pushes = make(chan map[string]struct{}, 1) ) for i := 0; i < workerCount; i++ { wg.Add(1) go lookupImageOnEndpoint(wg, r, out, sf, imageData, imagesToPush) } // start a goroutine that consumes the images to push go func() { shouldPush := make(map[string]struct{}) for id := range imagesToPush { shouldPush[id] = struct{}{} } pushes <- shouldPush }() for _, id := range imageIDs { imageData <- imagePushData{ id: id, endpoint: endpoint, tokens: repo.Tokens, } } // close the channel to notify the workers that there will be no more images to check. close(imageData) wg.Wait() close(imagesToPush) // wait for all the images that require pushes to be collected into a consumable map. shouldPush := <-pushes // finish by pushing any images and tags to the endpoint. The order in which the images are pushed // is very important; that is why we are still iterating over the ordered list of imageIDs. for _, id := range imageIDs { if _, push := shouldPush[id]; push { if _, err := s.pushImage(r, out, id, endpoint, repo.Tokens, sf); err != nil { // FIXME: Continue on error? return err } } for _, tag := range tags[id] { out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", common.TruncateID(id), endpoint+"repositories/"+remoteName+"/tags/"+tag)) if err := r.PushRegistryTag(remoteName, id, tag, endpoint, repo.Tokens); err != nil { return err } } } return nil } // pushRepository pushes layers that do not already exist on the registry. func (s *TagStore) pushRepository(r *registry.Session, out io.Writer, repoInfo *registry.RepositoryInfo, localRepo map[string]string, tag string, sf *utils.StreamFormatter) error { log.Debugf("Local repo: %s", localRepo) out = utils.NewWriteFlusher(out) imgList, tags, err := s.getImageList(localRepo, tag) if err != nil { return err } out.Write(sf.FormatStatus("", "Sending image list")) imageIndex := s.createImageIndex(imgList, tags) log.Debugf("Preparing to push %s with the following images and tags", localRepo) for _, data := range imageIndex { log.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag) } // Register all the images in a repository with the registry // If an image is not in this list it will not be associated with the repository repoData, err := r.PushImageJSONIndex(repoInfo.RemoteName, imageIndex, false, nil) if err != nil { return err } nTag := 1 if tag == "" { nTag = len(localRepo) } out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", repoInfo.CanonicalName, nTag)) // push the repository to each of the endpoints only if it does not exist. 
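// A failure against any single endpoint aborts the whole push; the final
// PushImageJSONIndex call below runs only after every endpoint has accepted
// the layers and tags.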
for _, endpoint := range repoData.Endpoints { if err := s.pushImageToEndpoint(endpoint, out, repoInfo.RemoteName, imgList, tags, repoData, sf, r); err != nil { return err } } _, err = r.PushImageJSONIndex(repoInfo.RemoteName, imageIndex, true, repoData.Endpoints) return err } func (s *TagStore) pushImage(r *registry.Session, out io.Writer, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) { out = utils.NewWriteFlusher(out) jsonRaw, err := ioutil.ReadFile(path.Join(s.graph.Root, imgID, "json")) if err != nil { return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err) } out.Write(sf.FormatProgress(common.TruncateID(imgID), "Pushing", nil)) imgData := ®istry.ImgData{ ID: imgID, } // Send the json if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil { if err == registry.ErrAlreadyExists { out.Write(sf.FormatProgress(common.TruncateID(imgData.ID), "Image already pushed, skipping", nil)) return "", nil } return "", err } layerData, err := s.graph.TempLayerArchive(imgID, sf, out) if err != nil { return "", fmt.Errorf("Failed to generate layer archive: %s", err) } defer os.RemoveAll(layerData.Name()) // Send the layer log.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size) checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, progressreader.New(progressreader.Config{ In: layerData, Out: out, Formatter: sf, Size: int(layerData.Size), NewLines: false, ID: common.TruncateID(imgData.ID), Action: "Pushing", }), ep, token, jsonRaw) if err != nil { return "", err } imgData.Checksum = checksum imgData.ChecksumPayload = checksumPayload // Send the checksum if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil { return "", err } out.Write(sf.FormatProgress(common.TruncateID(imgData.ID), "Image successfully pushed", nil)) return imgData.Checksum, nil } func (s *TagStore) pushV2Repository(r *registry.Session, localRepo Repository, out io.Writer, repoInfo *registry.RepositoryInfo, tag string, sf *utils.StreamFormatter) error { endpoint, err := r.V2RegistryEndpoint(repoInfo.Index) if err != nil { if repoInfo.Index.Official { log.Debugf("Unable to push to V2 registry, falling back to v1: %s", err) return ErrV2RegistryUnavailable } return fmt.Errorf("error getting registry endpoint: %s", err) } tags, err := s.getImageTags(localRepo, tag) if err != nil { return err } if len(tags) == 0 { return fmt.Errorf("No tags to push for %s", repoInfo.LocalName) } auth, err := r.GetV2Authorization(endpoint, repoInfo.RemoteName, false) if err != nil { return fmt.Errorf("error getting authorization: %s", err) } for _, tag := range tags { log.Debugf("Pushing repository: %s:%s", repoInfo.CanonicalName, tag) layerId, exists := localRepo[tag] if !exists { return fmt.Errorf("tag does not exist: %s", tag) } layer, err := s.graph.Get(layerId) if err != nil { return err } m := ®istry.ManifestData{ SchemaVersion: 1, Name: repoInfo.RemoteName, Tag: tag, Architecture: layer.Architecture, } var metadata runconfig.Config if layer.Config != nil { metadata = *layer.Config } layersSeen := make(map[string]bool) layers := []*image.Image{layer} for ; layer != nil; layer, err = layer.GetParent() { if err != nil { return err } if layersSeen[layer.ID] { break } layers = append(layers, layer) layersSeen[layer.ID] = true } m.FSLayers = make([]*registry.FSLayer, len(layers)) m.History = make([]*registry.ManifestHistory, len(layers)) // Schema version 1 requires layer ordering from top to root for i, layer := range layers { 
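// layers was collected child-first above, so index 0 is the image the tag
// points at; configs are merged on the way down while FSLayers and History
// are filled in at the matching index.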
log.Debugf("Pushing layer: %s", layer.ID) if layer.Config != nil && metadata.Image != layer.ID { err = runconfig.Merge(&metadata, layer.Config) if err != nil { return err } } jsonData, err := layer.RawJson() if err != nil { return fmt.Errorf("cannot retrieve the path for %s: %s", layer.ID, err) } checksum, err := layer.GetCheckSum(s.graph.ImageRoot(layer.ID)) if err != nil { return fmt.Errorf("error getting image checksum: %s", err) } var exists bool if len(checksum) > 0 { sumParts := strings.SplitN(checksum, ":", 2) if len(sumParts) < 2 { return fmt.Errorf("Invalid checksum: %s", checksum) } // Call mount blob exists, err = r.HeadV2ImageBlob(endpoint, repoInfo.RemoteName, sumParts[0], sumParts[1], auth) if err != nil { out.Write(sf.FormatProgress(common.TruncateID(layer.ID), "Image push failed", nil)) return err } } if !exists { if cs, err := s.pushV2Image(r, layer, endpoint, repoInfo.RemoteName, sf, out, auth); err != nil { return err } else if cs != checksum { // Cache new checksum if err := layer.SaveCheckSum(s.graph.ImageRoot(layer.ID), cs); err != nil { return err } checksum = cs } } else { out.Write(sf.FormatProgress(common.TruncateID(layer.ID), "Image already exists", nil)) } m.FSLayers[i] = ®istry.FSLayer{BlobSum: checksum} m.History[i] = ®istry.ManifestHistory{V1Compatibility: string(jsonData)} } if err := checkValidManifest(m); err != nil { return fmt.Errorf("invalid manifest: %s", err) } log.Debugf("Pushing %s:%s to v2 repository", repoInfo.LocalName, tag) mBytes, err := json.MarshalIndent(m, "", " ") if err != nil { return err } js, err := libtrust.NewJSONSignature(mBytes) if err != nil { return err } if err = js.Sign(s.trustKey); err != nil { return err } signedBody, err := js.PrettySignature("signatures") if err != nil { return err } log.Infof("Signed manifest for %s:%s using daemon's key: %s", repoInfo.LocalName, tag, s.trustKey.KeyID()) // push the manifest digest, err := r.PutV2ImageManifest(endpoint, repoInfo.RemoteName, tag, signedBody, mBytes, auth) if err != nil { return err } out.Write(sf.FormatStatus("", "Digest: %s", digest)) } return nil } // PushV2Image pushes the image content to the v2 registry, first buffering the contents to disk func (s *TagStore) pushV2Image(r *registry.Session, img *image.Image, endpoint *registry.Endpoint, imageName string, sf *utils.StreamFormatter, out io.Writer, auth *registry.RequestAuthorization) (string, error) { out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Buffering to Disk", nil)) image, err := s.graph.Get(img.ID) if err != nil { return "", err } arch, err := image.TarLayer() if err != nil { return "", err } defer arch.Close() tf, err := s.graph.newTempFile() if err != nil { return "", err } defer func() { tf.Close() os.Remove(tf.Name()) }() size, dgst, err := bufferToFile(tf, arch) // Send the layer log.Debugf("rendered layer for %s of [%d] size", img.ID, size) if err := r.PutV2ImageBlob(endpoint, imageName, dgst.Algorithm(), dgst.Hex(), progressreader.New(progressreader.Config{ In: tf, Out: out, Formatter: sf, Size: int(size), NewLines: false, ID: common.TruncateID(img.ID), Action: "Pushing", }), auth); err != nil { out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Image push failed", nil)) return "", err } out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Image successfully pushed", nil)) return dgst.String(), nil } // FIXME: Allow to interrupt current push when new push of same image is done. 
func (s *TagStore) CmdPush(job *engine.Job) engine.Status { if n := len(job.Args); n != 1 { return job.Errorf("Usage: %s IMAGE", job.Name) } var ( localName = job.Args[0] sf = utils.NewStreamFormatter(job.GetenvBool("json")) authConfig = ®istry.AuthConfig{} metaHeaders map[string][]string ) // Resolve the Repository name from fqn to RepositoryInfo repoInfo, err := registry.ResolveRepositoryInfo(job, localName) if err != nil { return job.Error(err) } tag := job.Getenv("tag") job.GetenvJson("authConfig", authConfig) job.GetenvJson("metaHeaders", &metaHeaders) if _, err := s.poolAdd("push", repoInfo.LocalName); err != nil { return job.Error(err) } defer s.poolRemove("push", repoInfo.LocalName) endpoint, err := repoInfo.GetEndpoint() if err != nil { return job.Error(err) } r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, false) if err != nil { return job.Error(err) } reposLen := 1 if tag == "" { reposLen = len(s.Repositories[repoInfo.LocalName]) } job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", repoInfo.CanonicalName, reposLen)) // If it fails, try to get the repository localRepo, exists := s.Repositories[repoInfo.LocalName] if !exists { return job.Errorf("Repository does not exist: %s", repoInfo.LocalName) } if repoInfo.Index.Official || endpoint.Version == registry.APIVersion2 { err := s.pushV2Repository(r, localRepo, job.Stdout, repoInfo, tag, sf) if err == nil { return engine.StatusOK } if err != ErrV2RegistryUnavailable { return job.Errorf("Error pushing to registry: %s", err) } } if err := s.pushRepository(r, job.Stdout, repoInfo, localRepo, tag, sf); err != nil { return job.Error(err) } return engine.StatusOK } docker-1.6.2/graph/pools_test.go0000644000175000017500000000244412524223634016163 0ustar tianontianonpackage graph import ( "testing" "github.com/docker/docker/pkg/reexec" ) func init() { reexec.Init() } func TestPools(t *testing.T) { s := &TagStore{ pullingPool: make(map[string]chan struct{}), pushingPool: make(map[string]chan struct{}), } if _, err := s.poolAdd("pull", "test1"); err != nil { t.Fatal(err) } if _, err := s.poolAdd("pull", "test2"); err != nil { t.Fatal(err) } if _, err := s.poolAdd("push", "test1"); err == nil || err.Error() != "pull test1 is already in progress" { t.Fatalf("Expected `pull test1 is already in progress`") } if _, err := s.poolAdd("pull", "test1"); err == nil || err.Error() != "pull test1 is already in progress" { t.Fatalf("Expected `pull test1 is already in progress`") } if _, err := s.poolAdd("wait", "test3"); err == nil || err.Error() != "Unknown pool type" { t.Fatalf("Expected `Unknown pool type`") } if err := s.poolRemove("pull", "test2"); err != nil { t.Fatal(err) } if err := s.poolRemove("pull", "test2"); err != nil { t.Fatal(err) } if err := s.poolRemove("pull", "test1"); err != nil { t.Fatal(err) } if err := s.poolRemove("push", "test1"); err != nil { t.Fatal(err) } if err := s.poolRemove("wait", "test3"); err == nil || err.Error() != "Unknown pool type" { t.Fatalf("Expected `Unknown pool type`") } } docker-1.6.2/graph/tags_unit_test.go0000644000175000017500000001407512524223634017027 0ustar tianontianonpackage graph import ( "bytes" "io" "os" "path" "testing" "github.com/docker/docker/daemon/graphdriver" _ "github.com/docker/docker/daemon/graphdriver/vfs" // import the vfs driver so it is used in the tests "github.com/docker/docker/image" "github.com/docker/docker/utils" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) const 
( testOfficialImageName = "myapp" testOfficialImageID = "1a2d3c4d4e5fa2d2a21acea242a5e2345d3aefc3e7dfa2a2a2a21a2a2ad2d234" testOfficialImageIDShort = "1a2d3c4d4e5f" testPrivateImageName = "127.0.0.1:8000/privateapp" testPrivateImageID = "5bc255f8699e4ee89ac4469266c3d11515da88fdcbde45d7b069b636ff4efd81" testPrivateImageIDShort = "5bc255f8699e" testPrivateImageDigest = "sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb" testPrivateImageTag = "sometag" ) func fakeTar() (io.Reader, error) { uid := os.Getuid() gid := os.Getgid() content := []byte("Hello world!\n") buf := new(bytes.Buffer) tw := tar.NewWriter(buf) for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} { hdr := new(tar.Header) // Leaving these fields blank requires root privileges hdr.Uid = uid hdr.Gid = gid hdr.Size = int64(len(content)) hdr.Name = name if err := tw.WriteHeader(hdr); err != nil { return nil, err } tw.Write([]byte(content)) } tw.Close() return buf, nil } func mkTestTagStore(root string, t *testing.T) *TagStore { driver, err := graphdriver.New(root, nil) if err != nil { t.Fatal(err) } graph, err := NewGraph(root, driver) if err != nil { t.Fatal(err) } store, err := NewTagStore(path.Join(root, "tags"), graph, nil) if err != nil { t.Fatal(err) } officialArchive, err := fakeTar() if err != nil { t.Fatal(err) } img := &image.Image{ID: testOfficialImageID} if err := graph.Register(img, officialArchive); err != nil { t.Fatal(err) } if err := store.Set(testOfficialImageName, "", testOfficialImageID, false); err != nil { t.Fatal(err) } privateArchive, err := fakeTar() if err != nil { t.Fatal(err) } img = &image.Image{ID: testPrivateImageID} if err := graph.Register(img, privateArchive); err != nil { t.Fatal(err) } if err := store.Set(testPrivateImageName, "", testPrivateImageID, false); err != nil { t.Fatal(err) } if err := store.SetDigest(testPrivateImageName, testPrivateImageDigest, testPrivateImageID); err != nil { t.Fatal(err) } return store } func TestLookupImage(t *testing.T) { tmp, err := utils.TestDirectory("") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) store := mkTestTagStore(tmp, t) defer store.graph.driver.Cleanup() officialLookups := []string{ testOfficialImageID, testOfficialImageIDShort, testOfficialImageName + ":" + testOfficialImageID, testOfficialImageName + ":" + testOfficialImageIDShort, testOfficialImageName, testOfficialImageName + ":" + DEFAULTTAG, "docker.io/" + testOfficialImageName, "docker.io/" + testOfficialImageName + ":" + DEFAULTTAG, "index.docker.io/" + testOfficialImageName, "index.docker.io/" + testOfficialImageName + ":" + DEFAULTTAG, "library/" + testOfficialImageName, "library/" + testOfficialImageName + ":" + DEFAULTTAG, "docker.io/library/" + testOfficialImageName, "docker.io/library/" + testOfficialImageName + ":" + DEFAULTTAG, "index.docker.io/library/" + testOfficialImageName, "index.docker.io/library/" + testOfficialImageName + ":" + DEFAULTTAG, } privateLookups := []string{ testPrivateImageID, testPrivateImageIDShort, testPrivateImageName + ":" + testPrivateImageID, testPrivateImageName + ":" + testPrivateImageIDShort, testPrivateImageName, testPrivateImageName + ":" + DEFAULTTAG, } invalidLookups := []string{ testOfficialImageName + ":" + "fail", "fail:fail", } digestLookups := []string{ testPrivateImageName + "@" + testPrivateImageDigest, } for _, name := range officialLookups { if img, err := store.LookupImage(name); err != nil { t.Errorf("Error looking up %s: %s", name, err) } else if 
img == nil { t.Errorf("Expected 1 image, none found: %s", name) } else if img.ID != testOfficialImageID { t.Errorf("Expected ID '%s' found '%s'", testOfficialImageID, img.ID) } } for _, name := range privateLookups { if img, err := store.LookupImage(name); err != nil { t.Errorf("Error looking up %s: %s", name, err) } else if img == nil { t.Errorf("Expected 1 image, none found: %s", name) } else if img.ID != testPrivateImageID { t.Errorf("Expected ID '%s' found '%s'", testPrivateImageID, img.ID) } } for _, name := range invalidLookups { if img, err := store.LookupImage(name); err == nil { t.Errorf("Expected error, none found: %s", name) } else if img != nil { t.Errorf("Expected 0 image, 1 found: %s", name) } } for _, name := range digestLookups { if img, err := store.LookupImage(name); err != nil { t.Errorf("Error looking up %s: %s", name, err) } else if img == nil { t.Errorf("Expected 1 image, none found: %s", name) } else if img.ID != testPrivateImageID { t.Errorf("Expected ID '%s' found '%s'", testPrivateImageID, img.ID) } } } func TestValidTagName(t *testing.T) { validTags := []string{"9", "foo", "foo-test", "bar.baz.boo"} for _, tag := range validTags { if err := ValidateTagName(tag); err != nil { t.Errorf("'%s' should've been a valid tag", tag) } } } func TestInvalidTagName(t *testing.T) { invalidTags := []string{"-9", ".foo", "-test", ".", "-"} for _, tag := range invalidTags { if err := ValidateTagName(tag); err == nil { t.Errorf("'%s' shouldn't have been a valid tag", tag) } } } func TestValidateDigest(t *testing.T) { tests := []struct { input string expectError bool }{ {"", true}, {"latest", true}, {"a:b", false}, {"aZ0124-.+:bY852-_.+=", false}, {"#$%#$^:$%^#$%", true}, } for i, test := range tests { err := validateDigest(test.input) gotError := err != nil if e, a := test.expectError, gotError; e != a { t.Errorf("%d: with input %s, expected error=%t, got %t: %s", i, test.input, test.expectError, gotError, err) } } } docker-1.6.2/graph/history.go0000644000175000017500000000233612524223634015471 0ustar tianontianonpackage graph import ( "strings" "github.com/docker/docker/engine" "github.com/docker/docker/image" "github.com/docker/docker/utils" ) func (s *TagStore) CmdHistory(job *engine.Job) engine.Status { if n := len(job.Args); n != 1 { return job.Errorf("Usage: %s IMAGE", job.Name) } name := job.Args[0] foundImage, err := s.LookupImage(name) if err != nil { return job.Error(err) } lookupMap := make(map[string][]string) for name, repository := range s.Repositories { for tag, id := range repository { // Build a reverse lookup from image ID to every reference (name:tag) that points at it if _, exists := lookupMap[id]; !exists { lookupMap[id] = []string{} } lookupMap[id] = append(lookupMap[id], utils.ImageReference(name, tag)) } } outs := engine.NewTable("Created", 0) err = foundImage.WalkHistory(func(img *image.Image) error { out := &engine.Env{} out.SetJson("Id", img.ID) out.SetInt64("Created", img.Created.Unix()) out.Set("CreatedBy", strings.Join(img.ContainerConfig.Cmd, " ")) out.SetList("Tags", lookupMap[img.ID]) out.SetInt64("Size", img.Size) outs.Add(out) return nil }) if err != nil { return job.Error(err) } if _, err := outs.WriteListTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK } docker-1.6.2/graph/manifest_test.go0000644000175000017500000001112012524223634016624 0ustar tianontianonpackage graph import ( "encoding/json" "fmt" "io" "io/ioutil" "os" "testing" "github.com/docker/docker/image" "github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/registry"
"github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) const ( testManifestImageName = "testapp" testManifestImageID = "d821b739e8834ec89ac4469266c3d11515da88fdcbcbdddcbcddb636f54fdde9" testManifestImageIDShort = "d821b739e883" testManifestTag = "manifesttest" ) func (s *TagStore) newManifest(localName, remoteName, tag string) ([]byte, error) { manifest := ®istry.ManifestData{ Name: remoteName, Tag: tag, SchemaVersion: 1, } localRepo, err := s.Get(localName) if err != nil { return nil, err } if localRepo == nil { return nil, fmt.Errorf("Repo does not exist: %s", localName) } // Get the top-most layer id which the tag points to layerId, exists := localRepo[tag] if !exists { return nil, fmt.Errorf("Tag does not exist for %s: %s", localName, tag) } layersSeen := make(map[string]bool) layer, err := s.graph.Get(layerId) if err != nil { return nil, err } manifest.Architecture = layer.Architecture manifest.FSLayers = make([]*registry.FSLayer, 0, 4) manifest.History = make([]*registry.ManifestHistory, 0, 4) var metadata runconfig.Config if layer.Config != nil { metadata = *layer.Config } for ; layer != nil; layer, err = layer.GetParent() { if err != nil { return nil, err } if layersSeen[layer.ID] { break } if layer.Config != nil && metadata.Image != layer.ID { err = runconfig.Merge(&metadata, layer.Config) if err != nil { return nil, err } } checksum, err := layer.GetCheckSum(s.graph.ImageRoot(layer.ID)) if err != nil { return nil, fmt.Errorf("Error getting image checksum: %s", err) } if tarsum.VersionLabelForChecksum(checksum) != tarsum.Version1.String() { archive, err := layer.TarLayer() if err != nil { return nil, err } defer archive.Close() tarSum, err := tarsum.NewTarSum(archive, true, tarsum.Version1) if err != nil { return nil, err } if _, err := io.Copy(ioutil.Discard, tarSum); err != nil { return nil, err } checksum = tarSum.Sum(nil) // Save checksum value if err := layer.SaveCheckSum(s.graph.ImageRoot(layer.ID), checksum); err != nil { return nil, err } } jsonData, err := layer.RawJson() if err != nil { return nil, fmt.Errorf("Cannot retrieve the path for {%s}: %s", layer.ID, err) } manifest.FSLayers = append(manifest.FSLayers, ®istry.FSLayer{BlobSum: checksum}) layersSeen[layer.ID] = true manifest.History = append(manifest.History, ®istry.ManifestHistory{V1Compatibility: string(jsonData)}) } manifestBytes, err := json.MarshalIndent(manifest, "", " ") if err != nil { return nil, err } return manifestBytes, nil } func TestManifestTarsumCache(t *testing.T) { tmp, err := utils.TestDirectory("") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) store := mkTestTagStore(tmp, t) defer store.graph.driver.Cleanup() archive, err := fakeTar() if err != nil { t.Fatal(err) } img := &image.Image{ID: testManifestImageID} if err := store.graph.Register(img, archive); err != nil { t.Fatal(err) } if err := store.Set(testManifestImageName, testManifestTag, testManifestImageID, false); err != nil { t.Fatal(err) } if cs, err := img.GetCheckSum(store.graph.ImageRoot(testManifestImageID)); err != nil { t.Fatal(err) } else if cs != "" { t.Fatalf("Non-empty checksum file after register") } // Generate manifest payload, err := store.newManifest(testManifestImageName, testManifestImageName, testManifestTag) if err != nil { t.Fatal(err) } manifestChecksum, err := img.GetCheckSum(store.graph.ImageRoot(testManifestImageID)) if err != nil { t.Fatal(err) } var manifest registry.ManifestData if err := json.Unmarshal(payload, &manifest); err != nil { t.Fatalf("error unmarshalling manifest: %s", err) 
} if len(manifest.FSLayers) != 1 { t.Fatalf("Unexpected number of layers, expecting 1: %d", len(manifest.FSLayers)) } if manifest.FSLayers[0].BlobSum != manifestChecksum { t.Fatalf("Unexpected blob sum, expecting %q, got %q", manifestChecksum, manifest.FSLayers[0].BlobSum) } if len(manifest.History) != 1 { t.Fatalf("Unexpected number of layer history, expecting 1: %d", len(manifest.History)) } v1compat, err := img.RawJson() if err != nil { t.Fatal(err) } if manifest.History[0].V1Compatibility != string(v1compat) { t.Fatalf("Unexpected json value\nExpected:\n%s\nActual:\n%s", v1compat, manifest.History[0].V1Compatibility) } } docker-1.6.2/graph/tag.go0000644000175000017500000000067312524223634014545 0ustar tianontianonpackage graph import ( "github.com/docker/docker/engine" ) func (s *TagStore) CmdTag(job *engine.Job) engine.Status { if len(job.Args) != 2 && len(job.Args) != 3 { return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name) } var tag string if len(job.Args) == 3 { tag = job.Args[2] } if err := s.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil { return job.Error(err) } return engine.StatusOK } docker-1.6.2/graph/service.go0000644000175000017500000001355012524223634015430 0ustar tianontianonpackage graph import ( "fmt" "io" log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/docker/image" ) func (s *TagStore) Install(eng *engine.Engine) error { for name, handler := range map[string]engine.Handler{ "image_set": s.CmdSet, "tag": s.CmdTag, "image_get": s.CmdGet, "image_inspect": s.CmdLookup, "image_tarlayer": s.CmdTarLayer, "image_export": s.CmdImageExport, "history": s.CmdHistory, "images": s.CmdImages, "viz": s.CmdViz, "load": s.CmdLoad, "import": s.CmdImport, "pull": s.CmdPull, "push": s.CmdPush, } { if err := eng.Register(name, handler); err != nil { return fmt.Errorf("Could not register %q: %v", name, err) } } return nil } // CmdSet stores a new image in the graph. // Images are stored in the graph using 4 elements: // - A user-defined ID // - A collection of metadata describing the image // - A directory tree stored as a tar archive (also called the "layer") // - A reference to a "parent" ID on top of which the layer should be applied // // NOTE: even though the parent ID is only useful in relation to the layer and how // to apply it (ie you could represent the full directory tree as 'parent_layer + layer'), // it is treated as a top-level property of the image. This is an artifact of early // design and should probably be cleaned up in the future to simplify the design. // // Syntax: image_set ID // Input: // - Layer content must be streamed in tar format on stdin. An empty input is // valid and represents a nil layer. // // - Image metadata must be passed in the command environment. // 'json': a json-encoded object with all image metadata. // It will be stored as-is, without any encoding/decoding artifacts. // That is a requirement of the current registry client implementation, // because a re-encoded json might invalidate the image checksum at // the next upload, even with functionally identical content. func (s *TagStore) CmdSet(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("usage: %s NAME", job.Name) } var ( imgJSON = []byte(job.Getenv("json")) layer = job.Stdin ) if len(imgJSON) == 0 { return job.Errorf("mandatory key 'json' is not set") } // We have to pass an *image.Image object, even though it will be completely // ignored in favor of the redundant json data.
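// For orientation, a minimal 'json' payload looks roughly like this
// (illustrative and trimmed; real payloads carry the full field set):
//   {"id":"<64-hex id>","parent":"<64-hex id>","created":"2015-01-01T00:00:00Z"}
// Those bytes are kept verbatim, which is why no re-encoding happens anywhere here.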
// FIXME: the current prototype of Graph.Register is stupid and redundant. img, err := image.NewImgJSON(imgJSON) if err != nil { return job.Error(err) } if err := s.graph.Register(img, layer); err != nil { return job.Error(err) } return engine.StatusOK } // CmdGet returns information about an image. // If the image doesn't exist, an empty object is returned, to allow // checking for an image's existence. func (s *TagStore) CmdGet(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("usage: %s NAME", job.Name) } name := job.Args[0] res := &engine.Env{} img, err := s.LookupImage(name) // Note: if the image doesn't exist, LookupImage returns // nil, nil. if err != nil { return job.Error(err) } if img != nil { // We don't directly expose all fields of the Image objects, // to maintain a clean public API which we can maintain over // time even if the underlying structure changes. // We should have done this with the Image object to begin with... // but we didn't, so now we're doing it here. // // Fields that we're probably better off not including: // - Config/ContainerConfig. Those structs have the same sprawl problem, // so we shouldn't include them wholesale either. // - Comment: initially created to fulfill the "every image is a git commit" // metaphor, in practice people either ignore it or use it as a // generic description field which it isn't. On deprecation shortlist. res.SetAuto("Created", img.Created) res.SetJson("Author", img.Author) res.Set("Os", img.OS) res.Set("Architecture", img.Architecture) res.Set("DockerVersion", img.DockerVersion) res.SetJson("Id", img.ID) res.SetJson("Parent", img.Parent) } res.WriteTo(job.Stdout) return engine.StatusOK } // CmdLookup return an image encoded in JSON func (s *TagStore) CmdLookup(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("usage: %s NAME", job.Name) } name := job.Args[0] if image, err := s.LookupImage(name); err == nil && image != nil { if job.GetenvBool("raw") { b, err := image.RawJson() if err != nil { return job.Error(err) } job.Stdout.Write(b) return engine.StatusOK } out := &engine.Env{} out.SetJson("Id", image.ID) out.SetJson("Parent", image.Parent) out.SetJson("Comment", image.Comment) out.SetAuto("Created", image.Created) out.SetJson("Container", image.Container) out.SetJson("ContainerConfig", image.ContainerConfig) out.Set("DockerVersion", image.DockerVersion) out.SetJson("Author", image.Author) out.SetJson("Config", image.Config) out.Set("Architecture", image.Architecture) out.Set("Os", image.OS) out.SetInt64("Size", image.Size) out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size) if _, err = out.WriteTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK } return job.Errorf("No such image: %s", name) } // CmdTarLayer return the tarLayer of the image func (s *TagStore) CmdTarLayer(job *engine.Job) engine.Status { if len(job.Args) != 1 { return job.Errorf("usage: %s NAME", job.Name) } name := job.Args[0] if image, err := s.LookupImage(name); err == nil && image != nil { fs, err := image.TarLayer() if err != nil { return job.Error(err) } defer fs.Close() written, err := io.Copy(job.Stdout, fs) if err != nil { return job.Error(err) } log.Debugf("rendered layer for %s of [%d] size", image.ID, written) return engine.StatusOK } return job.Errorf("No such image: %s", name) } docker-1.6.2/graph/import.go0000644000175000017500000000467512524223634015312 0ustar tianontianonpackage graph import ( "bytes" "encoding/json" "net/http" "net/url" log 
"github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/progressreader" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) func (s *TagStore) CmdImport(job *engine.Job) engine.Status { if n := len(job.Args); n != 2 && n != 3 { return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name) } var ( src = job.Args[0] repo = job.Args[1] tag string sf = utils.NewStreamFormatter(job.GetenvBool("json")) archive archive.ArchiveReader resp *http.Response stdoutBuffer = bytes.NewBuffer(nil) newConfig runconfig.Config ) if len(job.Args) > 2 { tag = job.Args[2] } if src == "-" { archive = job.Stdin } else { u, err := url.Parse(src) if err != nil { return job.Error(err) } if u.Scheme == "" { u.Scheme = "http" u.Host = src u.Path = "" } job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u)) resp, err = utils.Download(u.String()) if err != nil { return job.Error(err) } progressReader := progressreader.New(progressreader.Config{ In: resp.Body, Out: job.Stdout, Formatter: sf, Size: int(resp.ContentLength), NewLines: true, ID: "", Action: "Importing", }) defer progressReader.Close() archive = progressReader } buildConfigJob := job.Eng.Job("build_config") buildConfigJob.Stdout.Add(stdoutBuffer) buildConfigJob.Setenv("changes", job.Getenv("changes")) // FIXME this should be remove when we remove deprecated config param buildConfigJob.Setenv("config", job.Getenv("config")) if err := buildConfigJob.Run(); err != nil { return job.Error(err) } if err := json.NewDecoder(stdoutBuffer).Decode(&newConfig); err != nil { return job.Error(err) } img, err := s.graph.Create(archive, "", "", "Imported from "+src, "", nil, &newConfig) if err != nil { return job.Error(err) } // Optionally register the image at REPO/TAG if repo != "" { if err := s.Set(repo, tag, img.ID, true); err != nil { return job.Error(err) } } job.Stdout.Write(sf.FormatStatus("", img.ID)) logID := img.ID if tag != "" { logID = utils.ImageReference(logID, tag) } if err = job.Eng.Job("log", "import", logID, "").Run(); err != nil { log.Errorf("Error logging event 'import' for %s: %s", logID, err) } return engine.StatusOK } docker-1.6.2/graph/load.go0000644000175000017500000000633312524223634014710 0ustar tianontianon// +build linux package graph import ( "encoding/json" "io/ioutil" "os" "path" log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/utils" ) // Loads a set of images into the repository. This is the complementary of ImageExport. // The input stream is an uncompressed tar ball containing images and metadata. 
func (s *TagStore) CmdLoad(job *engine.Job) engine.Status { tmpImageDir, err := ioutil.TempDir("", "docker-import-") if err != nil { return job.Error(err) } defer os.RemoveAll(tmpImageDir) var ( repoDir = path.Join(tmpImageDir, "repo") ) if err := os.Mkdir(repoDir, os.ModeDir); err != nil { return job.Error(err) } images, err := s.graph.Map() if err != nil { return job.Error(err) } excludes := make([]string, len(images)) i := 0 for k := range images { excludes[i] = k i++ } if err := chrootarchive.Untar(job.Stdin, repoDir, &archive.TarOptions{ExcludePatterns: excludes}); err != nil { return job.Error(err) } dirs, err := ioutil.ReadDir(repoDir) if err != nil { return job.Error(err) } for _, d := range dirs { if d.IsDir() { if err := s.recursiveLoad(job.Eng, d.Name(), tmpImageDir); err != nil { return job.Error(err) } } } repositoriesJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", "repositories")) if err == nil { repositories := map[string]Repository{} if err := json.Unmarshal(repositoriesJson, &repositories); err != nil { return job.Error(err) } for imageName, tagMap := range repositories { for tag, address := range tagMap { if err := s.Set(imageName, tag, address, true); err != nil { return job.Error(err) } } } } else if !os.IsNotExist(err) { return job.Error(err) } return engine.StatusOK } func (s *TagStore) recursiveLoad(eng *engine.Engine, address, tmpImageDir string) error { if err := eng.Job("image_get", address).Run(); err != nil { log.Debugf("Loading %s", address) imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json")) if err != nil { log.Debugf("Error reading json: %v", err) return err } layer, err := os.Open(path.Join(tmpImageDir, "repo", address, "layer.tar")) if err != nil { log.Debugf("Error reading embedded tar: %v", err) return err } img, err := image.NewImgJSON(imageJson) if err != nil { log.Debugf("Error unmarshalling json: %v", err) return err } if err := utils.ValidateID(img.ID); err != nil { log.Debugf("Error validating ID: %s", err) return err } // ensure no two downloads of the same layer happen at the same time if c, err := s.poolAdd("pull", "layer:"+img.ID); err != nil { if c != nil { log.Debugf("Image (id: %s) load is already running, waiting: %v", img.ID, err) <-c return nil } return err } defer s.poolRemove("pull", "layer:"+img.ID) if img.Parent != "" { if !s.graph.Exists(img.Parent) { if err := s.recursiveLoad(eng, img.Parent, tmpImageDir); err != nil { return err } } } if err := s.graph.Register(img, layer); err != nil { return err } } log.Debugf("Completed processing %s", address) return nil } docker-1.6.2/graph/graph.go0000644000175000017500000003002212524223634015062 0ustar tianontianonpackage graph import ( "compress/gzip" "crypto/sha256" "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "runtime" "strings" "syscall" "time" log "github.com/Sirupsen/logrus" "github.com/docker/distribution/digest" "github.com/docker/docker/autogen/dockerversion" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/common" "github.com/docker/docker/pkg/progressreader" "github.com/docker/docker/pkg/truncindex" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) // A Graph is a store for versioned filesystem images and the relationship between them. type Graph struct { Root string idIndex *truncindex.TruncIndex driver graphdriver.Driver } // NewGraph instantiates a new graph at the given root path in the filesystem.
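// (Typical wiring, as exercised by mkTestTagStore in tags_unit_test.go:
// graphdriver.New(root, nil), then NewGraph(root, driver), then
// NewTagStore(path.Join(root, "tags"), graph, key).)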
// `root` will be created if it doesn't exist. func NewGraph(root string, driver graphdriver.Driver) (*Graph, error) { abspath, err := filepath.Abs(root) if err != nil { return nil, err } // Create the root directory if it doesn't exist if err := os.MkdirAll(root, 0700); err != nil && !os.IsExist(err) { return nil, err } graph := &Graph{ Root: abspath, idIndex: truncindex.NewTruncIndex([]string{}), driver: driver, } if err := graph.restore(); err != nil { return nil, err } return graph, nil } func (graph *Graph) restore() error { dir, err := ioutil.ReadDir(graph.Root) if err != nil { return err } var ids = []string{} for _, v := range dir { id := v.Name() if graph.driver.Exists(id) { ids = append(ids, id) } } graph.idIndex = truncindex.NewTruncIndex(ids) log.Debugf("Restored %d elements", len(dir)) return nil } // FIXME: Implement error subclass instead of looking at the error text // Note: This is the way golang implements os.IsNotExist on Plan9 func (graph *Graph) IsNotExist(err error) bool { return err != nil && (strings.Contains(strings.ToLower(err.Error()), "does not exist") || strings.Contains(strings.ToLower(err.Error()), "no such")) } // Exists returns true if an image is registered at the given id. // If the image doesn't exist or if an error is encountered, false is returned. func (graph *Graph) Exists(id string) bool { if _, err := graph.Get(id); err != nil { return false } return true } // Get returns the image with the given id, or an error if the image doesn't exist. func (graph *Graph) Get(name string) (*image.Image, error) { id, err := graph.idIndex.Get(name) if err != nil { return nil, fmt.Errorf("could not find image: %v", err) } img, err := image.LoadImage(graph.ImageRoot(id)) if err != nil { return nil, err } if img.ID != id { return nil, fmt.Errorf("Image stored at '%s' has wrong id '%s'", id, img.ID) } img.SetGraph(graph) if img.Size < 0 { size, err := graph.driver.DiffSize(img.ID, img.Parent) if err != nil { return nil, fmt.Errorf("unable to calculate size of image id %q: %s", img.ID, err) } img.Size = size if err := img.SaveSize(graph.ImageRoot(id)); err != nil { return nil, err } } return img, nil } // Create creates a new image and registers it in the graph. func (graph *Graph) Create(layerData archive.ArchiveReader, containerID, containerImage, comment, author string, containerConfig, config *runconfig.Config) (*image.Image, error) { img := &image.Image{ ID: common.GenerateRandomID(), Comment: comment, Created: time.Now().UTC(), DockerVersion: dockerversion.VERSION, Author: author, Config: config, Architecture: runtime.GOARCH, OS: runtime.GOOS, } if containerID != "" { img.Parent = containerImage img.Container = containerID img.ContainerConfig = *containerConfig } if err := graph.Register(img, layerData); err != nil { return nil, err } return img, nil } // Register imports a pre-existing image into the graph. func (graph *Graph) Register(img *image.Image, layerData archive.ArchiveReader) (err error) { defer func() { // If any error occurs, remove the new dir from the driver. // Don't check for errors since the dir might not have been created. // FIXME: this leaves a possible race condition. if err != nil { graph.driver.Remove(img.ID) } }() if err := utils.ValidateID(img.ID); err != nil { return err } // (This is a convenience to save time.
Race conditions are taken care of by os.Rename) if graph.Exists(img.ID) { return fmt.Errorf("Image %s already exists", img.ID) } // Ensure that the image root does not exist on the filesystem // when it is not registered in the graph. // This is common when you switch from one graph driver to another if err := os.RemoveAll(graph.ImageRoot(img.ID)); err != nil && !os.IsNotExist(err) { return err } // If the driver has this ID but the graph doesn't, remove it from the driver to start fresh. // (the graph is the source of truth). // Ignore errors, since we don't know if the driver correctly returns ErrNotExist. // (FIXME: make that mandatory for drivers). graph.driver.Remove(img.ID) tmp, err := graph.Mktemp("") defer os.RemoveAll(tmp) if err != nil { return fmt.Errorf("Mktemp failed: %s", err) } // Create root filesystem in the driver if err := graph.driver.Create(img.ID, img.Parent); err != nil { return fmt.Errorf("Driver %s failed to create image rootfs %s: %s", graph.driver, img.ID, err) } // Apply the diff/layer img.SetGraph(graph) if err := image.StoreImage(img, layerData, tmp); err != nil { return err } // Commit if err := os.Rename(tmp, graph.ImageRoot(img.ID)); err != nil { return err } graph.idIndex.Add(img.ID) return nil } // TempLayerArchive creates a temporary archive of the given image's filesystem layer. // The archive is stored on disk and will be automatically deleted as soon as it has been read. // If output is not nil, a human-readable progress bar will be written to it. // FIXME: does this belong in Graph? How about MktempFile, let the caller use it for archives? func (graph *Graph) TempLayerArchive(id string, sf *utils.StreamFormatter, output io.Writer) (*archive.TempArchive, error) { image, err := graph.Get(id) if err != nil { return nil, err } tmp, err := graph.Mktemp("") if err != nil { return nil, err } a, err := image.TarLayer() if err != nil { return nil, err } progressReader := progressreader.New(progressreader.Config{ In: a, Out: output, Formatter: sf, Size: 0, NewLines: false, ID: common.TruncateID(id), Action: "Buffering to disk", }) defer progressReader.Close() return archive.NewTempArchive(progressReader, tmp) } // Mktemp creates a temporary sub-directory inside the graph's filesystem. func (graph *Graph) Mktemp(id string) (string, error) { dir := path.Join(graph.Root, "_tmp", common.GenerateRandomID()) if err := os.MkdirAll(dir, 0700); err != nil { return "", err } return dir, nil } func (graph *Graph) newTempFile() (*os.File, error) { tmp, err := graph.Mktemp("") if err != nil { return nil, err } return ioutil.TempFile(tmp, "") } func bufferToFile(f *os.File, src io.Reader) (int64, digest.Digest, error) { var ( h = sha256.New() w = gzip.NewWriter(io.MultiWriter(f, h)) ) _, err := io.Copy(w, src) w.Close() if err != nil { return 0, "", err } if err = f.Sync(); err != nil { return 0, "", err } n, err := f.Seek(0, os.SEEK_CUR) if err != nil { return 0, "", err } if _, err := f.Seek(0, 0); err != nil { return 0, "", err } return n, digest.NewDigest("sha256", h), nil } // SetupInitLayer populates a directory with mountpoints suitable // for bind-mounting dockerinit into the container. The mountpoint is simply an // empty file at /.dockerinit // // This extra layer is used by all containers as the top-most ro layer. It protects // the container from unwanted side-effects on the rw layer.
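// Short usage sketch (illustrative): SetupInitLayer only needs a writable
// directory; afterwards it contains the skeleton listed above, i.e. empty
// files and directories plus the /etc/mtab -> /proc/mounts symlink.
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/docker/docker/graph"
)

func main() {
	dir, err := ioutil.TempDir("", "init-layer-")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	if err := graph.SetupInitLayer(dir); err != nil {
		panic(err)
	}
	// Print the generated mountpoint skeleton.
	filepath.Walk(dir, func(p string, _ os.FileInfo, err error) error {
		if err == nil {
			fmt.Println(p)
		}
		return err
	})
}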
func SetupInitLayer(initLayer string) error { for pth, typ := range map[string]string{ "/dev/pts": "dir", "/dev/shm": "dir", "/proc": "dir", "/sys": "dir", "/.dockerinit": "file", "/.dockerenv": "file", "/etc/resolv.conf": "file", "/etc/hosts": "file", "/etc/hostname": "file", "/dev/console": "file", "/etc/mtab": "/proc/mounts", } { parts := strings.Split(pth, "/") prev := "/" for _, p := range parts[1:] { prev = path.Join(prev, p) syscall.Unlink(path.Join(initLayer, prev)) } if _, err := os.Stat(path.Join(initLayer, pth)); err != nil { if os.IsNotExist(err) { if err := os.MkdirAll(path.Join(initLayer, path.Dir(pth)), 0755); err != nil { return err } switch typ { case "dir": if err := os.MkdirAll(path.Join(initLayer, pth), 0755); err != nil { return err } case "file": f, err := os.OpenFile(path.Join(initLayer, pth), os.O_CREATE, 0755) if err != nil { return err } f.Close() default: if err := os.Symlink(typ, path.Join(initLayer, pth)); err != nil { return err } } } else { return err } } } // Layer is ready to use, if it wasn't before. return nil } // Check if given error is "not empty". // Note: this is the way golang does it internally with os.IsNotExists. func isNotEmpty(err error) bool { switch pe := err.(type) { case nil: return false case *os.PathError: err = pe.Err case *os.LinkError: err = pe.Err } return strings.Contains(err.Error(), " not empty") } // Delete atomically removes an image from the graph. func (graph *Graph) Delete(name string) error { id, err := graph.idIndex.Get(name) if err != nil { return err } tmp, err := graph.Mktemp("") graph.idIndex.Delete(id) if err == nil { err = os.Rename(graph.ImageRoot(id), tmp) // On err make tmp point to old dir and cleanup unused tmp dir if err != nil { os.RemoveAll(tmp) tmp = graph.ImageRoot(id) } } else { // On err make tmp point to old dir for cleanup tmp = graph.ImageRoot(id) } // Remove rootfs data from the driver graph.driver.Remove(id) // Remove the trashed image directory return os.RemoveAll(tmp) } // Map returns a list of all images in the graph, addressable by ID. func (graph *Graph) Map() (map[string]*image.Image, error) { images := make(map[string]*image.Image) err := graph.walkAll(func(image *image.Image) { images[image.ID] = image }) if err != nil { return nil, err } return images, nil } // walkAll iterates over each image in the graph, and passes it to a handler. // The walking order is undetermined. func (graph *Graph) walkAll(handler func(*image.Image)) error { files, err := ioutil.ReadDir(graph.Root) if err != nil { return err } for _, st := range files { if img, err := graph.Get(st.Name()); err != nil { // Skip image continue } else if handler != nil { handler(img) } } return nil } // ByParent returns a lookup table of images by their parent. // If an image of id ID has 3 children images, then the value for key ID // will be a list of 3 images. // If an image has no children, it will not have an entry in the table. func (graph *Graph) ByParent() (map[string][]*image.Image, error) { byParent := make(map[string][]*image.Image) err := graph.walkAll(func(img *image.Image) { parent, err := graph.Get(img.Parent) if err != nil { return } if children, exists := byParent[parent.ID]; exists { byParent[parent.ID] = append(children, img) } else { byParent[parent.ID] = []*image.Image{img} } }) return byParent, err } // Heads returns all heads in the graph, keyed by id. // A head is an image which is not the parent of another image in the graph. 
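// Standalone sketch (illustrative, not part of this file) mirroring the
// computation below over a plain id -> parent map: a head is any image that
// never appears as another image's parent.
package main

import "fmt"

func headsOf(parentOf map[string]string) []string {
	isParent := make(map[string]bool)
	for _, parent := range parentOf {
		if parent != "" {
			isParent[parent] = true
		}
	}
	var heads []string
	for id := range parentOf {
		if !isParent[id] {
			heads = append(heads, id)
		}
	}
	return heads
}

func main() {
	// base <- middle <- top: only "top" is a head.
	fmt.Println(headsOf(map[string]string{"base": "", "middle": "base", "top": "middle"}))
}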
func (graph *Graph) Heads() (map[string]*image.Image, error) { heads := make(map[string]*image.Image) byParent, err := graph.ByParent() if err != nil { return nil, err } err = graph.walkAll(func(image *image.Image) { // If it's not in the byParent lookup table, then // it's not a parent -> so it's a head! if _, exists := byParent[image.ID]; !exists { heads[image.ID] = image } }) return heads, err } func (graph *Graph) ImageRoot(id string) string { return path.Join(graph.Root, id) } func (graph *Graph) Driver() graphdriver.Driver { return graph.driver } docker-1.6.2/graph/manifest.go0000644000175000017500000000647712524223634015610 0ustar tianontianonpackage graph import ( "bytes" "encoding/json" "fmt" log "github.com/Sirupsen/logrus" "github.com/docker/distribution/digest" "github.com/docker/docker/engine" "github.com/docker/docker/registry" "github.com/docker/docker/utils" "github.com/docker/libtrust" ) // loadManifest loads a manifest from a byte array and verifies its content. // The signature must be verified or an error is returned. If the manifest // contains no signatures by a trusted key for the name in the manifest, the // image is not considered verified. The parsed manifest object and a boolean // for whether the manifest is verified is returned. func (s *TagStore) loadManifest(eng *engine.Engine, manifestBytes []byte, dgst, ref string) (*registry.ManifestData, bool, error) { sig, err := libtrust.ParsePrettySignature(manifestBytes, "signatures") if err != nil { return nil, false, fmt.Errorf("error parsing payload: %s", err) } keys, err := sig.Verify() if err != nil { return nil, false, fmt.Errorf("error verifying payload: %s", err) } payload, err := sig.Payload() if err != nil { return nil, false, fmt.Errorf("error retrieving payload: %s", err) } var manifestDigest digest.Digest if dgst != "" { manifestDigest, err = digest.ParseDigest(dgst) if err != nil { return nil, false, fmt.Errorf("invalid manifest digest from registry: %s", err) } dgstVerifier, err := digest.NewDigestVerifier(manifestDigest) if err != nil { return nil, false, fmt.Errorf("unable to verify manifest digest from registry: %s", err) } dgstVerifier.Write(payload) if !dgstVerifier.Verified() { computedDigest, _ := digest.FromBytes(payload) return nil, false, fmt.Errorf("unable to verify manifest digest: registry has %q, computed %q", manifestDigest, computedDigest) } } if utils.DigestReference(ref) && ref != manifestDigest.String() { return nil, false, fmt.Errorf("mismatching image manifest digest: got %q, expected %q", manifestDigest, ref) } var manifest registry.ManifestData if err := json.Unmarshal(payload, &manifest); err != nil { return nil, false, fmt.Errorf("error unmarshalling manifest: %s", err) } if manifest.SchemaVersion != 1 { return nil, false, fmt.Errorf("unsupported schema version: %d", manifest.SchemaVersion) } var verified bool for _, key := range keys { job := eng.Job("trust_key_check") b, err := key.MarshalJSON() if err != nil { return nil, false, fmt.Errorf("error marshalling public key: %s", err) } namespace := manifest.Name if namespace[0] != '/' { namespace = "/" + namespace } stdoutBuffer := bytes.NewBuffer(nil) job.Args = append(job.Args, namespace) job.Setenv("PublicKey", string(b)) // Check key has read/write permission (0x03) job.SetenvInt("Permission", 0x03) job.Stdout.Add(stdoutBuffer) if err = job.Run(); err != nil { return nil, false, fmt.Errorf("error running key check: %s", err) } result := engine.Tail(stdoutBuffer, 1) log.Debugf("Key check result: %q", result) if result == 
"verified" { verified = true } } return &manifest, verified, nil } func checkValidManifest(manifest *registry.ManifestData) error { if len(manifest.FSLayers) != len(manifest.History) { return fmt.Errorf("length of history not equal to number of layers") } if len(manifest.FSLayers) == 0 { return fmt.Errorf("no FSLayers in manifest") } return nil } docker-1.6.2/graph/load_unsupported.go0000644000175000017500000000031612524223634017353 0ustar tianontianon// +build !linux package graph import ( "github.com/docker/docker/engine" ) func (s *TagStore) CmdLoad(job *engine.Job) engine.Status { return job.Errorf("CmdLoad is not supported on this platform") } docker-1.6.2/graph/tags.go0000644000175000017500000002261512524223634014730 0ustar tianontianonpackage graph import ( "encoding/json" "errors" "fmt" "io/ioutil" "os" "path/filepath" "regexp" "sort" "strings" "sync" "github.com/docker/docker/image" "github.com/docker/docker/pkg/common" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/registry" "github.com/docker/docker/utils" "github.com/docker/libtrust" ) const DEFAULTTAG = "latest" var ( //FIXME these 2 regexes also exist in registry/v2/regexp.go validTagName = regexp.MustCompile(`^[\w][\w.-]{0,127}$`) validDigest = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`) ) type TagStore struct { path string graph *Graph Repositories map[string]Repository trustKey libtrust.PrivateKey sync.Mutex // FIXME: move push/pull-related fields // to a helper type pullingPool map[string]chan struct{} pushingPool map[string]chan struct{} } type Repository map[string]string // update Repository mapping with content of u func (r Repository) Update(u Repository) { for k, v := range u { r[k] = v } } // return true if the contents of u Repository, are wholly contained in r Repository func (r Repository) Contains(u Repository) bool { for k, v := range u { // if u's key is not present in r OR u's key is present, but not the same value if rv, ok := r[k]; !ok || (ok && rv != v) { return false } } return true } func NewTagStore(path string, graph *Graph, key libtrust.PrivateKey) (*TagStore, error) { abspath, err := filepath.Abs(path) if err != nil { return nil, err } store := &TagStore{ path: abspath, graph: graph, trustKey: key, Repositories: make(map[string]Repository), pullingPool: make(map[string]chan struct{}), pushingPool: make(map[string]chan struct{}), } // Load the json file if it exists, otherwise create it. if err := store.reload(); os.IsNotExist(err) { if err := store.save(); err != nil { return nil, err } } else if err != nil { return nil, err } return store, nil } func (store *TagStore) save() error { // Store the json ball jsonData, err := json.Marshal(store) if err != nil { return err } if err := ioutil.WriteFile(store.path, jsonData, 0600); err != nil { return err } return nil } func (store *TagStore) reload() error { jsonData, err := ioutil.ReadFile(store.path) if err != nil { return err } if err := json.Unmarshal(jsonData, store); err != nil { return err } return nil } func (store *TagStore) LookupImage(name string) (*image.Image, error) { // FIXME: standardize on returning nil when the image doesn't exist, and err for everything else // (so we can pass all errors here) repoName, ref := parsers.ParseRepositoryTag(name) if ref == "" { ref = DEFAULTTAG } var ( err error img *image.Image ) img, err = store.GetImage(repoName, ref) if err != nil { return nil, err } if img != nil { return img, err } // name must be an image ID. 
store.Lock() defer store.Unlock() if img, err = store.graph.Get(name); err != nil { return nil, err } return img, nil } // Return a reverse-lookup table of all the names which refer to each image // Eg. {"43b5f19b10584": {"base:latest", "base:v1"}} func (store *TagStore) ByID() map[string][]string { store.Lock() defer store.Unlock() byID := make(map[string][]string) for repoName, repository := range store.Repositories { for tag, id := range repository { name := utils.ImageReference(repoName, tag) if _, exists := byID[id]; !exists { byID[id] = []string{name} } else { byID[id] = append(byID[id], name) sort.Strings(byID[id]) } } } return byID } func (store *TagStore) ImageName(id string) string { if names, exists := store.ByID()[id]; exists && len(names) > 0 { return names[0] } return common.TruncateID(id) } func (store *TagStore) DeleteAll(id string) error { names, exists := store.ByID()[id] if !exists || len(names) == 0 { return nil } for _, name := range names { if strings.Contains(name, ":") { nameParts := strings.Split(name, ":") if _, err := store.Delete(nameParts[0], nameParts[1]); err != nil { return err } } else { if _, err := store.Delete(name, ""); err != nil { return err } } } return nil } func (store *TagStore) Delete(repoName, ref string) (bool, error) { store.Lock() defer store.Unlock() deleted := false if err := store.reload(); err != nil { return false, err } repoName = registry.NormalizeLocalName(repoName) if ref == "" { // Delete the whole repository. delete(store.Repositories, repoName) return true, store.save() } repoRefs, exists := store.Repositories[repoName] if !exists { return false, fmt.Errorf("No such repository: %s", repoName) } if _, exists := repoRefs[ref]; exists { delete(repoRefs, ref) if len(repoRefs) == 0 { delete(store.Repositories, repoName) } deleted = true } return deleted, store.save() } func (store *TagStore) Set(repoName, tag, imageName string, force bool) error { img, err := store.LookupImage(imageName) store.Lock() defer store.Unlock() if err != nil { return err } if tag == "" { tag = DEFAULTTAG } if err := validateRepoName(repoName); err != nil { return err } if err := ValidateTagName(tag); err != nil { return err } if err := store.reload(); err != nil { return err } var repo Repository repoName = registry.NormalizeLocalName(repoName) if r, exists := store.Repositories[repoName]; exists { repo = r if old, exists := store.Repositories[repoName][tag]; exists && !force { return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", tag, old) } } else { repo = make(map[string]string) store.Repositories[repoName] = repo } repo[tag] = img.ID return store.save() } // SetDigest creates a digest reference to an image ID. 
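// Illustrative helper (not in the original tree) showing how SetDigest pairs
// with LookupImage: once set, the digest resolves through the usual
// NAME@DIGEST form, exactly as exercised by the digestLookups cases in
// tags_unit_test.go above.
func pinByDigest(s *TagStore, repo, dgst, imgID string) (*image.Image, error) {
	if err := s.SetDigest(repo, dgst, imgID); err != nil {
		return nil, err
	}
	// ParseRepositoryTag splits NAME@DIGEST and GetImage then finds the
	// digest key in the repository map.
	return s.LookupImage(repo + "@" + dgst)
}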
func (store *TagStore) SetDigest(repoName, digest, imageName string) error { img, err := store.LookupImage(imageName) if err != nil { return err } if err := validateRepoName(repoName); err != nil { return err } if err := validateDigest(digest); err != nil { return err } store.Lock() defer store.Unlock() if err := store.reload(); err != nil { return err } repoName = registry.NormalizeLocalName(repoName) repoRefs, exists := store.Repositories[repoName] if !exists { repoRefs = Repository{} store.Repositories[repoName] = repoRefs } else if oldID, exists := repoRefs[digest]; exists && oldID != img.ID { return fmt.Errorf("Conflict: Digest %s is already set to image %s", digest, oldID) } repoRefs[digest] = img.ID return store.save() } func (store *TagStore) Get(repoName string) (Repository, error) { store.Lock() defer store.Unlock() if err := store.reload(); err != nil { return nil, err } repoName = registry.NormalizeLocalName(repoName) if r, exists := store.Repositories[repoName]; exists { return r, nil } return nil, nil } func (store *TagStore) GetImage(repoName, refOrID string) (*image.Image, error) { repo, err := store.Get(repoName) if err != nil { return nil, err } if repo == nil { return nil, nil } store.Lock() defer store.Unlock() if imgID, exists := repo[refOrID]; exists { return store.graph.Get(imgID) } // If no matching tag is found, search through images for a matching image id for _, revision := range repo { if strings.HasPrefix(revision, refOrID) { return store.graph.Get(revision) } } return nil, nil } func (store *TagStore) GetRepoRefs() map[string][]string { store.Lock() reporefs := make(map[string][]string) for name, repository := range store.Repositories { for tag, id := range repository { shortID := common.TruncateID(id) reporefs[shortID] = append(reporefs[shortID], utils.ImageReference(name, tag)) } } store.Unlock() return reporefs } // Validate the name of a repository func validateRepoName(name string) error { if name == "" { return fmt.Errorf("Repository name can't be empty") } if name == "scratch" { return fmt.Errorf("'scratch' is a reserved name") } return nil } // ValidateTagName validates the name of a tag func ValidateTagName(name string) error { if name == "" { return fmt.Errorf("tag name can't be empty") } if !validTagName.MatchString(name) { return fmt.Errorf("Illegal tag name (%s): only [A-Za-z0-9_.-] are allowed, minimum 1, maximum 128 in length", name) } return nil } func validateDigest(dgst string) error { if dgst == "" { return errors.New("digest can't be empty") } if !validDigest.MatchString(dgst) { return fmt.Errorf("illegal digest (%s): must be of the form [a-zA-Z0-9-_+.]+:[a-fA-F0-9]+", dgst) } return nil } func (store *TagStore) poolAdd(kind, key string) (chan struct{}, error) { store.Lock() defer store.Unlock() if c, exists := store.pullingPool[key]; exists { return c, fmt.Errorf("pull %s is already in progress", key) } if c, exists := store.pushingPool[key]; exists { return c, fmt.Errorf("push %s is already in progress", key) } c := make(chan struct{}) switch kind { case "pull": store.pullingPool[key] = c case "push": store.pushingPool[key] = c default: return nil, fmt.Errorf("Unknown pool type") } return c, nil } func (store *TagStore) poolRemove(kind, key string) error { store.Lock() defer store.Unlock() switch kind { case "pull": if c, exists := store.pullingPool[key]; exists { close(c) delete(store.pullingPool, key) } case "push": if c, exists := store.pushingPool[key]; exists { close(c) delete(store.pushingPool, key) } default: return 
fmt.Errorf("Unknown pool type") } return nil } docker-1.6.2/graph/pull.go0000644000175000017500000004571712524223634014756 0ustar tianontianonpackage graph import ( "fmt" "io" "io/ioutil" "net" "net/url" "os" "strings" "time" log "github.com/Sirupsen/logrus" "github.com/docker/distribution/digest" "github.com/docker/docker/engine" "github.com/docker/docker/image" "github.com/docker/docker/pkg/common" "github.com/docker/docker/pkg/progressreader" "github.com/docker/docker/registry" "github.com/docker/docker/utils" ) func (s *TagStore) CmdPull(job *engine.Job) engine.Status { if n := len(job.Args); n != 1 && n != 2 { return job.Errorf("Usage: %s IMAGE [TAG|DIGEST]", job.Name) } var ( localName = job.Args[0] tag string sf = utils.NewStreamFormatter(job.GetenvBool("json")) authConfig = ®istry.AuthConfig{} metaHeaders map[string][]string ) // Resolve the Repository name from fqn to RepositoryInfo repoInfo, err := registry.ResolveRepositoryInfo(job, localName) if err != nil { return job.Error(err) } if len(job.Args) > 1 { tag = job.Args[1] } job.GetenvJson("authConfig", authConfig) job.GetenvJson("metaHeaders", &metaHeaders) c, err := s.poolAdd("pull", utils.ImageReference(repoInfo.LocalName, tag)) if err != nil { if c != nil { // Another pull of the same repository is already taking place; just wait for it to finish job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", repoInfo.LocalName)) <-c return engine.StatusOK } return job.Error(err) } defer s.poolRemove("pull", utils.ImageReference(repoInfo.LocalName, tag)) log.Debugf("pulling image from host %q with remote name %q", repoInfo.Index.Name, repoInfo.RemoteName) endpoint, err := repoInfo.GetEndpoint() if err != nil { return job.Error(err) } r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true) if err != nil { return job.Error(err) } logName := repoInfo.LocalName if tag != "" { logName = utils.ImageReference(logName, tag) } if len(repoInfo.Index.Mirrors) == 0 && (repoInfo.Index.Official || endpoint.Version == registry.APIVersion2) { if repoInfo.Official { j := job.Eng.Job("trust_update_base") if err = j.Run(); err != nil { log.Errorf("error updating trust base graph: %s", err) } } log.Debugf("pulling v2 repository with local name %q", repoInfo.LocalName) if err := s.pullV2Repository(job.Eng, r, job.Stdout, repoInfo, tag, sf, job.GetenvBool("parallel")); err == nil { if err = job.Eng.Job("log", "pull", logName, "").Run(); err != nil { log.Errorf("Error logging event 'pull' for %s: %s", logName, err) } return engine.StatusOK } else if err != registry.ErrDoesNotExist && err != ErrV2RegistryUnavailable { log.Errorf("Error from V2 registry: %s", err) } log.Debug("image does not exist on v2 registry, falling back to v1") } log.Debugf("pulling v1 repository with local name %q", repoInfo.LocalName) if err = s.pullRepository(r, job.Stdout, repoInfo, tag, sf, job.GetenvBool("parallel")); err != nil { return job.Error(err) } if err = job.Eng.Job("log", "pull", logName, "").Run(); err != nil { log.Errorf("Error logging event 'pull' for %s: %s", logName, err) } return engine.StatusOK } func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *registry.RepositoryInfo, askedTag string, sf *utils.StreamFormatter, parallel bool) error { out.Write(sf.FormatStatus("", "Pulling repository %s", repoInfo.CanonicalName)) repoData, err := r.GetRepositoryData(repoInfo.RemoteName) if err != nil { if strings.Contains(err.Error(), "HTTP code: 404") { 
return fmt.Errorf("Error: image %s not found", utils.ImageReference(repoInfo.RemoteName, askedTag)) } // Unexpected HTTP error return err } log.Debugf("Retrieving the tag list") tagsList, err := r.GetRemoteTags(repoData.Endpoints, repoInfo.RemoteName, repoData.Tokens) if err != nil { log.Errorf("unable to get remote tags: %s", err) return err } for tag, id := range tagsList { repoData.ImgList[id] = ®istry.ImgData{ ID: id, Tag: tag, Checksum: "", } } log.Debugf("Registering tags") // If no tag has been specified, pull them all if askedTag == "" { for tag, id := range tagsList { repoData.ImgList[id].Tag = tag } } else { // Otherwise, check that the tag exists and use only that one id, exists := tagsList[askedTag] if !exists { return fmt.Errorf("Tag %s not found in repository %s", askedTag, repoInfo.CanonicalName) } repoData.ImgList[id].Tag = askedTag } errors := make(chan error) layers_downloaded := false for _, image := range repoData.ImgList { downloadImage := func(img *registry.ImgData) { if askedTag != "" && img.Tag != askedTag { if parallel { errors <- nil } return } if img.Tag == "" { log.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID) if parallel { errors <- nil } return } // ensure no two downloads of the same image happen at the same time if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil { if c != nil { out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil)) <-c out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Download complete", nil)) } else { log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err) } if parallel { errors <- nil } return } defer s.poolRemove("pull", "img:"+img.ID) out.Write(sf.FormatProgress(common.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, repoInfo.CanonicalName), nil)) success := false var lastErr, err error var is_downloaded bool for _, ep := range repoInfo.Index.Mirrors { out.Write(sf.FormatProgress(common.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, repoInfo.CanonicalName, ep), nil)) if is_downloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil { // Don't report errors when pulling from mirrors. log.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, repoInfo.CanonicalName, ep, err) continue } layers_downloaded = layers_downloaded || is_downloaded success = true break } if !success { for _, ep := range repoData.Endpoints { out.Write(sf.FormatProgress(common.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, repoInfo.CanonicalName, ep), nil)) if is_downloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil { // It's not ideal that only the last error is returned, it would be better to concatenate the errors. // As the error is also given to the output stream the user will see the error. 
lastErr = err out.Write(sf.FormatProgress(common.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, repoInfo.CanonicalName, ep, err), nil)) continue } layers_downloaded = layers_downloaded || is_downloaded success = true break } } if !success { err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, repoInfo.CanonicalName, lastErr) out.Write(sf.FormatProgress(common.TruncateID(img.ID), err.Error(), nil)) if parallel { errors <- err return } } out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Download complete", nil)) if parallel { errors <- nil } } if parallel { go downloadImage(image) } else { downloadImage(image) } } if parallel { var lastError error for i := 0; i < len(repoData.ImgList); i++ { if err := <-errors; err != nil { lastError = err } } if lastError != nil { return lastError } } for tag, id := range tagsList { if askedTag != "" && tag != askedTag { continue } if err := s.Set(repoInfo.LocalName, tag, id, true); err != nil { return err } } requestedTag := repoInfo.CanonicalName if len(askedTag) > 0 { requestedTag = utils.ImageReference(repoInfo.CanonicalName, askedTag) } WriteStatus(requestedTag, out, sf, layers_downloaded) return nil } func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint string, token []string, sf *utils.StreamFormatter) (bool, error) { history, err := r.GetRemoteHistory(imgID, endpoint, token) if err != nil { return false, err } out.Write(sf.FormatProgress(common.TruncateID(imgID), "Pulling dependent layers", nil)) // FIXME: Try to stream the images? // FIXME: Launch the getRemoteImage() in goroutines layers_downloaded := false for i := len(history) - 1; i >= 0; i-- { id := history[i] // ensure no two downloads of the same layer happen at the same time if c, err := s.poolAdd("pull", "layer:"+id); err != nil { log.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err) <-c } defer s.poolRemove("pull", "layer:"+id) if !s.graph.Exists(id) { out.Write(sf.FormatProgress(common.TruncateID(id), "Pulling metadata", nil)) var ( imgJSON []byte imgSize int err error img *image.Image ) retries := 5 for j := 1; j <= retries; j++ { imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token) if err != nil && j == retries { out.Write(sf.FormatProgress(common.TruncateID(id), "Error pulling dependent layers", nil)) return layers_downloaded, err } else if err != nil { time.Sleep(time.Duration(j) * 500 * time.Millisecond) continue } img, err = image.NewImgJSON(imgJSON) layers_downloaded = true if err != nil && j == retries { out.Write(sf.FormatProgress(common.TruncateID(id), "Error pulling dependent layers", nil)) return layers_downloaded, fmt.Errorf("Failed to parse json: %s", err) } else if err != nil { time.Sleep(time.Duration(j) * 500 * time.Millisecond) continue } else { break } } for j := 1; j <= retries; j++ { // Get the layer status := "Pulling fs layer" if j > 1 { status = fmt.Sprintf("Pulling fs layer [retries: %d]", j) } out.Write(sf.FormatProgress(common.TruncateID(id), status, nil)) layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token, int64(imgSize)) if uerr, ok := err.(*url.Error); ok { err = uerr.Err } if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries { time.Sleep(time.Duration(j) * 500 * time.Millisecond) continue } else if err != nil { out.Write(sf.FormatProgress(common.TruncateID(id), "Error pulling dependent layers", nil)) return layers_downloaded, err } layers_downloaded = true defer layer.Close() err = s.graph.Register(img, 
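// Register consumes the layer stream while unpacking it, so the "Downloading"
// progress reported by the reader below covers fetch and extraction in one pass.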
progressreader.New(progressreader.Config{ In: layer, Out: out, Formatter: sf, Size: imgSize, NewLines: false, ID: common.TruncateID(id), Action: "Downloading", })) if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries { time.Sleep(time.Duration(j) * 500 * time.Millisecond) continue } else if err != nil { out.Write(sf.FormatProgress(common.TruncateID(id), "Error downloading dependent layers", nil)) return layers_downloaded, err } else { break } } } out.Write(sf.FormatProgress(common.TruncateID(id), "Download complete", nil)) } return layers_downloaded, nil } func WriteStatus(requestedTag string, out io.Writer, sf *utils.StreamFormatter, layers_downloaded bool) { if layers_downloaded { out.Write(sf.FormatStatus("", "Status: Downloaded newer image for %s", requestedTag)) } else { out.Write(sf.FormatStatus("", "Status: Image is up to date for %s", requestedTag)) } } // downloadInfo is used to pass information from download to extractor type downloadInfo struct { imgJSON []byte img *image.Image digest digest.Digest tmpFile *os.File length int64 downloaded bool err chan error } func (s *TagStore) pullV2Repository(eng *engine.Engine, r *registry.Session, out io.Writer, repoInfo *registry.RepositoryInfo, tag string, sf *utils.StreamFormatter, parallel bool) error { endpoint, err := r.V2RegistryEndpoint(repoInfo.Index) if err != nil { if repoInfo.Index.Official { log.Debugf("Unable to pull from V2 registry, falling back to v1: %s", err) return ErrV2RegistryUnavailable } return fmt.Errorf("error getting registry endpoint: %s", err) } auth, err := r.GetV2Authorization(endpoint, repoInfo.RemoteName, true) if err != nil { return fmt.Errorf("error getting authorization: %s", err) } var layersDownloaded bool if tag == "" { log.Debugf("Pulling tag list from V2 registry for %s", repoInfo.CanonicalName) tags, err := r.GetV2RemoteTags(endpoint, repoInfo.RemoteName, auth) if err != nil { return err } if len(tags) == 0 { return registry.ErrDoesNotExist } for _, t := range tags { if downloaded, err := s.pullV2Tag(eng, r, out, endpoint, repoInfo, t, sf, parallel, auth); err != nil { return err } else if downloaded { layersDownloaded = true } } } else { if downloaded, err := s.pullV2Tag(eng, r, out, endpoint, repoInfo, tag, sf, parallel, auth); err != nil { return err } else if downloaded { layersDownloaded = true } } requestedTag := repoInfo.CanonicalName if len(tag) > 0 { requestedTag = utils.ImageReference(repoInfo.CanonicalName, tag) } WriteStatus(requestedTag, out, sf, layersDownloaded) return nil } func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Writer, endpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo, tag string, sf *utils.StreamFormatter, parallel bool, auth *registry.RequestAuthorization) (bool, error) { log.Debugf("Pulling tag from V2 registry: %q", tag) manifestBytes, manifestDigest, err := r.GetV2ImageManifest(endpoint, repoInfo.RemoteName, tag, auth) if err != nil { return false, err } // loadManifest ensures that the manifest payload has the expected digest // if the tag is a digest reference. 
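// It also checks the libtrust signatures: `verified` comes back true only when
// a trusted key vouches for the manifest, and it may still be demoted below if
// a layer checksum fails to match.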
manifest, verified, err := s.loadManifest(eng, manifestBytes, manifestDigest, tag) if err != nil { return false, fmt.Errorf("error verifying manifest: %s", err) } if err := checkValidManifest(manifest); err != nil { return false, err } if verified { log.Printf("Image manifest for %s has been verified", utils.ImageReference(repoInfo.CanonicalName, tag)) } out.Write(sf.FormatStatus(tag, "Pulling from %s", repoInfo.CanonicalName)) downloads := make([]downloadInfo, len(manifest.FSLayers)) for i := len(manifest.FSLayers) - 1; i >= 0; i-- { var ( sumStr = manifest.FSLayers[i].BlobSum imgJSON = []byte(manifest.History[i].V1Compatibility) ) img, err := image.NewImgJSON(imgJSON) if err != nil { return false, fmt.Errorf("failed to parse json: %s", err) } downloads[i].img = img // Check if exists if s.graph.Exists(img.ID) { log.Debugf("Image already exists: %s", img.ID) continue } dgst, err := digest.ParseDigest(sumStr) if err != nil { return false, err } downloads[i].digest = dgst out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Pulling fs layer", nil)) downloadFunc := func(di *downloadInfo) error { log.Debugf("pulling blob %q to V1 img %s", sumStr, img.ID) if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil { if c != nil { out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil)) <-c out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Download complete", nil)) } else { log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err) } } else { defer s.poolRemove("pull", "img:"+img.ID) tmpFile, err := ioutil.TempFile("", "GetV2ImageBlob") if err != nil { return err } r, l, err := r.GetV2ImageBlobReader(endpoint, repoInfo.RemoteName, di.digest.Algorithm(), di.digest.Hex(), auth) if err != nil { return err } defer r.Close() verifier, err := digest.NewDigestVerifier(di.digest) if err != nil { return err } if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{ In: ioutil.NopCloser(io.TeeReader(r, verifier)), Out: out, Formatter: sf, Size: int(l), NewLines: false, ID: common.TruncateID(img.ID), Action: "Downloading", })); err != nil { return fmt.Errorf("unable to copy v2 image blob data: %s", err) } out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Verifying Checksum", nil)) if !verifier.Verified() { log.Infof("Image verification failed: checksum mismatch for %q", di.digest.String()) verified = false } out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Download complete", nil)) log.Debugf("Downloaded %s to tempfile %s", img.ID, tmpFile.Name()) di.tmpFile = tmpFile di.length = l di.downloaded = true } di.imgJSON = imgJSON return nil } if parallel { downloads[i].err = make(chan error) go func(di *downloadInfo) { di.err <- downloadFunc(di) }(&downloads[i]) } else { err := downloadFunc(&downloads[i]) if err != nil { return false, err } } } var tagUpdated bool for i := len(downloads) - 1; i >= 0; i-- { d := &downloads[i] if d.err != nil { err := <-d.err if err != nil { return false, err } } if d.downloaded { // if tmpFile is nil, assume the layer was downloaded and extracted elsewhere if d.tmpFile != nil { defer os.Remove(d.tmpFile.Name()) defer d.tmpFile.Close() d.tmpFile.Seek(0, 0) err = s.graph.Register(d.img, progressreader.New(progressreader.Config{ In: d.tmpFile, Out: out, Formatter: sf, Size: int(d.length), ID: common.TruncateID(d.img.ID), Action: "Extracting", })) if err != nil { return false, err } // FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully
extracted) } out.Write(sf.FormatProgress(common.TruncateID(d.img.ID), "Pull complete", nil)) tagUpdated = true } else { out.Write(sf.FormatProgress(common.TruncateID(d.img.ID), "Already exists", nil)) } } // Check for new tag if no layers downloaded if !tagUpdated { repo, err := s.Get(repoInfo.LocalName) if err != nil { return false, err } if repo != nil { if _, exists := repo[tag]; !exists { tagUpdated = true } } } if verified && tagUpdated { out.Write(sf.FormatStatus(utils.ImageReference(repoInfo.CanonicalName, tag), "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security.")) } if manifestDigest != "" { out.Write(sf.FormatStatus("", "Digest: %s", manifestDigest)) } if utils.DigestReference(tag) { if err = s.SetDigest(repoInfo.LocalName, tag, downloads[0].img.ID); err != nil { return false, err } } else { // only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest) if err = s.Set(repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil { return false, err } } return tagUpdated, nil } docker-1.6.2/MAINTAINERS0000644000175000017500000004627112524223634014043 0ustar tianontianon# Docker maintainers file # # This file describes who runs the Docker project and how. # This is a living document - if you see something out of date or missing, # speak up! # # It is structured to be consumable by both humans and programs. # To extract its contents programmatically, use any TOML-compliant # parser. [Rules] [Rules.maintainers] title = "What is a maintainer?" text = """ There are different types of maintainers, with different responsibilities, but all maintainers have 3 things in common: 1) They share responsibility in the project's success. 2) They have made a long-term, recurring time investment to improve the project. 3) They spend that time doing whatever needs to be done, not necessarily what is the most interesting or fun. Maintainers are often under-appreciated, because their work is harder to appreciate. It's easy to appreciate a really cool and technically advanced feature. It's harder to appreciate the absence of bugs, the slow but steady improvement in stability, or the reliability of a release process. But those things distinguish a good project from a great one. """ [Rules.bdfl] title = "The Benevolent dictator for life (BDFL)" text = """ Docker follows the timeless, highly efficient and totally unfair system known as [Benevolent dictator for life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with yours truly, Solomon Hykes, in the role of BDFL. This means that all decisions are made, by default, by Solomon. Since making every decision myself would be highly un-scalable, in practice decisions are spread across multiple maintainers. Ideally, the BDFL role is like the Queen of England: awesome crown, but not an actual operational role day-to-day. The real job of a BDFL is to NEVER GO AWAY. Every other rule can change, perhaps drastically so, but the BDFL will always be there, preserving the philosophy and principles of the project, and keeping ultimate authority over its fate. This gives us great flexibility in experimenting with various governance models, knowing that we can always press the "reset" button without fear of fragmentation or deadlock. See the US congress for a counter-example. BDFL daily routine: * Is the project governance stuck in a deadlock or irreversibly fragmented? 
* If yes: refactor the project governance * Are there issues or conflicts escalated by core? * If yes: resolve them * Go back to polishing that crown. """ [Rules.decisions] title = "How are decisions made?" text = """ Short answer: EVERYTHING IS A PULL REQUEST. Docker is an open-source project with an open design philosophy. This means that the repository is the source of truth for EVERY aspect of the project, including its philosophy, design, road map, and APIs. *If it's part of the project, it's in the repo. If it's in the repo, it's part of the project.* As a result, all decisions can be expressed as changes to the repository. An implementation change is a change to the source code. An API change is a change to the API specification. A philosophy change is a change to the philosophy manifesto, and so on. All decisions affecting Docker, big and small, follow the same 3 steps: * Step 1: Open a pull request. Anyone can do this. * Step 2: Discuss the pull request. Anyone can do this. * Step 3: Merge or refuse the pull request. Who does this depends on the nature of the pull request and which areas of the project it affects. See *review flow* for details. Because Docker is such a large and active project, it's important for everyone to know who is responsible for deciding what. That is determined by a precise set of rules. * For every *decision* in the project, the rules should designate, in a deterministic way, who should *decide*. * For every *problem* in the project, the rules should designate, in a deterministic way, who should be responsible for *fixing* it. * For every *question* in the project, the rules should designate, in a deterministic way, who should be expected to have the *answer*. """ [Rules.review] title = "Review flow" text = """ Pull requests should be processed according to the following flow: * For each subsystem affected by the change, the maintainers of the subsystem must approve or refuse it. It is the responsibility of the subsystem maintainers to process patches affecting them in a timely manner. * If the change affects areas of the code which are not part of a subsystem, or if subsystem maintainers are unable to reach a timely decision, it must be approved by the core maintainers. * If the change affects the UI or public APIs, or if it represents a major change in architecture, the architects must approve or refuse it. * If the change affects the operations of the project, it must be approved or rejected by the relevant operators. * If the change affects the governance, philosophy, goals or principles of the project, it must be approved by the BDFL. * A pull request can be in 1 of 5 distinct states, for each of which there is a corresponding label that needs to be applied. `Rules.review.states` contains the list of states with possible targets for each. """ # Triage [Rules.review.states.0-triage] # Maintainers are expected to triage new incoming pull requests by removing # the `0-triage` label and adding the correct labels (e.g. `1-design-review`) # potentially skipping some steps depending on the kind of pull request. # Use common sense for judging. # # Checking for DCO should be done at this stage. # # If an owner, responsible for closing or merging, can be assigned to the PR, # so much the better. close = "e.g. unresponsive contributor without DCO" 3-docs-review = "non-proposal documentation-only change" 2-code-review = "e.g.
trivial bugfix" 1-design-review = "general case" # Design review [Rules.review.states.1-design-review] # Maintainers are expected to comment on the design of the pull request. # Review of documentation is expected only in the context of design validation, # not for stylistic changes. # # Ideally, documentation should reflect the expected behavior of the code. # No code review should take place in this step. # # Once design is approved, a maintainer should make sure to remove this label # and add the next one. close = "design rejected" 3-docs-review = "proposals with only documentation changes" 2-code-review = "general case" # Code review [Rules.review.states.2-code-review] # Maintainers are expected to review the code and ensure that it is good # quality and in accordance with the documentation in the PR. # # If documentation is absent but expected, maintainers should ask for documentation. # # All tests should pass. # # Once code is approved according to the rules of the subsystem, a maintainer # should make sure to remove this label and add the next one. close = "" 1-design-review = "raises design concerns" 4-merge = "trivial change not impacting documentation" 3-docs-review = "general case" # Docs review [Rules.review.states.3-docs-review] # Maintainers are expected to review the documentation in its bigger context, # ensuring consistency, completeness, validity, and breadth of coverage across # all extant and new documentation. # # They should ask for any editorial change that makes the documentation more # consistent and easier to understand. # # Once documentation is approved (see below), a maintainer should make sure to remove this # label and add the next one. close = "" 2-code-review = "requires more code changes" 1-design-review = "raises design concerns" 4-merge = "general case" # Docs approval [Rules.review.docs-approval] # Changes and additions to docs must be reviewed and approved (LGTM'd) by a minimum of two docs sub-project maintainers. # If the docs change originates with a docs maintainer, only one additional LGTM is required (since we assume a docs maintainer approves of their own PR). # Merge [Rules.review.states.4-merge] # Maintainers are expected to merge this pull request as soon as possible. # They can ask for a rebase, or carry the pull request themselves. # These should be the easy PRs to merge. close = "carry PR" merge = "" [Rules.DCO] title = "Helping contributors with the DCO" text = """ The [DCO or `Sign your work`]( https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work) requirement is not intended as a roadblock or speed bump. Some Docker contributors are not as familiar with `git`, or have used a web-based editor, and thus asking them to `git commit --amend -s` is not the best way forward. In this case, maintainers can update the commits based on clause (c) of the DCO. The most trivial way for a contributor to allow the maintainer to do this is to add a DCO signature in a Pull Request's comment, or a maintainer can simply note that the change is sufficiently trivial that it does not substantively change the existing contribution - i.e., a spelling change. When you add someone's DCO, please also add your own to keep a log. """ [Rules.holiday] title = "I'm a maintainer, and I'm going on holiday" text = """ Please let your co-maintainers and other contributors know by raising a pull request that comments out your `MAINTAINERS` file entry using a `#`. """ [Rules."no direct push"] title = "I'm a maintainer. Should I make pull requests too?"
text = """ Yes. Nobody should ever push to master directly. All changes should be made through a pull request. """ [Rules.meta] title = "How is this process changed?" text = "Just like everything else: by making a pull request :)" # Current project organization [Org] bdfl = "shykes" # The chief architect is responsible for the overall integrity of the technical architecture # across all subsystems, and the consistency of APIs and UI. # # Changes to UI, public APIs and overall architecture (for example a plugin system) must # be approved by the chief architect. "Chief Architect" = "shykes" # The Chief Operator is responsible for the day-to-day operations of the project including: # - facilitating communications amongst all the contributors; # - tracking release schedules; # - managing the relationship with downstream distributions and upstream dependencies; # - helping new contributors to get involved and become successful contributors and maintainers # # The role is also responsible for managing and measuring the success of the overall project # and ensuring it is governed properly, working in concert with the Docker Governance Advisory Board (DGAB). "Chief Operator" = "spf13" [Org.Operators] # The operators make sure the trains run on time. They are responsible for overall operations # of the project. This includes facilitating communication between all the participants; helping # newcomers get involved and become successful contributors and maintainers; tracking the schedule # of releases; managing the relationship with downstream distributions and upstream dependencies; # defining measures of success for the project and measuring progress; and devising and implementing # tools and processes which make contributors and maintainers happier and more efficient. [Org.Operators.security] people = [ "erw" ] [Org.Operators."monthly meetings"] people = [ "sven", "tianon" ] [Org.Operators.infrastructure] people = [ "jfrazelle", "crosbymichael" ] # The chief maintainer is responsible for all aspects of quality for the project including # code reviews, usability, stability, security, performance, etc. # The most important function of the chief maintainer is to lead by example. On the first # day of a new maintainer, the best advice should be "follow the C.M.'s example and you'll # be fine". "Chief Maintainer" = "crosbymichael" [Org."Core maintainers"] # The Core maintainers are the ghostbusters of the project: when there's a problem others # can't solve, they show up and fix it with bizarre devices and weaponry. # They have final say on technical implementation and coding style. # They are ultimately responsible for quality in all its forms: usability polish, # bugfixes, performance, stability, etc. When ownership can cleanly be passed to # a subsystem, they are responsible for doing so and holding the # subsystem maintainers accountable. If ownership is unclear, they are the de facto owners. # For each release (including minor releases), a "release captain" is assigned from the # pool of core maintainers. Rotation is encouraged across all maintainers, to ensure # the release process is clear and up-to-date. # # It is common for core maintainers to "branch out" to join or start a subsystem. people = [ "unclejack", "crosbymichael", "erikh", "estesp", "icecrime", "jfrazelle", "lk4d4", "tibor", "vbatts", "vieux", "vishh" ] [Org.Subsystems] # As the project grows, it gets separated into well-defined subsystems.
Each subsystem # has a dedicated group of maintainers, who are dedicated to that subsystem and responsible # for its quality. # This "cellular division" is the primary mechanism for scaling maintenance of the project as it grows. # # The maintainers of each subsystem are responsible for: # # 1. Exposing a clear road map for improving their subsystem. # 2. Delivering prompt feedback and decisions on pull requests affecting their subsystem. # 3. Being available to anyone with questions, bug reports, criticism etc. # on their component. This includes IRC, GitHub requests and the mailing # list. # 4. Making sure their subsystem respects the philosophy, design and # road map of the project. # # #### How to review patches to your subsystem # # Accepting pull requests: # # - If the pull request appears to be ready to merge, give it a `LGTM`, which # stands for "Looks Good To Me". # - If the pull request has some small problems that need to be changed, make # a comment addressing the issues. # - If the changes needed to a PR are small, you can add a "LGTM once the # following comments are addressed..."; this will reduce needless back and # forth. # - If the PR only needs a few changes before being merged, any MAINTAINER can # make a replacement PR that incorporates the existing commits and fixes the # problems before a fast-track merge. # # Closing pull requests: # # - If a PR appears to be abandoned, after having attempted to contact the # original contributor, then a replacement PR may be made. Once the # replacement PR is made, any contributor may close the original one. # - If you are not sure if the pull request implements a good feature or you # do not understand the purpose of the PR, ask the contributor to provide # more documentation. If the contributor is not able to adequately explain # the purpose of the PR, the PR may be closed by any MAINTAINER. # - If a MAINTAINER feels that the pull request is sufficiently architecturally # flawed, or if the pull request needs significantly more design discussion # before being considered, the MAINTAINER should close the pull request with # a short explanation of what discussion still needs to be had. It is # important not to leave such pull requests open, as this will waste both the # MAINTAINER's time and the contributor's time. It is not good to string a # contributor along for weeks or months, having them make many changes to a PR # that will eventually be rejected. [Org.Subsystems.Documentation] people = [ "fredlf", "james", "sven", "spf13", "mary" ] [Org.Subsystems.libcontainer] people = [ "crosbymichael", "vmarmol", "mpatel", "jnagal", "lk4d4" ] [Org.Subsystems.registry] people = [ "dmp42", "vbatts", "joffrey", "samalba", "sday", "jlhawn", "dmcg" ] [Org.Subsystems."build tools"] people = [ "shykes", "tianon" ] [Org.Subsystems."remote api"] people = [ "vieux" ] [Org.Subsystems.swarm] people = [ "aluzzardi", "vieux" ] [Org.Subsystems.machine] people = [ "bfirsh", "ehazlett" ] [Org.Subsystems.compose] people = [ "aanand" ] [Org.Subsystems.builder] people = [ "erikh", "tibor", "duglin" ] [people] # A reference list of all people associated with the project. # All other sections should refer to people by their canonical key # in the people section.
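# Every entry below follows the same shape; for example (hypothetical):
#
#   [people.example]
#   Name = "Full Name"
#   Email = "user@example.com"
#   GitHub = "example"
#
# TOML keys are case-sensitive, and the entries below use both `GitHub` and
# `Github`, so programmatic consumers should accept either spelling.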
# ADD YOURSELF HERE IN ALPHABETICAL ORDER [people.aanand] Name = "Aanand Prasad" Email = "aanand@docker.com" GitHub = "aanand" [people.aluzzardi] Name = "Andrea Luzzardi" Email = "aluzzardi@docker.com" GitHub = "aluzzardi" [people.bfirsh] Name = "Ben Firshman" Email = "ben@firshman.co.uk" GitHub = "bfirsh" [people.crosbymichael] Name = "Michael Crosby" Email = "crosbymichael@gmail.com" GitHub = "crosbymichael" [people.duglin] Name = "Doug Davis" Email = "dug@us.ibm.com" GitHub = "duglin" [people.dmcg] Name = "Derek McGowan" Email = "derek@docker.com" Github = "dmcgowan" [people.dmp42] Name = "Olivier Gambier" Email = "olivier@docker.com" Github = "dmp42" [people.ehazlett] Name = "Evan Hazlett" Email = "ejhazlett@gmail.com" GitHub = "ehazlett" [people.erikh] Name = "Erik Hollensbe" Email = "erik@docker.com" GitHub = "erikh" [people.erw] Name = "Eric Windisch" Email = "eric@windisch.us" GitHub = "ewindisch" [people.estesp] Name = "Phil Estes" Email = "estesp@linux.vnet.ibm.com" GitHub = "estesp" [people.fredlf] Name = "Fred Lifton" Email = "fred.lifton@docker.com" GitHub = "fredlf" [people.icecrime] Name = "Arnaud Porterie" Email = "arnaud@docker.com" GitHub = "icecrime" [people.jfrazelle] Name = "Jessie Frazelle" Email = "jess@docker.com" GitHub = "jfrazelle" [people.jlhawn] Name = "Josh Hawn" Email = "josh.hawn@docker.com" Github = "jlhawn" [people.joffrey] Name = "Joffrey Fuhrer" Email = "joffrey@docker.com" Github = "shin-" [people.lk4d4] Name = "Alexander Morozov" Email = "lk4d4@docker.com" GitHub = "lk4d4" [people.mary] Name = "Mary Anthony" Email = "mary.anthony@docker.com" GitHub = "moxiegirl" [people.sday] Name = "Stephen Day" Email = "stephen.day@docker.com" Github = "stevvooe" [people.shykes] Name = "Solomon Hykes" Email = "solomon@docker.com" GitHub = "shykes" [people.spf13] Name = "Steve Francia" Email = "steve.francia@gmail.com" GitHub = "spf13" [people.sven] Name = "Sven Dowideit" Email = "SvenDowideit@home.org.au" GitHub = "SvenDowideit" [people.tianon] Name = "Tianon Gravi" Email = "admwiggin@gmail.com" GitHub = "tianon" [people.tibor] Name = "Tibor Vass" Email = "tibor@docker.com" GitHub = "tiborvass" [people.vbatts] Name = "Vincent Batts" Email = "vbatts@redhat.com" GitHub = "vbatts" [people.vieux] Name = "Victor Vieux" Email = "vieux@docker.com" GitHub = "vieux" [people.vmarmol] Name = "Victor Marmol" Email = "vmarmol@google.com" GitHub = "vmarmol" [people.jnagal] Name = "Rohit Jnagal" Email = "jnagal@google.com" GitHub = "rjnagal" [people.mpatel] Name = "Mrunal Patel" Email = "mpatel@redhat.com" GitHub = "mrunalp" [people.unclejack] Name = "Cristian Staretu" Email = "cristian.staretu@gmail.com" GitHub = "unclejack" [people.vishh] Name = "Vishnu Kannan" Email = "vishnuk@google.com" GitHub = "vishh" docker-1.6.2/LICENSE0000644000175000017500000002501312524223634013342 0ustar tianontianon Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS Copyright 2013-2015 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. docker-1.6.2/docs/0000755000175000017500000000000012527277712013275 5ustar tianontianondocker-1.6.2/docs/test.sh0000755000175000017500000000016312524223634014602 0ustar tianontianon#!/bin/sh mkdocs serve & echo "Waiting for 5 seconds to allow mkdocs server to be ready" sleep 5 ./docvalidate.py docker-1.6.2/docs/release.sh0000755000175000017500000001132312524223634015243 0ustar tianontianon#!/bin/bash set -e set -o pipefail usage() { cat >&2 <<'EOF' To publish the Docker documentation you need to set your access_key and secret_key in the docs/awsconfig file (with the keys in a [profile $AWS_S3_BUCKET] section - so you can have more than one set of keys in your file) and set the AWS_S3_BUCKET env var to the name of your bucket. If you're publishing the current release's documentation, also set `BUILD_ROOT=yes` make AWS_S3_BUCKET=docs-stage.docker.com docs-release will then push the documentation site to your s3 bucket. Note: you can add `OPTIONS=--dryrun` to see what will be done without sending to the server You can also add NOCACHE=1 to publish without a cache, which is what we do for the master docs. EOF exit 1 } create_robots_txt() { if [ "$AWS_S3_BUCKET" == "docs.docker.com" ]; then cat > ./sources/robots.txt <<-'EOF' User-agent: * Allow: / EOF else cat > ./sources/robots.txt <<-'EOF' User-agent: * Disallow: / EOF fi } setup_s3() { # Try creating the bucket. Ignore errors (it might already exist). 
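# Note: the aws CLI profile name is assumed to match the bucket name.
# main() exports AWS_DEFAULT_PROFILE=$BUCKET below, so docs/awsconfig must
# contain a matching [profile <bucket>] section for these commands to work.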
echo "create $BUCKET if it does not exist" aws s3 mb --profile $BUCKET s3://$BUCKET 2>/dev/null || true # Check access to the bucket. echo "test $BUCKET exists" aws s3 --profile $BUCKET ls s3://$BUCKET # Make the bucket accessible through website endpoints. echo "make $BUCKET accessible as a website" #aws s3 website s3://$BUCKET --index-document index.html --error-document jsearch/index.html local s3conf=$(cat s3_website.json | envsubst) aws s3api --profile $BUCKET put-bucket-website --bucket $BUCKET --website-configuration "$s3conf" } build_current_documentation() { mkdocs build cd site/ gzip -9k -f search_content.json cd .. } upload_current_documentation() { src=site/ dst=s3://$BUCKET$1 cache=max-age=3600 if [ "$NOCACHE" ]; then cache=no-cache fi printf "\nUploading $src to $dst\n" # a really complicated way to send only the files we want # if there are too many in any one set, aws s3 sync seems to fall over with 2 files to go # versions.html_fragment include="--recursive --include \"*.$i\" " run="aws s3 cp $src $dst $OPTIONS --profile $BUCKET --cache-control $cache --acl public-read $include" printf "\n=====\n$run\n=====\n" $run # Make sure the search_content.json.gz file has the right content-encoding aws s3 cp --profile $BUCKET --cache-control $cache --content-encoding="gzip" --acl public-read "site/search_content.json.gz" "$dst" } invalidate_cache() { if [[ -z "$DISTRIBUTION_ID" ]]; then echo "Skipping CloudFront cache invalidation" return fi dst=$1 aws configure set preview.cloudfront true # Get all the files # not .md~ files # replace spaces with %20 so the paths are URL-encoded files=( $(find site/ -not -name "*.md*" -type f | sed 's/site//g' | sed 's/ /%20/g') ) len=${#files[@]} last_file=${files[$((len-1))]} echo "aws cloudfront create-invalidation --profile $AWS_S3_BUCKET --distribution-id $DISTRIBUTION_ID --invalidation-batch '" > batchfile echo "{\"Paths\":{\"Quantity\":$len," >> batchfile echo "\"Items\": [" >> batchfile for file in "${files[@]}" ; do if [[ $file == $last_file ]]; then comma="" else comma="," fi echo "\"$dst$file\"$comma" >> batchfile done echo "]}, \"CallerReference\":\"$(date)\"}'" >> batchfile sh batchfile } main() { [ "$AWS_S3_BUCKET" ] || usage # Make sure there is an awsconfig file export AWS_CONFIG_FILE=$(pwd)/awsconfig [ -f "$AWS_CONFIG_FILE" ] || usage # Get the version VERSION=$(cat VERSION) # Disallow pushing dev docs to master if [ "$AWS_S3_BUCKET" == "docs.docker.com" ] && [ "${VERSION%-dev}" != "$VERSION" ]; then echo "Please do not push '-dev' documentation to docs.docker.com ($VERSION)" exit 1 fi # Clean version - 1.0.2-dev -> 1.0 export MAJOR_MINOR="v${VERSION%.*}" export BUCKET=$AWS_S3_BUCKET export AWS_DEFAULT_PROFILE=$BUCKET # debug variables echo "bucket: $BUCKET, full version: $VERSION, major-minor: $MAJOR_MINOR" echo "cfg file: $AWS_CONFIG_FILE ; profile: $AWS_DEFAULT_PROFILE" # create the robots.txt create_robots_txt if [ "$OPTIONS" != "--dryrun" ]; then setup_s3 fi # Default to only building the version-specific docs # so we don't clobber the latest by accident with old versions if [ "$BUILD_ROOT" == "yes" ]; then echo "Building root documentation" build_current_documentation echo "Uploading root documentation" upload_current_documentation [ "$NOCACHE" ] || invalidate_cache fi # build again with the /$MAJOR_MINOR/ prefix sed -i "s/^site_url:.*/site_url: \/$MAJOR_MINOR\//" mkdocs.yml echo "Building the /$MAJOR_MINOR/ documentation" build_current_documentation echo "Uploading the documentation" upload_current_documentation "/$MAJOR_MINOR/" # Invalidating cache [ "$NOCACHE" ] || invalidate_cache "/$MAJOR_MINOR" } main docker-1.6.2/docs/README.md0000755000175000017500000002544412524223634014557 0ustar tianontianon# Docker Documentation The source for Docker documentation is in this directory under `sources/`. Our documentation uses extended Markdown, as implemented by [MkDocs](http://mkdocs.org). The current release of the Docker documentation resides on [http://docs.docker.com](http://docs.docker.com). ## Understanding the documentation branches and processes Docker has two primary branches for documentation: | Branch | Description | URL (published via commit-hook) | |----------|--------------------------------|------------------------------------------------------------------------------| | `docs` | Official release documentation | [http://docs.docker.com](http://docs.docker.com) | | `master` | Merged but unreleased development work | [http://docs.master.dockerproject.com](http://docs.master.dockerproject.com) | Additions and updates to upcoming releases are made in a feature branch off of the `master` branch. The Docker maintainers also support a `docs` branch that contains the last release of documentation. After a release, documentation updates are continually merged into `master` as they occur. This work includes new documentation for forthcoming features, bug fixes, and other updates. Docker's CI system automatically builds and updates the `master` documentation after each merge and posts it to [http://docs.master.dockerproject.com](http://docs.master.dockerproject.com). Periodically, the Docker maintainers update `docs.docker.com` between official releases of Docker. They do this by cherry-picking commits from `master`, merging them into `docs`, and then publishing the result. In the rare case where a change is not forward-compatible, changes may be made on other branches by special arrangement with the Docker maintainers. ### Quickstart for documentation contributors If you are a new or beginner contributor, we encourage you to read through [our detailed contributors guide](https://docs.docker.com/project/who-written-for/). The guide explains in detail, with examples, how to contribute. If you are an experienced contributor, this quickstart should be enough to get you started. The following is the essential workflow for contributing to the documentation: 1. Fork the `docker/docker` repository. 2. Clone the repository to your local machine. 3. Select an issue from `docker/docker` to work on or submit a proposal of your own. 4. Create a feature branch from `master` in which to work. By basing from `master` your work is automatically included in the next release. It also allows docs maintainers to easily cherry-pick your changes into the `docs` release branch. 5. Modify existing or add new `.md` files to the `docs/sources` directory. If you add a new document (`.md`) file, you must also add it to the appropriate section of the `docs/mkdocs.yml` file in this repository. 6. As you work, build the documentation site locally to see your changes. The `docker/docker` repository contains a `Dockerfile` and a `Makefile`. Together, these create a development environment in which you can build and run a container running the Docker documentation website. To build the documentation site, enter `make docs` at the root of your `docker/docker` fork: $ make docs .... (lots of output) .... docker run --rm -it -e AWS_S3_BUCKET -p 8000:8000 "docker-docs:master" mkdocs serve Running at: http://0.0.0.0:8000/ Live reload enabled. Hold ctrl+c to quit. The build creates an image containing all the required tools, adds the local `docs/` directory, and generates the HTML files. Then, it runs a Docker container with this image. The container exposes port 8000 on the localhost so that you can connect and see your changes. If you are running Boot2Docker, use `boot2docker ip` to get the address of your server. 7. Check your writing for style and mechanical errors. Use our [documentation style guide](https://docs.docker.com/project/doc-style/) to check style. There are several [good online grammar and spelling checkers](http://www.hemingwayapp.com/) that can check your writing mechanics. 8. Squash your commits on your branch. 9. Make a pull request from your fork back to Docker's `master` branch. 10. Work with the reviewers until your change is approved and merged. ### Debugging and testing If you have any issues you need to debug, you can use `make docs-shell` and then run `mkdocs serve`. You can use `make docs-test` to generate a report of missing links that are referenced in the documentation—there should be none. ## Style guide If you have questions about how to write for Docker's documentation, please see the [style guide](sources/project/doc-style.md). The style guide provides guidance about grammar, syntax, formatting, styling, language, or tone. If something isn't clear in the guide, please submit an issue to let us know or submit a pull request to help us improve it. ## Publishing documentation (for Docker maintainers) To publish Docker's documentation you need to have Docker up and running on your machine. You'll also need a `docs/awsconfig` file containing the settings you need to access the AWS bucket you'll be deploying to. The process for publishing is to build first to an AWS bucket, verify the build, and then publish the final release. 1. Have Docker installed and running on your machine. 2. Ask the core maintainers for the `awsconfig` file. 3. Copy the `awsconfig` file to the `docs/` directory. The `awsconfig` file contains the profiles of the S3 buckets for our documentation sites. (If needed, the release script creates an S3 bucket and pushes the files to it.) Each profile has this format: [profile dowideit-docs] aws_access_key_id = IHOIUAHSIDH234rwf.... aws_secret_access_key = OIUYSADJHLKUHQWIUHE...... region = ap-southeast-2 The `profile` name must be the same as the name of the bucket you are deploying to. 4. Call `make` from the `docker` directory. $ make AWS_S3_BUCKET=dowideit-docs docs-release This publishes _only_ to the `http://bucket-url/v1.2/` version of the documentation. 5. If you're publishing the current release's documentation, you also need to update the root docs pages by running: $ make AWS_S3_BUCKET=dowideit-docs BUILD_ROOT=yes docs-release ### Errors publishing using Boot2Docker Sometimes, in a Boot2Docker environment, the publishing procedure returns this error: Post http:///var/run/docker.sock/build?rm=1&t=docker-docs%3Apost-1.2.0-docs_update-2: dial unix /var/run/docker.sock: no such file or directory. If this happens, set the Docker host. Run the following command to set the variables in your shell: $ eval "$(boot2docker shellinit)" ## Cherry-picking documentation changes to update an existing release Whenever the core team makes a release, they publish the documentation based on the `release` branch. At that time, the `release` branch is copied into the `docs` branch.
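To see which commits exist on `master` but have not yet been cherry-picked into `docs` (this assumes an `upstream` remote pointing at `docker/docker`), you can run:

    $ git fetch upstream
    $ git log --oneline upstream/docs..upstream/master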
The documentation team makes updates between Docker releases by cherry-picking changes from `master` into any of the documentation branches. Typically, we cherry-pick into the `docs` branch. For example, to update the current release's docs, do the following: 1. Go to your `docker/docker` fork and get the latest from master. $ git fetch upstream 2. Check out a new branch based on `upstream/docs`. You should give your new branch a descriptive name. $ git checkout -b post-1.2.0-docs-update-1 upstream/docs 3. In a browser window, open [https://github.com/docker/docker/commits/master]. 4. Locate the merges you want to publish. You should only cherry-pick individual commits; do not cherry-pick merge commits. To minimize merge conflicts, start with the oldest commit and work your way forward in time. 5. Copy the commit SHA from GitHub. 6. Cherry-pick the commit. $ git cherry-pick -x fe845c4 7. Repeat until you have cherry-picked everything you want to merge. 8. Push your changes to your fork. $ git push origin post-1.2.0-docs-update-1 9. Make a pull request to merge into the `docs` branch. Do __NOT__ merge into `master`. 10. Have maintainers review your pull request. 11. Once the PR has the needed "LGTMs", merge it on GitHub. 12. Return to your local fork and make sure you are still on the `docs` branch. $ git checkout docs 13. Fetch your merged pull request from the upstream `docs` branch. $ git fetch upstream docs 14. Ensure your branch is clean and set to the latest. $ git reset --hard upstream/docs 15. Copy the `awsconfig` file into the `docs` directory. 16. Make the beta documentation: $ make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release 17. Open [the beta website](http://beta-docs.docker.io.s3-website-us-west-2.amazonaws.com/) and make sure what you published is correct. 18. When you're happy with your content, publish the docs to our live site: $ make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes DISTRIBUTION_ID=C2K6......FL2F docs-release 19. Test the uncached version of the live docs at [http://docs.docker.com.s3-website-us-east-1.amazonaws.com/] ### Caching and the docs New docs do not appear live on the site until the cache (a complex, distributed CDN system) is flushed. The `make docs-release` command flushes the cache _if_ the `DISTRIBUTION_ID` is set to the CloudFront distribution ID. The cache flush can take at least 15 minutes to run, and you can check its progress with the CDN CloudFront Purge Tool Chrome app. ## Removing files from the docs.docker.com site Sometimes it becomes necessary to remove files from the historical published documentation.
The most reliable way to do this is to run `aws s3` commands directly in a docs container: Start the docs container as `make docs-shell` does, but bind-mount in your `awsconfig`: ``` docker run --rm -it -v $(pwd)/docs/awsconfig:/docs/awsconfig docker-docs:master bash ``` The following example then deletes two documents from S3 and asks the CloudFront cache to invalidate them: ``` export BUCKET BUCKET=docs.docker.com export AWS_CONFIG_FILE=$(pwd)/awsconfig aws s3 --profile $BUCKET ls s3://$BUCKET aws s3 --profile $BUCKET rm s3://$BUCKET/v1.0/reference/api/docker_io_oauth_api/index.html aws s3 --profile $BUCKET rm s3://$BUCKET/v1.1/reference/api/docker_io_oauth_api/index.html aws configure set preview.cloudfront true export DISTRIBUTION_ID=YUTIYUTIUTIUYTIUT aws cloudfront create-invalidation --profile docs.docker.com --distribution-id $DISTRIBUTION_ID --invalidation-batch '{"Paths":{"Quantity":1, "Items":["/v1.0/reference/api/docker_io_oauth_api/"]},"CallerReference":"6Mar2015sventest1"}' aws cloudfront create-invalidation --profile docs.docker.com --distribution-id $DISTRIBUTION_ID --invalidation-batch '{"Paths":{"Quantity":1, "Items":["/v1.1/reference/api/docker_io_oauth_api/"]},"CallerReference":"6Mar2015sventest1"}' ``` docker-1.6.2/docs/Dockerfile0000644000175000017500000001476712524223634015265 0ustar tianontianon# # See the top level Makefile in https://github.com/docker/docker for usage. # FROM docs/base:latest MAINTAINER Sven Dowideit (@SvenDowideit) # This section ensures we pull the correct version of each # sub project ENV COMPOSE_BRANCH 1.2.0 ENV SWARM_BRANCH v0.2.0 ENV MACHINE_BRANCH master ENV DISTRIB_BRANCH master # TODO: need the full repo source to get the git version info COPY . /src # Reset the /docs dir so we can replace the theme meta with the new repo's git info # RUN git reset --hard # Then copy the desired docs into the /docs/sources/ dir COPY ./sources/ /docs/sources COPY ./VERSION VERSION # adding the image spec will require Docker 1.5 and `docker build -f docs/Dockerfile .` #COPY ./image/spec/v1.md /docs/sources/reference/image-spec-v1.md # TODO: don't do this - look at merging the yml file in build.sh COPY ./mkdocs.yml mkdocs.yml COPY ./s3_website.json s3_website.json COPY ./release.sh release.sh # Docker Distribution # #ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/mkdocs.yml /docs/mkdocs-distribution.yml ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/images/notifications.png /docs/sources/registry/images/notifications.png ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/images/registry.png /docs/sources/registry/images/registry.png ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/overview.md /docs/sources/registry/overview.md RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/registry/overview.md ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/deploying.md /docs/sources/registry/deploying.md RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/registry/deploying.md ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/configuration.md /docs/sources/registry/configuration.md RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/registry/configuration.md ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/storagedrivers.md /docs/sources/registry/storagedrivers.md RUN sed -i.old
'1s;^;no_version_dropdown: true;' /docs/sources/registry/storagedrivers.md ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/notifications.md /docs/sources/registry/notifications.md RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/registry/notifications.md ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/spec/api.md /docs/sources/registry/spec/api.md RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/registry/spec/api.md ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/spec/json.md /docs/sources/registry/spec/json.md RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/registry/spec/json.md ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/spec/auth/token.md /docs/sources/registry/spec/auth/token.md RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/registry/spec/auth/token.md # Docker Swarm #ADD https://raw.githubusercontent.com/docker/swarm/${SWARM_BRANCH}/docs/mkdocs.yml /docs/mkdocs-swarm.yml ADD https://raw.githubusercontent.com/docker/swarm/${SWARM_BRANCH}/docs/index.md /docs/sources/swarm/index.md RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/swarm/index.md ADD https://raw.githubusercontent.com/docker/swarm/${SWARM_BRANCH}/discovery/README.md /docs/sources/swarm/discovery.md RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/swarm/discovery.md ADD https://raw.githubusercontent.com/docker/swarm/${SWARM_BRANCH}/api/README.md /docs/sources/swarm/API.md RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/swarm/API.md ADD https://raw.githubusercontent.com/docker/swarm/${SWARM_BRANCH}/scheduler/filter/README.md /docs/sources/swarm/scheduler/filter.md RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/swarm/scheduler/filter.md ADD https://raw.githubusercontent.com/docker/swarm/${SWARM_BRANCH}/scheduler/strategy/README.md /docs/sources/swarm/scheduler/strategy.md RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/swarm/scheduler/strategy.md # Docker Machine #ADD https://raw.githubusercontent.com/docker/machine/${MACHINE_BRANCH}/docs/mkdocs.yml /docs/mkdocs-machine.yml ADD https://raw.githubusercontent.com/docker/machine/${MACHINE_BRANCH}/docs/index.md /docs/sources/machine/index.md RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/machine/index.md # Docker Compose #ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/mkdocs.yml /docs/mkdocs-compose.yml ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/index.md /docs/sources/compose/index.md RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/index.md ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/install.md /docs/sources/compose/install.md RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/install.md ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/cli.md /docs/sources/compose/cli.md RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/cli.md ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/yml.md /docs/sources/compose/yml.md RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/yml.md ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/env.md /docs/sources/compose/env.md RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/env.md ADD 
https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/completion.md /docs/sources/compose/completion.md RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/completion.md ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/django.md /docs/sources/compose/django.md RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/django.md ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/rails.md /docs/sources/compose/rails.md RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/rails.md ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/wordpress.md /docs/sources/compose/wordpress.md RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/wordpress.md # Then build everything together, ready for mkdocs RUN /docs/build.shdocker-1.6.2/docs/.gitignore0000644000175000017500000000016212524223634015253 0ustar tianontianon# generated by man/man/md2man-all.sh man1/ man5/ # avoid committing the awsconfig file used for releases awsconfig docker-1.6.2/docs/man/0000755000175000017500000000000012524223634014037 5ustar tianontianondocker-1.6.2/docs/man/docker-logout.1.md0000644000175000017500000000161412524223634017300 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-logout - Log out from a Docker Registry Service. # SYNOPSIS **docker logout** [SERVER] # DESCRIPTION Log out of a Docker Registry Service located on the specified `SERVER`. You can specify a URL or a `hostname` for the `SERVER` value. If you do not specify a `SERVER`, the command attempts to log you out of Docker's public registry located at `https://registry-1.docker.io/` by default. # OPTIONS There are no available options. # EXAMPLES ## Log out from a registry on your localhost # docker logout localhost:8080 # SEE ALSO **docker-login(1)** to register or log in to a Docker registry server. # HISTORY June 2014, Originally compiled by Daniel, Dao Quang Minh (daniel at nitrous dot io) July 2014, updated by Sven Dowideit April 2015, updated by Mary Anthony for v2 docker-1.6.2/docs/man/docker-version.1.md0000644000175000017500000000042112524223634017447 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-version - Show the Docker version information. # SYNOPSIS **docker version** # OPTIONS There are no available options.
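# EXAMPLES

## Display the version information

    # docker version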
# HISTORY June 2014, updated by Sven Dowideit docker-1.6.2/docs/man/docker-run.1.md0000644000175000017500000005650012524223634016577 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-run - Run a command in a new container # SYNOPSIS **docker run** [**-a**|**--attach**[=*[]*]] [**--add-host**[=*[]*]] [**-c**|**--cpu-shares**[=*0*]] [**--cap-add**[=*[]*]] [**--cap-drop**[=*[]*]] [**--cidfile**[=*CIDFILE*]] [**--cpuset-cpus**[=*CPUSET-CPUS*]] [**-d**|**--detach**[=*false*]] [**--device**[=*[]*]] [**--dns-search**[=*[]*]] [**--dns**[=*[]*]] [**-e**|**--env**[=*[]*]] [**--entrypoint**[=*ENTRYPOINT*]] [**--env-file**[=*[]*]] [**--expose**[=*[]*]] [**-h**|**--hostname**[=*HOSTNAME*]] [**--help**] [**-i**|**--interactive**[=*false*]] [**--ipc**[=*IPC*]] [**-l**|**--label**[=*[]*]] [**--label-file**[=*[]*]] [**--link**[=*[]*]] [**--lxc-conf**[=*[]*]] [**--log-driver**[=*[]*]] [**-m**|**--memory**[=*MEMORY*]] [**--memory-swap**[=*MEMORY-SWAP*]] [**--mac-address**[=*MAC-ADDRESS*]] [**--name**[=*NAME*]] [**--net**[=*"bridge"*]] [**-P**|**--publish-all**[=*false*]] [**-p**|**--publish**[=*[]*]] [**--pid**[=*[]*]] [**--privileged**[=*false*]] [**--read-only**[=*false*]] [**--restart**[=*RESTART*]] [**--rm**[=*false*]] [**--security-opt**[=*[]*]] [**--sig-proxy**[=*true*]] [**-t**|**--tty**[=*false*]] [**-u**|**--user**[=*USER*]] [**-v**|**--volume**[=*[]*]] [**--volumes-from**[=*[]*]] [**-w**|**--workdir**[=*WORKDIR*]] [**--cgroup-parent**[=*CGROUP-PATH*]] IMAGE [COMMAND] [ARG...] # DESCRIPTION Run a process in a new container. **docker run** starts a process with its own file system, its own networking, and its own isolated process tree. The IMAGE which starts the process may define defaults related to the process that will be run in the container, the networking to expose, and more, but **docker run** gives final control to the operator or administrator who starts the container from the image. For that reason **docker run** has more options than any other Docker command. If the IMAGE is not already loaded then **docker run** will pull the IMAGE, and all image dependencies, from the repository in the same way as running **docker pull** IMAGE, before it starts the container from that image. # OPTIONS **-a**, **--attach**=[] Attach to STDIN, STDOUT or STDERR. In foreground mode (the default when **-d** is not specified), **docker run** can start the process in the container and attach the console to the process’s standard input, output, and standard error. It can even pretend to be a TTY (this is what most command-line executables expect) and pass along signals. The **-a** option can be set for each of stdin, stdout, and stderr. **--add-host**=[] Add a custom host-to-IP mapping (host:ip) Add a line to /etc/hosts. The format is hostname:ip. The **--add-host** option can be set multiple times. **-c**, **--cpu-shares**=0 CPU shares (relative weight) By default, all containers get the same proportion of CPU cycles. This proportion can be modified by changing the container's CPU share weighting relative to the weighting of all other running containers. To modify the proportion from the default of 1024, use the **-c** or **--cpu-shares** flag to set the weighting to 2 or higher. The proportion will only apply when CPU-intensive processes are running. When tasks in one container are idle, other containers can use the left-over CPU time. The actual amount of CPU time will vary depending on the number of containers running on the system.
For example, consider three containers: one has a cpu-share of 1024 and two others have a cpu-share setting of 512. When processes in all three containers attempt to use 100% of CPU, the first container would receive 50% of the total CPU time. If you add a fourth container with a cpu-share of 1024, the first container only gets 33% of the CPU. The remaining containers receive 16.5%, 16.5% and 33% of the CPU. On a multi-core system, the shares of CPU time are distributed over all CPU cores. Even if a container is limited to less than 100% of CPU time, it can use 100% of each individual CPU core. For example, consider a system with more than three cores. If you start one container **{C0}** with **-c=512** running one process, and another container **{C1}** with **-c=1024** running two processes, this can result in the following division of CPU shares: PID container CPU CPU share 100 {C0} 0 100% of CPU0 101 {C1} 1 100% of CPU1 102 {C1} 2 100% of CPU2 **--cap-add**=[] Add Linux capabilities **--cap-drop**=[] Drop Linux capabilities **--cgroup-parent**="" Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. **--cidfile**="" Write the container ID to the file **--cpuset-cpus**="" CPUs in which to allow execution (0-3, 0,1) **-d**, **--detach**=*true*|*false* Detached mode: run the container in the background and print the new container ID. The default is *false*. At any time you can run **docker ps** in the other shell to view a list of the running containers. You can reattach to a detached container with **docker attach**. If you choose to run a container in the detached mode, then you cannot use the **--rm** option. When attached in tty mode, you can detach from a running container without stopping the process by pressing the keys CTRL-P CTRL-Q. **--device**=[] Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) **--dns-search**=[] Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain) **--dns**=[] Set custom DNS servers This option can be used to override the DNS configuration passed to the container. Typically this is necessary when the host DNS configuration is invalid for the container (e.g., 127.0.0.1). When this is the case the **--dns** flag is necessary for every run. **-e**, **--env**=[] Set environment variables This option allows you to specify arbitrary environment variables that are available for the process that will be launched inside of the container. **--entrypoint**="" Overwrite the default ENTRYPOINT of the image This option allows you to overwrite the default entrypoint of the image that is set in the Dockerfile. The ENTRYPOINT of an image is similar to a COMMAND because it specifies what executable to run when the container starts, but it is (purposely) more difficult to override. The ENTRYPOINT gives a container its default nature or behavior, so that when you set an ENTRYPOINT you can run the container as if it were that binary, complete with default options, and you can pass in more options via the COMMAND. But, sometimes an operator may want to run something else inside the container, so you can override the default ENTRYPOINT at runtime by using **--entrypoint** and a string to specify the new ENTRYPOINT.
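For example, you can start a shell in place of an image's default entrypoint to inspect the image interactively (the image name fedora/httpd is illustrative; any image with a shell will do):

    # docker run -i -t --entrypoint /bin/bash fedora/httpd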
**--env-file**=[] Read in a line delimited file of environment variables **--expose**=[] Expose a port, or a range of ports (e.g. --expose=3300-3310), from the container without publishing it to your host **-h**, **--hostname**="" Container host name Sets the container host name that is available inside the container. **--help** Print usage statement **-i**, **--interactive**=*true*|*false* Keep STDIN open even if not attached. The default is *false*. When set to true, keep stdin open even if not attached. The default is false. **--ipc**="" Default is to create a private IPC namespace (POSIX SysV IPC) for the container 'container:<name|id>': reuses another container's shared memory, semaphores and message queues 'host': use the host shared memory, semaphores and message queues inside the container. Note: the host mode gives the container full access to local shared memory and is therefore considered insecure. **-l**, **--label**=[] Set metadata on the container (e.g., --label com.example.key=value) **--label-file**=[] Read in a line delimited file of labels **--link**=[] Add link to another container in the form of <name>:alias If the operator uses **--link** when starting the new client container, then the client container can access the exposed port via a private networking interface. Docker will set some environment variables in the client container to help indicate which interface and port to use. **--lxc-conf**=[] (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" **--log-driver**="|*json-file*|*syslog*|*none*" Logging driver for container. Default is defined by daemon `--log-driver` flag. **Warning**: `docker logs` command works only for `json-file` logging driver. **-m**, **--memory**="" Memory limit (format: <number><optional unit>, where unit = b, k, m or g) Allows you to constrain the memory available to a container. If the host supports swap memory, then the **-m** memory setting can be larger than physical RAM. If a limit of 0 is specified (not using **-m**), the container's memory is not limited. The actual limit may be rounded up to a multiple of the operating system's page size (the value would be very large, that's millions of trillions). **--memory-swap**="" Total memory limit (memory + swap) Set `-1` to disable swap (format: <number><optional unit>, where unit = b, k, m or g). This value should always be larger than **-m**, so you should always use this with **-m**. **--mac-address**="" Container MAC address (e.g. 92:d0:c6:0a:29:33) Remember that the MAC address in an Ethernet network must be unique. The IPv6 link-local address will be based on the device's MAC address according to RFC4862. **--name**="" Assign a name to the container The operator can identify a container in three ways: UUID long identifier (“f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778”) UUID short identifier (“f78375b1c487”) Name (“jonah”) The UUID identifiers come from the Docker daemon, and if a name is not assigned to the container with **--name** then the daemon will also generate a random string name. The name is useful when defining links (see **--link**) (or any other place you need to identify a container). This works for both background and foreground Docker containers. **--net**="bridge" Set the Network mode for the container 'bridge': creates a new network stack for the container on the docker bridge 'none': no networking for this container 'container:<name|id>': reuses another container's network stack 'host': use the host network stack inside the container.
Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure. **-P**, **--publish-all**=*true*|*false* Publish all exposed ports to random ports on the host interfaces. The default is *false*. When set to true, publish all exposed ports to the host interfaces. The default is false. If the operator uses -P (or -p) then Docker will make the exposed port accessible on the host and the ports will be available to any client that can reach the host. When using -P, Docker will bind any exposed port to a random port on the host within an *ephemeral port range* defined by `/proc/sys/net/ipv4/ip_local_port_range`. To find the mapping between the host ports and the exposed ports, use `docker port`. **-p**, **--publish**=[] Publish a container's port, or range of ports, to the host. format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort Both hostPort and containerPort can be specified as a range of ports. When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range. (e.g., `-p 1234-1236:1234-1236/tcp`) (use 'docker port' to see the actual mapping) **--pid**=host Set the PID mode for the container **host**: use the host's PID namespace inside the container. Note: the host mode gives the container full access to local PID and is therefore considered insecure. **--privileged**=*true*|*false* Give extended privileges to this container. The default is *false*. By default, Docker containers are “unprivileged” (=false) and cannot, for example, run a Docker daemon inside the Docker container. This is because by default a container is not allowed to access any devices. A “privileged” container is given access to all devices. When the operator executes **docker run --privileged**, Docker will enable access to all devices on the host as well as set some configuration in AppArmor to allow the container nearly all the same access to the host as processes running outside of a container on the host. **--read-only**=*true*|*false* Mount the container's root filesystem as read only. By default a container will have its root filesystem writable allowing processes to write files anywhere. By specifying the `--read-only` flag the container will have its root filesystem mounted as read only prohibiting any writes. **--restart**="no" Restart policy to apply when a container exits (no, on-failure[:max-retry], always) **--rm**=*true*|*false* Automatically remove the container when it exits (incompatible with -d). The default is *false*. **--security-opt**=[] Security Options "label:user:USER" : Set the label user for the container "label:role:ROLE" : Set the label role for the container "label:type:TYPE" : Set the label type for the container "label:level:LEVEL" : Set the label level for the container "label:disable" : Turn off label confinement for the container **--sig-proxy**=*true*|*false* Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is *true*. **-t**, **--tty**=*true*|*false* Allocate a pseudo-TTY. The default is *false*. When set to true Docker can allocate a pseudo-tty and attach to the standard input of any container. This can be used, for example, to run a throwaway interactive shell. The default value is false. The **-t** option is incompatible with a redirection of the docker client standard input.
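A minimal illustration of **-i** and **-t** used together is the throwaway interactive shell mentioned above (the fedora image is used here only as an example):

    # docker run -i -t fedora /bin/bash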
**-u**, **--user**="" Username or UID **-v**, **--volume**=[] Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container) The **-v** option can be used one or more times to add one or more mounts to a container. These mounts can then be used in other containers using the **--volumes-from** option. The volume may be optionally suffixed with :ro or :rw to mount the volumes in read-only or read-write mode, respectively. By default, the volumes are mounted read-write. See examples. **--volumes-from**=[] Mount volumes from the specified container(s) Mounts already mounted volumes from a source container onto another container. You must supply the source's container-id. To share a volume, use the **--volumes-from** option when running the target container. You can share volumes even if the source container is not running. By default, Docker mounts the volumes in the same mode (read-write or read-only) as they are mounted in the source container. Optionally, you can change this by suffixing the container-id with either the `:ro` or `:rw` keyword. If the location of the volume from the source container overlaps with data residing on a target container, then the volume hides that data on the target. **-w**, **--workdir**="" Working directory inside the container The default working directory for running binaries within a container is the root directory (/). The developer can set a different default with the Dockerfile WORKDIR instruction. The operator can override the working directory by using the **-w** option. # EXAMPLES ## Exposing log messages from the container to the host's log If you want messages that are logged in your container to show up in the host's syslog/journal then you should bind mount the /dev/log directory as follows. # docker run -v /dev/log:/dev/log -i -t fedora /bin/bash From inside the container you can test this by sending a message to the log. (bash)# logger "Hello from my container" Then exit and check the journal. # exit # journalctl -b | grep Hello This should list the message sent to logger. ## Attaching to one or more of STDIN, STDOUT, STDERR If you do not specify -a then Docker will attach everything (stdin, stdout, stderr).
You can specify to which of the three standard streams (stdin, stdout, stderr) you’d like to connect instead, as in: # docker run -a stdin -a stdout -i -t fedora /bin/bash ## Sharing IPC between containers Using shm_server.c available here: http://www.cs.cf.ac.uk/Dave/C/node27.html Testing `--ipc=host` mode: The host shows a shared memory segment with 7 pids attached; it happens to be from httpd: ``` $ sudo ipcs -m ------ Shared Memory Segments -------- key shmid owner perms bytes nattch status 0x01128e25 0 root 600 1000 7 ``` Now run a regular container, and it correctly does NOT see the shared memory segment from the host: ``` $ sudo docker run -it shm ipcs -m ------ Shared Memory Segments -------- key shmid owner perms bytes nattch status ``` Run a container with the new `--ipc=host` option, and it now sees the shared memory segment from the host httpd: ``` $ sudo docker run -it --ipc=host shm ipcs -m ------ Shared Memory Segments -------- key shmid owner perms bytes nattch status 0x01128e25 0 root 600 1000 7 ``` Testing `--ipc=container:CONTAINERID` mode: Start a container with a program to create a shared memory segment: ``` sudo docker run -it shm bash $ sudo shm/shm_server & $ sudo ipcs -m ------ Shared Memory Segments -------- key shmid owner perms bytes nattch status 0x0000162e 0 root 666 27 1 ``` Creating a 2nd container correctly shows no shared memory segment from the 1st container: ``` $ sudo docker run shm ipcs -m ------ Shared Memory Segments -------- key shmid owner perms bytes nattch status ``` Creating a 3rd container using the new --ipc=container:CONTAINERID option now shows the shared memory segment from the first: ``` $ sudo docker run -it --ipc=container:ed735b2264ac shm ipcs -m $ sudo ipcs -m ------ Shared Memory Segments -------- key shmid owner perms bytes nattch status 0x0000162e 0 root 666 27 1 ``` ## Linking Containers The link feature allows multiple containers to communicate with each other. For example, a container whose Dockerfile has exposed port 80 can be run and named as follows: # docker run --name=link-test -d -i -t fedora/httpd A second container, in this case called linker, can communicate with the httpd container, named link-test, by running with the **--link=<name>:<alias>** option: # docker run -t -i --link=link-test:lt --name=linker fedora /bin/bash Now the container linker is linked to container link-test with the alias lt. Running the **env** command in the linker container shows environment variables with the LT (alias) context (**LT_**) # env HOSTNAME=668231cb0978 TERM=xterm LT_PORT_80_TCP=tcp://172.17.0.3:80 LT_PORT_80_TCP_PORT=80 LT_PORT_80_TCP_PROTO=tcp LT_PORT=tcp://172.17.0.3:80 PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PWD=/ LT_NAME=/linker/lt SHLVL=1 HOME=/ LT_PORT_80_TCP_ADDR=172.17.0.3 _=/usr/bin/env When linking two containers Docker will use the exposed ports of the container to create a secure tunnel for the parent to access. ## Mapping Ports for External Usage The exposed port of an application can be mapped to a host port using the **-p** flag. For example, an httpd port 80 can be mapped to the host port 8080 using the following: # docker run -p 8080:80 -d -i -t fedora/httpd ## Creating and Mounting a Data Volume Container Many applications require the sharing of persistent data across several containers. Docker allows you to create a Data Volume Container that other containers can mount from. For example, create a named container that contains directories /var/volume1 and /tmp/volume2.
The image will need to contain these directories, so a couple of RUN mkdir instructions might be required for your fedora-data image: # docker run --name=data -v /var/volume1 -v /tmp/volume2 -i -t fedora-data true # docker run --volumes-from=data --name=fedora-container1 -i -t fedora bash Multiple --volumes-from parameters will bring together multiple data volumes from multiple containers. And it's possible to mount the volumes that came from the DATA container in yet another container via the fedora-container1 intermediary container, allowing you to abstract the actual data source from users of that data: # docker run --volumes-from=fedora-container1 --name=fedora-container2 -i -t fedora bash ## Mounting External Volumes To mount a host directory as a container volume, specify the absolute path to the directory and the absolute path for the container directory separated by a colon: # docker run -v /var/db:/data1 -i -t fedora bash When using SELinux, be aware that the host has no knowledge of container SELinux policy. Therefore, in the above example, if SELinux policy is enforced, the `/var/db` directory is not writable to the container. A "Permission Denied" message will occur, and an avc: message will appear in the host's syslog. To work around this, at time of writing this man page, the following command needs to be run in order for the proper SELinux policy type label to be attached to the host directory: # chcon -Rt svirt_sandbox_file_t /var/db Now, writing to the /data1 volume in the container will be allowed and the changes will also be reflected on the host in /var/db. ## Using alternative security labeling You can override the default labeling scheme for each container by specifying the `--security-opt` flag. For example, you can specify the MCS/MLS level, a requirement for MLS systems. Specifying the level in the following command allows you to share the same content between containers. # docker run --security-opt label:level:s0:c100,c200 -i -t fedora bash An MLS example might be: # docker run --security-opt label:level:TopSecret -i -t rhel7 bash To disable the security labeling for this container, rather than running with the `--permissive` flag, use the following command: # docker run --security-opt label:disable -i -t fedora bash If you want a tighter security policy on the processes within a container, you can specify an alternate type for the container. You could run a container that is only allowed to listen on Apache ports by executing the following command: # docker run --security-opt label:type:svirt_apache_t -i -t centos bash Note: You would have to write policy defining a `svirt_apache_t` type. # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit July 2014, updated by Sven Dowideit docker-1.6.2/docs/man/docker-push.1.md0000644000175000017500000000266712524223634016755 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-push - Push an image or a repository to a registry # SYNOPSIS **docker push** [**--help**] NAME[:TAG] | [REGISTRY_HOST[:REGISTRY_PORT]/]NAME[:TAG] # DESCRIPTION This command pushes an image or a repository to a registry. If you do not specify a `REGISTRY_HOST`, the command uses Docker's public registry located at `registry-1.docker.io` by default.
# OPTIONS **--help** Print usage statement # EXAMPLES # Pushing a new image to a registry First save the new image by finding the container ID (using **docker ps**) and then committing it to a new image name: # docker commit c16378f943fe rhel-httpd Now, push the image to the registry. In this example the registry is on a host named `registry-host` and listening on port `5000`. To do this, tag the image with the host name or IP address, and the port of the registry: # docker tag rhel-httpd registry-host:5000/myadmin/rhel-httpd # docker push registry-host:5000/myadmin/rhel-httpd Check that this worked by running: # docker images You should see both `rhel-httpd` and `registry-host:5000/myadmin/rhel-httpd` listed. # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit April 2015, updated by Mary Anthony for v2 docker-1.6.2/docs/man/Dockerfile.5.md0000644000175000017500000002743412524223634016605 0ustar tianontianon% DOCKERFILE(5) Docker User Manuals % Zac Dover % May 2014 # NAME Dockerfile - automate the steps of creating a Docker image # INTRODUCTION The **Dockerfile** is a configuration file that automates the steps of creating a Docker image. It is similar to a Makefile. Docker reads instructions from the **Dockerfile** to automate the steps otherwise performed manually to create an image. To build an image, create a file called **Dockerfile**. The **Dockerfile** describes the steps taken to assemble the image. When the **Dockerfile** has been created, call the `docker build` command, using the path of the directory that contains **Dockerfile** as the argument. # SYNOPSIS INSTRUCTION arguments For example: FROM image # DESCRIPTION A Dockerfile is a file that automates the steps of creating a Docker image. A Dockerfile is similar to a Makefile. # USAGE sudo docker build . -- Runs the steps and commits them, building a final image. The path to the source repository defines where to find the context of the build. The build is run by the Docker daemon, not the CLI. The whole context must be transferred to the daemon. The Docker CLI reports `"Sending build context to Docker daemon"` when the context is sent to the daemon. ``` sudo docker build -t repository/tag . ``` -- specifies a repository and tag at which to save the new image if the build succeeds. The Docker daemon runs the steps one-by-one, committing the result to a new image if necessary, before finally outputting the ID of the new image. The Docker daemon automatically cleans up the context it is given. Docker re-uses intermediate images whenever possible. This significantly accelerates the *docker build* process. # FORMAT `FROM image` `FROM image:tag` -- The **FROM** instruction sets the base image for subsequent instructions. A valid Dockerfile must have **FROM** as its first instruction. The image can be any valid image. It is easy to start by pulling an image from the public repositories. -- **FROM** must be the first non-comment instruction in Dockerfile. -- **FROM** may appear multiple times within a single Dockerfile in order to create multiple images. Make a note of the last image ID output by the commit before each new **FROM** command. -- If no tag is given to the **FROM** instruction, latest is assumed. If the used tag does not exist, an error is returned. **MAINTAINER** -- **MAINTAINER** sets the Author field for the generated images.
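Taken together, the two instructions above might open a Dockerfile like this minimal sketch (the base image tag and the author details are placeholders):

```
# base the build on a tagged public image
FROM fedora:20
# record the image author (placeholder name and address)
MAINTAINER Jane Doe <jane@example.com>
```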
**RUN** -- **RUN** has two forms: ``` # the command is run in a shell - /bin/sh -c RUN <command> # Executable form RUN ["executable", "param1", "param2"] ``` -- The **RUN** instruction executes any commands in a new layer on top of the current image and commits the results. The committed image is used for the next step in the Dockerfile. -- Layering **RUN** instructions and generating commits conforms to the core concepts of Docker where commits are cheap and containers can be created from any point in the history of an image. This is similar to source control. The exec form makes it possible to avoid shell string munging. The exec form makes it possible to **RUN** commands using a base image that does not contain `/bin/sh`. Note that the exec form is parsed as a JSON array, which means that you must use double-quotes (") around words not single-quotes ('). **CMD** -- **CMD** has three forms: ``` # Executable form CMD ["executable", "param1", "param2"] # Provide default arguments to ENTRYPOINT CMD ["param1", "param2"] # the command is run in a shell - /bin/sh -c CMD command param1 param2 ``` -- There can be only one **CMD** in a Dockerfile. If more than one **CMD** is listed, only the last **CMD** takes effect. The main purpose of a **CMD** is to provide defaults for an executing container. These defaults may include an executable, or they can omit the executable. If they omit the executable, an **ENTRYPOINT** must be specified. When used in the shell or exec formats, the **CMD** instruction sets the command to be executed when running the image. If you use the shell form of the **CMD**, the `<command>` executes in `/bin/sh -c`: Note that the exec form is parsed as a JSON array, which means that you must use double-quotes (") around words not single-quotes ('). ``` FROM ubuntu CMD echo "This is a test." | wc - ``` -- If you run `<command>` without a shell, then you must express the command as a JSON array and give the full path to the executable. This array form is the preferred form of **CMD**. All additional parameters must be individually expressed as strings in the array: ``` FROM ubuntu CMD ["/usr/bin/wc","--help"] ``` -- To make the container run the same executable every time, use **ENTRYPOINT** in combination with **CMD**. If the user specifies arguments to `docker run`, the specified commands override the default in **CMD**. Do not confuse **RUN** with **CMD**. **RUN** runs a command and commits the result. **CMD** executes nothing at build time, but specifies the intended command for the image. **LABEL** -- `LABEL <key>[=<value>] [<key>[=<value>] ...]` The **LABEL** instruction adds metadata to an image. A **LABEL** is a key-value pair. To include spaces within a **LABEL** value, use quotes and backslashes as you would in command-line parsing. ``` LABEL "com.example.vendor"="ACME Incorporated" ``` An image can have more than one label. To specify multiple labels, separate each key-value pair by a space. Labels are additive including `LABEL`s in `FROM` images. As the system encounters and then applies a new label, new `key`s override any previous labels with identical keys. To display an image's labels, use the `docker inspect` command. **EXPOSE** -- `EXPOSE <port> [<port>...]` The **EXPOSE** instruction informs Docker that the container listens on the specified network ports at runtime. Docker uses this information to interconnect containers using links, and to set up port redirection on the host system. **ENV** -- `ENV <key> <value>` The **ENV** instruction sets the environment variable `<key>` to the value `<value>`.
This value is passed to all future RUN, **ENTRYPOINT**, and **CMD** instructions. This is functionally equivalent to prefixing the command with `<key>=<value>`. The environment variables that are set with **ENV** persist when a container is run from the resulting image. Use `docker inspect` to inspect these values, and change them using `docker run --env <key>=<value>`. Note that setting "`ENV DEBIAN_FRONTEND noninteractive`" may cause unintended consequences, because it will persist when the container is run interactively, as with the following command: `docker run -t -i image bash` **ADD** -- **ADD** has two forms: ``` ADD <src> <dest> # Required for paths with whitespace ADD ["<src>", "<dest>"] ``` The **ADD** instruction copies new files, directories or remote file URLs to the filesystem of the container at path `<dest>`. Multiple `<src>` resources may be specified but if they are files or directories then they must be relative to the source directory that is being built (the context of the build). The `<dest>` is the absolute path, or path relative to **WORKDIR**, into which the source is copied inside the target container. All new files and directories are created with mode 0755 and with the uid and gid of **0**. **COPY** -- **COPY** has two forms: ``` COPY <src> <dest> # Required for paths with whitespace COPY ["<src>", "<dest>"] ``` The **COPY** instruction copies new files from `<src>` and adds them to the filesystem of the container at path `<dest>`. The `<src>` must be the path to a file or directory relative to the source directory that is being built (the context of the build) or a remote file URL. The `<dest>` is an absolute path, or a path relative to **WORKDIR**, into which the source will be copied inside the target container. All new files and directories are created with mode **0755** and with the uid and gid of **0**. **ENTRYPOINT** -- **ENTRYPOINT** has two forms: ``` # executable form ENTRYPOINT ["executable", "param1", "param2"] # run command in a shell - /bin/sh -c ENTRYPOINT command param1 param2 ``` -- An **ENTRYPOINT** helps you configure a container that can be run as an executable. When you specify an **ENTRYPOINT**, the whole container runs as if it were only that executable. The **ENTRYPOINT** instruction adds an entry command that is not overwritten when arguments are passed to docker run. This is different from the behavior of CMD. This allows arguments to be passed to the entrypoint, for instance `docker run -d` passes the -d argument to the **ENTRYPOINT**. Specify parameters either in the **ENTRYPOINT** JSON array (as in the preferred exec form above), or by using a **CMD** statement. Parameters in the **ENTRYPOINT** are not overwritten by the docker run arguments. Parameters specified via **CMD** are overwritten by docker run arguments. Specify a plain string for the **ENTRYPOINT**, and it will execute in `/bin/sh -c`, like a **CMD** instruction: ``` FROM ubuntu ENTRYPOINT wc -l - ``` This means that the Dockerfile's image always takes stdin as input (that's what "-" means), and prints the number of lines (that's what "-l" means). To make this optional but default, use a **CMD**: ``` FROM ubuntu CMD ["-l", "-"] ENTRYPOINT ["/usr/bin/wc"] ``` **VOLUME** -- `VOLUME ["/data"]` The **VOLUME** instruction creates a mount point with the specified name and marks it as holding externally-mounted volumes from the native host or from other containers. **USER** -- `USER daemon` The **USER** instruction sets the username or UID that is used when running the image.
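As a brief sketch of the two instructions above working together (the directory is illustrative, and the `daemon` user is assumed to exist in the base image, as it does in ubuntu):

```
FROM ubuntu
# create the data directory and hand it to the unprivileged user
RUN mkdir /data && chown daemon /data
# mark /data as an externally-mountable volume
VOLUME ["/data"]
# run the image's process as the daemon user rather than root
USER daemon
```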
**WORKDIR** -- `WORKDIR /path/to/workdir` The **WORKDIR** instruction sets the working directory for the **RUN**, **CMD**, **ENTRYPOINT**, **COPY** and **ADD** Dockerfile commands that follow it. It can be used multiple times in a single Dockerfile. Relative paths are defined relative to the path of the previous **WORKDIR** instruction. For example: ``` WORKDIR /a WORKDIR b WORKDIR c RUN pwd ``` In the above example, the output of the **pwd** command is **/a/b/c**. **ONBUILD** -- `ONBUILD [INSTRUCTION]` The **ONBUILD** instruction adds a trigger instruction to an image. The trigger is executed at a later time, when the image is used as the base for another build. Docker executes the trigger in the context of the downstream build, as if the trigger existed immediately after the **FROM** instruction in the downstream Dockerfile. You can register any build instruction as a trigger. A trigger is useful if you are defining an image to use as a base for building other images. For example, if you are defining an application build environment or a daemon that is customized with a user-specific configuration. Consider an image intended as a reusable python application builder. It must add application source code to a particular directory, and might need a build script called after that. You can't just call **ADD** and **RUN** now, because you don't yet have access to the application source code, and it is different for each application build. -- Providing application developers with a boilerplate Dockerfile to copy-paste into their application is inefficient, error-prone, and difficult to update because it mixes with application-specific code. The solution is to use **ONBUILD** to register instructions in advance, to run later, during the next build stage. # HISTORY *May 2014, Compiled by Zac Dover (zdover at redhat dot com) based on docker.com Dockerfile documentation. *Feb 2015, updated by Brian Goff (cpuguy83@gmail.com) for readability docker-1.6.2/docs/man/docker-save.1.md0000644000175000017500000000234112524223634016723 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-save - Save an image(s) to a tar archive (streamed to STDOUT by default) # SYNOPSIS **docker save** [**--help**] [**-o**|**--output**[=*OUTPUT*]] IMAGE [IMAGE...] # DESCRIPTION Produces a tarred repository to the standard output stream. Contains all parent layers, and all tags + versions, or specified repo:tag. Stream to a file instead of STDOUT by using **-o**. # OPTIONS **--help** Print usage statement **-o**, **--output**="" Write to a file, instead of STDOUT # EXAMPLES Save all fedora repository images to a fedora-all.tar and save the latest fedora image to a fedora-latest.tar: $ sudo docker save fedora > fedora-all.tar $ sudo docker save --output=fedora-latest.tar fedora:latest $ ls -sh fedora-all.tar 721M fedora-all.tar $ ls -sh fedora-latest.tar 367M fedora-latest.tar # See also **docker-load(1)** to load an image from a tar archive on STDIN. # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit November 2014, updated by Sven Dowideit docker-1.6.2/docs/man/docker-login.1.md0000644000175000017500000000266312524223634017104 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-login - Register or log in to a Docker registry.
# SYNOPSIS **docker login** [**-e**|**--email**[=*EMAIL*]] [**--help**] [**-p**|**--password**[=*PASSWORD*]] [**-u**|**--username**[=*USERNAME*]] [SERVER] # DESCRIPTION Register or log in to a Docker Registry Service located on the specified `SERVER`. You can specify a URL or a `hostname` for the `SERVER` value. If you do not specify a `SERVER`, the command uses Docker's public registry located at `https://registry-1.docker.io/` by default. To get a username/password for Docker's public registry, create an account on Docker Hub. You can log into any public or private repository for which you have credentials. When you log in, the command stores encoded credentials in `$HOME/.dockercfg` on Linux or `%USERPROFILE%/.dockercfg` on Windows. # OPTIONS **-e**, **--email**="" Email **--help** Print usage statement **-p**, **--password**="" Password **-u**, **--username**="" Username # EXAMPLES ## Login to a registry on your localhost # docker login localhost:8080 # See also **docker-logout(1)** to log out from a Docker registry. # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit April 2015, updated by Mary Anthony for v2 docker-1.6.2/docs/man/docker-info.1.md0000644000175000017500000000254212524223634016723 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-info - Display system-wide information # SYNOPSIS **docker info** [**--help**] # DESCRIPTION This command displays system-wide information regarding the Docker installation. Information displayed includes the number of containers and images, pool name, data file, metadata file, data space used, total data space, metadata space used, total metadata space, execution driver, and the kernel version. The data file is where the images are stored and the metadata file is where the metadata regarding those images is stored. When run for the first time, Docker allocates a certain amount of data space and metadata space from the space available on the volume where `/var/lib/docker` is mounted. # OPTIONS **--help** Print usage statement # EXAMPLES ## Display Docker system information Here is a sample output: # docker info Containers: 14 Images: 52 Storage Driver: aufs Root Dir: /var/lib/docker/aufs Dirs: 80 Execution Driver: native-0.2 Kernel Version: 3.13.0-24-generic Operating System: Ubuntu 14.04 LTS CPUs: 1 Total Memory: 2 GiB # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit docker-1.6.2/docs/man/docker-top.1.md0000644000175000017500000000142512524223634016571 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-top - Display the running processes of a container # SYNOPSIS **docker top** [**--help**] CONTAINER [ps OPTIONS] # DESCRIPTION Look up the running processes of the container. ps-OPTION can be any of the options you would pass to a Linux ps command. # OPTIONS **--help** Print usage statement # EXAMPLES Run **docker top** with the ps option of -x: $ sudo docker top 8601afda2b -x PID TTY STAT TIME COMMAND 16623 ? Ss 0:00 sleep 99999 # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work.
June 2014, updated by Sven Dowideit docker-1.6.2/docs/man/docker-images.1.md0000644000175000017500000000505212524223634017234 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-images - List images # SYNOPSIS **docker images** [**--help**] [**-a**|**--all**[=*false*]] [**--digests**[=*false*]] [**-f**|**--filter**[=*[]*]] [**--no-trunc**[=*false*]] [**-q**|**--quiet**[=*false*]] [REPOSITORY] # DESCRIPTION This command lists the images stored in the local Docker repository. By default, intermediate images, used during builds, are not listed. Some of the output, e.g., the image ID, is truncated for space reasons. However, the truncated image ID, and often the first few characters, are enough to be used in other Docker commands that use the image ID. The output includes repository, tag, image ID, date created and the virtual size. The title REPOSITORY for the first column may seem confusing. It is essentially the image name. However, because you can tag a specific image, and multiple tags (image instances) can be associated with a single name, the name is really a repository for all tagged images of the same name. For example, consider an image called fedora. It may be tagged with 18, 19, or 20, etc. to manage different versions. # OPTIONS **-a**, **--all**=*true*|*false* Show all images (by default filter out the intermediate image layers). The default is *false*. **--digests**=*true*|*false* Show image digests. The default is *false*. **-f**, **--filter**=[] Filters the output. The dangling=true filter finds unused images, while label=com.foo=amd64 filters for images with a com.foo value of amd64. The label=com.foo filter finds images with the label com.foo of any value. **--help** Print usage statement **--no-trunc**=*true*|*false* Don't truncate output. The default is *false*. **-q**, **--quiet**=*true*|*false* Only show numeric IDs. The default is *false*. # EXAMPLES ## Listing the images To list the images in a local repository (not the registry) run: docker images The list will contain the image repository name, a tag for the image, an image ID, when it was created, and its virtual size. Columns: REPOSITORY, TAG, IMAGE ID, CREATED, and VIRTUAL SIZE. To get a verbose list of images which contains all the intermediate images used in builds, use **-a**: docker images -a ## Listing only the shortened image IDs Listing just the shortened image IDs. This can be useful for some automated tools. docker images -q # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit docker-1.6.2/docs/man/docker-load.1.md0000644000175000017500000000307312524223634016707 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-load - Load an image from a tar archive on STDIN # SYNOPSIS **docker load** [**--help**] [**-i**|**--input**[=*INPUT*]] # DESCRIPTION Loads a tarred repository from a file or the standard input stream. Restores both images and tags.
# OPTIONS **--help** Print usage statement **-i**, **--input**="" Read from a tar archive file, instead of STDIN # EXAMPLES $ sudo docker images REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE busybox latest 769b9341d937 7 weeks ago 2.489 MB $ sudo docker load --input fedora.tar $ sudo docker images REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE busybox latest 769b9341d937 7 weeks ago 2.489 MB fedora rawhide 0d20aec6529d 7 weeks ago 387 MB fedora 20 58394af37342 7 weeks ago 385.5 MB fedora heisenbug 58394af37342 7 weeks ago 385.5 MB fedora latest 58394af37342 7 weeks ago 385.5 MB # See also **docker-save(1)** to save an image(s) to a tar archive (streamed to STDOUT by default). # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit docker-1.6.2/docs/man/docker-port.1.md0000644000175000017500000000256012524223634016754 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-port - List port mappings for the CONTAINER, or look up the public-facing port that is NAT-ed to the PRIVATE_PORT # SYNOPSIS **docker port** [**--help**] CONTAINER [PRIVATE_PORT[/PROTO]] # DESCRIPTION List port mappings for the CONTAINER, or look up the public-facing port that is NAT-ed to the PRIVATE_PORT # OPTIONS **--help** Print usage statement # EXAMPLES You can find out all the ports mapped by not specifying a `PRIVATE_PORT`, or ask for just a specific mapping: $ docker ps test CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES b650456536c7 busybox:latest top 54 minutes ago Up 54 minutes 0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp test $ docker port test 7890/tcp -> 0.0.0.0:4321 9876/tcp -> 0.0.0.0:1234 $ docker port test 7890/tcp 0.0.0.0:4321 $ docker port test 7890/udp 2014/06/24 11:53:36 Error: No public port '7890/udp' published for test $ docker port test 7890 0.0.0.0:4321 # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) June 2014, updated by Sven Dowideit November 2014, updated by Sven Dowideit docker-1.6.2/docs/man/README.md0000644000175000017500000000234012524223634015315 0ustar tianontianonDocker Documentation ==================== This directory contains the Docker user manual in the Markdown format. Do *not* edit the man pages in the man1 directory. Instead, amend the Markdown (*.md) files. # Generating man pages from the Markdown files The recommended approach for generating the man pages is via a Docker container using the supplied `Dockerfile` to create an image with the correct environment. This uses `go-md2man`, a pure Go Markdown to man page generator. ## Building the md2man image There is a `Dockerfile` provided in the `docker/docs/man` directory. Using this `Dockerfile`, create a Docker image tagged `docker/md2man`: docker build -t docker/md2man . ## Utilizing the image Once the image is built, run a container using the image with *volumes*: docker run -v /<path-to-git-dir>/docker/docs/man:/docs:rw \ -w /docs -i docker/md2man /docs/md2man-all.sh The `md2man` Docker container will process the Markdown files and generate the man pages inside the `docker/docs/man/man1` directory using Docker volumes. For more information on Docker volumes see the man page for `docker run` and also look at the article [Sharing Directories via Volumes](http://docs.docker.com/use/working_with_volumes/).
docker-1.6.2/docs/man/docker-pull.1.md0000644000175000017500000000501312524223634016740 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-pull - Pull an image or a repository from a registry # SYNOPSIS **docker pull** [**-a**|**--all-tags**[=*false*]] [**--help**] NAME[:TAG] # DESCRIPTION This command pulls down an image or a repository from a registry. If there is more than one image for a repository (e.g., fedora) then all images for that repository name are pulled down including any tags. If you do not specify a `REGISTRY_HOST`, the command uses Docker's public registry located at `registry-1.docker.io` by default. # OPTIONS **-a**, **--all-tags**=*true*|*false* Download all tagged images in the repository. The default is *false*. **--help** Print usage statement # EXAMPLE # Pull a repository with multiple images # Note that if the image is previously downloaded then the status would be # 'Status: Image is up to date for fedora' $ sudo docker pull fedora Pulling repository fedora ad57ef8d78d7: Download complete 105182bb5e8b: Download complete 511136ea3c5a: Download complete 73bd853d2ea5: Download complete Status: Downloaded newer image for fedora $ sudo docker images REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE fedora rawhide ad57ef8d78d7 5 days ago 359.3 MB fedora 20 105182bb5e8b 5 days ago 372.7 MB fedora heisenbug 105182bb5e8b 5 days ago 372.7 MB fedora latest 105182bb5e8b 5 days ago 372.7 MB # Pull an image, manually specifying path to Docker's public registry and tag # Note that if the image is previously downloaded then the status would be # 'Status: Image is up to date for registry.hub.docker.com/fedora:20' $ sudo docker pull registry.hub.docker.com/fedora:20 Pulling repository fedora 3f2fed40e4b0: Download complete 511136ea3c5a: Download complete fd241224e9cf: Download complete Status: Downloaded newer image for registry.hub.docker.com/fedora:20 $ sudo docker images REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE fedora 20 3f2fed40e4b0 4 days ago 372.7 MB # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit August 2014, updated by Sven Dowideit April 2015, updated by John Willis April 2015, updated by Mary Anthony for v2 docker-1.6.2/docs/man/md2man-all.sh0000755000175000017500000000066312524223634016327 0ustar tianontianon#!/bin/bash set -e # get into this script's directory cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" [ "$1" = '-q' ] || { set -x pwd } for FILE in *.md; do base="$(basename "$FILE")" name="${base%.md}" num="${name##*.}" if [ -z "$num" -o "$name" = "$num" ]; then # skip files that aren't of the format xxxx.N.md (like README.md) continue fi mkdir -p "./man${num}" go-md2man -in "$FILE" -out "./man${num}/${name}" done docker-1.6.2/docs/man/docker-restart.1.md0000644000175000017500000000122712524223634017453 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-restart - Restart a running container # SYNOPSIS **docker restart** [**--help**] [**-t**|**--time**[=*10*]] CONTAINER [CONTAINER...] # DESCRIPTION Restart each container listed. # OPTIONS **--help** Print usage statement **-t**, **--time**=10 Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds. 
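# EXAMPLES

For instance, to restart a container and give it 20 seconds to stop gracefully before it is killed (the container name `web` is illustrative):

    $ docker restart --time=20 web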
# HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit docker-1.6.2/docs/man/docker-tag.1.md0000644000175000017500000000366112524223634016546 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-tag - Tag an image into a repository # SYNOPSIS **docker tag** [**-f**|**--force**[=*false*]] [**--help**] IMAGE[:TAG] [REGISTRY_HOST/][USERNAME/]NAME[:TAG] # DESCRIPTION Assigns a new alias to an image in a registry. An alias refers to the entire image name including the optional `TAG` after the ':'. If you do not specify a `REGISTRY_HOST`, the command uses Docker's public registry located at `registry-1.docker.io` by default. # OPTIONS **-f**, **--force**=*true*|*false* When set to true, force the alias. The default is *false*. **REGISTRYHOST** The hostname of the registry if required. This may also include the port separated by a ':' **USERNAME** The username or other qualifying identifier for the image. **NAME** The image name. **TAG** The tag you are assigning to the image. Though this is arbitrary, it is recommended to be used for a version to distinguish images with the same name. Note that here TAG is a part of the overall name or "tag". # EXAMPLES ## Giving an image a new alias Here is an example of aliasing an image (e.g., 0e5574283393) as "httpd" and tagging it into the "fedora" repository with "version1.0": docker tag 0e5574283393 fedora/httpd:version1.0 ## Tagging an image for a private repository To push an image to a private registry and not the central Docker registry you must tag it with the registry hostname and port (if needed). docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0 # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit July 2014, updated by Sven Dowideit April 2015, updated by Mary Anthony for v2 docker-1.6.2/docs/man/docker-logs.1.md0000644000175000017500000000261412524223634016734 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-logs - Fetch the logs of a container # SYNOPSIS **docker logs** [**-f**|**--follow**[=*false*]] [**--help**] [**-t**|**--timestamps**[=*false*]] [**--tail**[=*"all"*]] CONTAINER # DESCRIPTION The **docker logs** command batch-retrieves whatever logs are present for a container at the time of execution. This does not guarantee execution order when combined with a docker run (i.e., your run may not have generated any logs at the time you execute docker logs). The **docker logs --follow** command combines commands **docker logs** and **docker attach**. It will first return all logs from the beginning and then continue streaming new output from the container’s stdout and stderr. **Warning**: This command works only for the **json-file** logging driver. # OPTIONS **--help** Print usage statement **-f**, **--follow**=*true*|*false* Follow log output. The default is *false*. **-t**, **--timestamps**=*true*|*false* Show timestamps. The default is *false*. **--tail**="all" Output the specified number of lines at the end of logs (defaults to all logs) # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work.
June 2014, updated by Sven Dowideit July 2014, updated by Sven Dowideit docker-1.6.2/docs/man/Dockerfile0000644000175000017500000000047212524223634016034 0ustar tianontianonFROM golang:1.3 RUN mkdir -p /go/src/github.com/cpuguy83 RUN mkdir -p /go/src/github.com/cpuguy83 \ && git clone -b v1 https://github.com/cpuguy83/go-md2man.git /go/src/github.com/cpuguy83/go-md2man \ && cd /go/src/github.com/cpuguy83/go-md2man \ && go get -v ./... CMD ["/go/bin/go-md2man", "--help"] docker-1.6.2/docs/man/docker-create.1.md0000644000175000017500000001566012524223634017240 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-create - Create a new container # SYNOPSIS **docker create** [**-a**|**--attach**[=*[]*]] [**--add-host**[=*[]*]] [**-c**|**--cpu-shares**[=*0*]] [**--cap-add**[=*[]*]] [**--cap-drop**[=*[]*]] [**--cidfile**[=*CIDFILE*]] [**--cpuset-cpus**[=*CPUSET-CPUS*]] [**--device**[=*[]*]] [**--dns-search**[=*[]*]] [**--dns**[=*[]*]] [**-e**|**--env**[=*[]*]] [**--entrypoint**[=*ENTRYPOINT*]] [**--env-file**[=*[]*]] [**--expose**[=*[]*]] [**-h**|**--hostname**[=*HOSTNAME*]] [**--help**] [**-i**|**--interactive**[=*false*]] [**--ipc**[=*IPC*]] [**-l**|**--label**[=*[]*]] [**--label-file**[=*[]*]] [**--link**[=*[]*]] [**--lxc-conf**[=*[]*]] [**--log-driver**[=*[]*]] [**-m**|**--memory**[=*MEMORY*]] [**--memory-swap**[=*MEMORY-SWAP*]] [**--mac-address**[=*MAC-ADDRESS*]] [**--name**[=*NAME*]] [**--net**[=*"bridge"*]] [**-P**|**--publish-all**[=*false*]] [**-p**|**--publish**[=*[]*]] [**--pid**[=*[]*]] [**--privileged**[=*false*]] [**--read-only**[=*false*]] [**--restart**[=*RESTART*]] [**--security-opt**[=*[]*]] [**-t**|**--tty**[=*false*]] [**-u**|**--user**[=*USER*]] [**-v**|**--volume**[=*[]*]] [**--volumes-from**[=*[]*]] [**-w**|**--workdir**[=*WORKDIR*]] [**--cgroup-parent**[=*CGROUP-PATH*]] IMAGE [COMMAND] [ARG...] # OPTIONS **-a**, **--attach**=[] Attach to STDIN, STDOUT or STDERR. **--add-host**=[] Add a custom host-to-IP mapping (host:ip) **-c**, **--cpu-shares**=0 CPU shares (relative weight) **--cap-add**=[] Add Linux capabilities **--cap-drop**=[] Drop Linux capabilities **--cidfile**="" Write the container ID to the file **--cgroup-parent**="" Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. **--cpuset-cpus**="" CPUs in which to allow execution (0-3, 0,1) **--device**=[] Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) **--dns-search**=[] Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain) **--dns**=[] Set custom DNS servers **-e**, **--env**=[] Set environment variables **--entrypoint**="" Overwrite the default ENTRYPOINT of the image **--env-file**=[] Read in a line delimited file of environment variables **--expose**=[] Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host **-h**, **--hostname**="" Container host name **--help** Print usage statement **-i**, **--interactive**=*true*|*false* Keep STDIN open even if not attached. The default is *false*. **--ipc**="" Default is to create a private IPC namespace (POSIX SysV IPC) for the container 'container:': reuses another container shared memory, semaphores and message queues 'host': use the host shared memory,semaphores and message queues inside the container. 
Note: the host mode gives the container full access to local shared memory and is therefore considered insecure. **-l**, **--label**=[] Adds metadata to a container (e.g., --label=com.example.key=value) **--label-file**=[] Read labels from a file. Delimit each label with an EOL. **--link**=[] Add link to another container in the form of <name or id>:alias **--lxc-conf**=[] (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" **--log-driver**="*json-file*|*syslog*|*none*" Logging driver for container. Default is defined by daemon `--log-driver` flag. **Warning**: the `docker logs` command works only for the `json-file` logging driver. **-m**, **--memory**="" Memory limit (format: <number><optional unit>, where unit = b, k, m or g) Allows you to constrain the memory available to a container. If the host supports swap memory, then the **-m** memory setting can be larger than physical RAM. If a limit of 0 is specified (not using **-m**), the container's memory is not limited; the reported limit is then a very large number, on the order of millions of trillions. The actual limit may be rounded up to a multiple of the operating system's page size. **--memory-swap**="" Total memory limit (memory + swap) Set `-1` to disable swap (format: <number><optional unit>, where unit = b, k, m or g). This value should always be larger than **-m**, so you should always use it together with **-m**. **--mac-address**="" Container MAC address (e.g. 92:d0:c6:0a:29:33) **--name**="" Assign a name to the container **--net**="bridge" Set the Network mode for the container 'bridge': creates a new network stack for the container on the docker bridge 'none': no networking for this container 'container:<name|id>': reuses another container's network stack 'host': use the host network stack inside the container. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure. **-P**, **--publish-all**=*true*|*false* Publish all exposed ports to random ports on the host interfaces. The default is *false*. **-p**, **--publish**=[] Publish a container's port, or a range of ports, to the host format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort Both hostPort and containerPort can be specified as a range of ports. When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range. (e.g., `-p 1234-1236:1234-1236/tcp`) (use 'docker port' to see the actual mapping) **--pid**=host Set the PID mode for the container **host**: use the host's PID namespace inside the container. Note: the host mode gives the container full access to local PIDs and is therefore considered insecure. **--privileged**=*true*|*false* Give extended privileges to this container. The default is *false*. **--read-only**=*true*|*false* Mount the container's root filesystem as read only. **--restart**="no" Restart policy to apply when a container exits (no, on-failure[:max-retry], always) **--security-opt**=[] Security Options **-t**, **--tty**=*true*|*false* Allocate a pseudo-TTY. The default is *false*. 
**-u**, **--user**="" Username or UID **-v**, **--volume**=[] Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container) **--volumes-from**=[] Mount volumes from the specified container(s) **-w**, **--workdir**="" Working directory inside the container # HISTORY August 2014, updated by Sven Dowideit September 2014, updated by Sven Dowideit November 2014, updated by Sven Dowideit docker-1.6.2/docs/man/docker-start.1.md0000644000175000017500000000153312524223634017124 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-start - Start one or more stopped containers # SYNOPSIS **docker start** [**-a**|**--attach**[=*false*]] [**--help**] [**-i**|**--interactive**[=*false*]] CONTAINER [CONTAINER...] # DESCRIPTION Start one or more stopped containers. # OPTIONS **-a**, **--attach**=*true*|*false* Attach container's STDOUT and STDERR and forward all signals to the process. The default is *false*. **--help** Print usage statement **-i**, **--interactive**=*true*|*false* Attach container's STDIN. The default is *false*. # See also **docker-stop(1)** to stop a running container. # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit docker-1.6.2/docs/man/docker-ps.1.md0000644000175000017500000000604312524223634016412 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % FEBRUARY 2015 # NAME docker-ps - List containers # SYNOPSIS **docker ps** [**-a**|**--all**[=*false*]] [**--before**[=*BEFORE*]] [**--help**] [**-f**|**--filter**[=*[]*]] [**-l**|**--latest**[=*false*]] [**-n**[=*-1*]] [**--no-trunc**[=*false*]] [**-q**|**--quiet**[=*false*]] [**-s**|**--size**[=*false*]] [**--since**[=*SINCE*]] # DESCRIPTION List the containers in the local repository. By default this show only the running containers. # OPTIONS **-a**, **--all**=*true*|*false* Show all containers. Only running containers are shown by default. The default is *false*. **--before**="" Show only container created before Id or Name, include non-running ones. **--help** Print usage statement **-f**, **--filter**=[] Provide filter values. Valid filters: exited= - containers with exit code of label= or label== status=(restarting|running|paused|exited) name= - container's name id= - container's ID **-l**, **--latest**=*true*|*false* Show only the latest created container, include non-running ones. The default is *false*. **-n**=-1 Show n last created containers, include non-running ones. **--no-trunc**=*true*|*false* Don't truncate output. The default is *false*. **-q**, **--quiet**=*true*|*false* Only display numeric IDs. The default is *false*. **-s**, **--size**=*true*|*false* Display total file sizes. The default is *false*. **--since**="" Show only containers created since Id or Name, include non-running ones. 
# EXAMPLES # Display all containers, including non-running # docker ps -a CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES a87ecb4f327c fedora:20 /bin/sh -c #(nop) MA 20 minutes ago Exit 0 desperate_brattain 01946d9d34d8 vpavlin/rhel7:latest /bin/sh -c #(nop) MA 33 minutes ago Exit 0 thirsty_bell c1d3b0166030 acffc0358b9e /bin/sh -c yum -y up 2 weeks ago Exit 1 determined_torvalds 41d50ecd2f57 fedora:20 /bin/sh -c #(nop) MA 2 weeks ago Exit 0 drunk_pike # Display only IDs of all containers, including non-running # docker ps -a -q a87ecb4f327c 01946d9d34d8 c1d3b0166030 41d50ecd2f57 # Display only IDs of all containers that have the name `determined_torvalds` # docker ps -a -q --filter=name=determined_torvalds c1d3b0166030 # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit August 2014, updated by Sven Dowideit November 2014, updated by Sven Dowideit February 2015, updated by André Martins docker-1.6.2/docs/man/docker-cp.1.md0000644000175000017500000000455212524223634016375 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-cp - Copy files or folders from a container's PATH to a HOSTDIR or to STDOUT. # SYNOPSIS **docker cp** [**--help**] CONTAINER:PATH HOSTDIR|- # DESCRIPTION Copy files or folders from a `CONTAINER:PATH` to the `HOSTDIR` or to `STDOUT`. The `CONTAINER:PATH` is relative to the root of the container's filesystem. You can copy from either a running or stopped container. The `PATH` can be a file or directory. The `docker cp` command assumes all `PATH` values start at the `/` (root) directory. This means supplying the initial forward slash is optional; the command sees `compassionate_darwin:/tmp/foo/myfile.txt` and `compassionate_darwin:tmp/foo/myfile.txt` as identical. The `HOSTDIR` refers to a directory on the host. If you do not specify an absolute path for your `HOSTDIR` value, Docker creates the directory relative to where you run the `docker cp` command. For example, suppose you want to copy the `/tmp/foo` directory from a container to the `/tmp` directory on your host. If you run `docker cp` in your `~` (home) directory on the host: $ docker cp compassionate_darwin:tmp/foo /tmp Docker creates a `/tmp/foo` directory on your host. Alternatively, you can omit the leading slash in the command. If you execute this command from your home directory: $ docker cp compassionate_darwin:tmp/foo tmp Docker creates a `~/tmp/foo` subdirectory. When copying files to an existing `HOSTDIR`, the `cp` command adds the new files to the directory. For example, this command: $ docker cp sharp_ptolemy:/tmp/foo/myfile.txt /tmp Creates a `/tmp/foo` directory on the host containing the `myfile.txt` file. If you repeat the command but change the filename: $ docker cp sharp_ptolemy:/tmp/foo/secondfile.txt /tmp Your host's `/tmp/foo` directory will contain both files: $ ls /tmp/foo myfile.txt secondfile.txt Finally, use '-' to write the data as a `tar` file to STDOUT. # OPTIONS **--help** Print usage statement # EXAMPLES An important shell script file, created in a bash shell, is copied from the exited container to the current directory on the host: # docker cp c071f3c3ee81:setup.sh . # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. 
June 2014, updated by Sven Dowideit docker-1.6.2/docs/man/docker-rm.1.md0000644000175000017500000000335012524223634016404 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-rm - Remove one or more containers # SYNOPSIS **docker rm** [**-f**|**--force**[=*false*]] [**-l**|**--link**[=*false*]] [**-v**|**--volumes**[=*false*]] CONTAINER [CONTAINER...] # DESCRIPTION **docker rm** will remove one or more containers from the host node. The container name or ID can be used. This does not remove images. You cannot remove a running container unless you use the \fB-f\fR option. To see all containers on a host use the **docker ps -a** command. # OPTIONS **--help** Print usage statement **-f**, **--force**=*true*|*false* Force the removal of a running container (uses SIGKILL). The default is *false*. **-l**, **--link**=*true*|*false* Remove the specified link and not the underlying container. The default is *false*. **-v**, **--volumes**=*true*|*false* Remove the volumes associated with the container. The default is *false*. # EXAMPLES ## Removing a container using its ID To remove a container using its ID, find it either from a **docker ps -a** command, or use the ID returned from the **docker run** command, or retrieve it from a file used to store it using the **docker run --cidfile** option: docker rm abebf7571666 ## Removing a container using the container name The name of the container can be found using the **docker ps -a** command. Then use that name as follows: docker rm hopeful_morse # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit July 2014, updated by Sven Dowideit August 2014, updated by Sven Dowideit docker-1.6.2/docs/man/docker-stats.1.md0000644000175000017500000000136012524223634017123 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-stats - Display a live stream of one or more containers' resource usage statistics # SYNOPSIS **docker stats** [**--help**] CONTAINER [CONTAINER...] # DESCRIPTION Display a live stream of one or more containers' resource usage statistics # OPTIONS **--help** Print usage statement # EXAMPLES Run **docker stats** with multiple containers. $ sudo docker stats redis1 redis2 CONTAINER CPU % MEM USAGE/LIMIT MEM % NET I/O redis1 0.07% 796 KiB/64 MiB 1.21% 788 B/648 B redis2 0.07% 2.746 MiB/64 MiB 4.29% 1.266 KiB/648 B docker-1.6.2/docs/man/docker-kill.1.md0000644000175000017500000000126612524223634016725 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-kill - Kill a running container using SIGKILL or a specified signal # SYNOPSIS **docker kill** [**--help**] [**-s**|**--signal**[=*"KILL"*]] CONTAINER [CONTAINER...] # DESCRIPTION The main process inside each container specified will be sent SIGKILL, or any signal specified with option --signal. # OPTIONS **--help** Print usage statement **-s**, **--signal**="KILL" Signal to send to the container # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. 
June 2014, updated by Sven Dowideit docker-1.6.2/docs/man/docker-rmi.1.md0000644000175000017500000000206612524223634016560 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-rmi - Remove one or more images # SYNOPSIS **docker rmi** [**-f**|**--force**[=*false*]] [**--help**] [**--no-prune**[=*false*]] IMAGE [IMAGE...] # DESCRIPTION Removes one or more images from the host node. This does not remove images from a registry. You cannot remove an image of a running container unless you use the **-f** option. To see all images on a host use the **docker images** command. # OPTIONS **-f**, **--force**=*true*|*false* Force removal of the image. The default is *false*. **--help** Print usage statement **--no-prune**=*true*|*false* Do not delete untagged parents. The default is *false*. # EXAMPLES ## Removing an image Here is an example of removing an image: docker rmi fedora/httpd # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit April 2015, updated by Mary Anthony for v2 docker-1.6.2/docs/man/docker-exec.1.md0000644000175000017500000000217212524223634016713 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-exec - Run a command in a running container # SYNOPSIS **docker exec** [**-d**|**--detach**[=*false*]] [**--help**] [**-i**|**--interactive**[=*false*]] [**-t**|**--tty**[=*false*]] CONTAINER COMMAND [ARG...] # DESCRIPTION Run a process in a running container. The command started using `docker exec` will only run while the container's primary process (`PID 1`) is running, and will not be restarted if the container is restarted. If the container is paused, then the `docker exec` command will wait until the container is unpaused, and then run the command. # OPTIONS **-d**, **--detach**=*true*|*false* Detached mode: run command in the background. The default is *false*. **--help** Print usage statement **-i**, **--interactive**=*true*|*false* Keep STDIN open even if not attached. The default is *false*. **-t**, **--tty**=*true*|*false* Allocate a pseudo-TTY. The default is *false*. The **-t** option is incompatible with a redirection of the docker client standard input. # HISTORY November 2014, updated by Sven Dowideit docker-1.6.2/docs/man/docker-import.1.md0000644000175000017500000000340512524223634017301 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-import - Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it. # SYNOPSIS **docker import** [**-c**|**--change**[=*[]*]] [**--help**] URL|- [REPOSITORY[:TAG]] # OPTIONS **-c**, **--change**=[] Apply specified Dockerfile instructions while importing the image Supported Dockerfile instructions: `ADD`|`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`FROM`|`MAINTAINER`|`RUN`|`USER`|`LABEL`|`VOLUME`|`WORKDIR`|`COPY` # DESCRIPTION Create a new filesystem image from the contents of a tarball (`.tar`, `.tar.gz`, `.tgz`, `.bzip`, `.tar.xz`, `.txz`), then optionally tag it. 
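A common way to produce such a tarball is to pipe **docker export** into **docker import**, flattening an existing container into a new single-layer image; a minimal sketch (the container and repository names are illustrative):

    # Flatten the container "mycontainer" into a single-layer image
    docker export mycontainer | docker import - example/flattened
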
# OPTIONS **--help** Print usage statement # EXAMPLES ## Import from a remote location # docker import http://example.com/exampleimage.tgz example/imagerepo ## Import from a local file Import to docker via pipe and stdin: # cat exampleimage.tgz | docker import - example/imagelocal ## Import from a local file and tag Import to docker via pipe and stdin: # cat exampleimageV2.tgz | docker import - example/imagelocal:V-2.0 ## Import from a local directory # tar -c . | docker import - exampleimagedir ## Apply specified Dockerfile instructions while importing the image This example sets the docker image ENV variable DEBUG to true by default. # tar -c . | docker import -c="ENV DEBUG true" - exampleimagedir # See also **docker-export(1)** to export the contents of a filesystem as a tar archive to STDOUT. # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit docker-1.6.2/docs/man/docker-commit.1.md0000644000175000017500000000367512524223634017260 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-commit - Create a new image from a container's changes # SYNOPSIS **docker commit** [**-a**|**--author**[=*AUTHOR*]] [**--help**] [**-c**|**--change**[=*[]*]] [**-m**|**--message**[=*MESSAGE*]] [**-p**|**--pause**[=*true*]] CONTAINER [REPOSITORY[:TAG]] # DESCRIPTION Using an existing container's name or ID you can create a new image. # OPTIONS **-a**, **--author**="" Author (e.g., "John Hannibal Smith <hannibal@A-Team.com>") **-c**, **--change**=[] Apply specified Dockerfile instructions while committing the image Supported Dockerfile instructions: ADD|CMD|ENTRYPOINT|ENV|EXPOSE|FROM|MAINTAINER|RUN|USER|LABEL|VOLUME|WORKDIR|COPY **--help** Print usage statement **-m**, **--message**="" Commit message **-p**, **--pause**=*true*|*false* Pause container during commit. The default is *true*. # EXAMPLES ## Creating a new image from an existing container An existing Fedora based container has had Apache installed while running in interactive mode with the bash shell. Apache is also running. To create a new image run docker ps to find the container's ID and then run: # docker commit -m="Added Apache to Fedora base image" \ -a="A D Ministrator" 98bd7fc99854 fedora/fedora_httpd:20 ## Apply specified Dockerfile instructions while committing the image If an existing container was created without the DEBUG environment variable set to "true", you can create a new image based on that container by first getting the container's ID with docker ps and then running: # docker commit -c="ENV DEBUG true" 98bd7fc99854 debug-image # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit July 2014, updated by Sven Dowideit October 2014, updated by Daniel, Dao Quang Minh docker-1.6.2/docs/man/docker-wait.1.md0000644000175000017500000000127212524223634016733 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-wait - Block until a container stops, then print its exit code. # SYNOPSIS **docker wait** [**--help**] CONTAINER [CONTAINER...] # DESCRIPTION Block until a container stops, then print its exit code. 
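Because the exit code is printed to STDOUT, **docker wait** composes naturally with shell scripts; a minimal sketch (the container name "worker" is illustrative):

    # Block until "worker" stops, then branch on its exit code
    status=$(docker wait worker)
    if [ "$status" -ne 0 ]; then echo "worker failed with exit code $status" >&2; fi
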
# OPTIONS **--help** Print usage statement # EXAMPLES $ sudo docker run -d fedora sleep 99 079b83f558a2bc52ecad6b2a5de13622d584e6bb1aea058c11b36511e85e7622 $ sudo docker wait 079b83f558a2bc 0 # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit docker-1.6.2/docs/man/docker-events.1.md0000644000175000017500000000463112524223634017275 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-events - Get real time events from the server # SYNOPSIS **docker events** [**--help**] [**-f**|**--filter**[=*[]*]] [**--since**[=*SINCE*]] [**--until**[=*UNTIL*]] # DESCRIPTION Get event information from the Docker daemon. Information can include historical information and real-time information. Docker containers will report the following events: create, destroy, die, export, kill, pause, restart, start, stop, unpause. Docker images will report: untag, delete. # OPTIONS **--help** Print usage statement **-f**, **--filter**=[] Provide filter values (i.e., 'event=stop') **--since**="" Show all events created since timestamp **--until**="" Stream events until this timestamp # EXAMPLES ## Listening for Docker events After running docker events a container 786d698004576 is started and stopped (the container ID has been shortened in the output below): # docker events 2015-01-28T20:21:31.000000000-08:00 59211849bc10: (from whenry/testimage:latest) start 2015-01-28T20:21:31.000000000-08:00 59211849bc10: (from whenry/testimage:latest) die 2015-01-28T20:21:32.000000000-08:00 59211849bc10: (from whenry/testimage:latest) stop ## Listening for events since a given date Again the output container IDs have been shortened for the purposes of this document: # docker events --since '2015-01-28' 2015-01-28T20:25:38.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) create 2015-01-28T20:25:38.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start 2015-01-28T20:25:39.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) create 2015-01-28T20:25:39.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start 2015-01-28T20:25:40.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) die 2015-01-28T20:25:42.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) stop 2015-01-28T20:25:45.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start 2015-01-28T20:25:45.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) die 2015-01-28T20:25:46.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) stop # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit docker-1.6.2/docs/man/docker-diff.1.md0000644000175000017500000000214112524223634016673 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-diff - Inspect changes on a container's filesystem # SYNOPSIS **docker diff** [**--help**] CONTAINER # DESCRIPTION Inspect changes on a container's filesystem. You can use the full or shortened container ID or the container name set using **docker run --name** option. 
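Each line of **docker diff** output is prefixed with **A** (added), **D** (deleted), or **C** (changed), followed by the affected path, so the output filters cleanly with standard tools; a minimal sketch (the container ID matches the example below):

    # Show only the files added since the container was created
    docker diff 1fdfd1f54c1b | grep '^A'
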
# OPTIONS **--help** Print usage statement # EXAMPLES Inspect the changes on an nginx container: # docker diff 1fdfd1f54c1b C /dev C /dev/console C /dev/core C /dev/stdout C /dev/fd C /dev/ptmx C /dev/stderr C /dev/stdin C /run A /run/nginx.pid C /var/lib/nginx/tmp A /var/lib/nginx/tmp/client_body A /var/lib/nginx/tmp/fastcgi A /var/lib/nginx/tmp/proxy A /var/lib/nginx/tmp/scgi A /var/lib/nginx/tmp/uwsgi C /var/log/nginx A /var/log/nginx/access.log A /var/log/nginx/error.log # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit docker-1.6.2/docs/man/docker-export.1.md0000644000175000017500000000243012524223634017305 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-export - Export the contents of a filesystem as a tar archive to STDOUT # SYNOPSIS **docker export** [**--help**] CONTAINER # DESCRIPTION Export the contents of a container's filesystem using the full or shortened container ID or container name. The output is exported to STDOUT and can be redirected to a tar file. Stream to a file instead of STDOUT by using **-o**. # OPTIONS **--help** Print usage statement **-o**, **--output**="" Write to a file, instead of STDOUT # EXAMPLES Export the contents of the container called angry_bell to a tar file called angry_bell.tar: # docker export angry_bell > angry_bell.tar # docker export --output=angry_bell-latest.tar angry_bell # ls -sh angry_bell.tar 321M angry_bell.tar # ls -sh angry_bell-latest.tar 321M angry_bell-latest.tar # See also **docker-import(1)** to create an empty filesystem image and import the contents of the tarball into it, then optionally tag it. # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit January 2015, updated by Joseph Kern (josephakern at gmail dot com) docker-1.6.2/docs/man/docker-inspect.1.md0000644000175000017500000001672312524223634017443 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-inspect - Return low-level information on a container or image # SYNOPSIS **docker inspect** [**--help**] [**-f**|**--format**[=*FORMAT*]] CONTAINER|IMAGE [CONTAINER|IMAGE...] # DESCRIPTION This displays all the information available in Docker for a given container or image. By default, this will render all results in a JSON array. If a format is specified, the given template will be executed for each result. # OPTIONS **--help** Print usage statement **-f**, **--format**="" Format the output using the given Go template. 
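The **--format** value is a standard Go template executed against each result, so nested fields can be selected with dotted paths; a minimal sketch (the container ID matches the examples below):

    # Print the container's name and whether it is currently running
    docker inspect --format='{{.Name}} running={{.State.Running}}' 1eb5fabf5a03
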
# EXAMPLES ## Getting information on a container To get information on a container use its ID or instance name: # docker inspect 1eb5fabf5a03 [{ "ID": "1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b", "Created": "2014-04-04T21:33:52.02361335Z", "Path": "/usr/sbin/nginx", "Args": [], "Config": { "Hostname": "1eb5fabf5a03", "Domainname": "", "User": "", "Memory": 0, "MemorySwap": 0, "CpuShares": 0, "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "PortSpecs": null, "ExposedPorts": { "80/tcp": {} }, "Tty": true, "OpenStdin": false, "StdinOnce": false, "Env": [ "HOME=/", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": [ "/usr/sbin/nginx" ], "Dns": null, "DnsSearch": null, "Image": "summit/nginx", "Volumes": null, "VolumesFrom": "", "WorkingDir": "", "Entrypoint": null, "NetworkDisabled": false, "OnBuild": null, "Context": { "mount_label": "system_u:object_r:svirt_sandbox_file_t:s0:c0,c650", "process_label": "system_u:system_r:svirt_lxc_net_t:s0:c0,c650" } }, "State": { "Running": true, "Pid": 858, "ExitCode": 0, "StartedAt": "2014-04-04T21:33:54.16259207Z", "FinishedAt": "0001-01-01T00:00:00Z", "Ghost": false }, "Image": "df53773a4390e25936f9fd3739e0c0e60a62d024ea7b669282b27e65ae8458e6", "Labels": { "com.example.vendor": "Acme", "com.example.license": "GPL", "com.example.version": "1.0" }, "NetworkSettings": { "IPAddress": "172.17.0.2", "IPPrefixLen": 16, "Gateway": "172.17.42.1", "Bridge": "docker0", "PortMapping": null, "Ports": { "80/tcp": [ { "HostIp": "0.0.0.0", "HostPort": "80" } ] } }, "ResolvConfPath": "/etc/resolv.conf", "HostnamePath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hostname", "HostsPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hosts", "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", "Name": "/ecstatic_ptolemy", "Driver": "devicemapper", "ExecDriver": "native-0.1", "Volumes": {}, "VolumesRW": {}, "HostConfig": { "Binds": null, "ContainerIDFile": "", "LxcConf": [], "Privileged": false, "PortBindings": { "80/tcp": [ { "HostIp": "0.0.0.0", "HostPort": "80" } ] }, "Links": null, "PublishAllPorts": false, "DriverOptions": { "lxc": null }, "CliAddress": "" } }] ## Getting the IP address of a container instance To get the IP address of a container use: # docker inspect --format='{{.NetworkSettings.IPAddress}}' 1eb5fabf5a03 172.17.0.2 ## Listing all port bindings One can loop over arrays and maps in the results to produce simple text output: # docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} \ {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' 1eb5fabf5a03 80/tcp -> 80 ## Getting information on an image Use an image's ID or name (e.g., repository/name[:tag]) to get information on it. 
# docker inspect 58394af37342 [{ "id": "58394af373423902a1b97f209a31e3777932d9321ef10e64feaaa7b4df609cf9", "parent": "8abc22bad04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db", "created": "2014-02-03T16:10:40.500814677Z", "container": "f718f19a28a5147da49313c54620306243734bafa63c76942ef6f8c4b4113bc5", "container_config": { "Hostname": "88807319f25e", "Domainname": "", "User": "", "Memory": 0, "MemorySwap": 0, "CpuShares": 0, "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "PortSpecs": null, "ExposedPorts": null, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": [ "HOME=/", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": [ "/bin/sh", "-c", "#(nop) ADD fedora-20-dummy.tar.xz in /" ], "Dns": null, "DnsSearch": null, "Image": "8abc22bad04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db", "Volumes": null, "VolumesFrom": "", "WorkingDir": "", "Entrypoint": null, "NetworkDisabled": false, "OnBuild": null, "Context": null }, "docker_version": "0.6.3", "author": "I P Babble \u003clsm5@ipbabble.com\u003e - ./buildcontainers.sh", "config": { "Hostname": "88807319f25e", "Domainname": "", "User": "", "Memory": 0, "MemorySwap": 0, "CpuShares": 0, "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "PortSpecs": null, "ExposedPorts": null, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": [ "HOME=/", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], "Cmd": null, "Dns": null, "DnsSearch": null, "Image": "8abc22bad04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db", "Volumes": null, "VolumesFrom": "", "WorkingDir": "", "Entrypoint": null, "NetworkDisabled": false, "OnBuild": null, "Context": null }, "architecture": "x86_64", "Size": 385520098 }] # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit docker-1.6.2/docs/man/docker-unpause.1.md0000644000175000017500000000116112524223634017444 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-unpause - Unpause all processes within a container # SYNOPSIS **docker unpause** CONTAINER [CONTAINER...] # DESCRIPTION The `docker unpause` command uses the cgroups freezer to un-suspend all processes in a container. See the [cgroups freezer documentation] (https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt) for further details. # OPTIONS There are no available options. # See also **docker-pause(1)** to pause all processes within a container. # HISTORY June 2014, updated by Sven Dowideit docker-1.6.2/docs/man/docker-pause.1.md0000644000175000017500000000154312524223634017105 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-pause - Pause all processes within a container # SYNOPSIS **docker pause** CONTAINER [CONTAINER...] # DESCRIPTION The `docker pause` command uses the cgroups freezer to suspend all processes in a container. Traditionally when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. See the [cgroups freezer documentation] (https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt) for further details. # OPTIONS There are no available options. # See also **docker-unpause(1)** to unpause all processes within a container. 
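A minimal sketch of the suspend/resume cycle described above (the container name "db" is illustrative):

    # Freeze all processes in "db", then resume them
    docker pause db
    docker unpause db
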
# HISTORY June 2014, updated by Sven Dowideit docker-1.6.2/docs/man/docker-history.1.md0000644000175000017500000000213412524223634017466 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-history - Show the history of an image # SYNOPSIS **docker history** [**--help**] [**--no-trunc**[=*false*]] [**-q**|**--quiet**[=*false*]] IMAGE # DESCRIPTION Show the history of when and how an image was created. # OPTIONS **--help** Print usage statement **--no-trunc**=*true*|*false* Don't truncate output. The default is *false*. **-q**, **--quiet**=*true*|*false* Only show numeric IDs. The default is *false*. # EXAMPLES $ sudo docker history fedora IMAGE CREATED CREATED BY SIZE 105182bb5e8b 5 days ago /bin/sh -c #(nop) ADD file:71356d2ad59aa3119d 372.7 MB 73bd853d2ea5 13 days ago /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar 0 B 511136ea3c5a 10 months ago 0 B # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit docker-1.6.2/docs/man/docker-build.1.md0000644000175000017500000001166612524223634017076 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-build - Build a new image from the source code at PATH # SYNOPSIS **docker build** [**--help**] [**-f**|**--file**[=*PATH/Dockerfile*]] [**--force-rm**[=*false*]] [**--no-cache**[=*false*]] [**--pull**[=*false*]] [**-q**|**--quiet**[=*false*]] [**--rm**[=*true*]] [**-t**|**--tag**[=*TAG*]] [**-m**|**--memory**[=*MEMORY*]] [**--memory-swap**[=*MEMORY-SWAP*]] [**-c**|**--cpu-shares**[=*0*]] [**--cpuset-cpus**[=*CPUSET-CPUS*]] PATH | URL | - # DESCRIPTION This will read the Dockerfile from the directory specified in **PATH**. It also sends any other files and directories found in the current directory to the Docker daemon. The contents of this directory would be used by **ADD** commands found within the Dockerfile. Warning, this will send a lot of data to the Docker daemon depending on the contents of the current directory. The build is run by the Docker daemon, not by the CLI, so the whole context must be transferred to the daemon. The Docker CLI reports "Sending build context to Docker daemon" when the context is sent to the daemon. When a single Dockerfile is given as the URL, then no context is set. When a Git repository is set as the **URL**, the repository is used as context. # OPTIONS **-f**, **--file**=*PATH/Dockerfile* Path to the Dockerfile to use. If the path is a relative path then it must be relative to the current directory. The file must be within the build context. The default is *Dockerfile*. **--force-rm**=*true*|*false* Always remove intermediate containers, even after unsuccessful builds. The default is *false*. **--no-cache**=*true*|*false* Do not use cache when building the image. The default is *false*. **--help** Print usage statement **--pull**=*true*|*false* Always attempt to pull a newer version of the image. The default is *false*. **-q**, **--quiet**=*true*|*false* Suppress the verbose output generated by the containers. The default is *false*. **--rm**=*true*|*false* Remove intermediate containers after a successful build. The default is *true*. 
**-t**, **--tag**="" Repository name (and optionally a tag) to be applied to the resulting image in case of success # EXAMPLES ## Building an image using a Dockerfile located inside the current directory Docker images can be built using the build command and a Dockerfile: docker build . During the build process Docker creates intermediate images. In order to keep them, you must explicitly set `--rm=false`. docker build --rm=false . A good practice is to make a sub-directory with a related name and create the Dockerfile in that directory. For example, a directory called mongo may contain a Dockerfile to create a Docker MongoDB image. Likewise, another directory called httpd may be used to store Dockerfiles for Apache web server images. It is also a good practice to add the files required for the image to the sub-directory. These files will then be specified with the `COPY` or `ADD` instructions in the `Dockerfile`. Note: If you include a tar file (a good practice), then Docker will automatically extract the contents of the tar file specified within the `ADD` instruction into the specified target. ## Building an image and naming that image A good practice is to give a name to the image you are building. There are no hard rules here but it is best to give the names consideration. The **-t**/**--tag** flag is used to rename an image. Here are some examples: Though it is not a good practice, image names can be arbitrary: docker build -t myimage . A better approach is to provide a fully qualified and meaningful repository, name, and tag (where the tag in this context means the qualifier after the ":"). In this example we build a JBoss image for the Fedora repository and give it the version 1.0: docker build -t fedora/jboss:1.0 The next example is for the "whenry" user repository and uses Fedora and JBoss and gives it the version 2.1 : docker build -t whenry/fedora-jboss:V2.1 If you do not provide a version tag then Docker will assign `latest`: docker build -t whenry/fedora-jboss When you list the images, the image above will have the tag `latest`. So renaming an image is arbitrary but consideration should be given to a useful convention that makes sense for consumers and should also take into account Docker community conventions. ## Building an image using a URL This will clone the specified Github repository from the URL and use it as context. The Dockerfile at the root of the repository is used as Dockerfile. This only works if the Github repository is a dedicated repository. docker build github.com/scollier/Fedora-Dockerfiles/tree/master/apache Note: You can set an arbitrary Git repository via the `git://` schema. # HISTORY March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit docker-1.6.2/docs/man/docker-attach.1.md0000644000175000017500000000541712524223634017240 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-attach - Attach to a running container # SYNOPSIS **docker attach** [**--help**]/ [**--no-stdin**[=*false*]] [**--sig-proxy**[=*true*]] CONTAINER # DESCRIPTION The **docker attach** command allows you to attach to a running container using the container's ID or name, either to view its ongoing output or to control it interactively. You can attach to the same contained process multiple times simultaneously, screen sharing style, or quickly view the progress of your daemonized process. 
You can detach from the container (and leave it running) with `CTRL-p CTRL-q` (for a quiet exit) or `CTRL-c`, which will send a `SIGKILL` to the container. When you are attached to a container, and exit its main process, the process's exit code will be returned to the client. It is forbidden to redirect the standard input of a `docker attach` command while attaching to a tty-enabled container (i.e.: launched with `-t`). # OPTIONS **--help** Print usage statement **--no-stdin**=*true*|*false* Do not attach STDIN. The default is *false*. **--sig-proxy**=*true*|*false* Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied. The default is *true*. # EXAMPLES ## Attaching to a container In this example the top command is run inside a container, from an image called fedora, in detached mode. The ID from the container is passed into the **docker attach** command: # ID=$(sudo docker run -d fedora /usr/bin/top -b) # sudo docker attach $ID top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st Mem: 373572k total, 355560k used, 18012k free, 27872k buffers Swap: 786428k total, 0k used, 786428k free, 221740k cached PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND 1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st Mem: 373572k total, 355244k used, 18328k free, 27872k buffers Swap: 786428k total, 0k used, 786428k free, 221776k cached PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit docker-1.6.2/docs/man/docker-rename.1.md0000644000175000017500000000031012524223634017230 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % OCTOBER 2014 # NAME docker-rename - Rename a container # SYNOPSIS **docker rename** OLD_NAME NEW_NAME # OPTIONS There are no available options. docker-1.6.2/docs/man/docker-search.1.md0000644000175000017500000000417012524223634017234 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-search - Search the Docker Hub for images # SYNOPSIS **docker search** [**--automated**[=*false*]] [**--help**] [**--no-trunc**[=*false*]] [**-s**|**--stars**[=*0*]] TERM # DESCRIPTION Search Docker Hub for an image that matches the specified `TERM`. The table of images returned displays the name, description (truncated by default), number of stars awarded, whether the image is official, and whether it is automated. *Note* - Search queries will only return up to 25 results # OPTIONS **--automated**=*true*|*false* Only show automated builds. The default is *false*. **--help** Print usage statement **--no-trunc**=*true*|*false* Don't truncate output. The default is *false*. **-s**, **--stars**=0 Only display images with at least x stars # EXAMPLES ## Search Docker Hub for ranked images Search a registry for the term 'fedora' and only display those images ranked 3 or higher: $ sudo docker search -s 3 fedora NAME DESCRIPTION STARS OFFICIAL AUTOMATED mattdm/fedora A basic Fedora image corresponding roughly... 
50 fedora (Semi) Official Fedora base image. 38 mattdm/fedora-small A small Fedora image on which to build. Co... 8 goldmann/wildfly A WildFly application server running on a ... 3 [OK] ## Search Docker Hub for automated images Search Docker Hub for the term 'fedora' and only display automated images ranked 1 or higher: $ sudo docker search -s 1 --automated fedora NAME DESCRIPTION STARS OFFICIAL AUTOMATED goldmann/wildfly A WildFly application server running on a ... 3 [OK] tutum/fedora-20 Fedora 20 image with SSH access. For the r... 1 [OK] # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit April 2015, updated by Mary Anthony for v2 docker-1.6.2/docs/man/docker.1.md0000644000175000017500000002473412524223634015771 0ustar tianontianon% DOCKER(1) Docker User Manuals % William Henry % APRIL 2014 # NAME docker \- Docker image and container command line interface # SYNOPSIS **docker** [OPTIONS] COMMAND [arg...] # DESCRIPTION **docker** has two distinct functions. It is used for starting the Docker daemon and to run the CLI (i.e., to command the daemon to manage images, containers etc.) So **docker** is both a server, as a daemon, and a client to the daemon, through the CLI. To run the Docker daemon you do not specify any of the commands listed below but must specify the **-d** option. The other options listed below are for the daemon only. The Docker CLI has over 30 commands. The commands are listed below and each has its own man page which explains usage and arguments. To see the man page for a command run **man docker-<command>**. # OPTIONS **-h**, **--help** Print usage statement **--api-cors-header**="" Set CORS headers in the remote API. Default is cors disabled. Give urls like "http://foo, http://bar, ...". Give "*" to allow all. **-b**, **--bridge**="" Attach containers to a pre\-existing network bridge; use 'none' to disable container networking **--bip**="" Use the provided CIDR notation address for the dynamically created bridge (docker0); Mutually exclusive of \-b **-D**, **--debug**=*true*|*false* Enable debug mode. Default is false. **-d**, **--daemon**=*true*|*false* Enable daemon mode. Default is false. **--dns**="" Force Docker to use specific DNS servers **-e**, **--exec-driver**="" Force Docker to use a specific exec driver. Default is `native`. **--fixed-cidr**="" IPv4 subnet for fixed IPs (e.g., 10.20.0.0/16); this subnet must be nested in the bridge subnet (which is defined by \-b or \-\-bip) **--fixed-cidr-v6**="" IPv6 subnet for global IPv6 addresses (e.g., 2a00:1450::/64) **-G**, **--group**="" Group to assign the unix socket specified by -H when running in daemon mode. Use '' (the empty string) to disable setting of a group. Default is `docker`. **-g**, **--graph**="" Path to use as the root of the Docker runtime. Default is `/var/lib/docker`. **-H**, **--host**=[unix:///var/run/docker.sock]: tcp://[host:port] to bind or unix://[/path/to/socket] to use. The socket(s) to bind to in daemon mode specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. **--icc**=*true*|*false* Allow unrestricted inter\-container and Docker daemon host communication. If disabled, containers can still be linked together using **--link** option (see **docker-run(1)**). Default is true. **--ip**="" Default IP address to use when binding container ports. Default is `0.0.0.0`. **--ip-forward**=*true*|*false* Docker will enable IP forwarding. 
Default is true. If `--fixed-cidr-v6` is set, IPv6 forwarding will be activated, too. This may reject Router Advertisements and interfere with the host's existing IPv6 configuration. For more information please consult the documentation about "Advanced Networking - IPv6". **--ip-masq**=*true*|*false* Enable IP masquerading for bridge's IP range. Default is true. **--iptables**=*true*|*false* Enable Docker's addition of iptables rules. Default is true. **--ipv6**=*true*|*false* Enable IPv6 support. Default is false. Docker will create an IPv6-enabled bridge with address fe80::1 which will allow you to create IPv6-enabled containers. Use together with `--fixed-cidr-v6` to provide globally routable IPv6 addresses. IPv6 forwarding will be enabled if not used with `--ip-forward=false`. This may collide with your host's current IPv6 settings. For more information please consult the documentation about "Advanced Networking - IPv6". **-l**, **--log-level**="*debug*|*info*|*warn*|*error*|*fatal*" Set the logging level. Default is `info`. **--label**="[]" Set key=value labels to the daemon (displayed in `docker info`) **--log-driver**="*json-file*|*syslog*|*none*" Container's logging driver. Default is `default`. **Warning**: the `docker logs` command works only for the `json-file` logging driver. **--mtu**=VALUE Set the containers network mtu. Default is `0`. **-p**, **--pidfile**="" Path to use for daemon PID file. Default is `/var/run/docker.pid` **--registry-mirror**=<scheme>://<host> Prepend a registry mirror to be used for image pulls. May be specified multiple times. **-s**, **--storage-driver**="" Force the Docker runtime to use a specific storage driver. **--storage-opt**=[] Set storage driver options. See STORAGE DRIVER OPTIONS. **--tls**=*true*|*false* Use TLS; implied by --tlsverify. Default is false. **--tlsverify**=*true*|*false* Use TLS and verify the remote (daemon: verify client, client: verify daemon). Default is false. **-v**, **--version**=*true*|*false* Print version information and quit. Default is false. **--selinux-enabled**=*true*|*false* Enable SELinux support. Default is false. SELinux does not presently support the BTRFS storage driver. 
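As an illustration of how several of the daemon flags above combine on one command line, a minimal sketch (the socket address and driver choice are illustrative, not recommendations):

    # Run the daemon with debug logging, an extra TCP socket, and the vfs storage driver
    docker -d -D -H unix:///var/run/docker.sock -H tcp://127.0.0.1:2375 -s vfs
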
# COMMANDS **docker-attach(1)** Attach to a running container **docker-build(1)** Build an image from a Dockerfile **docker-commit(1)** Create a new image from a container's changes **docker-cp(1)** Copy files/folders from a container's filesystem to the host **docker-create(1)** Create a new container **docker-diff(1)** Inspect changes on a container's filesystem **docker-events(1)** Get real time events from the server **docker-exec(1)** Run a command in a running container **docker-export(1)** Stream the contents of a container as a tar archive **docker-history(1)** Show the history of an image **docker-images(1)** List images **docker-import(1)** Create a new filesystem image from the contents of a tarball **docker-info(1)** Display system-wide information **docker-inspect(1)** Return low-level information on a container or image **docker-kill(1)** Kill a running container (which includes the wrapper process and everything inside it) **docker-load(1)** Load an image from a tar archive **docker-login(1)** Register or login to a Docker Registry Service **docker-logout(1)** Log the user out of a Docker Registry Service **docker-logs(1)** Fetch the logs of a container **docker-pause(1)** Pause all processes within a container **docker-port(1)** Lookup the public-facing port which is NAT-ed to PRIVATE_PORT **docker-ps(1)** List containers **docker-pull(1)** Pull an image or a repository from a Docker Registry Service **docker-push(1)** Push an image or a repository to a Docker Registry Service **docker-restart(1)** Restart a running container **docker-rm(1)** Remove one or more containers **docker-rmi(1)** Remove one or more images **docker-run(1)** Run a command in a new container **docker-save(1)** Save an image to a tar archive **docker-search(1)** Search for an image in the Docker index **docker-start(1)** Start a stopped container **docker-stats(1)** Display a live stream of one or more containers' resource usage statistics **docker-stop(1)** Stop a running container **docker-tag(1)** Tag an image into a repository **docker-top(1)** Lookup the running processes of a container **docker-unpause(1)** Unpause all processes within a container **docker-version(1)** Show the Docker version information **docker-wait(1)** Block until a container stops, then print its exit code # STORAGE DRIVER OPTIONS Options to the storage backend can be specified with **--storage-opt** flags. The only backend which currently takes options is *devicemapper*. Therefore use these flags with **-s=**devicemapper. Here is the list of *devicemapper* options: #### dm.basesize Specifies the size to use when creating the base device, which limits the size of images and containers. The default value is 10G. Note, thin devices are inherently "sparse", so a 10G device which is mostly empty doesn't use 10 GB of space on the pool. However, the larger the device, the more space the filesystem will use for the empty case. **Warning**: This value affects the system-wide "base" empty filesystem that may already be initialized and inherited by pulled images. #### dm.loopdatasize Specifies the size to use when creating the loopback file for the "data" device which is used for the thin pool. The default size is 100G. Note that the file is sparse, so it will not initially take up this much space. #### dm.loopmetadatasize Specifies the size to use when creating the loopback file for the "metadata" device which is used for the thin pool. The default size is 2G. 
Note that the file is sparse, so it will not initially take up this much space. #### dm.fs Specifies the filesystem type to use for the base device. The supported options are "ext4" and "xfs". The default is "ext4". #### dm.mkfsarg Specifies extra mkfs arguments to be used when creating the base device. #### dm.mountopt Specifies extra mount options used when mounting the thin devices. #### dm.datadev Specifies a custom blockdevice to use for data for the thin pool. If using a block device for device mapper storage, ideally both datadev and metadatadev should be specified to completely avoid using the loopback device. #### dm.metadatadev Specifies a custom blockdevice to use for metadata for the thin pool. For best performance the metadata should be on a different spindle than the data, or even better on an SSD. If setting up a new metadata pool it is required to be valid. This can be achieved by zeroing the first 4k to indicate empty metadata, like this: dd if=/dev/zero of=/dev/metadata_dev bs=4096 count=1 #### dm.blocksize Specifies a custom blocksize to use for the thin pool. The default blocksize is 64K. #### dm.blkdiscard Enables or disables the use of blkdiscard when removing devicemapper devices. This is enabled by default (only) if using loopback devices and is required to resparsify the loopback file on image/container removal. Disabling this on loopback can lead to *much* faster container removal times, but will prevent the space used in `/var/lib/docker` directory from being returned to the system for other use when containers are removed. # EXAMPLES Launching docker daemon with *devicemapper* backend with particular block devices for data and metadata: docker -d -s=devicemapper \ --storage-opt dm.datadev=/dev/vdb \ --storage-opt dm.metadatadev=/dev/vdc \ --storage-opt dm.basesize=20G #### Client For specific client examples please see the man page for the specific Docker command. For example: man docker-run # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. docker-1.6.2/docs/man/docker-stop.1.md0000644000175000017500000000142212524223634016751 0ustar tianontianon% DOCKER(1) Docker User Manuals % Docker Community % JUNE 2014 # NAME docker-stop - Stop a running container by sending SIGTERM and then SIGKILL after a grace period # SYNOPSIS **docker stop** [**--help**] [**-t**|**--time**[=*10*]] CONTAINER [CONTAINER...] # DESCRIPTION Stop a running container (send SIGTERM, and then SIGKILL after a grace period) # OPTIONS **--help** Print usage statement **-t**, **--time**=10 Number of seconds to wait for the container to stop before killing it. Default is 10 seconds. # See also **docker-start(1)** to restart a stopped container. # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. June 2014, updated by Sven Dowideit docker-1.6.2/docs/mkdocs.yml0000644000175000017500000002575412524223634015304 0ustar tianontianonsite_name: Docker Documentation #site_url: http://docs.docker.com/ site_url: / site_description: Documentation for fast and lightweight Docker container based virtualization framework. site_favicon: img/favicon.png dev_addr: '0.0.0.0:8000' repo_url: https://github.com/docker/docker/ docs_dir: sources include_search: true use_absolute_urls: true # theme: docker theme_dir: ./theme/mkdocs/ theme_center_lead: false copyright: Copyright © 2014-2015, Docker, Inc. 
google_analytics: ['UA-6096819-11', 'docker.io'] pages: # Introduction: - ['index.md', 'About', 'Docker'] - ['release-notes.md', 'About', 'Release Notes'] - ['introduction/index.md', '**HIDDEN**'] - ['introduction/understanding-docker.md', 'About', 'Understanding Docker'] # Installation: - ['installation/index.md', '**HIDDEN**'] - ['installation/ubuntulinux.md', 'Installation', 'Ubuntu'] - ['installation/mac.md', 'Installation', 'Mac OS X'] - ['installation/windows.md', 'Installation', 'Microsoft Windows'] - ['installation/amazon.md', 'Installation', 'Amazon EC2'] - ['installation/archlinux.md', 'Installation', 'Arch Linux'] - ['installation/binaries.md', 'Installation', 'Binaries'] - ['installation/centos.md', 'Installation', 'CentOS'] - ['installation/cruxlinux.md', 'Installation', 'CRUX Linux'] - ['installation/debian.md', 'Installation', 'Debian'] - ['installation/fedora.md', 'Installation', 'Fedora'] - ['installation/frugalware.md', 'Installation', 'FrugalWare'] - ['installation/google.md', 'Installation', 'Google Cloud Platform'] - ['installation/gentoolinux.md', 'Installation', 'Gentoo'] - ['installation/softlayer.md', 'Installation', 'IBM Softlayer'] - ['installation/joyent.md', 'Installation', 'Joyent Compute Service'] - ['installation/azure.md', 'Installation', 'Microsoft Azure'] - ['installation/rackspace.md', 'Installation', 'Rackspace Cloud'] - ['installation/rhel.md', 'Installation', 'Red Hat Enterprise Linux'] - ['installation/oracle.md', 'Installation', 'Oracle Linux'] - ['installation/SUSE.md', 'Installation', 'SUSE'] - ['compose/install.md', 'Installation', 'Docker Compose'] # User Guide: - ['userguide/index.md', 'User Guide', 'The Docker User Guide' ] - ['userguide/dockerhub.md', 'User Guide', 'Getting Started with Docker Hub' ] - ['userguide/dockerizing.md', 'User Guide', 'Dockerizing Applications' ] - ['userguide/usingdocker.md', 'User Guide', 'Working with Containers' ] - ['userguide/dockerimages.md', 'User Guide', 'Working with Docker Images' ] - ['userguide/dockerlinks.md', 'User Guide', 'Linking containers together' ] - ['userguide/dockervolumes.md', 'User Guide', 'Managing data in containers' ] - ['userguide/labels-custom-metadata.md', 'User Guide', 'Apply custom metadata' ] - ['userguide/dockerrepos.md', 'User Guide', 'Working with Docker Hub' ] - ['userguide/level1.md', '**HIDDEN**' ] - ['userguide/level2.md', '**HIDDEN**' ] - ['compose/index.md', 'User Guide', 'Docker Compose' ] - ['machine/index.md', 'User Guide', 'Docker Machine' ] - ['swarm/index.md', 'User Guide', 'Docker Swarm' ] # Docker Hub docs: - ['docker-hub/index.md', 'Docker Hub', 'Docker Hub' ] - ['docker-hub/accounts.md', 'Docker Hub', 'Accounts'] - ['docker-hub/repos.md', 'Docker Hub', 'Repositories'] - ['docker-hub/builds.md', 'Docker Hub', 'Automated Builds'] - ['docker-hub/official_repos.md', 'Docker Hub', 'Official Repo Guidelines'] # Docker Hub Enterprise #- ['docker-hub-enterprise/index.md', '**HIDDEN**' ] #- ['docker-hub-enterprise/install-config.md', 'Docker Hub Enterprise', 'Installation and Configuration' ] #- ['docker-hub-enterprise/usage.md', 'Docker Hub Enterprise', 'User Guide' ] # Examples: - ['examples/index.md', '**HIDDEN**'] - ['examples/nodejs_web_app.md', 'Examples', 'Dockerizing a Node.js web application'] - ['examples/mongodb.md', 'Examples', 'Dockerizing MongoDB'] - ['examples/running_redis_service.md', 'Examples', 'Dockerizing a Redis service'] - ['examples/postgresql_service.md', 'Examples', 'Dockerizing a PostgreSQL service'] - ['examples/running_riak_service.md', 
'Examples', 'Dockerizing a Riak service'] - ['examples/running_ssh_service.md', 'Examples', 'Dockerizing an SSH service'] - ['examples/couchdb_data_volumes.md', 'Examples', 'Dockerizing a CouchDB service'] - ['examples/apt-cacher-ng.md', 'Examples', 'Dockerizing an Apt-Cacher-ng service'] - ['compose/django.md', 'Examples', 'Getting started with Compose and Django'] - ['compose/rails.md', 'Examples', 'Getting started with Compose and Rails'] - ['compose/wordpress.md', 'Examples', 'Getting started with Compose and Wordpress'] # Articles - ['articles/index.md', '**HIDDEN**'] - ['articles/basics.md', 'Articles', 'Docker basics'] - ['articles/networking.md', 'Articles', 'Advanced networking'] - ['articles/security.md', 'Articles', 'Security'] - ['articles/https.md', 'Articles', 'Running Docker with HTTPS'] - ['articles/registry_mirror.md', 'Articles', 'Run a local registry mirror'] - ['articles/host_integration.md', 'Articles', 'Automatically starting containers'] - ['articles/baseimages.md', 'Articles', 'Creating a base image'] - ['articles/dockerfile_best-practices.md', 'Articles', 'Best practices for writing Dockerfiles'] - ['articles/certificates.md', 'Articles', 'Using certificates for repository client verification'] - ['articles/using_supervisord.md', 'Articles', 'Using Supervisor'] - ['articles/cfengine_process_management.md', 'Articles', 'Process management with CFEngine'] - ['articles/puppet.md', 'Articles', 'Using Puppet'] - ['articles/chef.md', 'Articles', 'Using Chef'] - ['articles/dsc.md', 'Articles', 'Using PowerShell DSC'] - ['articles/ambassador_pattern_linking.md', 'Articles', 'Cross-Host linking using ambassador containers'] - ['articles/runmetrics.md', 'Articles', 'Runtime metrics'] - ['articles/b2d_volume_resize.md', 'Articles', 'Increasing a Boot2Docker volume'] - ['articles/systemd.md', 'Articles', 'Controlling and configuring Docker using Systemd'] # Reference - ['reference/index.md', '**HIDDEN**'] - ['reference/commandline/index.md', '**HIDDEN**'] - ['reference/commandline/cli.md', 'Reference', 'Docker command line'] - ['reference/builder.md', 'Reference', 'Dockerfile'] - ['faq.md', 'Reference', 'FAQ'] - ['reference/run.md', 'Reference', 'Run Reference'] - ['compose/cli.md', 'Reference', 'Compose command line'] - ['compose/yml.md', 'Reference', 'Compose yml'] - ['compose/env.md', 'Reference', 'Compose ENV variables'] - ['compose/completion.md', 'Reference', 'Compose commandline completion'] - ['swarm/discovery.md', 'Reference', 'Swarm discovery'] - ['swarm/scheduler/strategy.md', 'Reference', 'Swarm strategies'] - ['swarm/scheduler/filter.md', 'Reference', 'Swarm filters'] - ['swarm/API.md', 'Reference', 'Swarm API'] - ['reference/api/index.md', '**HIDDEN**'] - ['registry/overview.md', 'Reference', 'Docker Registry 2.0'] - ['registry/deploying.md', 'Reference', '    ▪  Deploy a registry' ] - ['registry/configuration.md', 'Reference', '    ▪  Configure a registry' ] - ['registry/storagedrivers.md', 'Reference', '    ▪  Storage driver model' ] - ['registry/notifications.md', 'Reference', '    ▪  Work with notifications' ] - ['registry/spec/api.md', 'Reference', '    ▪  Registry Service API v2' ] - ['registry/spec/json.md', 'Reference', '    ▪  JSON format' ] - ['registry/spec/auth/token.md', 'Reference', '    ▪  Authenticate via central service' ] - ['reference/api/hub_registry_spec.md', 'Reference', 'Docker Hub and Registry 1.0'] - ['reference/api/registry_api.md', 'Reference', '    ▪ Docker Registry API v1'] - ['reference/api/registry_api_client_libraries.md', 
'Reference', '    ▪ Docker Registry 1.0 API Client Libraries'] #- ['reference/image-spec-v1.md', 'Reference', 'Docker Image Specification v1.0.0'] - ['reference/api/docker-io_api.md', 'Reference', 'Docker Hub API'] #- ['reference/image-spec-v1.md', 'Reference', 'Docker Image Specification v1.0.0'] - ['reference/api/docker_remote_api.md', 'Reference', 'Docker Remote API'] - ['reference/api/docker_remote_api_v1.18.md', 'Reference', 'Docker Remote API v1.18'] - ['reference/api/docker_remote_api_v1.17.md', 'Reference', 'Docker Remote API v1.17'] - ['reference/api/docker_remote_api_v1.16.md', 'Reference', 'Docker Remote API v1.16'] - ['reference/api/docker_remote_api_v1.15.md', '**HIDDEN**'] - ['reference/api/docker_remote_api_v1.14.md', '**HIDDEN**'] - ['reference/api/docker_remote_api_v1.13.md', '**HIDDEN**'] - ['reference/api/docker_remote_api_v1.12.md', '**HIDDEN**'] - ['reference/api/docker_remote_api_v1.11.md', '**HIDDEN**'] - ['reference/api/docker_remote_api_v1.10.md', '**HIDDEN**'] - ['reference/api/docker_remote_api_v1.9.md', '**HIDDEN**'] - ['reference/api/docker_remote_api_v1.8.md', '**HIDDEN**'] - ['reference/api/docker_remote_api_v1.7.md', '**HIDDEN**'] - ['reference/api/docker_remote_api_v1.6.md', '**HIDDEN**'] - ['reference/api/docker_remote_api_v1.5.md', '**HIDDEN**'] - ['reference/api/docker_remote_api_v1.4.md', '**HIDDEN**'] - ['reference/api/docker_remote_api_v1.3.md', '**HIDDEN**'] - ['reference/api/docker_remote_api_v1.2.md', '**HIDDEN**'] - ['reference/api/docker_remote_api_v1.1.md', '**HIDDEN**'] - ['reference/api/docker_remote_api_v1.0.md', '**HIDDEN**'] - ['reference/api/remote_api_client_libraries.md', 'Reference', 'Docker Remote API Client Libraries'] - ['reference/api/docker_io_accounts_api.md', 'Reference', 'Docker Hub Accounts API'] - ['jsearch.md', '**HIDDEN**'] # - ['static_files/README.md', 'static_files', 'README'] - ['terms/index.md', '**HIDDEN**'] - ['terms/layer.md', '**HIDDEN**'] - ['terms/index.md', '**HIDDEN**'] - ['terms/registry.md', '**HIDDEN**'] - ['terms/container.md', '**HIDDEN**'] - ['terms/repository.md', '**HIDDEN**'] - ['terms/filesystem.md', '**HIDDEN**'] - ['terms/image.md', '**HIDDEN**'] # Project: - ['project/index.md', '**HIDDEN**'] - ['project/who-written-for.md', 'Contributor Guide', 'README first'] - ['project/software-required.md', 'Contributor Guide', 'Get required software'] - ['project/set-up-git.md', 'Contributor Guide', 'Configure Git for contributing'] - ['project/set-up-dev-env.md', 'Contributor Guide', 'Work with a development container'] - ['project/test-and-docs.md', 'Contributor Guide', 'Run tests and test documentation'] - ['project/make-a-contribution.md', 'Contributor Guide', 'Understand contribution workflow'] - ['project/find-an-issue.md', 'Contributor Guide', 'Find an issue'] - ['project/work-issue.md', 'Contributor Guide', 'Work on an issue'] - ['project/create-pr.md', 'Contributor Guide', 'Create a pull request'] - ['project/review-pr.md', 'Contributor Guide', 'Participate in the PR review'] - ['project/advanced-contributing.md', 'Contributor Guide', 'Advanced contributing'] - ['project/get-help.md', 'Contributor Guide', 'Where to get help'] - ['project/coding-style.md', 'Contributor Guide', 'Coding style guide'] - ['project/doc-style.md', 'Contributor Guide', 'Documentation style guide'] docker-1.6.2/docs/s3_website.json0000644000175000017500000001273012524223634016231 0ustar tianontianon{ "ErrorDocument": { "Key": "jsearch/index.html" }, "IndexDocument": { "Suffix": "index.html" }, "RoutingRules": [ { "Condition": 
{ "KeyPrefixEquals": "en/latest/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "" } }, { "Condition": { "KeyPrefixEquals": "en/master/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "" } }, { "Condition": { "KeyPrefixEquals": "en/v0.6.3/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "" } }, { "Condition": { "KeyPrefixEquals": "jsearch/index.html" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "jsearch/" } }, { "Condition": { "KeyPrefixEquals": "index/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "docker-io/" } }, { "Condition": { "KeyPrefixEquals": "reference/api/index_api/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "reference/api/docker-io_api/" } }, { "Condition": { "KeyPrefixEquals": "docker-hub/groups.png" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "docker-hub/hub-images/groups.png" } }, { "Condition": { "KeyPrefixEquals": "docker-hub/hub.png" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "docker-hub/hub-images/hub.png" } }, { "Condition": { "KeyPrefixEquals": "docker-hub/invite.png" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "docker-hub/hub-images/invite.png" } }, { "Condition": { "KeyPrefixEquals": "docker-hub/orgs.png" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "docker-hub/hub-images/orgs.png" } }, { "Condition": { "KeyPrefixEquals": "docker-hub/repos.png" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "docker-hub/hub-images/repos.png" } }, { "Condition": { "KeyPrefixEquals": "examples/hello_world/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerizing/" } }, { "Condition": { "KeyPrefixEquals": "examples/python_web_app/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerizing/" } }, { "Condition": { "KeyPrefixEquals": "use/working_with_volumes/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockervolumes/" } }, { "Condition": { "KeyPrefixEquals": "use/working_with_links_names/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerlinks/" } }, { "Condition": { "KeyPrefixEquals": "use/workingwithrepository/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerrepos/" } }, { "Condition": { "KeyPrefixEquals": "use/port_redirection" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "userguide/dockerlinks/" } }, { "Condition": { "KeyPrefixEquals": "use/networking/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/networking/" } }, { "Condition": { "KeyPrefixEquals": "use/puppet/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/puppet/" } }, { "Condition": { "KeyPrefixEquals": "use/ambassador_pattern_linking/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/ambassador_pattern_linking/" } }, { "Condition": { "KeyPrefixEquals": "use/basics/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/basics/" } }, { "Condition": { "KeyPrefixEquals": "use/chef/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/chef/" } }, { "Condition": { "KeyPrefixEquals": "use/host_integration/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/host_integration/" } }, { "Condition": { "KeyPrefixEquals": "docker-io/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "docker-hub/" } }, { 
"Condition": { "KeyPrefixEquals": "examples/cfengine_process_management/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/cfengine_process_management/" } }, { "Condition": { "KeyPrefixEquals": "examples/https/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/https/" } }, { "Condition": { "KeyPrefixEquals": "examples/ambassador_pattern_linking/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/ambassador_pattern_linking/" } }, { "Condition": { "KeyPrefixEquals": "examples/using_supervisord/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/using_supervisord/" } }, { "Condition": { "KeyPrefixEquals": "reference/api/registry_index_spec/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "reference/api/hub_registry_spec/" } }, { "Condition": { "KeyPrefixEquals": "use/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "examples/" } }, { "Condition": { "KeyPrefixEquals": "installation/openSUSE/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "installation/SUSE/" } }, { "Condition": { "KeyPrefixEquals": "contributing/contributing/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "project/who-written-for/" } }, { "Condition": { "KeyPrefixEquals": "contributing/devenvironment/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "project/set-up-prereqs/" } }, { "Condition": { "KeyPrefixEquals": "contributing/docs_style-guide/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "project/doc-style/" } }, { "Condition": { "KeyPrefixEquals": "registry/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "registry/overview/" } } ] } docker-1.6.2/opts/0000755000175000017500000000000012524223634013321 5ustar tianontianondocker-1.6.2/opts/envfile.go0000644000175000017500000000245112524223634015302 0ustar tianontianonpackage opts import ( "bufio" "fmt" "os" "strings" ) /* Read in a line delimited file with environment variables enumerated */ func ParseEnvFile(filename string) ([]string, error) { fh, err := os.Open(filename) if err != nil { return []string{}, err } defer fh.Close() lines := []string{} scanner := bufio.NewScanner(fh) for scanner.Scan() { line := scanner.Text() // line is not empty, and not starting with '#' if len(line) > 0 && !strings.HasPrefix(line, "#") { if strings.Contains(line, "=") { data := strings.SplitN(line, "=", 2) // trim the front of a variable, but nothing else variable := strings.TrimLeft(data[0], whiteSpaces) if strings.ContainsAny(variable, whiteSpaces) { return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)} } // pass the value through, no trimming lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1])) } else { // if only a pass-through variable is given, clean it up. 
lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line))) } } } return lines, nil } var whiteSpaces = " \t" type ErrBadEnvVariable struct { msg string } func (e ErrBadEnvVariable) Error() string { return fmt.Sprintf("poorly formatted environment: %s", e.msg) } docker-1.6.2/opts/opts_test.go0000644000175000017500000000747212524223634015706 0ustar tianontianonpackage opts import ( "strings" "testing" ) func TestValidateIPAddress(t *testing.T) { if ret, err := ValidateIPAddress(`1.2.3.4`); err != nil || ret == "" { t.Fatalf("ValidateIPAddress(`1.2.3.4`) got %s %s", ret, err) } if ret, err := ValidateIPAddress(`127.0.0.1`); err != nil || ret == "" { t.Fatalf("ValidateIPAddress(`127.0.0.1`) got %s %s", ret, err) } if ret, err := ValidateIPAddress(`::1`); err != nil || ret == "" { t.Fatalf("ValidateIPAddress(`::1`) got %s %s", ret, err) } if ret, err := ValidateIPAddress(`127`); err == nil || ret != "" { t.Fatalf("ValidateIPAddress(`127`) got %s %s", ret, err) } if ret, err := ValidateIPAddress(`random invalid string`); err == nil || ret != "" { t.Fatalf("ValidateIPAddress(`random invalid string`) got %s %s", ret, err) } } func TestValidateMACAddress(t *testing.T) { if _, err := ValidateMACAddress(`92:d0:c6:0a:29:33`); err != nil { t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:29:33`) got %s", err) } if _, err := ValidateMACAddress(`92:d0:c6:0a:33`); err == nil { t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:33`) succeeded; expected failure on invalid MAC") } if _, err := ValidateMACAddress(`random invalid string`); err == nil { t.Fatalf("ValidateMACAddress(`random invalid string`) succeeded; expected failure on invalid MAC") } } func TestListOpts(t *testing.T) { o := NewListOpts(nil) o.Set("foo") if o.String() != "[foo]" { t.Errorf("%s != [foo]", o.String()) } o.Set("bar") if o.Len() != 2 { t.Errorf("%d != 2", o.Len()) } if !o.Get("bar") { t.Error("o.Get(\"bar\") == false") } if o.Get("baz") { t.Error("o.Get(\"baz\") == true") } o.Delete("foo") if o.String() != "[bar]" { t.Errorf("%s != [bar]", o.String()) } } func TestValidateDnsSearch(t *testing.T) { valid := []string{ `.`, `a`, `a.`, `1.foo`, `17.foo`, `foo.bar`, `foo.bar.baz`, `foo.bar.`, `foo.bar.baz`, `foo1.bar2`, `foo1.bar2.baz`, `1foo.2bar.`, `1foo.2bar.baz`, `foo-1.bar-2`, `foo-1.bar-2.baz`, `foo-1.bar-2.`, `foo-1.bar-2.baz`, `1-foo.2-bar`, `1-foo.2-bar.baz`, `1-foo.2-bar.`, `1-foo.2-bar.baz`, } invalid := []string{ ``, ` `, ` `, `17`, `17.`, `.17`, `17-.`, `17-.foo`, `.foo`, `foo-.bar`, `-foo.bar`, `foo.bar-`, `foo.bar-.baz`, `foo.-bar`, `foo.-bar.baz`, `foo.bar.baz.this.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbe`, } for _, domain := range valid { if ret, err := ValidateDnsSearch(domain); err != nil || ret == "" { t.Fatalf("ValidateDnsSearch(`"+domain+"`) got %s %s", ret, err) } } for _, domain := range invalid { if ret, err := ValidateDnsSearch(domain); err == nil || ret != "" { t.Fatalf("ValidateDnsSearch(`"+domain+"`) got %s %s", ret, err) } } } func TestValidateExtraHosts(t *testing.T) { valid := []string{ `myhost:192.168.0.1`, `thathost:10.0.2.1`, `anipv6host:2003:ab34:e::1`, `ipv6local:::1`, } invalid := map[string]string{ `myhost:192.notanipaddress.1`: `invalid IP`, `thathost-nosemicolon10.0.0.1`: `bad format`, `anipv6host:::::1`: `invalid IP`, `ipv6local:::0::`: `invalid IP`, } for _, extrahost := 
range valid { if _, err := ValidateExtraHost(extrahost); err != nil { t.Fatalf("ValidateExtraHost(`"+extrahost+"`) should succeed: error %v", err) } } for extraHost, expectedError := range invalid { if _, err := ValidateExtraHost(extraHost); err == nil { t.Fatalf("ValidateExtraHost(`%q`) should have failed validation", extraHost) } else { if !strings.Contains(err.Error(), expectedError) { t.Fatalf("ValidateExtraHost(`%q`) error should contain %q", extraHost, expectedError) } } } } docker-1.6.2/opts/opts.go0000644000175000017500000001347212524223634014644 0ustar tianontianonpackage opts import ( "fmt" "net" "os" "path" "regexp" "strings" "github.com/docker/docker/api" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/ulimit" "github.com/docker/docker/utils" ) var ( alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) ) func ListVar(values *[]string, names []string, usage string) { flag.Var(newListOptsRef(values, nil), names, usage) } func HostListVar(values *[]string, names []string, usage string) { flag.Var(newListOptsRef(values, api.ValidateHost), names, usage) } func IPListVar(values *[]string, names []string, usage string) { flag.Var(newListOptsRef(values, ValidateIPAddress), names, usage) } func DnsSearchListVar(values *[]string, names []string, usage string) { flag.Var(newListOptsRef(values, ValidateDnsSearch), names, usage) } func IPVar(value *net.IP, names []string, defaultValue, usage string) { flag.Var(NewIpOpt(value, defaultValue), names, usage) } func LabelListVar(values *[]string, names []string, usage string) { flag.Var(newListOptsRef(values, ValidateLabel), names, usage) } func UlimitMapVar(values map[string]*ulimit.Ulimit, names []string, usage string) { flag.Var(NewUlimitOpt(values), names, usage) } // ListOpts type type ListOpts struct { values *[]string validator ValidatorFctType } func NewListOpts(validator ValidatorFctType) ListOpts { var values []string return *newListOptsRef(&values, validator) } func newListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { return &ListOpts{ values: values, validator: validator, } } func (opts *ListOpts) String() string { return fmt.Sprintf("%v", []string((*opts.values))) } // Set validates if needed the input value and add it to the // internal slice. func (opts *ListOpts) Set(value string) error { if opts.validator != nil { v, err := opts.validator(value) if err != nil { return err } value = v } (*opts.values) = append((*opts.values), value) return nil } // Delete remove the given element from the slice. func (opts *ListOpts) Delete(key string) { for i, k := range *opts.values { if k == key { (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...) return } } } // GetMap returns the content of values in a map in order to avoid // duplicates. // FIXME: can we remove this? func (opts *ListOpts) GetMap() map[string]struct{} { ret := make(map[string]struct{}) for _, k := range *opts.values { ret[k] = struct{}{} } return ret } // GetAll returns the values' slice. // FIXME: Can we remove this? func (opts *ListOpts) GetAll() []string { return (*opts.values) } // Get checks the existence of the given key. func (opts *ListOpts) Get(key string) bool { for _, k := range *opts.values { if k == key { return true } } return false } // Len returns the amount of element in the slice. 
func (opts *ListOpts) Len() int { return len((*opts.values)) } // Validators type ValidatorFctType func(val string) (string, error) type ValidatorFctListType func(val string) ([]string, error) func ValidateAttach(val string) (string, error) { s := strings.ToLower(val) for _, str := range []string{"stdin", "stdout", "stderr"} { if s == str { return s, nil } } return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR.") } func ValidateLink(val string) (string, error) { if _, err := parsers.PartParser("name:alias", val); err != nil { return val, err } return val, nil } func ValidatePath(val string) (string, error) { var containerPath string if strings.Count(val, ":") > 2 { return val, fmt.Errorf("bad format for volumes: %s", val) } splited := strings.SplitN(val, ":", 2) if len(splited) == 1 { containerPath = splited[0] val = path.Clean(splited[0]) } else { containerPath = splited[1] val = fmt.Sprintf("%s:%s", splited[0], path.Clean(splited[1])) } if !path.IsAbs(containerPath) { return val, fmt.Errorf("%s is not an absolute path", containerPath) } return val, nil } func ValidateEnv(val string) (string, error) { arr := strings.Split(val, "=") if len(arr) > 1 { return val, nil } if !utils.DoesEnvExist(val) { return val, nil } return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil } func ValidateIPAddress(val string) (string, error) { var ip = net.ParseIP(strings.TrimSpace(val)) if ip != nil { return ip.String(), nil } return "", fmt.Errorf("%s is not an ip address", val) } func ValidateMACAddress(val string) (string, error) { _, err := net.ParseMAC(strings.TrimSpace(val)) if err != nil { return "", err } else { return val, nil } } // Validates domain for resolvconf search configuration. // A zero length domain is represented by . func ValidateDnsSearch(val string) (string, error) { if val = strings.Trim(val, " "); val == "." 
{ return val, nil } return validateDomain(val) } func validateDomain(val string) (string, error) { if alphaRegexp.FindString(val) == "" { return "", fmt.Errorf("%s is not a valid domain", val) } ns := domainRegexp.FindSubmatch([]byte(val)) if len(ns) > 0 && len(ns[1]) < 255 { return string(ns[1]), nil } return "", fmt.Errorf("%s is not a valid domain", val) } func ValidateExtraHost(val string) (string, error) { // allow for IPv6 addresses in extra hosts by only splitting on first ":" arr := strings.SplitN(val, ":", 2) if len(arr) != 2 || len(arr[0]) == 0 { return "", fmt.Errorf("bad format for add-host: %q", val) } if _, err := ValidateIPAddress(arr[1]); err != nil { return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) } return val, nil } func ValidateLabel(val string) (string, error) { if strings.Count(val, "=") != 1 { return "", fmt.Errorf("bad attribute format: %s", val) } return val, nil } docker-1.6.2/opts/ip.go0000644000175000017500000000065212524223634014263 0ustar tianontianonpackage opts import ( "fmt" "net" ) type IpOpt struct { *net.IP } func NewIpOpt(ref *net.IP, defaultVal string) *IpOpt { o := &IpOpt{ IP: ref, } o.Set(defaultVal) return o } func (o *IpOpt) Set(val string) error { ip := net.ParseIP(val) if ip == nil { return fmt.Errorf("%s is not an ip address", val) } (*o.IP) = net.ParseIP(val) return nil } func (o *IpOpt) String() string { return (*o.IP).String() } docker-1.6.2/opts/ulimit.go0000644000175000017500000000125612524223634015157 0ustar tianontianonpackage opts import ( "fmt" "github.com/docker/docker/pkg/ulimit" ) type UlimitOpt struct { values map[string]*ulimit.Ulimit } func NewUlimitOpt(ref map[string]*ulimit.Ulimit) *UlimitOpt { return &UlimitOpt{ref} } func (o *UlimitOpt) Set(val string) error { l, err := ulimit.Parse(val) if err != nil { return err } o.values[l.Name] = l return nil } func (o *UlimitOpt) String() string { var out []string for _, v := range o.values { out = append(out, v.String()) } return fmt.Sprintf("%v", out) } func (o *UlimitOpt) GetList() []*ulimit.Ulimit { var ulimits []*ulimit.Ulimit for _, v := range o.values { ulimits = append(ulimits, v) } return ulimits } docker-1.6.2/pkg/0000755000175000017500000000000012524223634013115 5ustar tianontianondocker-1.6.2/pkg/httputils/0000755000175000017500000000000012524223634015155 5ustar tianontianondocker-1.6.2/pkg/httputils/resumablerequestreader.go0000644000175000017500000000544612524223634022270 0ustar tianontianonpackage httputils import ( "fmt" "io" "net/http" "time" log "github.com/Sirupsen/logrus" ) type resumableRequestReader struct { client *http.Client request *http.Request lastRange int64 totalSize int64 currentResponse *http.Response failures uint32 maxFailures uint32 } // ResumableRequestReader makes it possible to resume reading a request's body transparently // maxfail is the number of times we retry to make requests again (not resumes) // totalsize is the total length of the body; auto detect if not provided func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize} } func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse} } func (r *resumableRequestReader) Read(p []byte) (n int, err 
error) {
	if r.client == nil || r.request == nil {
		return 0, fmt.Errorf("client and request can't be nil")
	}
	isFreshRequest := false
	if r.lastRange != 0 && r.currentResponse == nil {
		readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize)
		r.request.Header.Set("Range", readRange)
		time.Sleep(5 * time.Second)
	}
	if r.currentResponse == nil {
		r.currentResponse, err = r.client.Do(r.request)
		isFreshRequest = true
	}
	if err != nil && r.failures+1 != r.maxFailures {
		r.cleanUpResponse()
		r.failures++
		time.Sleep(5 * time.Duration(r.failures) * time.Second)
		return 0, nil
	} else if err != nil {
		r.cleanUpResponse()
		return 0, err
	}
	if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 {
		r.cleanUpResponse()
		return 0, io.EOF
	} else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest {
		r.cleanUpResponse()
		return 0, fmt.Errorf("the server doesn't support byte ranges")
	}
	if r.totalSize == 0 {
		r.totalSize = r.currentResponse.ContentLength
	} else if r.totalSize <= 0 {
		r.cleanUpResponse()
		return 0, fmt.Errorf("failed to auto detect content length")
	}
	n, err = r.currentResponse.Body.Read(p)
	r.lastRange += int64(n)
	if err != nil {
		r.cleanUpResponse()
	}
	if err != nil && err != io.EOF {
		log.Infof("encountered error during pull and clearing it before resume: %s", err)
		err = nil
	}
	return n, err
}

func (r *resumableRequestReader) Close() error {
	r.cleanUpResponse()
	r.client = nil
	r.request = nil
	return nil
}

func (r *resumableRequestReader) cleanUpResponse() {
	if r.currentResponse != nil {
		r.currentResponse.Body.Close()
		r.currentResponse = nil
	}
}

docker-1.6.2/pkg/ulimit/0000755000175000017500000000000012524223634014420 5ustar tianontianondocker-1.6.2/pkg/ulimit/ulimit_test.go0000644000175000017500000000177612524223634017314 0ustar tianontianonpackage ulimit

import "testing"

func TestParseInvalidLimitType(t *testing.T) {
	if _, err := Parse("notarealtype=1024:1024"); err == nil {
		t.Fatalf("expected error on invalid ulimit type")
	}
}

func TestParseBadFormat(t *testing.T) {
	if _, err := Parse("nofile:1024:1024"); err == nil {
		t.Fatal("expected error on bad syntax")
	}
	if _, err := Parse("nofile"); err == nil {
		t.Fatal("expected error on bad syntax")
	}
	if _, err := Parse("nofile="); err == nil {
		t.Fatal("expected error on bad syntax")
	}
	if _, err := Parse("nofile=:"); err == nil {
		t.Fatal("expected error on bad syntax")
	}
	if _, err := Parse("nofile=:1024"); err == nil {
		t.Fatal("expected error on bad syntax")
	}
}

func TestParseHardLessThanSoft(t *testing.T) {
	if _, err := Parse("nofile=1024:1"); err == nil {
		t.Fatal("expected error on hard limit less than soft limit")
	}
}

func TestParseInvalidValueType(t *testing.T) {
	if _, err := Parse("nofile=asdf"); err == nil {
		t.Fatal("expected error on bad value type")
	}
}

docker-1.6.2/pkg/ulimit/ulimit.go0000644000175000017500000000521612524223634016256 0ustar tianontianonpackage ulimit

import (
	"fmt"
	"strconv"
	"strings"
)

// Ulimit is a human friendly version of Rlimit.
type Ulimit struct {
	Name string
	Hard int64
	Soft int64
}

type Rlimit struct {
	Type int    `json:"type,omitempty"`
	Hard uint64 `json:"hard,omitempty"`
	Soft uint64 `json:"soft,omitempty"`
}

const (
	// magic numbers for making the syscall
	// some of these are defined in the syscall package, but not all.
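	// (These values match the generic Linux RLIMIT_* numbering.)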
	// Also since the Windows client doesn't get access to the syscall package,
	// we need to define these here.
	RLIMIT_AS         = 9
	RLIMIT_CORE       = 4
	RLIMIT_CPU        = 0
	RLIMIT_DATA       = 2
	RLIMIT_FSIZE      = 1
	RLIMIT_LOCKS      = 10
	RLIMIT_MEMLOCK    = 8
	RLIMIT_MSGQUEUE   = 12
	RLIMIT_NICE       = 13
	RLIMIT_NOFILE     = 7
	RLIMIT_NPROC      = 6
	RLIMIT_RSS        = 5
	RLIMIT_RTPRIO     = 14
	RLIMIT_RTTIME     = 15
	RLIMIT_SIGPENDING = 11
	RLIMIT_STACK      = 3
)

var ulimitNameMapping = map[string]int{
	//"as": RLIMIT_AS, // Disabled since this doesn't seem usable with the way Docker inits a container.
	"core":       RLIMIT_CORE,
	"cpu":        RLIMIT_CPU,
	"data":       RLIMIT_DATA,
	"fsize":      RLIMIT_FSIZE,
	"locks":      RLIMIT_LOCKS,
	"memlock":    RLIMIT_MEMLOCK,
	"msgqueue":   RLIMIT_MSGQUEUE,
	"nice":       RLIMIT_NICE,
	"nofile":     RLIMIT_NOFILE,
	"nproc":      RLIMIT_NPROC,
	"rss":        RLIMIT_RSS,
	"rtprio":     RLIMIT_RTPRIO,
	"rttime":     RLIMIT_RTTIME,
	"sigpending": RLIMIT_SIGPENDING,
	"stack":      RLIMIT_STACK,
}

func Parse(val string) (*Ulimit, error) {
	parts := strings.SplitN(val, "=", 2)
	if len(parts) != 2 {
		return nil, fmt.Errorf("invalid ulimit argument: %s", val)
	}

	if _, exists := ulimitNameMapping[parts[0]]; !exists {
		return nil, fmt.Errorf("invalid ulimit type: %s", parts[0])
	}

	limitVals := strings.SplitN(parts[1], ":", 2)
	if len(limitVals) > 2 {
		return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1])
	}

	soft, err := strconv.ParseInt(limitVals[0], 10, 64)
	if err != nil {
		return nil, err
	}

	hard := soft // in case no hard was set
	if len(limitVals) == 2 {
		hard, err = strconv.ParseInt(limitVals[1], 10, 64)
		if err != nil {
			return nil, err
		}
	}

	if soft > hard {
		return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, hard)
	}

	return &Ulimit{Name: parts[0], Soft: soft, Hard: hard}, nil
}

func (u *Ulimit) GetRlimit() (*Rlimit, error) {
	t, exists := ulimitNameMapping[u.Name]
	if !exists {
		return nil, fmt.Errorf("invalid ulimit name %s", u.Name)
	}

	return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil
}

func (u *Ulimit) String() string {
	return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard)
}

docker-1.6.2/pkg/listenbuffer/0000755000175000017500000000000012524223634015605 5ustar tianontianondocker-1.6.2/pkg/listenbuffer/buffer.go0000644000175000017500000000215312524223634017406 0ustar tianontianon/*
Package listenbuffer allows Go applications to immediately start
listening on a socket (unix, tcp, udp) while holding connections
until the application has booted and is ready to accept them.
*/
package listenbuffer

import "net"

// NewListenBuffer returns a listener listening on addr with the protocol.
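//
// A minimal usage sketch (hypothetical caller; not part of this package):
//
//	activate := make(chan struct{})
//	l, err := NewListenBuffer("tcp", "127.0.0.1:4243", activate)
//	if err != nil {
//		log.Fatal(err)
//	}
//	go srv.Serve(l)  // Accept blocks until activation
//	// ... daemon finishes booting ...
//	close(activate)  // release the held connections
//
// (srv stands in for any server with a Serve(net.Listener) method.)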
func NewListenBuffer(proto, addr string, activate chan struct{}) (net.Listener, error) {
	wrapped, err := net.Listen(proto, addr)
	if err != nil {
		return nil, err
	}

	return &defaultListener{
		wrapped:  wrapped,
		activate: activate,
	}, nil
}

type defaultListener struct {
	wrapped  net.Listener // the real listener to wrap
	ready    bool         // is the listener ready to start accepting connections
	activate chan struct{}
}

func (l *defaultListener) Close() error {
	return l.wrapped.Close()
}

func (l *defaultListener) Addr() net.Addr {
	return l.wrapped.Addr()
}

func (l *defaultListener) Accept() (net.Conn, error) {
	// if the listener has been told it is ready then we can go ahead and
	// start returning connections
	if l.ready {
		return l.wrapped.Accept()
	}
	<-l.activate
	l.ready = true
	return l.Accept()
}

docker-1.6.2/pkg/testutils/0000755000175000017500000000000012524223634015155 5ustar tianontianondocker-1.6.2/pkg/testutils/README.md0000644000175000017500000000021412524223634016431 0ustar tianontianon`testutils` is a collection of utility functions to facilitate the writing of tests. It is used in various places by the Docker test suite.

docker-1.6.2/pkg/testutils/utils.go0000644000175000017500000000140612524223634016645 0ustar tianontianonpackage testutils

import (
	"math/rand"
	"testing"
	"time"
)

const chars = "abcdefghijklmnopqrstuvwxyz" +
	"ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
	"~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "

// Timeout calls f and waits for 100ms for it to complete.
// If it doesn't, it causes the tests to fail.
// t must be a valid testing context.
func Timeout(t *testing.T, f func()) {
	onTimeout := time.After(100 * time.Millisecond)
	onDone := make(chan bool)
	go func() {
		f()
		close(onDone)
	}()
	select {
	case <-onTimeout:
		t.Fatalf("timeout")
	case <-onDone:
	}
}

// RandomString returns a random string of the specified length.
func RandomString(length int) string {
	res := make([]byte, length)
	for i := 0; i < length; i++ {
		res[i] = chars[rand.Intn(len(chars))]
	}
	return string(res)
}

docker-1.6.2/pkg/directory/0000755000175000017500000000000012524223634015121 5ustar tianontianondocker-1.6.2/pkg/directory/directory_linux.go0000644000175000017500000000150512524223634020674 0ustar tianontianon// +build linux

package directory

import (
	"os"
	"path/filepath"
	"syscall"
)

// Size walks a directory tree and returns its total size in bytes.
func Size(dir string) (size int64, err error) {
	data := make(map[uint64]struct{})
	err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, e error) error {
		// Ignore directory sizes
		if fileInfo == nil {
			return nil
		}

		s := fileInfo.Size()
		if fileInfo.IsDir() || s == 0 {
			return nil
		}

		// Check inode to handle hard links correctly.
		// inode is not a uint64 on all platforms. Cast it to avoid issues.
		inode := fileInfo.Sys().(*syscall.Stat_t).Ino
		if _, exists := data[uint64(inode)]; exists {
			return nil
		}
		data[uint64(inode)] = struct{}{}

		size += s

		return nil
	})
	return
}

docker-1.6.2/pkg/directory/directory_windows.go0000644000175000017500000000070712524223634021232 0ustar tianontianon// +build windows

package directory

import (
	"os"
	"path/filepath"
)

// Size walks a directory tree and returns its total size in bytes.
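// Unlike the Linux implementation above, no inode information is available
// here, so hard-linked files are counted once per link rather than once per
// inode.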
func Size(dir string) (size int64, err error) { err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, e error) error { // Ignore directory sizes if fileInfo == nil { return nil } s := fileInfo.Size() if fileInfo.IsDir() || s == 0 { return nil } size += s return nil }) return } docker-1.6.2/pkg/directory/directory_test.go0000644000175000017500000000732312524223634020520 0ustar tianontianonpackage directory import ( "io/ioutil" "os" "testing" ) // Size of an empty directory should be 0 func TestSizeEmpty(t *testing.T) { var dir string var err error if dir, err = ioutil.TempDir(os.TempDir(), "testSizeEmptyDirectory"); err != nil { t.Fatalf("failed to create directory: %s", err) } var size int64 if size, _ = Size(dir); size != 0 { t.Fatalf("empty directory has size: %d", size) } } // Size of a directory with one empty file should be 0 func TestSizeEmptyFile(t *testing.T) { var dir string var err error if dir, err = ioutil.TempDir(os.TempDir(), "testSizeEmptyFile"); err != nil { t.Fatalf("failed to create directory: %s", err) } var file *os.File if file, err = ioutil.TempFile(dir, "file"); err != nil { t.Fatalf("failed to create file: %s", err) } var size int64 if size, _ = Size(file.Name()); size != 0 { t.Fatalf("directory with one file has size: %d", size) } } // Size of a directory with one 5-byte file should be 5 func TestSizeNonemptyFile(t *testing.T) { var dir string var err error if dir, err = ioutil.TempDir(os.TempDir(), "testSizeNonemptyFile"); err != nil { t.Fatalf("failed to create directory: %s", err) } var file *os.File if file, err = ioutil.TempFile(dir, "file"); err != nil { t.Fatalf("failed to create file: %s", err) } d := []byte{97, 98, 99, 100, 101} file.Write(d) var size int64 if size, _ = Size(file.Name()); size != 5 { t.Fatalf("directory with one 5-byte file has size: %d", size) } } // Size of a directory with one empty directory should be 0 func TestSizeNestedDirectoryEmpty(t *testing.T) { var dir string var err error if dir, err = ioutil.TempDir(os.TempDir(), "testSizeNestedDirectoryEmpty"); err != nil { t.Fatalf("failed to create directory: %s", err) } if dir, err = ioutil.TempDir(dir, "nested"); err != nil { t.Fatalf("failed to create nested directory: %s", err) } var size int64 if size, _ = Size(dir); size != 0 { t.Fatalf("directory with one empty directory has size: %d", size) } } // Test directory with 1 file and 1 empty directory func TestSizeFileAndNestedDirectoryEmpty(t *testing.T) { var dir string var err error if dir, err = ioutil.TempDir(os.TempDir(), "testSizeFileAndNestedDirectoryEmpty"); err != nil { t.Fatalf("failed to create directory: %s", err) } if dir, err = ioutil.TempDir(dir, "nested"); err != nil { t.Fatalf("failed to create nested directory: %s", err) } var file *os.File if file, err = ioutil.TempFile(dir, "file"); err != nil { t.Fatalf("failed to create file: %s", err) } d := []byte{100, 111, 99, 107, 101, 114} file.Write(d) var size int64 if size, _ = Size(dir); size != 6 { t.Fatalf("directory with 6-byte file and empty directory has size: %d", size) } } // Test directory with 1 file and 1 non-empty directory func TestSizeFileAndNestedDirectoryNonempty(t *testing.T) { var dir, dirNested string var err error if dir, err = ioutil.TempDir(os.TempDir(), "TestSizeFileAndNestedDirectoryNonempty"); err != nil { t.Fatalf("failed to create directory: %s", err) } if dirNested, err = ioutil.TempDir(dir, "nested"); err != nil { t.Fatalf("failed to create nested directory: %s", err) } var file *os.File if file, err = ioutil.TempFile(dir, "file"); err 
!= nil { t.Fatalf("failed to create file: %s", err) } data := []byte{100, 111, 99, 107, 101, 114} file.Write(data) var nestedFile *os.File if nestedFile, err = ioutil.TempFile(dirNested, "file"); err != nil { t.Fatalf("failed to create file in nested directory: %s", err) } nestedData := []byte{100, 111, 99, 107, 101, 114} nestedFile.Write(nestedData) var size int64 if size, _ = Size(dir); size != 12 { t.Fatalf("directory with 6-byte file and nested directory with 6-byte file has size: %d", size) } } docker-1.6.2/pkg/mflag/0000755000175000017500000000000012524223634014203 5ustar tianontianondocker-1.6.2/pkg/mflag/README.md0000644000175000017500000000151712524223634015466 0ustar tianontianonPackage mflag (aka multiple-flag) implements command-line flag parsing. It's an **hacky** fork of the [official golang package](http://golang.org/pkg/flag/) It adds: * both short and long flag version `./example -s red` `./example --string blue` * multiple names for the same option ``` $>./example -h Usage of example: -s, --string="": a simple string ``` ___ It is very flexible on purpose, so you can do things like: ``` $>./example -h Usage of example: -s, -string, --string="": a simple string ``` Or: ``` $>./example -h Usage of example: -oldflag, --newflag="": a simple string ``` You can also hide some flags from the usage, so if we want only `--newflag`: ``` $>./example -h Usage of example: --newflag="": a simple string $>./example -oldflag str str ``` See [example.go](example/example.go) for more details. docker-1.6.2/pkg/mflag/LICENSE0000644000175000017500000000272512524223634015216 0ustar tianontianonCopyright (c) 2014-2015 The Docker & Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. docker-1.6.2/pkg/mflag/flag.go0000644000175000017500000010351412524223634015447 0ustar tianontianon// Copyright 2014-2015 The Docker & Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. /* Package flag implements command-line flag parsing. Usage: Define flags using flag.String(), Bool(), Int(), etc. 
This declares an integer flag, -f or --flagname, stored in the pointer ip, with type *int:

	import flag "github.com/docker/docker/pkg/mflag"
	var ip = flag.Int([]string{"f", "-flagname"}, 1234, "help message for flagname")

If you like, you can bind the flag to a variable using the Var() functions.

	var flagvar int
	func init() {
		// -flaghidden will work, but will be hidden from the usage
		flag.IntVar(&flagvar, []string{"f", "#flaghidden", "-flagname"}, 1234, "help message for flagname")
	}

Or you can create custom flags that satisfy the Value interface (with
pointer receivers) and couple them to flag parsing by

	flag.Var(&flagVal, []string{"name"}, "help message for flagname")

For such flags, the default value is just the initial value of the variable.

You can also add "deprecated" flags; they are still usable, but are not shown
in the usage and will display a warning when you try to use them. `#` before
an option means this option is deprecated; if there is a following option
without `#` ahead, then that's the replacement; if not, it will just be removed:

	var ip = flag.Int([]string{"#f", "#flagname", "-flagname"}, 1234, "help message for flagname")

this will display: `Warning: '-f' is deprecated, it will be replaced by '--flagname' soon. See usage.` or
this will display: `Warning: '-flagname' is deprecated, it will be replaced by '--flagname' soon. See usage.`

	var ip = flag.Int([]string{"f", "#flagname"}, 1234, "help message for flagname")

will display: `Warning: '-flagname' is deprecated, it will be removed soon. See usage.`
so you can only use `-f`.

You can also group one-letter flags; if you declare

	var v = flag.Bool([]string{"v", "-verbose"}, false, "help message for verbose")
	var s = flag.Bool([]string{"s", "-slow"}, false, "help message for slow")

you will be able to use -vs or -sv.

After all flags are defined, call flag.Parse() to parse the command line
into the defined flags.

Flags may then be used directly. If you're using the flags themselves,
they are all pointers; if you bind to variables, they're values.

	fmt.Println("ip has value ", *ip)
	fmt.Println("flagvar has value ", flagvar)

After parsing, the arguments after the flag are available as the
slice flag.Args() or individually as flag.Arg(i).
The arguments are indexed from 0 through flag.NArg()-1.

Command line flag syntax:

	-flag
	-flag=x
	-flag="x"
	-flag='x'
	-flag x  // non-boolean flags only

One or two minus signs may be used; they are equivalent.
The last form is not permitted for boolean flags because the
meaning of the command

	cmd -x *

will change if there is a file called 0, false, etc. You must
use the -flag=false form to turn off a boolean flag.

Flag parsing stops just before the first non-flag argument
("-" is a non-flag argument) or after the terminator "--".

Integer flags accept 1234, 0664, 0x1234 and may be negative.
Boolean flags may be 1, 0, t, f, true, false, TRUE, FALSE, True, False.
Duration flags accept any input valid for time.ParseDuration.

The default set of command-line flags is controlled by
top-level functions. The FlagSet type allows one to define
independent sets of flags, such as to implement subcommands
in a command-line interface. The methods of FlagSet are
analogous to the top-level functions for the command-line
flag set.
*/
package mflag

import (
	"errors"
	"fmt"
	"io"
	"os"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"text/tabwriter"
	"time"

	"github.com/docker/docker/pkg/homedir"
)

// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
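// With ContinueOnError handling, Parse returns this error so the caller can
// print usage and choose how to exit (behavior carried over from the standard
// library's flag package).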
var ErrHelp = errors.New("flag: help requested") // ErrRetry is the error returned if you need to try letter by letter var ErrRetry = errors.New("flag: retry") // -- bool Value type boolValue bool func newBoolValue(val bool, p *bool) *boolValue { *p = val return (*boolValue)(p) } func (b *boolValue) Set(s string) error { v, err := strconv.ParseBool(s) *b = boolValue(v) return err } func (b *boolValue) Get() interface{} { return bool(*b) } func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } func (b *boolValue) IsBoolFlag() bool { return true } // optional interface to indicate boolean flags that can be // supplied without "=value" text type boolFlag interface { Value IsBoolFlag() bool } // -- int Value type intValue int func newIntValue(val int, p *int) *intValue { *p = val return (*intValue)(p) } func (i *intValue) Set(s string) error { v, err := strconv.ParseInt(s, 0, 64) *i = intValue(v) return err } func (i *intValue) Get() interface{} { return int(*i) } func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } // -- int64 Value type int64Value int64 func newInt64Value(val int64, p *int64) *int64Value { *p = val return (*int64Value)(p) } func (i *int64Value) Set(s string) error { v, err := strconv.ParseInt(s, 0, 64) *i = int64Value(v) return err } func (i *int64Value) Get() interface{} { return int64(*i) } func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } // -- uint Value type uintValue uint func newUintValue(val uint, p *uint) *uintValue { *p = val return (*uintValue)(p) } func (i *uintValue) Set(s string) error { v, err := strconv.ParseUint(s, 0, 64) *i = uintValue(v) return err } func (i *uintValue) Get() interface{} { return uint(*i) } func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } // -- uint64 Value type uint64Value uint64 func newUint64Value(val uint64, p *uint64) *uint64Value { *p = val return (*uint64Value)(p) } func (i *uint64Value) Set(s string) error { v, err := strconv.ParseUint(s, 0, 64) *i = uint64Value(v) return err } func (i *uint64Value) Get() interface{} { return uint64(*i) } func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } // -- string Value type stringValue string func newStringValue(val string, p *string) *stringValue { *p = val return (*stringValue)(p) } func (s *stringValue) Set(val string) error { *s = stringValue(val) return nil } func (s *stringValue) Get() interface{} { return string(*s) } func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } // -- float64 Value type float64Value float64 func newFloat64Value(val float64, p *float64) *float64Value { *p = val return (*float64Value)(p) } func (f *float64Value) Set(s string) error { v, err := strconv.ParseFloat(s, 64) *f = float64Value(v) return err } func (f *float64Value) Get() interface{} { return float64(*f) } func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } // -- time.Duration Value type durationValue time.Duration func newDurationValue(val time.Duration, p *time.Duration) *durationValue { *p = val return (*durationValue)(p) } func (d *durationValue) Set(s string) error { v, err := time.ParseDuration(s) *d = durationValue(v) return err } func (d *durationValue) Get() interface{} { return time.Duration(*d) } func (d *durationValue) String() string { return (*time.Duration)(d).String() } // Value is the interface to the dynamic value stored in a flag. // (The default value is represented as a string.) 
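//
// A minimal custom Value sketch (illustrative only; csvValue is not part of
// this package):
//
//	type csvValue []string
//
//	func (c *csvValue) String() string { return strings.Join(*c, ",") }
//
//	func (c *csvValue) Set(s string) error {
//		*c = append(*c, strings.Split(s, ",")...)
//		return nil
//	}
//
// Registering it with fs.Var(&vals, []string{"-tag"}, "tags, comma separated")
// makes each occurrence of --tag append its comma-separated items.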
// // If a Value has an IsBoolFlag() bool method returning true, // the command-line parser makes -name equivalent to -name=true // rather than using the next command-line argument. type Value interface { String() string Set(string) error } // Getter is an interface that allows the contents of a Value to be retrieved. // It wraps the Value interface, rather than being part of it, because it // appeared after Go 1 and its compatibility rules. All Value types provided // by this package satisfy the Getter interface. type Getter interface { Value Get() interface{} } // ErrorHandling defines how to handle flag parsing errors. type ErrorHandling int const ( ContinueOnError ErrorHandling = iota ExitOnError PanicOnError ) // A FlagSet represents a set of defined flags. The zero value of a FlagSet // has no name and has ContinueOnError error handling. type FlagSet struct { // Usage is the function called when an error occurs while parsing flags. // The field is a function (not a method) that may be changed to point to // a custom error handler. Usage func() name string parsed bool actual map[string]*Flag formal map[string]*Flag args []string // arguments after flags errorHandling ErrorHandling output io.Writer // nil means stderr; use Out() accessor nArgRequirements []nArgRequirement } // A Flag represents the state of a flag. type Flag struct { Names []string // name as it appears on command line Usage string // help message Value Value // value as set DefValue string // default value (as text); for usage message } type flagSlice []string func (p flagSlice) Len() int { return len(p) } func (p flagSlice) Less(i, j int) bool { pi, pj := strings.TrimPrefix(p[i], "-"), strings.TrimPrefix(p[j], "-") lpi, lpj := strings.ToLower(pi), strings.ToLower(pj) if lpi != lpj { return lpi < lpj } return pi < pj } func (p flagSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } // sortFlags returns the flags as a slice in lexicographical sorted order. func sortFlags(flags map[string]*Flag) []*Flag { var list flagSlice // The sorted list is based on the first name, when flag map might use the other names. nameMap := make(map[string]string) for n, f := range flags { fName := strings.TrimPrefix(f.Names[0], "#") nameMap[fName] = n if len(f.Names) == 1 { list = append(list, fName) continue } found := false for _, name := range list { if name == fName { found = true break } } if !found { list = append(list, fName) } } sort.Sort(list) result := make([]*Flag, len(list)) for i, name := range list { result[i] = flags[nameMap[name]] } return result } // Name returns the name of the FlagSet. func (f *FlagSet) Name() string { return f.name } // Out returns the destination for usage and error messages. func (f *FlagSet) Out() io.Writer { if f.output == nil { return os.Stderr } return f.output } // SetOutput sets the destination for usage and error messages. // If output is nil, os.Stderr is used. func (f *FlagSet) SetOutput(output io.Writer) { f.output = output } // VisitAll visits the flags in lexicographical order, calling fn for each. // It visits all flags, even those not set. func (f *FlagSet) VisitAll(fn func(*Flag)) { for _, flag := range sortFlags(f.formal) { fn(flag) } } // VisitAll visits the command-line flags in lexicographical order, calling // fn for each. It visits all flags, even those not set. func VisitAll(fn func(*Flag)) { CommandLine.VisitAll(fn) } // Visit visits the flags in lexicographical order, calling fn for each. // It visits only those flags that have been set. 
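//
// For example, to collect the primary names of all flags that were set
// (a sketch; fs is any parsed *FlagSet):
//
//	var set []string
//	fs.Visit(func(fl *Flag) { set = append(set, fl.Names[0]) })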
func (f *FlagSet) Visit(fn func(*Flag)) { for _, flag := range sortFlags(f.actual) { fn(flag) } } // Visit visits the command-line flags in lexicographical order, calling fn // for each. It visits only those flags that have been set. func Visit(fn func(*Flag)) { CommandLine.Visit(fn) } // Lookup returns the Flag structure of the named flag, returning nil if none exists. func (f *FlagSet) Lookup(name string) *Flag { return f.formal[name] } // Indicates whether the specified flag was specified at all on the cmd line func (f *FlagSet) IsSet(name string) bool { return f.actual[name] != nil } // Lookup returns the Flag structure of the named command-line flag, // returning nil if none exists. func Lookup(name string) *Flag { return CommandLine.formal[name] } // Indicates whether the specified flag was specified at all on the cmd line func IsSet(name string) bool { return CommandLine.IsSet(name) } type nArgRequirementType int // Indicator used to pass to BadArgs function const ( Exact nArgRequirementType = iota Max Min ) type nArgRequirement struct { Type nArgRequirementType N int } // Require adds a requirement about the number of arguments for the FlagSet. // The first parameter can be Exact, Max, or Min to respectively specify the exact, // the maximum, or the minimal number of arguments required. // The actual check is done in FlagSet.CheckArgs(). func (f *FlagSet) Require(nArgRequirementType nArgRequirementType, nArg int) { f.nArgRequirements = append(f.nArgRequirements, nArgRequirement{nArgRequirementType, nArg}) } // CheckArgs uses the requirements set by FlagSet.Require() to validate // the number of arguments. If the requirements are not met, // an error message string is returned. func (f *FlagSet) CheckArgs() (message string) { for _, req := range f.nArgRequirements { var arguments string if req.N == 1 { arguments = "1 argument" } else { arguments = fmt.Sprintf("%d arguments", req.N) } str := func(kind string) string { return fmt.Sprintf("%q requires %s%s", f.name, kind, arguments) } switch req.Type { case Exact: if f.NArg() != req.N { return str("") } case Max: if f.NArg() > req.N { return str("a maximum of ") } case Min: if f.NArg() < req.N { return str("a minimum of ") } } } return "" } // Set sets the value of the named flag. func (f *FlagSet) Set(name, value string) error { flag, ok := f.formal[name] if !ok { return fmt.Errorf("no such flag -%v", name) } err := flag.Value.Set(value) if err != nil { return err } if f.actual == nil { f.actual = make(map[string]*Flag) } f.actual[name] = flag return nil } // Set sets the value of the named command-line flag. func Set(name, value string) error { return CommandLine.Set(name, value) } // PrintDefaults prints, to standard error unless configured // otherwise, the default values of all defined flags in the set. 
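// Flag names prefixed with '#' (deprecated names) are omitted, and default
// values under the user's home directory are shortened via the homedir
// package (e.g. "~" on Unix).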
func (f *FlagSet) PrintDefaults() { writer := tabwriter.NewWriter(f.Out(), 20, 1, 3, ' ', 0) home := homedir.Get() // Don't substitute when HOME is / if runtime.GOOS != "windows" && home == "/" { home = "" } f.VisitAll(func(flag *Flag) { format := " -%s=%s" names := []string{} for _, name := range flag.Names { if name[0] != '#' { names = append(names, name) } } if len(names) > 0 { val := flag.DefValue if home != "" && strings.HasPrefix(val, home) { val = homedir.GetShortcutString() + val[len(home):] } fmt.Fprintf(writer, format, strings.Join(names, ", -"), val) for i, line := range strings.Split(flag.Usage, "\n") { if i != 0 { line = " " + line } fmt.Fprintln(writer, "\t", line) } } }) writer.Flush() } // PrintDefaults prints to standard error the default values of all defined command-line flags. func PrintDefaults() { CommandLine.PrintDefaults() } // defaultUsage is the default function to print a usage message. func defaultUsage(f *FlagSet) { if f.name == "" { fmt.Fprintf(f.Out(), "Usage:\n") } else { fmt.Fprintf(f.Out(), "Usage of %s:\n", f.name) } f.PrintDefaults() } // NOTE: Usage is not just defaultUsage(CommandLine) // because it serves (via godoc flag Usage) as the example // for how to write your own usage function. // Usage prints to standard error a usage message documenting all defined command-line flags. // The function is a variable that may be changed to point to a custom function. var Usage = func() { fmt.Fprintf(CommandLine.output, "Usage of %s:\n", os.Args[0]) PrintDefaults() } // FlagCount returns the number of flags that have been defined. func (f *FlagSet) FlagCount() int { return len(sortFlags(f.formal)) } // FlagCountUndeprecated returns the number of undeprecated flags that have been defined. func (f *FlagSet) FlagCountUndeprecated() int { count := 0 for _, flag := range sortFlags(f.formal) { for _, name := range flag.Names { if name[0] != '#' { count++ break } } } return count } // NFlag returns the number of flags that have been set. func (f *FlagSet) NFlag() int { return len(f.actual) } // NFlag returns the number of command-line flags that have been set. func NFlag() int { return len(CommandLine.actual) } // Arg returns the i'th argument. Arg(0) is the first remaining argument // after flags have been processed. func (f *FlagSet) Arg(i int) string { if i < 0 || i >= len(f.args) { return "" } return f.args[i] } // Arg returns the i'th command-line argument. Arg(0) is the first remaining argument // after flags have been processed. func Arg(i int) string { return CommandLine.Arg(i) } // NArg is the number of arguments remaining after flags have been processed. func (f *FlagSet) NArg() int { return len(f.args) } // NArg is the number of arguments remaining after flags have been processed. func NArg() int { return len(CommandLine.args) } // Args returns the non-flag arguments. func (f *FlagSet) Args() []string { return f.args } // Args returns the non-flag command-line arguments. func Args() []string { return CommandLine.args } // BoolVar defines a bool flag with specified name, default value, and usage string. // The argument p points to a bool variable in which to store the value of the flag. func (f *FlagSet) BoolVar(p *bool, names []string, value bool, usage string) { f.Var(newBoolValue(value, p), names, usage) } // BoolVar defines a bool flag with specified name, default value, and usage string. // The argument p points to a bool variable in which to store the value of the flag. 
func BoolVar(p *bool, names []string, value bool, usage string) { CommandLine.Var(newBoolValue(value, p), names, usage) } // Bool defines a bool flag with specified name, default value, and usage string. // The return value is the address of a bool variable that stores the value of the flag. func (f *FlagSet) Bool(names []string, value bool, usage string) *bool { p := new(bool) f.BoolVar(p, names, value, usage) return p } // Bool defines a bool flag with specified name, default value, and usage string. // The return value is the address of a bool variable that stores the value of the flag. func Bool(names []string, value bool, usage string) *bool { return CommandLine.Bool(names, value, usage) } // IntVar defines an int flag with specified name, default value, and usage string. // The argument p points to an int variable in which to store the value of the flag. func (f *FlagSet) IntVar(p *int, names []string, value int, usage string) { f.Var(newIntValue(value, p), names, usage) } // IntVar defines an int flag with specified name, default value, and usage string. // The argument p points to an int variable in which to store the value of the flag. func IntVar(p *int, names []string, value int, usage string) { CommandLine.Var(newIntValue(value, p), names, usage) } // Int defines an int flag with specified name, default value, and usage string. // The return value is the address of an int variable that stores the value of the flag. func (f *FlagSet) Int(names []string, value int, usage string) *int { p := new(int) f.IntVar(p, names, value, usage) return p } // Int defines an int flag with specified name, default value, and usage string. // The return value is the address of an int variable that stores the value of the flag. func Int(names []string, value int, usage string) *int { return CommandLine.Int(names, value, usage) } // Int64Var defines an int64 flag with specified name, default value, and usage string. // The argument p points to an int64 variable in which to store the value of the flag. func (f *FlagSet) Int64Var(p *int64, names []string, value int64, usage string) { f.Var(newInt64Value(value, p), names, usage) } // Int64Var defines an int64 flag with specified name, default value, and usage string. // The argument p points to an int64 variable in which to store the value of the flag. func Int64Var(p *int64, names []string, value int64, usage string) { CommandLine.Var(newInt64Value(value, p), names, usage) } // Int64 defines an int64 flag with specified name, default value, and usage string. // The return value is the address of an int64 variable that stores the value of the flag. func (f *FlagSet) Int64(names []string, value int64, usage string) *int64 { p := new(int64) f.Int64Var(p, names, value, usage) return p } // Int64 defines an int64 flag with specified name, default value, and usage string. // The return value is the address of an int64 variable that stores the value of the flag. func Int64(names []string, value int64, usage string) *int64 { return CommandLine.Int64(names, value, usage) } // UintVar defines a uint flag with specified name, default value, and usage string. // The argument p points to a uint variable in which to store the value of the flag. func (f *FlagSet) UintVar(p *uint, names []string, value uint, usage string) { f.Var(newUintValue(value, p), names, usage) } // UintVar defines a uint flag with specified name, default value, and usage string. // The argument p points to a uint variable in which to store the value of the flag. 
func UintVar(p *uint, names []string, value uint, usage string) { CommandLine.Var(newUintValue(value, p), names, usage) } // Uint defines a uint flag with specified name, default value, and usage string. // The return value is the address of a uint variable that stores the value of the flag. func (f *FlagSet) Uint(names []string, value uint, usage string) *uint { p := new(uint) f.UintVar(p, names, value, usage) return p } // Uint defines a uint flag with specified name, default value, and usage string. // The return value is the address of a uint variable that stores the value of the flag. func Uint(names []string, value uint, usage string) *uint { return CommandLine.Uint(names, value, usage) } // Uint64Var defines a uint64 flag with specified name, default value, and usage string. // The argument p points to a uint64 variable in which to store the value of the flag. func (f *FlagSet) Uint64Var(p *uint64, names []string, value uint64, usage string) { f.Var(newUint64Value(value, p), names, usage) } // Uint64Var defines a uint64 flag with specified name, default value, and usage string. // The argument p points to a uint64 variable in which to store the value of the flag. func Uint64Var(p *uint64, names []string, value uint64, usage string) { CommandLine.Var(newUint64Value(value, p), names, usage) } // Uint64 defines a uint64 flag with specified name, default value, and usage string. // The return value is the address of a uint64 variable that stores the value of the flag. func (f *FlagSet) Uint64(names []string, value uint64, usage string) *uint64 { p := new(uint64) f.Uint64Var(p, names, value, usage) return p } // Uint64 defines a uint64 flag with specified name, default value, and usage string. // The return value is the address of a uint64 variable that stores the value of the flag. func Uint64(names []string, value uint64, usage string) *uint64 { return CommandLine.Uint64(names, value, usage) } // StringVar defines a string flag with specified name, default value, and usage string. // The argument p points to a string variable in which to store the value of the flag. func (f *FlagSet) StringVar(p *string, names []string, value string, usage string) { f.Var(newStringValue(value, p), names, usage) } // StringVar defines a string flag with specified name, default value, and usage string. // The argument p points to a string variable in which to store the value of the flag. func StringVar(p *string, names []string, value string, usage string) { CommandLine.Var(newStringValue(value, p), names, usage) } // String defines a string flag with specified name, default value, and usage string. // The return value is the address of a string variable that stores the value of the flag. func (f *FlagSet) String(names []string, value string, usage string) *string { p := new(string) f.StringVar(p, names, value, usage) return p } // String defines a string flag with specified name, default value, and usage string. // The return value is the address of a string variable that stores the value of the flag. func String(names []string, value string, usage string) *string { return CommandLine.String(names, value, usage) } // Float64Var defines a float64 flag with specified name, default value, and usage string. // The argument p points to a float64 variable in which to store the value of the flag. 
func (f *FlagSet) Float64Var(p *float64, names []string, value float64, usage string) { f.Var(newFloat64Value(value, p), names, usage) } // Float64Var defines a float64 flag with specified name, default value, and usage string. // The argument p points to a float64 variable in which to store the value of the flag. func Float64Var(p *float64, names []string, value float64, usage string) { CommandLine.Var(newFloat64Value(value, p), names, usage) } // Float64 defines a float64 flag with specified name, default value, and usage string. // The return value is the address of a float64 variable that stores the value of the flag. func (f *FlagSet) Float64(names []string, value float64, usage string) *float64 { p := new(float64) f.Float64Var(p, names, value, usage) return p } // Float64 defines a float64 flag with specified name, default value, and usage string. // The return value is the address of a float64 variable that stores the value of the flag. func Float64(names []string, value float64, usage string) *float64 { return CommandLine.Float64(names, value, usage) } // DurationVar defines a time.Duration flag with specified name, default value, and usage string. // The argument p points to a time.Duration variable in which to store the value of the flag. func (f *FlagSet) DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { f.Var(newDurationValue(value, p), names, usage) } // DurationVar defines a time.Duration flag with specified name, default value, and usage string. // The argument p points to a time.Duration variable in which to store the value of the flag. func DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { CommandLine.Var(newDurationValue(value, p), names, usage) } // Duration defines a time.Duration flag with specified name, default value, and usage string. // The return value is the address of a time.Duration variable that stores the value of the flag. func (f *FlagSet) Duration(names []string, value time.Duration, usage string) *time.Duration { p := new(time.Duration) f.DurationVar(p, names, value, usage) return p } // Duration defines a time.Duration flag with specified name, default value, and usage string. // The return value is the address of a time.Duration variable that stores the value of the flag. func Duration(names []string, value time.Duration, usage string) *time.Duration { return CommandLine.Duration(names, value, usage) } // Var defines a flag with the specified name and usage string. The type and // value of the flag are represented by the first argument, of type Value, which // typically holds a user-defined implementation of Value. For instance, the // caller could create a flag that turns a comma-separated string into a slice // of strings by giving the slice the methods of Value; in particular, Set would // decompose the comma-separated string into the slice. func (f *FlagSet) Var(value Value, names []string, usage string) { // Remember the default value as a string; it won't change. 
flag := &Flag{names, usage, value, value.String()} for _, name := range names { name = strings.TrimPrefix(name, "#") _, alreadythere := f.formal[name] if alreadythere { var msg string if f.name == "" { msg = fmt.Sprintf("flag redefined: %s", name) } else { msg = fmt.Sprintf("%s flag redefined: %s", f.name, name) } fmt.Fprintln(f.Out(), msg) panic(msg) // Happens only if flags are declared with identical names } if f.formal == nil { f.formal = make(map[string]*Flag) } f.formal[name] = flag } } // Var defines a flag with the specified name and usage string. The type and // value of the flag are represented by the first argument, of type Value, which // typically holds a user-defined implementation of Value. For instance, the // caller could create a flag that turns a comma-separated string into a slice // of strings by giving the slice the methods of Value; in particular, Set would // decompose the comma-separated string into the slice. func Var(value Value, names []string, usage string) { CommandLine.Var(value, names, usage) } // failf prints to standard error a formatted error and usage message and // returns the error. func (f *FlagSet) failf(format string, a ...interface{}) error { err := fmt.Errorf(format, a...) fmt.Fprintln(f.Out(), err) if os.Args[0] == f.name { fmt.Fprintf(f.Out(), "See '%s --help'.\n", os.Args[0]) } else { fmt.Fprintf(f.Out(), "See '%s %s --help'.\n", os.Args[0], f.name) } return err } // usage calls the Usage method for the flag set, or the usage function if // the flag set is CommandLine. func (f *FlagSet) usage() { if f == CommandLine { Usage() } else if f.Usage == nil { defaultUsage(f) } else { f.Usage() } } func trimQuotes(str string) string { if len(str) == 0 { return str } type quote struct { start, end byte } // All valid quote types. quotes := []quote{ // Double quotes { start: '"', end: '"', }, // Single quotes { start: '\'', end: '\'', }, } for _, quote := range quotes { // Only strip if outermost match. if str[0] == quote.start && str[len(str)-1] == quote.end { str = str[1 : len(str)-1] break } } return str } // parseOne parses one flag. It reports whether a flag was seen. func (f *FlagSet) parseOne() (bool, string, error) { if len(f.args) == 0 { return false, "", nil } s := f.args[0] if len(s) == 0 || s[0] != '-' || len(s) == 1 { return false, "", nil } if s[1] == '-' && len(s) == 2 { // "--" terminates the flags f.args = f.args[1:] return false, "", nil } name := s[1:] if len(name) == 0 || name[0] == '=' { return false, "", f.failf("bad flag syntax: %s", s) } // it's a flag. does it have an argument? f.args = f.args[1:] has_value := false value := "" if i := strings.Index(name, "="); i != -1 { value = trimQuotes(name[i+1:]) has_value = true name = name[:i] } m := f.formal flag, alreadythere := m[name] // BUG if !alreadythere { if name == "-help" || name == "help" || name == "h" { // special case for nice help message. f.usage() return false, "", ErrHelp } if len(name) > 0 && name[0] == '-' { return false, "", f.failf("flag provided but not defined: -%s", name) } return false, name, ErrRetry } if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg if has_value { if err := fv.Set(value); err != nil { return false, "", f.failf("invalid boolean value %q for -%s: %v", value, name, err) } } else { fv.Set("true") } } else { // It must have a value, which might be the next argument. 
if !has_value && len(f.args) > 0 { // value is the next arg has_value = true value, f.args = f.args[0], f.args[1:] } if !has_value { return false, "", f.failf("flag needs an argument: -%s", name) } if err := flag.Value.Set(value); err != nil { return false, "", f.failf("invalid value %q for flag -%s: %v", value, name, err) } } if f.actual == nil { f.actual = make(map[string]*Flag) } f.actual[name] = flag for i, n := range flag.Names { if n == fmt.Sprintf("#%s", name) { replacement := "" for j := i; j < len(flag.Names); j++ { if flag.Names[j][0] != '#' { replacement = flag.Names[j] break } } if replacement != "" { fmt.Fprintf(f.Out(), "Warning: '-%s' is deprecated, it will be replaced by '-%s' soon. See usage.\n", name, replacement) } else { fmt.Fprintf(f.Out(), "Warning: '-%s' is deprecated, it will be removed soon. See usage.\n", name) } } } return true, "", nil } // Parse parses flag definitions from the argument list, which should not // include the command name. Must be called after all flags in the FlagSet // are defined and before flags are accessed by the program. // The return value will be ErrHelp if -help was set but not defined. func (f *FlagSet) Parse(arguments []string) error { f.parsed = true f.args = arguments for { seen, name, err := f.parseOne() if seen { continue } if err == nil { break } if err == ErrRetry { if len(name) > 1 { err = nil for _, letter := range strings.Split(name, "") { f.args = append([]string{"-" + letter}, f.args...) seen2, _, err2 := f.parseOne() if seen2 { continue } if err2 != nil { err = f.failf("flag provided but not defined: -%s", name) break } } if err == nil { continue } } else { err = f.failf("flag provided but not defined: -%s", name) } } switch f.errorHandling { case ContinueOnError: return err case ExitOnError: os.Exit(2) case PanicOnError: panic(err) } } return nil } // Parsed reports whether f.Parse has been called. func (f *FlagSet) Parsed() bool { return f.parsed } // Parse parses the command-line flags from os.Args[1:]. Must be called // after all flags are defined and before flags are accessed by the program. func Parse() { // Ignore errors; CommandLine is set for ExitOnError. CommandLine.Parse(os.Args[1:]) } // Parsed returns true if the command-line flags have been parsed. func Parsed() bool { return CommandLine.Parsed() } // CommandLine is the default set of command-line flags, parsed from os.Args. // The top-level functions such as BoolVar, Arg, and on are wrappers for the // methods of CommandLine. var CommandLine = NewFlagSet(os.Args[0], ExitOnError) // NewFlagSet returns a new, empty flag set with the specified name and // error handling property. func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { f := &FlagSet{ name: name, errorHandling: errorHandling, } return f } // Init sets the name and error handling property for a flag set. // By default, the zero FlagSet uses an empty name and the // ContinueOnError error handling policy. func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { f.name = name f.errorHandling = errorHandling } docker-1.6.2/pkg/mflag/flag_test.go0000644000175000017500000003325712524223634016514 0ustar tianontianon// Copyright 2014-2015 The Docker & Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package mflag import ( "bytes" "fmt" "os" "sort" "strings" "testing" "time" ) // ResetForTesting clears all flag state and sets the usage function as directed. 
// After calling ResetForTesting, parse errors in flag handling will not // exit the program. func ResetForTesting(usage func()) { CommandLine = NewFlagSet(os.Args[0], ContinueOnError) Usage = usage } func boolString(s string) string { if s == "0" { return "false" } return "true" } func TestEverything(t *testing.T) { ResetForTesting(nil) Bool([]string{"test_bool"}, false, "bool value") Int([]string{"test_int"}, 0, "int value") Int64([]string{"test_int64"}, 0, "int64 value") Uint([]string{"test_uint"}, 0, "uint value") Uint64([]string{"test_uint64"}, 0, "uint64 value") String([]string{"test_string"}, "0", "string value") Float64([]string{"test_float64"}, 0, "float64 value") Duration([]string{"test_duration"}, 0, "time.Duration value") m := make(map[string]*Flag) desired := "0" visitor := func(f *Flag) { for _, name := range f.Names { if len(name) > 5 && name[0:5] == "test_" { m[name] = f ok := false switch { case f.Value.String() == desired: ok = true case name == "test_bool" && f.Value.String() == boolString(desired): ok = true case name == "test_duration" && f.Value.String() == desired+"s": ok = true } if !ok { t.Error("Visit: bad value", f.Value.String(), "for", name) } } } } VisitAll(visitor) if len(m) != 8 { t.Error("VisitAll misses some flags") for k, v := range m { t.Log(k, *v) } } m = make(map[string]*Flag) Visit(visitor) if len(m) != 0 { t.Errorf("Visit sees unset flags") for k, v := range m { t.Log(k, *v) } } // Now set all flags Set("test_bool", "true") Set("test_int", "1") Set("test_int64", "1") Set("test_uint", "1") Set("test_uint64", "1") Set("test_string", "1") Set("test_float64", "1") Set("test_duration", "1s") desired = "1" Visit(visitor) if len(m) != 8 { t.Error("Visit fails after set") for k, v := range m { t.Log(k, *v) } } // Now test they're visited in sort order. 
var flagNames []string Visit(func(f *Flag) { for _, name := range f.Names { flagNames = append(flagNames, name) } }) if !sort.StringsAreSorted(flagNames) { t.Errorf("flag names not sorted: %v", flagNames) } } func TestGet(t *testing.T) { ResetForTesting(nil) Bool([]string{"test_bool"}, true, "bool value") Int([]string{"test_int"}, 1, "int value") Int64([]string{"test_int64"}, 2, "int64 value") Uint([]string{"test_uint"}, 3, "uint value") Uint64([]string{"test_uint64"}, 4, "uint64 value") String([]string{"test_string"}, "5", "string value") Float64([]string{"test_float64"}, 6, "float64 value") Duration([]string{"test_duration"}, 7, "time.Duration value") visitor := func(f *Flag) { for _, name := range f.Names { if len(name) > 5 && name[0:5] == "test_" { g, ok := f.Value.(Getter) if !ok { t.Errorf("Visit: value does not satisfy Getter: %T", f.Value) return } switch name { case "test_bool": ok = g.Get() == true case "test_int": ok = g.Get() == int(1) case "test_int64": ok = g.Get() == int64(2) case "test_uint": ok = g.Get() == uint(3) case "test_uint64": ok = g.Get() == uint64(4) case "test_string": ok = g.Get() == "5" case "test_float64": ok = g.Get() == float64(6) case "test_duration": ok = g.Get() == time.Duration(7) } if !ok { t.Errorf("Visit: bad value %T(%v) for %s", g.Get(), g.Get(), name) } } } } VisitAll(visitor) } func testParse(f *FlagSet, t *testing.T) { if f.Parsed() { t.Error("f.Parse() = true before Parse") } boolFlag := f.Bool([]string{"bool"}, false, "bool value") bool2Flag := f.Bool([]string{"bool2"}, false, "bool2 value") f.Bool([]string{"bool3"}, false, "bool3 value") bool4Flag := f.Bool([]string{"bool4"}, false, "bool4 value") intFlag := f.Int([]string{"-int"}, 0, "int value") int64Flag := f.Int64([]string{"-int64"}, 0, "int64 value") uintFlag := f.Uint([]string{"uint"}, 0, "uint value") uint64Flag := f.Uint64([]string{"-uint64"}, 0, "uint64 value") stringFlag := f.String([]string{"string"}, "0", "string value") f.String([]string{"string2"}, "0", "string2 value") singleQuoteFlag := f.String([]string{"squote"}, "", "single quoted value") doubleQuoteFlag := f.String([]string{"dquote"}, "", "double quoted value") mixedQuoteFlag := f.String([]string{"mquote"}, "", "mixed quoted value") mixed2QuoteFlag := f.String([]string{"mquote2"}, "", "mixed2 quoted value") nestedQuoteFlag := f.String([]string{"nquote"}, "", "nested quoted value") nested2QuoteFlag := f.String([]string{"nquote2"}, "", "nested2 quoted value") float64Flag := f.Float64([]string{"float64"}, 0, "float64 value") durationFlag := f.Duration([]string{"duration"}, 5*time.Second, "time.Duration value") extra := "one-extra-argument" args := []string{ "-bool", "-bool2=true", "-bool4=false", "--int", "22", "--int64", "0x23", "-uint", "24", "--uint64", "25", "-string", "hello", "-squote='single'", `-dquote="double"`, `-mquote='mixed"`, `-mquote2="mixed2'`, `-nquote="'single nested'"`, `-nquote2='"double nested"'`, "-float64", "2718e28", "-duration", "2m", extra, } if err := f.Parse(args); err != nil { t.Fatal(err) } if !f.Parsed() { t.Error("f.Parse() = false after Parse") } if *boolFlag != true { t.Error("bool flag should be true, is ", *boolFlag) } if *bool2Flag != true { t.Error("bool2 flag should be true, is ", *bool2Flag) } if !f.IsSet("bool2") { t.Error("bool2 should be marked as set") } if f.IsSet("bool3") { t.Error("bool3 should not be marked as set") } if !f.IsSet("bool4") { t.Error("bool4 should be marked as set") } if *bool4Flag != false { t.Error("bool4 flag should be false, is ", *bool4Flag) } if *intFlag != 
22 { t.Error("int flag should be 22, is ", *intFlag) } if *int64Flag != 0x23 { t.Error("int64 flag should be 0x23, is ", *int64Flag) } if *uintFlag != 24 { t.Error("uint flag should be 24, is ", *uintFlag) } if *uint64Flag != 25 { t.Error("uint64 flag should be 25, is ", *uint64Flag) } if *stringFlag != "hello" { t.Error("string flag should be `hello`, is ", *stringFlag) } if !f.IsSet("string") { t.Error("string flag should be marked as set") } if f.IsSet("string2") { t.Error("string2 flag should not be marked as set") } if *singleQuoteFlag != "single" { t.Error("single quote string flag should be `single`, is ", *singleQuoteFlag) } if *doubleQuoteFlag != "double" { t.Error("double quote string flag should be `double`, is ", *doubleQuoteFlag) } if *mixedQuoteFlag != `'mixed"` { t.Error("mixed quote string flag should be `'mixed\"`, is ", *mixedQuoteFlag) } if *mixed2QuoteFlag != `"mixed2'` { t.Error("mixed2 quote string flag should be `\"mixed2'`, is ", *mixed2QuoteFlag) } if *nestedQuoteFlag != "'single nested'" { t.Error("nested quote string flag should be `'single nested'`, is ", *nestedQuoteFlag) } if *nested2QuoteFlag != `"double nested"` { t.Error("double quote string flag should be `\"double nested\"`, is ", *nested2QuoteFlag) } if *float64Flag != 2718e28 { t.Error("float64 flag should be 2718e28, is ", *float64Flag) } if *durationFlag != 2*time.Minute { t.Error("duration flag should be 2m, is ", *durationFlag) } if len(f.Args()) != 1 { t.Error("expected one argument, got", len(f.Args())) } else if f.Args()[0] != extra { t.Errorf("expected argument %q got %q", extra, f.Args()[0]) } } func testPanic(f *FlagSet, t *testing.T) { f.Int([]string{"-int"}, 0, "int value") if f.Parsed() { t.Error("f.Parse() = true before Parse") } args := []string{ "-int", "21", } f.Parse(args) } func TestParsePanic(t *testing.T) { ResetForTesting(func() {}) testPanic(CommandLine, t) } func TestParse(t *testing.T) { ResetForTesting(func() { t.Error("bad parse") }) testParse(CommandLine, t) } func TestFlagSetParse(t *testing.T) { testParse(NewFlagSet("test", ContinueOnError), t) } // Declare a user-defined flag type. type flagVar []string func (f *flagVar) String() string { return fmt.Sprint([]string(*f)) } func (f *flagVar) Set(value string) error { *f = append(*f, value) return nil } func TestUserDefined(t *testing.T) { var flags FlagSet flags.Init("test", ContinueOnError) var v flagVar flags.Var(&v, []string{"v"}, "usage") if err := flags.Parse([]string{"-v", "1", "-v", "2", "-v=3"}); err != nil { t.Error(err) } if len(v) != 3 { t.Fatal("expected 3 args; got ", len(v)) } expect := "[1 2 3]" if v.String() != expect { t.Errorf("expected value %q got %q", expect, v.String()) } } // Declare a user-defined boolean flag type. 
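// TestShortFlagClustering is a hypothetical sketch (not part of the original
// suite) of the single-dash clustering that FlagSet.Parse implements through
// ErrRetry: an undefined flag such as "-fd" is split and re-parsed as "-f"
// followed by "-d".
func TestShortFlagClustering(t *testing.T) {
	fs := NewFlagSet("cluster", ContinueOnError)
	f := fs.Bool([]string{"f"}, false, "f flag")
	d := fs.Bool([]string{"d"}, false, "d flag")
	if err := fs.Parse([]string{"-fd"}); err != nil {
		t.Fatal(err)
	}
	if !*f || !*d {
		t.Fatalf("expected -fd to set both flags, got f=%v d=%v", *f, *d)
	}
}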
type boolFlagVar struct { count int } func (b *boolFlagVar) String() string { return fmt.Sprintf("%d", b.count) } func (b *boolFlagVar) Set(value string) error { if value == "true" { b.count++ } return nil } func (b *boolFlagVar) IsBoolFlag() bool { return b.count < 4 } func TestUserDefinedBool(t *testing.T) { var flags FlagSet flags.Init("test", ContinueOnError) var b boolFlagVar var err error flags.Var(&b, []string{"b"}, "usage") if err = flags.Parse([]string{"-b", "-b", "-b", "-b=true", "-b=false", "-b", "barg", "-b"}); err != nil { if b.count < 4 { t.Error(err) } } if b.count != 4 { t.Errorf("want: %d; got: %d", 4, b.count) } if err == nil { t.Error("expected error; got none") } } func TestSetOutput(t *testing.T) { var flags FlagSet var buf bytes.Buffer flags.SetOutput(&buf) flags.Init("test", ContinueOnError) flags.Parse([]string{"-unknown"}) if out := buf.String(); !strings.Contains(out, "-unknown") { t.Logf("expected output mentioning unknown; got %q", out) } } // This tests that one can reset the flags. This still works but not well, and is // superseded by FlagSet. func TestChangingArgs(t *testing.T) { ResetForTesting(func() { t.Fatal("bad parse") }) oldArgs := os.Args defer func() { os.Args = oldArgs }() os.Args = []string{"cmd", "-before", "subcmd", "-after", "args"} before := Bool([]string{"before"}, false, "") if err := CommandLine.Parse(os.Args[1:]); err != nil { t.Fatal(err) } cmd := Arg(0) os.Args = Args() after := Bool([]string{"after"}, false, "") Parse() args := Args() if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" { t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args) } } // Test that -help invokes the usage message and returns ErrHelp. func TestHelp(t *testing.T) { var helpCalled = false fs := NewFlagSet("help test", ContinueOnError) fs.Usage = func() { helpCalled = true } var flag bool fs.BoolVar(&flag, []string{"flag"}, false, "regular flag") // Regular flag invocation should work err := fs.Parse([]string{"-flag=true"}) if err != nil { t.Fatal("expected no error; got ", err) } if !flag { t.Error("flag was not set by -flag") } if helpCalled { t.Error("help called for regular flag") helpCalled = false // reset for next test } // Help flag should work as expected. err = fs.Parse([]string{"-help"}) if err == nil { t.Fatal("error expected") } if err != ErrHelp { t.Fatal("expected ErrHelp; got ", err) } if !helpCalled { t.Fatal("help was not called") } // If we define a help flag, that should override. var help bool fs.BoolVar(&help, []string{"help"}, false, "help flag") helpCalled = false err = fs.Parse([]string{"-help"}) if err != nil { t.Fatal("expected no error for defined -help; got ", err) } if helpCalled { t.Fatal("help was called; should not have been for defined help flag") } } // Test the flag count functions. func TestFlagCounts(t *testing.T) { fs := NewFlagSet("help test", ContinueOnError) var flag bool fs.BoolVar(&flag, []string{"flag1"}, false, "regular flag") fs.BoolVar(&flag, []string{"#deprecated1"}, false, "regular flag") fs.BoolVar(&flag, []string{"f", "flag2"}, false, "regular flag") fs.BoolVar(&flag, []string{"#d", "#deprecated2"}, false, "regular flag") fs.BoolVar(&flag, []string{"flag3"}, false, "regular flag") fs.BoolVar(&flag, []string{"g", "#flag4", "-flag4"}, false, "regular flag") if fs.FlagCount() != 6 { t.Fatal("FlagCount wrong. ", fs.FlagCount()) } if fs.FlagCountUndeprecated() != 4 { t.Fatal("FlagCountUndeprecated wrong. 
", fs.FlagCountUndeprecated()) } if fs.NFlag() != 0 { t.Fatal("NFlag wrong. ", fs.NFlag()) } err := fs.Parse([]string{"-fd", "-g", "-flag4"}) if err != nil { t.Fatal("expected no error for defined -help; got ", err) } if fs.NFlag() != 4 { t.Fatal("NFlag wrong. ", fs.NFlag()) } } // Show up bug in sortFlags func TestSortFlags(t *testing.T) { fs := NewFlagSet("help TestSortFlags", ContinueOnError) var err error var b bool fs.BoolVar(&b, []string{"b", "-banana"}, false, "usage") err = fs.Parse([]string{"--banana=true"}) if err != nil { t.Fatal("expected no error; got ", err) } count := 0 fs.VisitAll(func(flag *Flag) { count++ if flag == nil { t.Fatal("VisitAll should not return a nil flag") } }) flagcount := fs.FlagCount() if flagcount != count { t.Fatalf("FlagCount (%d) != number (%d) of elements visited", flagcount, count) } // Make sure its idempotent if flagcount != fs.FlagCount() { t.Fatalf("FlagCount (%d) != fs.FlagCount() (%d) of elements visited", flagcount, fs.FlagCount()) } count = 0 fs.Visit(func(flag *Flag) { count++ if flag == nil { t.Fatal("Visit should not return a nil flag") } }) nflag := fs.NFlag() if nflag != count { t.Fatalf("NFlag (%d) != number (%d) of elements visited", nflag, count) } if nflag != fs.NFlag() { t.Fatalf("NFlag (%d) != fs.NFlag() (%d) of elements visited", nflag, fs.NFlag()) } } docker-1.6.2/pkg/mflag/example/0000755000175000017500000000000012524223634015636 5ustar tianontianondocker-1.6.2/pkg/mflag/example/example.go0000644000175000017500000000221112524223634017614 0ustar tianontianonpackage main import ( "fmt" flag "github.com/docker/docker/pkg/mflag" ) var ( i int str string b, b2, h bool ) func init() { flag.Bool([]string{"#hp", "#-halp"}, false, "display the halp") flag.BoolVar(&b, []string{"b", "#bal", "#bol", "-bal"}, false, "a simple bool") flag.BoolVar(&b, []string{"g", "#gil"}, false, "a simple bool") flag.BoolVar(&b2, []string{"#-bool"}, false, "a simple bool") flag.IntVar(&i, []string{"-integer", "-number"}, -1, "a simple integer") flag.StringVar(&str, []string{"s", "#hidden", "-string"}, "", "a simple string") //-s -hidden and --string will work, but -hidden won't be in the usage flag.BoolVar(&h, []string{"h", "#help", "-help"}, false, "display the help") flag.StringVar(&str, []string{"mode"}, "mode1", "set the mode\nmode1: use the mode1\nmode2: use the mode2\nmode3: use the mode3") flag.Parse() } func main() { if h { flag.PrintDefaults() } else { fmt.Printf("s/#hidden/-string: %s\n", str) fmt.Printf("b: %t\n", b) fmt.Printf("-bool: %t\n", b2) fmt.Printf("s/#hidden/-string(via lookup): %s\n", flag.Lookup("s").Value.String()) fmt.Printf("ARGS: %v\n", flag.Args()) } } docker-1.6.2/pkg/truncindex/0000755000175000017500000000000012524223634015300 5ustar tianontianondocker-1.6.2/pkg/truncindex/truncindex.go0000644000175000017500000000520012524223634020007 0ustar tianontianonpackage truncindex import ( "errors" "fmt" "strings" "sync" "github.com/tchap/go-patricia/patricia" ) var ( ErrEmptyPrefix = errors.New("Prefix can't be empty") ErrAmbiguousPrefix = errors.New("Multiple IDs found with provided prefix") ) func init() { // Change patricia max prefix per node length, // because our len(ID) always 64 patricia.MaxPrefixPerNode = 64 } // TruncIndex allows the retrieval of string identifiers by any of their unique prefixes. // This is used to retrieve image and container IDs by more convenient shorthand prefixes. 
type TruncIndex struct { sync.RWMutex trie *patricia.Trie ids map[string]struct{} } // NewTruncIndex creates a new TruncIndex and initializes it with a list of IDs. func NewTruncIndex(ids []string) (idx *TruncIndex) { idx = &TruncIndex{ ids: make(map[string]struct{}), trie: patricia.NewTrie(), } for _, id := range ids { idx.addID(id) } return } func (idx *TruncIndex) addID(id string) error { if strings.Contains(id, " ") { return fmt.Errorf("illegal character: ' '") } if id == "" { return ErrEmptyPrefix } if _, exists := idx.ids[id]; exists { return fmt.Errorf("id already exists: '%s'", id) } idx.ids[id] = struct{}{} if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted { return fmt.Errorf("failed to insert id: %s", id) } return nil } // Add adds a new ID to the TruncIndex func (idx *TruncIndex) Add(id string) error { idx.Lock() defer idx.Unlock() if err := idx.addID(id); err != nil { return err } return nil } // Delete removes an ID from the TruncIndex. The id must be the full ID, // not a prefix; deleting an unknown or empty id returns an error. func (idx *TruncIndex) Delete(id string) error { idx.Lock() defer idx.Unlock() if _, exists := idx.ids[id]; !exists || id == "" { return fmt.Errorf("no such id: '%s'", id) } delete(idx.ids, id) if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted { return fmt.Errorf("no such id: '%s'", id) } return nil } // Get retrieves an ID from the TruncIndex. If there are multiple IDs // with the given prefix, an error is returned. func (idx *TruncIndex) Get(s string) (string, error) { if s == "" { return "", ErrEmptyPrefix } var ( id string ) subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error { if id != "" { // a second match means the prefix is ambiguous id = "" return ErrAmbiguousPrefix } id = string(prefix) return nil } idx.RLock() defer idx.RUnlock() if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil { return "", err } if id != "" { return id, nil } return "", fmt.Errorf("no such id: %s", s) } docker-1.6.2/pkg/truncindex/truncindex_test.go0000644000175000017500000002307112524223634021054 0ustar tianontianonpackage truncindex import ( "math/rand" "testing" "github.com/docker/docker/pkg/common" )
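// truncIndexExample is a minimal, hypothetical sketch (not part of the
// original tests) of the API exercised below: Add registers full IDs, Get
// resolves any unambiguous prefix, and an ambiguous prefix fails with
// ErrAmbiguousPrefix. The two IDs are invented for illustration.
func truncIndexExample() {
	index := NewTruncIndex([]string{"cafebabe0001", "cafed00d0002"})
	id, _ := index.Get("cafeb") // "cafebabe0001": the prefix is unique
	_, err := index.Get("cafe") // ErrAmbiguousPrefix: both IDs match
	_, _ = id, err
}
// Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix.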
func TestTruncIndex(t *testing.T) { ids := []string{} index := NewTruncIndex(ids) // Get on an empty index if _, err := index.Get("foobar"); err == nil { t.Fatal("Get on an empty index should return an error") } // Spaces should be illegal in an id if err := index.Add("I have a space"); err == nil { t.Fatalf("Adding an id with ' ' should return an error") } id := "99b36c2c326ccc11e726eee6ee78a0baf166ef96" // Add an id if err := index.Add(id); err != nil { t.Fatal(err) } // Add an empty id (should fail) if err := index.Add(""); err == nil { t.Fatalf("Adding an empty id should return an error") } // Get a non-existing id assertIndexGet(t, index, "abracadabra", "", true) // Get an empty id assertIndexGet(t, index, "", "", true) // Get the exact id assertIndexGet(t, index, id, id, false) // The first letter should match assertIndexGet(t, index, id[:1], id, false) // The first half should match assertIndexGet(t, index, id[:len(id)/2], id, false) // The second half should NOT match assertIndexGet(t, index, id[len(id)/2:], "", true) id2 := id[:6] + "blabla" // Add an id if err := index.Add(id2); err != nil { t.Fatal(err) } // Both exact IDs should work assertIndexGet(t, index, id, id, false) assertIndexGet(t, index, id2, id2, false) // 6 characters or less should conflict assertIndexGet(t, index, id[:6], "", true) assertIndexGet(t, index, id[:4], "", true) assertIndexGet(t, index, id[:1], "", true) // An ambiguous id prefix should return ErrAmbiguousPrefix if _, err := index.Get(id[:4]); err != ErrAmbiguousPrefix { t.Fatal("An ambiguous id prefix should return an error") } // 7 characters should NOT conflict assertIndexGet(t, index, id[:7], id, false) assertIndexGet(t, index, id2[:7], id2, false) // Deleting a non-existing id should return an error if err := index.Delete("non-existing"); err == nil { t.Fatalf("Deleting a non-existing id should return an error") } // Deleting an empty id should return an error if err := index.Delete(""); err == nil { t.Fatal("Deleting an empty id should return an error") } // Deleting id2 should remove conflicts if err := index.Delete(id2); err != nil { t.Fatal(err) } // id2 should no longer work assertIndexGet(t, index, id2, "", true) assertIndexGet(t, index, id2[:7], "", true) assertIndexGet(t, index, id2[:11], "", true) // conflicts between id and id2 should be gone assertIndexGet(t, index, id[:6], id, false) assertIndexGet(t, index, id[:4], id, false) assertIndexGet(t, index, id[:1], id, false) // non-conflicting substrings should still not conflict assertIndexGet(t, index, id[:7], id, false) assertIndexGet(t, index, id[:15], id, false) assertIndexGet(t, index, id, id, false) } func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult string, expectError bool) { if result, err := index.Get(input); err != nil && !expectError { t.Fatalf("Unexpected error getting '%s': %s", input, err) } else if err == nil && expectError { t.Fatalf("Getting '%s' should return an error, not '%s'", input, result) } else if result != expectedResult { t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult) } } func BenchmarkTruncIndexAdd100(b *testing.B) { var testSet []string for i := 0; i < 100; i++ { testSet = append(testSet, common.GenerateRandomID()) } b.ResetTimer() for i := 0; i < b.N; i++ { index := NewTruncIndex([]string{}) for _, id := range testSet { if err := index.Add(id); err != nil { b.Fatal(err) } } } } func BenchmarkTruncIndexAdd250(b *testing.B) { var testSet []string for i := 0; i < 250; i++ { testSet = append(testSet,
common.GenerateRandomID()) } b.ResetTimer() for i := 0; i < b.N; i++ { index := NewTruncIndex([]string{}) for _, id := range testSet { if err := index.Add(id); err != nil { b.Fatal(err) } } } } func BenchmarkTruncIndexAdd500(b *testing.B) { var testSet []string for i := 0; i < 500; i++ { testSet = append(testSet, common.GenerateRandomID()) } b.ResetTimer() for i := 0; i < b.N; i++ { index := NewTruncIndex([]string{}) for _, id := range testSet { if err := index.Add(id); err != nil { b.Fatal(err) } } } } func BenchmarkTruncIndexGet100(b *testing.B) { var testSet []string var testKeys []string for i := 0; i < 100; i++ { testSet = append(testSet, common.GenerateRandomID()) } index := NewTruncIndex([]string{}) for _, id := range testSet { if err := index.Add(id); err != nil { b.Fatal(err) } l := rand.Intn(12) + 12 testKeys = append(testKeys, id[:l]) } b.ResetTimer() for i := 0; i < b.N; i++ { for _, id := range testKeys { if res, err := index.Get(id); err != nil { b.Fatal(res, err) } } } } func BenchmarkTruncIndexGet250(b *testing.B) { var testSet []string var testKeys []string for i := 0; i < 250; i++ { testSet = append(testSet, common.GenerateRandomID()) } index := NewTruncIndex([]string{}) for _, id := range testSet { if err := index.Add(id); err != nil { b.Fatal(err) } l := rand.Intn(12) + 12 testKeys = append(testKeys, id[:l]) } b.ResetTimer() for i := 0; i < b.N; i++ { for _, id := range testKeys { if res, err := index.Get(id); err != nil { b.Fatal(res, err) } } } } func BenchmarkTruncIndexGet500(b *testing.B) { var testSet []string var testKeys []string for i := 0; i < 500; i++ { testSet = append(testSet, common.GenerateRandomID()) } index := NewTruncIndex([]string{}) for _, id := range testSet { if err := index.Add(id); err != nil { b.Fatal(err) } l := rand.Intn(12) + 12 testKeys = append(testKeys, id[:l]) } b.ResetTimer() for i := 0; i < b.N; i++ { for _, id := range testKeys { if res, err := index.Get(id); err != nil { b.Fatal(res, err) } } } } func BenchmarkTruncIndexDelete100(b *testing.B) { var testSet []string for i := 0; i < 100; i++ { testSet = append(testSet, common.GenerateRandomID()) } b.ResetTimer() for i := 0; i < b.N; i++ { b.StopTimer() index := NewTruncIndex([]string{}) for _, id := range testSet { if err := index.Add(id); err != nil { b.Fatal(err) } } b.StartTimer() for _, id := range testSet { if err := index.Delete(id); err != nil { b.Fatal(err) } } } } func BenchmarkTruncIndexDelete250(b *testing.B) { var testSet []string for i := 0; i < 250; i++ { testSet = append(testSet, common.GenerateRandomID()) } b.ResetTimer() for i := 0; i < b.N; i++ { b.StopTimer() index := NewTruncIndex([]string{}) for _, id := range testSet { if err := index.Add(id); err != nil { b.Fatal(err) } } b.StartTimer() for _, id := range testSet { if err := index.Delete(id); err != nil { b.Fatal(err) } } } } func BenchmarkTruncIndexDelete500(b *testing.B) { var testSet []string for i := 0; i < 500; i++ { testSet = append(testSet, common.GenerateRandomID()) } b.ResetTimer() for i := 0; i < b.N; i++ { b.StopTimer() index := NewTruncIndex([]string{}) for _, id := range testSet { if err := index.Add(id); err != nil { b.Fatal(err) } } b.StartTimer() for _, id := range testSet { if err := index.Delete(id); err != nil { b.Fatal(err) } } } } func BenchmarkTruncIndexNew100(b *testing.B) { var testSet []string for i := 0; i < 100; i++ { testSet = append(testSet, common.GenerateRandomID()) } b.ResetTimer() for i := 0; i < b.N; i++ { NewTruncIndex(testSet) } } func BenchmarkTruncIndexNew250(b *testing.B) { 
var testSet []string for i := 0; i < 250; i++ { testSet = append(testSet, common.GenerateRandomID()) } b.ResetTimer() for i := 0; i < b.N; i++ { NewTruncIndex(testSet) } } func BenchmarkTruncIndexNew500(b *testing.B) { var testSet []string for i := 0; i < 500; i++ { testSet = append(testSet, common.GenerateRandomID()) } b.ResetTimer() for i := 0; i < b.N; i++ { NewTruncIndex(testSet) } } func BenchmarkTruncIndexAddGet100(b *testing.B) { var testSet []string var testKeys []string for i := 0; i < 100; i++ { id := common.GenerateRandomID() testSet = append(testSet, id) l := rand.Intn(12) + 12 testKeys = append(testKeys, id[:l]) } b.ResetTimer() for i := 0; i < b.N; i++ { index := NewTruncIndex([]string{}) for _, id := range testSet { if err := index.Add(id); err != nil { b.Fatal(err) } } for _, id := range testKeys { if res, err := index.Get(id); err != nil { b.Fatal(res, err) } } } } func BenchmarkTruncIndexAddGet250(b *testing.B) { var testSet []string var testKeys []string for i := 0; i < 250; i++ { id := common.GenerateRandomID() testSet = append(testSet, id) l := rand.Intn(12) + 12 testKeys = append(testKeys, id[:l]) } b.ResetTimer() for i := 0; i < b.N; i++ { index := NewTruncIndex([]string{}) for _, id := range testSet { if err := index.Add(id); err != nil { b.Fatal(err) } } for _, id := range testKeys { if res, err := index.Get(id); err != nil { b.Fatal(res, err) } } } } func BenchmarkTruncIndexAddGet500(b *testing.B) { var testSet []string var testKeys []string for i := 0; i < 500; i++ { id := common.GenerateRandomID() testSet = append(testSet, id) l := rand.Intn(12) + 12 testKeys = append(testKeys, id[:l]) } b.ResetTimer() for i := 0; i < b.N; i++ { index := NewTruncIndex([]string{}) for _, id := range testSet { if err := index.Add(id); err != nil { b.Fatal(err) } } for _, id := range testKeys { if res, err := index.Get(id); err != nil { b.Fatal(res, err) } } } } docker-1.6.2/pkg/iptables/0000755000175000017500000000000012524223634014720 5ustar tianontianondocker-1.6.2/pkg/iptables/iptables.go0000644000175000017500000001724512524223634017063 0ustar tianontianonpackage iptables import ( "errors" "fmt" "net" "os/exec" "regexp" "strconv" "strings" log "github.com/Sirupsen/logrus" ) type Action string type Table string const ( Append Action = "-A" Delete Action = "-D" Insert Action = "-I" Nat Table = "nat" Filter Table = "filter" Mangle Table = "mangle" ) var ( iptablesPath string supportsXlock = false ErrIptablesNotFound = errors.New("Iptables not found") ) type Chain struct { Name string Bridge string Table Table } type ChainError struct { Chain string Output []byte } func (e *ChainError) Error() string { return fmt.Sprintf("Error iptables %s: %s", e.Chain, string(e.Output)) } func initCheck() error { if iptablesPath == "" { path, err := exec.LookPath("iptables") if err != nil { return ErrIptablesNotFound } iptablesPath = path supportsXlock = exec.Command(iptablesPath, "--wait", "-L", "-n").Run() == nil } return nil } func NewChain(name, bridge string, table Table) (*Chain, error) { c := &Chain{ Name: name, Bridge: bridge, Table: table, } if string(c.Table) == "" { c.Table = Filter } // Add chain if it doesn't exist if _, err := Raw("-t", string(c.Table), "-n", "-L", c.Name); err != nil { if output, err := Raw("-t", string(c.Table), "-N", c.Name); err != nil { return nil, err } else if len(output) != 0 { return nil, fmt.Errorf("Could not create %s/%s chain: %s", c.Table, c.Name, output) } } switch table { case Nat: preroute := []string{ "-m", "addrtype", "--dst-type", "LOCAL"} if
!Exists(Nat, "PREROUTING", preroute...) { if err := c.Prerouting(Append, preroute...); err != nil { return nil, fmt.Errorf("Failed to inject docker in PREROUTING chain: %s", err) } } output := []string{ "-m", "addrtype", "--dst-type", "LOCAL", "!", "--dst", "127.0.0.0/8"} if !Exists(Nat, "OUTPUT", output...) { if err := c.Output(Append, output...); err != nil { return nil, fmt.Errorf("Failed to inject docker in OUTPUT chain: %s", err) } } case Filter: link := []string{ "-o", c.Bridge, "-j", c.Name} if !Exists(Filter, "FORWARD", link...) { insert := append([]string{string(Insert), "FORWARD"}, link...) if output, err := Raw(insert...); err != nil { return nil, err } else if len(output) != 0 { return nil, fmt.Errorf("Could not create linking rule to %s/%s: %s", c.Table, c.Name, output) } } } return c, nil } func RemoveExistingChain(name string, table Table) error { c := &Chain{ Name: name, Table: table, } if string(c.Table) == "" { c.Table = Filter } return c.Remove() } // Add forwarding rule to 'filter' table and corresponding nat rule to 'nat' table func (c *Chain) Forward(action Action, ip net.IP, port int, proto, destAddr string, destPort int) error { daddr := ip.String() if ip.IsUnspecified() { // iptables interprets "0.0.0.0" as "0.0.0.0/32", whereas we // want "0.0.0.0/0". "0/0" is correctly interpreted as "any // value" by both iptables and ip6tables. daddr = "0/0" } if output, err := Raw("-t", string(Nat), string(action), c.Name, "-p", proto, "-d", daddr, "--dport", strconv.Itoa(port), "!", "-i", c.Bridge, "-j", "DNAT", "--to-destination", net.JoinHostPort(destAddr, strconv.Itoa(destPort))); err != nil { return err } else if len(output) != 0 { return &ChainError{Chain: "FORWARD", Output: output} } if output, err := Raw("-t", string(Filter), string(action), c.Name, "!", "-i", c.Bridge, "-o", c.Bridge, "-p", proto, "-d", destAddr, "--dport", strconv.Itoa(destPort), "-j", "ACCEPT"); err != nil { return err } else if len(output) != 0 { return &ChainError{Chain: "FORWARD", Output: output} } if output, err := Raw("-t", string(Nat), string(action), "POSTROUTING", "-p", proto, "-s", destAddr, "-d", destAddr, "--dport", strconv.Itoa(destPort), "-j", "MASQUERADE"); err != nil { return err } else if len(output) != 0 { return &ChainError{Chain: "FORWARD", Output: output} } return nil } // Add reciprocal ACCEPT rule for two supplied IP addresses. // Traffic is allowed from ip1 to ip2 and vice-versa func (c *Chain) Link(action Action, ip1, ip2 net.IP, port int, proto string) error { if output, err := Raw("-t", string(Filter), string(action), c.Name, "-i", c.Bridge, "-o", c.Bridge, "-p", proto, "-s", ip1.String(), "-d", ip2.String(), "--dport", strconv.Itoa(port), "-j", "ACCEPT"); err != nil { return err } else if len(output) != 0 { return fmt.Errorf("Error iptables forward: %s", output) } if output, err := Raw("-t", string(Filter), string(action), c.Name, "-i", c.Bridge, "-o", c.Bridge, "-p", proto, "-s", ip2.String(), "-d", ip1.String(), "--sport", strconv.Itoa(port), "-j", "ACCEPT"); err != nil { return err } else if len(output) != 0 { return fmt.Errorf("Error iptables forward: %s", output) } return nil } // Add linking rule to nat/PREROUTING chain. func (c *Chain) Prerouting(action Action, args ...string) error { a := []string{"-t", string(Nat), string(action), "PREROUTING"} if len(args) > 0 { a = append(a, args...) 
} if output, err := Raw(append(a, "-j", c.Name)...); err != nil { return err } else if len(output) != 0 { return &ChainError{Chain: "PREROUTING", Output: output} } return nil } // Add linking rule to an OUTPUT chain func (c *Chain) Output(action Action, args ...string) error { a := []string{"-t", string(c.Table), string(action), "OUTPUT"} if len(args) > 0 { a = append(a, args...) } if output, err := Raw(append(a, "-j", c.Name)...); err != nil { return err } else if len(output) != 0 { return &ChainError{Chain: "OUTPUT", Output: output} } return nil } func (c *Chain) Remove() error { // Ignore errors - This could mean the chains were never set up if c.Table == Nat { c.Prerouting(Delete, "-m", "addrtype", "--dst-type", "LOCAL") c.Output(Delete, "-m", "addrtype", "--dst-type", "LOCAL", "!", "--dst", "127.0.0.0/8") c.Output(Delete, "-m", "addrtype", "--dst-type", "LOCAL") // Created in versions <= 0.1.6 c.Prerouting(Delete) c.Output(Delete) } Raw("-t", string(c.Table), "-F", c.Name) Raw("-t", string(c.Table), "-X", c.Name) return nil } // Check if a rule exists func Exists(table Table, chain string, rule ...string) bool { if string(table) == "" { table = Filter } // iptables -C, --check option was added in v.1.4.11 // http://ftp.netfilter.org/pub/iptables/changes-iptables-1.4.11.txt // try -C // if exit status is 0 then return true, the rule exists if _, err := Raw(append([]string{ "-t", string(table), "-C", chain}, rule...)...); err == nil { return true } // parse "iptables -S" for the rule (this checks rules in a specific chain // in a specific table) rule_string := strings.Join(rule, " ") existingRules, _ := exec.Command("iptables", "-t", string(table), "-S", chain).Output() // regex to replace ips in rule // because MASQUERADE rule will not be exactly what was passed re := regexp.MustCompile(`[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\/[0-9]{1,2}`) return strings.Contains( re.ReplaceAllString(string(existingRules), "?"), re.ReplaceAllString(rule_string, "?"), ) } // Call 'iptables' system command, passing supplied arguments func Raw(args ...string) ([]byte, error) { if err := initCheck(); err != nil { return nil, err } if supportsXlock { args = append([]string{"--wait"}, args...) 
} log.Debugf("%s, %v", iptablesPath, args) output, err := exec.Command(iptablesPath, args...).CombinedOutput() if err != nil { return nil, fmt.Errorf("iptables failed: iptables %v: %s (%s)", strings.Join(args, " "), output, err) } // ignore iptables' message about xtables lock if strings.Contains(string(output), "waiting for it to exit") { output = []byte("") } return output, err } docker-1.6.2/pkg/iptables/iptables_test.go0000644000175000017500000000746012524223634020120 0ustar tianontianonpackage iptables import ( "net" "os/exec" "strconv" "strings" "testing" ) const chainName = "DOCKERTEST" var natChain *Chain var filterChain *Chain func TestNewChain(t *testing.T) { var err error natChain, err = NewChain(chainName, "lo", Nat) if err != nil { t.Fatal(err) } filterChain, err = NewChain(chainName, "lo", Filter) if err != nil { t.Fatal(err) } } func TestForward(t *testing.T) { ip := net.ParseIP("192.168.1.1") port := 1234 dstAddr := "172.17.0.1" dstPort := 4321 proto := "tcp" err := natChain.Forward(Insert, ip, port, proto, dstAddr, dstPort) if err != nil { t.Fatal(err) } dnatRule := []string{ "!", "-i", filterChain.Bridge, "-d", ip.String(), "-p", proto, "--dport", strconv.Itoa(port), "-j", "DNAT", "--to-destination", dstAddr + ":" + strconv.Itoa(dstPort), } if !Exists(natChain.Table, natChain.Name, dnatRule...) { t.Fatalf("DNAT rule does not exist") } filterRule := []string{ "!", "-i", filterChain.Bridge, "-o", filterChain.Bridge, "-d", dstAddr, "-p", proto, "--dport", strconv.Itoa(dstPort), "-j", "ACCEPT", } if !Exists(filterChain.Table, filterChain.Name, filterRule...) { t.Fatalf("filter rule does not exist") } masqRule := []string{ "-d", dstAddr, "-s", dstAddr, "-p", proto, "--dport", strconv.Itoa(dstPort), "-j", "MASQUERADE", } if !Exists(natChain.Table, "POSTROUTING", masqRule...) { t.Fatalf("MASQUERADE rule does not exist") } } func TestLink(t *testing.T) { var err error ip1 := net.ParseIP("192.168.1.1") ip2 := net.ParseIP("192.168.1.2") port := 1234 proto := "tcp" err = filterChain.Link(Append, ip1, ip2, port, proto) if err != nil { t.Fatal(err) } rule1 := []string{ "-i", filterChain.Bridge, "-o", filterChain.Bridge, "-p", proto, "-s", ip1.String(), "-d", ip2.String(), "--dport", strconv.Itoa(port), "-j", "ACCEPT"} if !Exists(filterChain.Table, filterChain.Name, rule1...) { t.Fatalf("rule1 does not exist") } rule2 := []string{ "-i", filterChain.Bridge, "-o", filterChain.Bridge, "-p", proto, "-s", ip2.String(), "-d", ip1.String(), "--sport", strconv.Itoa(port), "-j", "ACCEPT"} if !Exists(filterChain.Table, filterChain.Name, rule2...) { t.Fatalf("rule2 does not exist") } } func TestPrerouting(t *testing.T) { args := []string{ "-i", "lo", "-d", "192.168.1.1"} err := natChain.Prerouting(Insert, args...) if err != nil { t.Fatal(err) } rule := []string{ "-j", natChain.Name} rule = append(rule, args...) if !Exists(natChain.Table, "PREROUTING", rule...) { t.Fatalf("rule does not exist") } delRule := append([]string{"-D", "PREROUTING", "-t", string(Nat)}, rule...) if _, err = Raw(delRule...); err != nil { t.Fatal(err) } } func TestOutput(t *testing.T) { args := []string{ "-o", "lo", "-d", "192.168.1.1"} err := natChain.Output(Insert, args...) if err != nil { t.Fatal(err) } rule := []string{ "-j", natChain.Name} rule = append(rule, args...) if !Exists(natChain.Table, "OUTPUT", rule...) { t.Fatalf("rule does not exist") } delRule := append([]string{"-D", "OUTPUT", "-t", string(natChain.Table)}, rule...) 
if _, err = Raw(delRule...); err != nil { t.Fatal(err) } } func TestCleanup(t *testing.T) { var err error var rules []byte // Cleanup filter/FORWARD first otherwise output of iptables-save is dirty link := []string{"-t", string(filterChain.Table), string(Delete), "FORWARD", "-o", filterChain.Bridge, "-j", filterChain.Name} if _, err = Raw(link...); err != nil { t.Fatal(err) } filterChain.Remove() err = RemoveExistingChain(chainName, Nat) if err != nil { t.Fatal(err) } rules, err = exec.Command("iptables-save").Output() if err != nil { t.Fatal(err) } if strings.Contains(string(rules), chainName) { t.Fatalf("Removing chain failed. %s found in iptables-save", chainName) } } docker-1.6.2/pkg/README.md0000644000175000017500000000150312524223634014373 0ustar tianontianonpkg/ is a collection of utility packages used by the Docker project without being specific to its internals. Utility packages are kept separate from the docker core codebase to keep it as small and concise as possible. If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the Docker organization, to facilitate re-use by other projects. However that is not the priority. The directory `pkg` is named after the same directory in the camlistore project. Since Brad is a core Go maintainer, we thought it made sense to copy his methods for organizing Go code :) Thanks Brad! Because utility packages are small and neatly separated from the rest of the codebase, they are a good place to start for aspiring maintainers and contributors. Get in touch if you want to help maintain them! docker-1.6.2/pkg/urlutil/0000755000175000017500000000000012524223634014615 5ustar tianontianondocker-1.6.2/pkg/urlutil/url.go0000644000175000017500000000055012524223634015746 0ustar tianontianonpackage urlutil import "strings" var validUrlPrefixes = []string{ "http://", "https://", } // IsURL returns true if the provided str is a valid URL by doing // a simple check for the transport of the url. func IsURL(str string) bool { for _, prefix := range validUrlPrefixes { if strings.HasPrefix(str, prefix) { return true } } return false } docker-1.6.2/pkg/urlutil/git_test.go0000644000175000017500000000172012524223634016766 0ustar tianontianonpackage urlutil import "testing" var ( gitUrls = []string{ "git://github.com/docker/docker", "git@github.com:docker/docker.git", "git@bitbucket.org:atlassianlabs/atlassian-docker.git", "https://github.com/docker/docker.git", "http://github.com/docker/docker.git", } incompleteGitUrls = []string{ "github.com/docker/docker", } ) func TestValidGitTransport(t *testing.T) { for _, url := range gitUrls { if IsGitTransport(url) == false { t.Fatalf("%q should be detected as valid Git prefix", url) } } for _, url := range incompleteGitUrls { if IsGitTransport(url) == true { t.Fatalf("%q should not be detected as valid Git prefix", url) } } } func TestIsGIT(t *testing.T) { for _, url := range gitUrls { if IsGitURL(url) == false { t.Fatalf("%q should be detected as valid Git url", url) } } for _, url := range incompleteGitUrls { if IsGitURL(url) == false { t.Fatalf("%q should be detected as valid Git url", url) } } } docker-1.6.2/pkg/urlutil/git.go0000644000175000017500000000124412524223634015730 0ustar tianontianonpackage urlutil import "strings" var ( validPrefixes = []string{ "git://", "github.com/", "git@", } )
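// gitURLExamples is a hypothetical sketch (not part of the original source)
// of the two predicates defined below. Note the asymmetry: IsGitURL accepts
// bare well-known prefixes such as "github.com/", while IsGitTransport only
// accepts http(s) URLs and explicit git transports. The URLs are invented.
func gitURLExamples() []bool {
	return []bool{
		IsGitURL("https://github.com/docker/docker.git"), // true: URL ending in .git
		IsGitURL("github.com/docker/docker"),             // true: well-known prefix
		IsGitTransport("github.com/docker/docker"),       // false: no transport
		IsGitTransport("git@github.com:docker/docker"),   // true: git@ transport
	}
}
// IsGitURL returns true if the provided str is a git repository URL.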
func IsGitURL(str string) bool { if IsURL(str) && strings.HasSuffix(str, ".git") { return true } for _, prefix := range validPrefixes { if strings.HasPrefix(str, prefix) { return true } } return false } // IsGitTransport returns true if the provided str is a git transport by inspecting // the prefix of the string for known protocols used in git. func IsGitTransport(str string) bool { return IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") } docker-1.6.2/pkg/namesgenerator/0000755000175000017500000000000012524223634016127 5ustar tianontianondocker-1.6.2/pkg/namesgenerator/names-generator_test.go0000644000175000017500000000122412524223634022603 0ustar tianontianonpackage namesgenerator import ( "testing" ) // Make sure the generated names are awesome func TestGenerateAwesomeNames(t *testing.T) { name := GetRandomName(0) if !isAwesome(name) { t.Fatalf("Generated name '%s' is not awesome.", name) } } // To be awesome, a container name must involve cool inventors, be easy to remember, // be at least mildly funny, and always be politically correct for enterprise adoption. func isAwesome(name string) bool { coolInventorNames := true easyToRemember := true mildlyFunnyOnOccasion := true politicallyCorrect := true return coolInventorNames && easyToRemember && mildlyFunnyOnOccasion && politicallyCorrect } docker-1.6.2/pkg/namesgenerator/names-generator.go0000644000175000017500000003361012524223634021550 0ustar tianontianonpackage namesgenerator import ( "fmt" "math/rand" "time" ) var ( left = [...]string{ "admiring", "adoring", "agitated", "angry", "backstabbing", "berserk", "boring", "clever", "cocky", "compassionate", "condescending", "cranky", "desperate", "determined", "distracted", "dreamy", "drunk", "ecstatic", "elated", "elegant", "evil", "fervent", "focused", "furious", "gloomy", "goofy", "grave", "happy", "high", "hopeful", "hungry", "insane", "jolly", "jovial", "kickass", "lonely", "loving", "mad", "modest", "naughty", "nostalgic", "pensive", "prickly", "reverent", "romantic", "sad", "serene", "sharp", "sick", "silly", "sleepy", "stoic", "stupefied", "suspicious", "tender", "thirsty", "trusting", } // Docker, starting from 0.7.x, generates names from notable scientists and hackers. right = [...]string{ // Muhammad ibn JÄbir al-ḤarrÄnÄ« al-BattÄnÄ« was a founding father of astronomy. https://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB "albattani", // June Almeida - Scottish virologist who took the first pictures of the rubella virus - https://en.wikipedia.org/wiki/June_Almeida "almeida", // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. https://en.wikipedia.org/wiki/Archimedes "archimedes", // Maria Ardinghelli - Italian translator, mathematician and physicist - https://en.wikipedia.org/wiki/Maria_Ardinghelli "ardinghelli", // Charles Babbage invented the concept of a programmable computer. https://en.wikipedia.org/wiki/Charles_Babbage. "babbage", // Stefan Banach - Polish mathematician, was one of the founders of modern functional analysis. https://en.wikipedia.org/wiki/Stefan_Banach "banach", // William Shockley, Walter Houser Brattain and John Bardeen co-invented the transistor (thanks Brian Goff). 
// - https://en.wikipedia.org/wiki/John_Bardeen // - https://en.wikipedia.org/wiki/Walter_Houser_Brattain // - https://en.wikipedia.org/wiki/William_Shockley "bardeen", "brattain", "shockley", // Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. https://en.wikipedia.org/wiki/Jean_Bartik "bartik", // Alexander Graham Bell - an eminent Scottish-born scientist, inventor, engineer and innovator who is credited with inventing the first practical telephone - https://en.wikipedia.org/wiki/Alexander_Graham_Bell "bell", // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - https://en.wikipedia.org/wiki/Elizabeth_Blackwell "blackwell", // Niels Bohr is the father of quantum theory. https://en.wikipedia.org/wiki/Niels_Bohr. "bohr", // Emmett Brown invented time travel. https://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) "brown", // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. https://en.wikipedia.org/wiki/Rachel_Carson "carson", // Jane Colden - American botanist widely considered the first female American botanist - https://en.wikipedia.org/wiki/Jane_Colden "colden", // Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. https://en.wikipedia.org/wiki/Gerty_Cori "cori", // Marie Curie discovered radioactivity. https://en.wikipedia.org/wiki/Marie_Curie. "curie", // Charles Darwin established the principles of natural evolution. https://en.wikipedia.org/wiki/Charles_Darwin. "darwin", // Leonardo Da Vinci invented too many things to list here. https://en.wikipedia.org/wiki/Leonardo_da_Vinci. "davinci", // Albert Einstein invented the general theory of relativity. https://en.wikipedia.org/wiki/Albert_Einstein "einstein", // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - https://en.wikipedia.org/wiki/Gertrude_Elion "elion", // Douglas Engelbart gave the mother of all demos: https://en.wikipedia.org/wiki/Douglas_Engelbart "engelbart", // Euclid invented geometry. https://en.wikipedia.org/wiki/Euclid "euclid", // Pierre de Fermat pioneered several aspects of modern mathematics. https://en.wikipedia.org/wiki/Pierre_de_Fermat "fermat", // Enrico Fermi invented the first nuclear reactor. https://en.wikipedia.org/wiki/Enrico_Fermi. "fermi", // Richard Feynman was a key contributor to quantum mechanics and particle physics. https://en.wikipedia.org/wiki/Richard_Feynman "feynman", // Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod. "franklin", // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. https://en.wikipedia.org/wiki/Galileo_Galilei "galileo", // Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. https://en.wikipedia.org/wiki/Adele_Goldstine "goldstine", // Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - https://en.wikipedia.org/wiki/Jane_Goodall "goodall", // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. 
https://en.wikipedia.org/wiki/Stephen_Hawking "hawking", // Werner Heisenberg was a founding father of quantum mechanics. https://en.wikipedia.org/wiki/Werner_Heisenberg "heisenberg", // Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was awarded the Nobel Prize in Chemistry in 1964. https://en.wikipedia.org/wiki/Dorothy_Hodgkin "hodgkin", // Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephone switching method. https://en.wikipedia.org/wiki/Erna_Schneider_Hoover "hoover", // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. https://en.wikipedia.org/wiki/Grace_Hopper "hopper", // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - https://en.wikipedia.org/wiki/Hypatia "hypatia", // Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil "jang", // Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. https://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones "jones", // Maria Kirch - German astronomer and first woman to discover a comet - https://en.wikipedia.org/wiki/Maria_Margarethe_Kirch "kirch", // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - https://en.wikipedia.org/wiki/Sofia_Kovalevskaya "kowalevski", // Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - https://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande "lalande", // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - https://en.wikipedia.org/wiki/Mary_Leakey "leakey", // Ada Lovelace invented the first algorithm. https://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) "lovelace", // Auguste and Louis Lumière - the first filmmakers in history - https://en.wikipedia.org/wiki/Auguste_and_Louis_Lumi%C3%A8re "lumiere", // Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - https://en.wikipedia.org/wiki/Maria_Mayer "mayer", // John McCarthy invented LISP: https://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist) "mccarthy", // Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. https://en.wikipedia.org/wiki/Barbara_McClintock "mcclintock", // Malcolm McLean invented the modern shipping container: https://en.wikipedia.org/wiki/Malcom_McLean "mclean", // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. The element meitnerium is named after her - https://en.wikipedia.org/wiki/Lise_Meitner "meitner", // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf "mestorf", // Samuel Morse - contributed to the invention of a single-wire telegraph system based on European telegraphs and was a co-developer of the Morse code - https://en.wikipedia.org/wiki/Samuel_Morse "morse", // Isaac Newton invented classical mechanics and modern optics.
https://en.wikipedia.org/wiki/Isaac_Newton "newton", // Alfred Nobel - a Swedish chemist, engineer, innovator, and armaments manufacturer (inventor of dynamite) - https://en.wikipedia.org/wiki/Alfred_Nobel "nobel", // Cecilia Payne-Gaposchkin was an astronomer and astrophysicist who, in 1925, proposed in her Ph.D. thesis an explanation for the composition of stars in terms of the relative abundances of hydrogen and helium. https://en.wikipedia.org/wiki/Cecilia_Payne-Gaposchkin "payne", // Ambroise Pare invented modern surgery. https://en.wikipedia.org/wiki/Ambroise_Par%C3%A9 "pare", // Louis Pasteur discovered vaccination, fermentation and pasteurization. https://en.wikipedia.org/wiki/Louis_Pasteur. "pasteur", // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). https://en.wikipedia.org/wiki/Radia_Perlman "perlman", // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. https://en.wikipedia.org/wiki/Rob_Pike "pike", // Henri Poincaré made fundamental contributions in several fields of mathematics. https://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 "poincare", // Laura Poitras is a director and producer whose work, made possible by open source crypto tools, advances the causes of truth and freedom of information by reporting disclosures by whistleblowers such as Edward Snowden. https://en.wikipedia.org/wiki/Laura_Poitras "poitras", // Claudius Ptolemy - a Greco-Egyptian writer of Alexandria, known as a mathematician, astronomer, geographer, astrologer, and poet of a single epigram in the Greek Anthology - https://en.wikipedia.org/wiki/Ptolemy "ptolemy", // Dennis Ritchie and Ken Thompson created UNIX and the C programming language. // - https://en.wikipedia.org/wiki/Dennis_Ritchie // - https://en.wikipedia.org/wiki/Ken_Thompson "ritchie", "thompson", // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - https://en.wikipedia.org/wiki/Rosalind_Franklin "rosalind", // Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. https://en.wikipedia.org/wiki/Jean_E._Sammet "sammet", // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. https://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi "sinoussi", // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. https://en.wikiquote.org/wiki/Richard_Stallman "stallman", // Aaron Swartz was influential in creating RSS, Markdown, Creative Commons, Reddit, and much of the internet as we know it today. He was devoted to freedom of information on the web. https://en.wikiquote.org/wiki/Aaron_Swartz "swartz", // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. https://en.wikipedia.org/wiki/Nikola_Tesla "tesla", // Linus Torvalds invented Linux and Git. https://en.wikipedia.org/wiki/Linus_Torvalds "torvalds", // Alan Turing was a founding father of computer science. https://en.wikipedia.org/wiki/Alan_Turing. 
"turing", // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. https://en.wikipedia.org/wiki/Sophie_Wilson "wilson", // Steve Wozniak invented the Apple I and Apple II. https://en.wikipedia.org/wiki/Steve_Wozniak "wozniak", // The Wright brothers, Orville and Wilbur - credited with inventing and building the world's first successful airplane and making the first controlled, powered and sustained heavier-than-air human flight - https://en.wikipedia.org/wiki/Wright_brothers "wright", // Rosalyn Sussman Yalow - Rosalyn Sussman Yalow was an American medical physicist, and a co-winner of the 1977 Nobel Prize in Physiology or Medicine for development of the radioimmunoassay technique. https://en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow "yalow", // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. https://en.wikipedia.org/wiki/Ada_Yonath "yonath", } ) func GetRandomName(retry int) string { rand.Seed(time.Now().UnixNano()) begin: name := fmt.Sprintf("%s_%s", left[rand.Intn(len(left))], right[rand.Intn(len(right))]) if name == "boring_wozniak" /* Steve Wozniak is not boring */ { goto begin } if retry > 0 { name = fmt.Sprintf("%s%d", name, rand.Intn(10)) } return name } docker-1.6.2/pkg/units/0000755000175000017500000000000012524223634014257 5ustar tianontianondocker-1.6.2/pkg/units/size.go0000644000175000017500000000425612524223634015567 0ustar tianontianonpackage units import ( "fmt" "regexp" "strconv" "strings" ) // See: http://en.wikipedia.org/wiki/Binary_prefix const ( // Decimal KB = 1000 MB = 1000 * KB GB = 1000 * MB TB = 1000 * GB PB = 1000 * TB // Binary KiB = 1024 MiB = 1024 * KiB GiB = 1024 * MiB TiB = 1024 * GiB PiB = 1024 * TiB ) type unitMap map[string]int64 var ( decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`) ) var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} // HumanSize returns a human-readable approximation of a size // using SI standard (eg. "44kB", "17MB") func HumanSize(size float64) string { return intToString(float64(size), 1000.0, decimapAbbrs) } func BytesSize(size float64) string { return intToString(size, 1024.0, binaryAbbrs) } func intToString(size, unit float64, _map []string) string { i := 0 for size >= unit { size = size / unit i++ } return fmt.Sprintf("%.4g %s", size, _map[i]) } // FromHumanSize returns an integer from a human-readable specification of a // size using SI standard (eg. "44kB", "17MB") func FromHumanSize(size string) (int64, error) { return parseSize(size, decimalMap) } // RAMInBytes parses a human-readable string representing an amount of RAM // in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and // returns the number of bytes, or -1 if the string is unparseable. // Units are case-insensitive, and the 'b' suffix is optional. 
func RAMInBytes(size string) (int64, error) { return parseSize(size, binaryMap) } // Parses the human-readable size string into the amount it represents func parseSize(sizeStr string, uMap unitMap) (int64, error) { matches := sizeRegex.FindStringSubmatch(sizeStr) if len(matches) != 3 { return -1, fmt.Errorf("invalid size: '%s'", sizeStr) } size, err := strconv.ParseInt(matches[1], 10, 0) if err != nil { return -1, err } unitPrefix := strings.ToLower(matches[2]) if mul, ok := uMap[unitPrefix]; ok { size *= mul } return size, nil } docker-1.6.2/pkg/units/duration_test.go0000644000175000017500000000402512524223634017473 0ustar tianontianonpackage units import ( "testing" "time" ) func TestHumanDuration(t *testing.T) { // Useful duration abstractions day := 24 * time.Hour week := 7 * day month := 30 * day year := 365 * day assertEquals(t, "Less than a second", HumanDuration(450*time.Millisecond)) assertEquals(t, "47 seconds", HumanDuration(47*time.Second)) assertEquals(t, "About a minute", HumanDuration(1*time.Minute)) assertEquals(t, "3 minutes", HumanDuration(3*time.Minute)) assertEquals(t, "35 minutes", HumanDuration(35*time.Minute)) assertEquals(t, "35 minutes", HumanDuration(35*time.Minute+40*time.Second)) assertEquals(t, "About an hour", HumanDuration(1*time.Hour)) assertEquals(t, "About an hour", HumanDuration(1*time.Hour+45*time.Minute)) assertEquals(t, "3 hours", HumanDuration(3*time.Hour)) assertEquals(t, "3 hours", HumanDuration(3*time.Hour+59*time.Minute)) assertEquals(t, "4 hours", HumanDuration(3*time.Hour+60*time.Minute)) assertEquals(t, "24 hours", HumanDuration(24*time.Hour)) assertEquals(t, "36 hours", HumanDuration(1*day+12*time.Hour)) assertEquals(t, "2 days", HumanDuration(2*day)) assertEquals(t, "7 days", HumanDuration(7*day)) assertEquals(t, "13 days", HumanDuration(13*day+5*time.Hour)) assertEquals(t, "2 weeks", HumanDuration(2*week)) assertEquals(t, "2 weeks", HumanDuration(2*week+4*day)) assertEquals(t, "3 weeks", HumanDuration(3*week)) assertEquals(t, "4 weeks", HumanDuration(4*week)) assertEquals(t, "4 weeks", HumanDuration(4*week+3*day)) assertEquals(t, "4 weeks", HumanDuration(1*month)) assertEquals(t, "6 weeks", HumanDuration(1*month+2*week)) assertEquals(t, "8 weeks", HumanDuration(2*month)) assertEquals(t, "3 months", HumanDuration(3*month+1*week)) assertEquals(t, "5 months", HumanDuration(5*month+2*week)) assertEquals(t, "13 months", HumanDuration(13*month)) assertEquals(t, "23 months", HumanDuration(23*month)) assertEquals(t, "24 months", HumanDuration(24*month)) assertEquals(t, "2.010959 years", HumanDuration(24*month+2*week)) assertEquals(t, "3.164384 years", HumanDuration(3*year+2*month)) } docker-1.6.2/pkg/units/duration.go0000644000175000017500000000162412524223634016436 0ustar tianontianonpackage units import ( "fmt" "time" ) // HumanDuration returns a human-readable approximation of a duration // (eg. "About a minute", "4 hours ago", etc.) 
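//
// Illustrative examples, matching the cases exercised in duration_test.go:
//
//	HumanDuration(47 * time.Second) // "47 seconds"
//	HumanDuration(90 * time.Minute) // "About an hour"
//	HumanDuration(49 * time.Hour)   // "2 days"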
func HumanDuration(d time.Duration) string { if seconds := int(d.Seconds()); seconds < 1 { return "Less than a second" } else if seconds < 60 { return fmt.Sprintf("%d seconds", seconds) } else if minutes := int(d.Minutes()); minutes == 1 { return "About a minute" } else if minutes < 60 { return fmt.Sprintf("%d minutes", minutes) } else if hours := int(d.Hours()); hours == 1 { return "About an hour" } else if hours < 48 { return fmt.Sprintf("%d hours", hours) } else if hours < 24*7*2 { return fmt.Sprintf("%d days", hours/24) } else if hours < 24*30*3 { return fmt.Sprintf("%d weeks", hours/24/7) } else if hours < 24*365*2 { return fmt.Sprintf("%d months", hours/24/30) } return fmt.Sprintf("%f years", d.Hours()/24/365) } docker-1.6.2/pkg/units/size_test.go0000644000175000017500000000726212524223634016626 0ustar tianontianonpackage units import ( "reflect" "runtime" "strings" "testing" ) func TestBytesSize(t *testing.T) { assertEquals(t, "1 KiB", BytesSize(1024)) assertEquals(t, "1 MiB", BytesSize(1024*1024)) assertEquals(t, "1 MiB", BytesSize(1048576)) assertEquals(t, "2 MiB", BytesSize(2*MiB)) assertEquals(t, "3.42 GiB", BytesSize(3.42*GiB)) assertEquals(t, "5.372 TiB", BytesSize(5.372*TiB)) assertEquals(t, "2.22 PiB", BytesSize(2.22*PiB)) } func TestHumanSize(t *testing.T) { assertEquals(t, "1 kB", HumanSize(1000)) assertEquals(t, "1.024 kB", HumanSize(1024)) assertEquals(t, "1 MB", HumanSize(1000000)) assertEquals(t, "1.049 MB", HumanSize(1048576)) assertEquals(t, "2 MB", HumanSize(2*MB)) assertEquals(t, "3.42 GB", HumanSize(float64(3.42*GB))) assertEquals(t, "5.372 TB", HumanSize(float64(5.372*TB))) assertEquals(t, "2.22 PB", HumanSize(float64(2.22*PB))) } func TestFromHumanSize(t *testing.T) { assertSuccessEquals(t, 32, FromHumanSize, "32") assertSuccessEquals(t, 32, FromHumanSize, "32b") assertSuccessEquals(t, 32, FromHumanSize, "32B") assertSuccessEquals(t, 32*KB, FromHumanSize, "32k") assertSuccessEquals(t, 32*KB, FromHumanSize, "32K") assertSuccessEquals(t, 32*KB, FromHumanSize, "32kb") assertSuccessEquals(t, 32*KB, FromHumanSize, "32Kb") assertSuccessEquals(t, 32*MB, FromHumanSize, "32Mb") assertSuccessEquals(t, 32*GB, FromHumanSize, "32Gb") assertSuccessEquals(t, 32*TB, FromHumanSize, "32Tb") assertSuccessEquals(t, 32*PB, FromHumanSize, "32Pb") assertError(t, FromHumanSize, "") assertError(t, FromHumanSize, "hello") assertError(t, FromHumanSize, "-32") assertError(t, FromHumanSize, "32.3") assertError(t, FromHumanSize, " 32 ") assertError(t, FromHumanSize, "32.3Kb") assertError(t, FromHumanSize, "32 mb") assertError(t, FromHumanSize, "32m b") assertError(t, FromHumanSize, "32bm") } func TestRAMInBytes(t *testing.T) { assertSuccessEquals(t, 32, RAMInBytes, "32") assertSuccessEquals(t, 32, RAMInBytes, "32b") assertSuccessEquals(t, 32, RAMInBytes, "32B") assertSuccessEquals(t, 32*KiB, RAMInBytes, "32k") assertSuccessEquals(t, 32*KiB, RAMInBytes, "32K") assertSuccessEquals(t, 32*KiB, RAMInBytes, "32kb") assertSuccessEquals(t, 32*KiB, RAMInBytes, "32Kb") assertSuccessEquals(t, 32*MiB, RAMInBytes, "32Mb") assertSuccessEquals(t, 32*GiB, RAMInBytes, "32Gb") assertSuccessEquals(t, 32*TiB, RAMInBytes, "32Tb") assertSuccessEquals(t, 32*PiB, RAMInBytes, "32Pb") assertSuccessEquals(t, 32*PiB, RAMInBytes, "32PB") assertSuccessEquals(t, 32*PiB, RAMInBytes, "32P") assertError(t, RAMInBytes, "") assertError(t, RAMInBytes, "hello") assertError(t, RAMInBytes, "-32") assertError(t, RAMInBytes, "32.3") assertError(t, RAMInBytes, " 32 ") assertError(t, RAMInBytes, "32.3Kb") assertError(t, RAMInBytes, 
"32 mb") assertError(t, RAMInBytes, "32m b") assertError(t, RAMInBytes, "32bm") } func assertEquals(t *testing.T, expected, actual interface{}) { if expected != actual { t.Errorf("Expected '%v' but got '%v'", expected, actual) } } // func that maps to the parse function signatures as testing abstraction type parseFn func(string) (int64, error) // Define 'String()' for pretty-print func (fn parseFn) String() string { fnName := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name() return fnName[strings.LastIndex(fnName, ".")+1:] } func assertSuccessEquals(t *testing.T, expected int64, fn parseFn, arg string) { res, err := fn(arg) if err != nil || res != expected { t.Errorf("%s(\"%s\") -> expected '%d' but got '%d' with error '%v'", fn, arg, expected, res, err) } } func assertError(t *testing.T, fn parseFn, arg string) { res, err := fn(arg) if err == nil && res != -1 { t.Errorf("%s(\"%s\") -> expected error but got '%d'", fn, arg, res) } } docker-1.6.2/pkg/devicemapper/0000755000175000017500000000000012524223634015561 5ustar tianontianondocker-1.6.2/pkg/devicemapper/devmapper_wrapper.go0000644000175000017500000001657412524223634021650 0ustar tianontianon// +build linux package devicemapper /* #cgo LDFLAGS: -L. -ldevmapper #include #include // FIXME: present only for defines, maybe we can remove it? #include // FIXME: present only for BLKGETSIZE64, maybe we can remove it? #ifndef LOOP_CTL_GET_FREE #define LOOP_CTL_GET_FREE 0x4C82 #endif #ifndef LO_FLAGS_PARTSCAN #define LO_FLAGS_PARTSCAN 8 #endif // FIXME: Can't we find a way to do the logging in pure Go? extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str); static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) 
{ char buffer[256]; va_list ap; va_start(ap, f); vsnprintf(buffer, 256, f, ap); va_end(ap); DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); } static void log_with_errno_init() { dm_log_with_errno_init(log_cb); } */ import "C" import "unsafe" type ( CDmTask C.struct_dm_task CLoopInfo64 C.struct_loop_info64 LoopInfo64 struct { loDevice uint64 /* ioctl r/o */ loInode uint64 /* ioctl r/o */ loRdevice uint64 /* ioctl r/o */ loOffset uint64 loSizelimit uint64 /* bytes, 0 == max available */ loNumber uint32 /* ioctl r/o */ loEncrypt_type uint32 loEncrypt_key_size uint32 /* ioctl w/o */ loFlags uint32 /* ioctl r/o */ loFileName [LoNameSize]uint8 loCryptName [LoNameSize]uint8 loEncryptKey [LoKeySize]uint8 /* ioctl w/o */ loInit [2]uint64 } ) // IOCTL consts const ( BlkGetSize64 = C.BLKGETSIZE64 BlkDiscard = C.BLKDISCARD LoopSetFd = C.LOOP_SET_FD LoopCtlGetFree = C.LOOP_CTL_GET_FREE LoopGetStatus64 = C.LOOP_GET_STATUS64 LoopSetStatus64 = C.LOOP_SET_STATUS64 LoopClrFd = C.LOOP_CLR_FD LoopSetCapacity = C.LOOP_SET_CAPACITY ) const ( LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY LoFlagsPartScan = C.LO_FLAGS_PARTSCAN LoKeySize = C.LO_KEY_SIZE LoNameSize = C.LO_NAME_SIZE ) const ( DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG DmUdevDisableLibraryFallback = C.DM_UDEV_DISABLE_LIBRARY_FALLBACK ) var ( DmGetLibraryVersion = dmGetLibraryVersionFct DmGetNextTarget = dmGetNextTargetFct DmLogInitVerbose = dmLogInitVerboseFct DmSetDevDir = dmSetDevDirFct DmTaskAddTarget = dmTaskAddTargetFct DmTaskCreate = dmTaskCreateFct DmTaskDestroy = dmTaskDestroyFct DmTaskGetDeps = dmTaskGetDepsFct DmTaskGetInfo = dmTaskGetInfoFct DmTaskGetDriverVersion = dmTaskGetDriverVersionFct DmTaskRun = dmTaskRunFct DmTaskSetAddNode = dmTaskSetAddNodeFct DmTaskSetCookie = dmTaskSetCookieFct DmTaskSetMessage = dmTaskSetMessageFct DmTaskSetName = dmTaskSetNameFct DmTaskSetRo = dmTaskSetRoFct DmTaskSetSector = dmTaskSetSectorFct DmUdevWait = dmUdevWaitFct DmUdevSetSyncSupport = dmUdevSetSyncSupportFct DmUdevGetSyncSupport = dmUdevGetSyncSupportFct DmCookieSupported = dmCookieSupportedFct LogWithErrnoInit = logWithErrnoInitFct ) func free(p *C.char) { C.free(unsafe.Pointer(p)) } func dmTaskDestroyFct(task *CDmTask) { C.dm_task_destroy((*C.struct_dm_task)(task)) } func dmTaskCreateFct(taskType int) *CDmTask { return (*CDmTask)(C.dm_task_create(C.int(taskType))) } func dmTaskRunFct(task *CDmTask) int { ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) return int(ret) } func dmTaskSetNameFct(task *CDmTask, name string) int { Cname := C.CString(name) defer free(Cname) return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) } func dmTaskSetMessageFct(task *CDmTask, message string) int { Cmessage := C.CString(message) defer free(Cmessage) return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) } func dmTaskSetSectorFct(task *CDmTask, sector uint64) int { return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) } func dmTaskSetCookieFct(task *CDmTask, cookie *uint, flags uint16) int { cCookie := C.uint32_t(*cookie) defer func() { *cookie = uint(cCookie) }() return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) } func dmTaskSetAddNodeFct(task *CDmTask, addNode AddNodeType) int { return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), 
C.dm_add_node_t(addNode))) } func dmTaskSetRoFct(task *CDmTask) int { return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) } func dmTaskAddTargetFct(task *CDmTask, start, size uint64, ttype, params string) int { Cttype := C.CString(ttype) defer free(Cttype) Cparams := C.CString(params) defer free(Cparams) return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) } func dmTaskGetDepsFct(task *CDmTask) *Deps { Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task)) if Cdeps == nil { return nil } deps := &Deps{ Count: uint32(Cdeps.count), Filler: uint32(Cdeps.filler), } for _, device := range Cdeps.device { deps.Device = append(deps.Device, (uint64)(device)) } return deps } func dmTaskGetInfoFct(task *CDmTask, info *Info) int { Cinfo := C.struct_dm_info{} defer func() { info.Exists = int(Cinfo.exists) info.Suspended = int(Cinfo.suspended) info.LiveTable = int(Cinfo.live_table) info.InactiveTable = int(Cinfo.inactive_table) info.OpenCount = int32(Cinfo.open_count) info.EventNr = uint32(Cinfo.event_nr) info.Major = uint32(Cinfo.major) info.Minor = uint32(Cinfo.minor) info.ReadOnly = int(Cinfo.read_only) info.TargetCount = int32(Cinfo.target_count) }() return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) } func dmTaskGetDriverVersionFct(task *CDmTask) string { buffer := C.malloc(128) defer C.free(buffer) res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128) if res == 0 { return "" } return C.GoString((*C.char)(buffer)) } func dmGetNextTargetFct(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr { var ( Cstart, Clength C.uint64_t CtargetType, Cparams *C.char ) defer func() { *start = uint64(Cstart) *length = uint64(Clength) *target = C.GoString(CtargetType) *params = C.GoString(Cparams) }() nextp := C.dm_get_next_target((*C.struct_dm_task)(task), unsafe.Pointer(next), &Cstart, &Clength, &CtargetType, &Cparams) return uintptr(nextp) } func dmUdevSetSyncSupportFct(syncWithUdev int) { (C.dm_udev_set_sync_support(C.int(syncWithUdev))) } func dmUdevGetSyncSupportFct() int { return int(C.dm_udev_get_sync_support()) } func dmUdevWaitFct(cookie uint) int { return int(C.dm_udev_wait(C.uint32_t(cookie))) } func dmCookieSupportedFct() int { return int(C.dm_cookie_supported()) } func dmLogInitVerboseFct(level int) { C.dm_log_init_verbose(C.int(level)) } func logWithErrnoInitFct() { C.log_with_errno_init() } func dmSetDevDirFct(dir string) int { Cdir := C.CString(dir) defer free(Cdir) return int(C.dm_set_dev_dir(Cdir)) } func dmGetLibraryVersionFct(version *string) int { buffer := C.CString(string(make([]byte, 128))) defer free(buffer) defer func() { *version = C.GoString(buffer) }() return int(C.dm_get_library_version(buffer, 128)) } docker-1.6.2/pkg/devicemapper/ioctl.go0000644000175000017500000000334312524223634017225 0ustar tianontianon// +build linux package devicemapper import ( "syscall" "unsafe" ) func ioctlLoopCtlGetFree(fd uintptr) (int, error) { index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0) if err != 0 { return 0, err } return int(index), nil } func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 { return err } return nil } func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *LoopInfo64) error { if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { return err } return nil } func 
ioctlLoopClrFd(loopFd uintptr) error { if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 { return err } return nil } func ioctlLoopGetStatus64(loopFd uintptr) (*LoopInfo64, error) { loopInfo := &LoopInfo64{} if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { return nil, err } return loopInfo, nil } func ioctlLoopSetCapacity(loopFd uintptr, value int) error { if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 { return err } return nil } func ioctlBlkGetSize64(fd uintptr) (int64, error) { var size int64 if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { return 0, err } return size, nil } func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { var r [2]uint64 r[0] = offset r[1] = length if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { return err } return nil } docker-1.6.2/pkg/devicemapper/devmapper.go0000644000175000017500000004211212524223634020073 0ustar tianontianon// +build linux package devicemapper import ( "errors" "fmt" "os" "runtime" "syscall" log "github.com/Sirupsen/logrus" ) type DevmapperLogger interface { DMLog(level int, file string, line int, dmError int, message string) } const ( DeviceCreate TaskType = iota DeviceReload DeviceRemove DeviceRemoveAll DeviceSuspend DeviceResume DeviceInfo DeviceDeps DeviceRename DeviceVersion DeviceStatus DeviceTable DeviceWaitevent DeviceList DeviceClear DeviceMknodes DeviceListVersions DeviceTargetMsg DeviceSetGeometry ) const ( AddNodeOnResume AddNodeType = iota AddNodeOnCreate ) var ( ErrTaskRun = errors.New("dm_task_run failed") ErrTaskSetName = errors.New("dm_task_set_name failed") ErrTaskSetMessage = errors.New("dm_task_set_message failed") ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed") ErrTaskSetRo = errors.New("dm_task_set_ro failed") ErrTaskAddTarget = errors.New("dm_task_add_target failed") ErrTaskSetSector = errors.New("dm_task_set_sector failed") ErrTaskGetDeps = errors.New("dm_task_get_deps failed") ErrTaskGetInfo = errors.New("dm_task_get_info failed") ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed") ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") ErrNilCookie = errors.New("cookie ptr can't be nil") ErrAttachLoopbackDevice = errors.New("loopback mounting failed") ErrGetBlockSize = errors.New("Can't get block size") ErrUdevWait = errors.New("wait on udev cookie failed") ErrSetDevDir = errors.New("dm_set_dev_dir failed") ErrGetLibraryVersion = errors.New("dm_get_library_version failed") ErrCreateRemoveTask = errors.New("Can't create task of type DeviceRemove") ErrRunRemoveDevice = errors.New("running RemoveDevice failed") ErrInvalidAddNode = errors.New("Invalid AddNode type") ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") ErrLoopbackSetCapacity = errors.New("Unable to set loopback capacity") ErrBusy = errors.New("Device is Busy") ErrDeviceIdExists = errors.New("Device Id Exists") dmSawBusy bool dmSawExist bool ) type ( Task struct { unmanaged *CDmTask } Deps struct { Count uint32 Filler uint32 Device []uint64 } Info struct { Exists int Suspended int LiveTable int InactiveTable int OpenCount int32 EventNr uint32 Major uint32 Minor uint32 ReadOnly int TargetCount int32 } TaskType int AddNodeType int ) // Returns whether the error conveys that the device Id already
// exists. This will be true if a device create or snap create // operation fails because the device or snap device already exists in the pool. // The current implementation is a little crude, as it scans the error string // for an exact pattern match; replacing it with a more robust implementation // is desirable. func DeviceIdExists(err error) bool { return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIdExists) } func (t *Task) destroy() { if t != nil { DmTaskDestroy(t.unmanaged) runtime.SetFinalizer(t, nil) } } // TaskCreateNamed is a convenience function for TaskCreate when a name // will be set on the task as well func TaskCreateNamed(t TaskType, name string) (*Task, error) { task := TaskCreate(t) if task == nil { return nil, fmt.Errorf("Can't create task of type %d", int(t)) } if err := task.SetName(name); err != nil { return nil, fmt.Errorf("Can't set task name %s", name) } return task, nil } // TaskCreate initializes a devicemapper task of tasktype func TaskCreate(tasktype TaskType) *Task { Ctask := DmTaskCreate(int(tasktype)) if Ctask == nil { return nil } task := &Task{unmanaged: Ctask} runtime.SetFinalizer(task, (*Task).destroy) return task } func (t *Task) Run() error { if res := DmTaskRun(t.unmanaged); res != 1 { return ErrTaskRun } return nil } func (t *Task) SetName(name string) error { if res := DmTaskSetName(t.unmanaged, name); res != 1 { return ErrTaskSetName } return nil } func (t *Task) SetMessage(message string) error { if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { return ErrTaskSetMessage } return nil } func (t *Task) SetSector(sector uint64) error { if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { return ErrTaskSetSector } return nil } func (t *Task) SetCookie(cookie *uint, flags uint16) error { if cookie == nil { return ErrNilCookie } if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { return ErrTaskSetCookie } return nil } func (t *Task) SetAddNode(addNode AddNodeType) error { if addNode != AddNodeOnResume && addNode != AddNodeOnCreate { return ErrInvalidAddNode } if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { return ErrTaskSetAddNode } return nil } func (t *Task) SetRo() error { if res := DmTaskSetRo(t.unmanaged); res != 1 { return ErrTaskSetRo } return nil } func (t *Task) AddTarget(start, size uint64, ttype, params string) error { if res := DmTaskAddTarget(t.unmanaged, start, size, ttype, params); res != 1 { return ErrTaskAddTarget } return nil } func (t *Task) GetDeps() (*Deps, error) { var deps *Deps if deps = DmTaskGetDeps(t.unmanaged); deps == nil { return nil, ErrTaskGetDeps } return deps, nil } func (t *Task) GetInfo() (*Info, error) { info := &Info{} if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { return nil, ErrTaskGetInfo } return info, nil } func (t *Task) GetDriverVersion() (string, error) { res := DmTaskGetDriverVersion(t.unmanaged) if res == "" { return "", ErrTaskGetDriverVersion } return res, nil } func (t *Task) GetNextTarget(next uintptr) (nextPtr uintptr, start uint64, length uint64, targetType string, params string) { return DmGetNextTarget(t.unmanaged, next, &start, &length, &targetType, &params), start, length, targetType, params } func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { loopInfo, err := ioctlLoopGetStatus64(file.Fd()) if err != nil { log.Errorf("Error getting loopback backing file: %s", err) return 0, 0, ErrGetLoopbackBackingFile } return loopInfo.loDevice, loopInfo.loInode, nil } func LoopbackSetCapacity(file *os.File) error { if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil
{ log.Errorf("Error loopbackSetCapacity: %s", err) return ErrLoopbackSetCapacity } return nil } func FindLoopDeviceFor(file *os.File) *os.File { stat, err := file.Stat() if err != nil { return nil } targetInode := stat.Sys().(*syscall.Stat_t).Ino targetDevice := stat.Sys().(*syscall.Stat_t).Dev for i := 0; true; i++ { path := fmt.Sprintf("/dev/loop%d", i) file, err := os.OpenFile(path, os.O_RDWR, 0) if err != nil { if os.IsNotExist(err) { return nil } // Ignore all errors until the first not-exist // we want to continue looking for the file continue } dev, inode, err := getLoopbackBackingFile(file) if err == nil && dev == targetDevice && inode == targetInode { return file } file.Close() } return nil } func UdevWait(cookie uint) error { if res := DmUdevWait(cookie); res != 1 { log.Debugf("Failed to wait on udev cookie %d", cookie) return ErrUdevWait } return nil } func LogInitVerbose(level int) { DmLogInitVerbose(level) } var dmLogger DevmapperLogger = nil // initialize the logger for the device mapper library func LogInit(logger DevmapperLogger) { dmLogger = logger LogWithErrnoInit() } func SetDevDir(dir string) error { if res := DmSetDevDir(dir); res != 1 { log.Debugf("Error dm_set_dev_dir") return ErrSetDevDir } return nil } func GetLibraryVersion() (string, error) { var version string if res := DmGetLibraryVersion(&version); res != 1 { return "", ErrGetLibraryVersion } return version, nil } // UdevSyncSupported returns whether device-mapper is able to sync with udev // // This is essential otherwise race conditions can arise where both udev and // device-mapper attempt to create and destroy devices. func UdevSyncSupported() bool { return DmUdevGetSyncSupport() != 0 } // UdevSetSyncSupport allows setting whether the udev sync should be enabled. // The return bool indicates the state of whether the sync is enabled. func UdevSetSyncSupport(enable bool) bool { if enable { DmUdevSetSyncSupport(1) } else { DmUdevSetSyncSupport(0) } return UdevSyncSupported() } // CookieSupported returns whether the version of device-mapper supports the // use of cookies in the tasks. // This is largely a lower level call that other functions use. func CookieSupported() bool { return DmCookieSupported() != 0 } // Useful helper for cleanup func RemoveDevice(name string) error { log.Debugf("[devmapper] RemoveDevice START(%s)", name) defer log.Debugf("[devmapper] RemoveDevice END(%s)", name) task, err := TaskCreateNamed(DeviceRemove, name) if task == nil { return err } var cookie uint = 0 if err := task.SetCookie(&cookie, 0); err != nil { return fmt.Errorf("Cannot set cookie: %s", err) } defer UdevWait(cookie) dmSawBusy = false // reset before the task is run if err = task.Run(); err != nil { if dmSawBusy { return ErrBusy } return fmt.Errorf("Error running RemoveDevice %s", err) } return nil } func GetBlockDeviceSize(file *os.File) (uint64, error) { size, err := ioctlBlkGetSize64(file.Fd()) if err != nil { log.Errorf("Error getting block device size: %s", err) return 0, ErrGetBlockSize } return uint64(size), nil } func BlockDeviceDiscard(path string) error { file, err := os.OpenFile(path, os.O_RDWR, 0) if err != nil { return err } defer file.Close() size, err := GetBlockDeviceSize(file) if err != nil { return err } if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil { return err } // Without this, the removal of the device that happens right after the // discard sometimes fails with EBUSY.
syscall.Sync() return nil } // This is the programmatic example of "dmsetup create" func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { task, err := TaskCreateNamed(DeviceCreate, poolName) if task == nil { return err } size, err := GetBlockDeviceSize(dataFile) if err != nil { return fmt.Errorf("Can't get data size %s", err) } params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { return fmt.Errorf("Can't add target %s", err) } var cookie uint = 0 var flags uint16 = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag if err := task.SetCookie(&cookie, flags); err != nil { return fmt.Errorf("Can't set cookie %s", err) } defer UdevWait(cookie) if err := task.Run(); err != nil { return fmt.Errorf("Error running DeviceCreate (CreatePool) %s", err) } return nil } func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { task, err := TaskCreateNamed(DeviceReload, poolName) if task == nil { return err } size, err := GetBlockDeviceSize(dataFile) if err != nil { return fmt.Errorf("Can't get data size %s", err) } params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { return fmt.Errorf("Can't add target %s", err) } if err := task.Run(); err != nil { return fmt.Errorf("Error running DeviceCreate %s", err) } return nil } func GetDeps(name string) (*Deps, error) { task, err := TaskCreateNamed(DeviceDeps, name) if task == nil { return nil, err } if err := task.Run(); err != nil { return nil, err } return task.GetDeps() } func GetInfo(name string) (*Info, error) { task, err := TaskCreateNamed(DeviceInfo, name) if task == nil { return nil, err } if err := task.Run(); err != nil { return nil, err } return task.GetInfo() } func GetDriverVersion() (string, error) { task := TaskCreate(DeviceVersion) if task == nil { return "", fmt.Errorf("Can't create DeviceVersion task") } if err := task.Run(); err != nil { return "", err } return task.GetDriverVersion() } func GetStatus(name string) (uint64, uint64, string, string, error) { task, err := TaskCreateNamed(DeviceStatus, name) if task == nil { log.Debugf("GetStatus: Error TaskCreateNamed: %s", err) return 0, 0, "", "", err } if err := task.Run(); err != nil { log.Debugf("GetStatus: Error Run: %s", err) return 0, 0, "", "", err } devinfo, err := task.GetInfo() if err != nil { log.Debugf("GetStatus: Error GetInfo: %s", err) return 0, 0, "", "", err } if devinfo.Exists == 0 { log.Debugf("GetStatus: Non existing device %s", name) return 0, 0, "", "", fmt.Errorf("Non existing device %s", name) } _, start, length, targetType, params := task.GetNextTarget(0) return start, length, targetType, params, nil } func SetTransactionId(poolName string, oldId uint64, newId uint64) error { task, err := TaskCreateNamed(DeviceTargetMsg, poolName) if task == nil { return err } if err := task.SetSector(0); err != nil { return fmt.Errorf("Can't set sector %s", err) } if err := task.SetMessage(fmt.Sprintf("set_transaction_id %d %d", oldId, newId)); err != nil { return fmt.Errorf("Can't set message %s", err) } if err := task.Run(); err != nil { return fmt.Errorf("Error running SetTransactionId %s", err) } return nil } func SuspendDevice(name string) error { task, err := TaskCreateNamed(DeviceSuspend, name) if 
task == nil { return err } if err := task.Run(); err != nil { return fmt.Errorf("Error running DeviceSuspend %s", err) } return nil } func ResumeDevice(name string) error { task, err := TaskCreateNamed(DeviceResume, name) if task == nil { return err } var cookie uint = 0 if err := task.SetCookie(&cookie, 0); err != nil { return fmt.Errorf("Can't set cookie %s", err) } defer UdevWait(cookie) if err := task.Run(); err != nil { return fmt.Errorf("Error running DeviceResume %s", err) } return nil } func CreateDevice(poolName string, deviceId int) error { log.Debugf("[devmapper] CreateDevice(poolName=%v, deviceId=%v)", poolName, deviceId) task, err := TaskCreateNamed(DeviceTargetMsg, poolName) if task == nil { return err } if err := task.SetSector(0); err != nil { return fmt.Errorf("Can't set sector %s", err) } if err := task.SetMessage(fmt.Sprintf("create_thin %d", deviceId)); err != nil { return fmt.Errorf("Can't set message %s", err) } dmSawExist = false // reset before the task is run if err := task.Run(); err != nil { // Caller wants to know about ErrDeviceIdExists so that it can try with a different device id. if dmSawExist { return ErrDeviceIdExists } else { return fmt.Errorf("Error running CreateDevice %s", err) } } return nil } func DeleteDevice(poolName string, deviceId int) error { task, err := TaskCreateNamed(DeviceTargetMsg, poolName) if task == nil { return err } if err := task.SetSector(0); err != nil { return fmt.Errorf("Can't set sector %s", err) } if err := task.SetMessage(fmt.Sprintf("delete %d", deviceId)); err != nil { return fmt.Errorf("Can't set message %s", err) } if err := task.Run(); err != nil { return fmt.Errorf("Error running DeleteDevice %s", err) } return nil } func ActivateDevice(poolName string, name string, deviceId int, size uint64) error { task, err := TaskCreateNamed(DeviceCreate, name) if task == nil { return err } params := fmt.Sprintf("%s %d", poolName, deviceId) if err := task.AddTarget(0, size/512, "thin", params); err != nil { return fmt.Errorf("Can't add target %s", err) } if err := task.SetAddNode(AddNodeOnCreate); err != nil { return fmt.Errorf("Can't add node %s", err) } var cookie uint = 0 if err := task.SetCookie(&cookie, 0); err != nil { return fmt.Errorf("Can't set cookie %s", err) } defer UdevWait(cookie) if err := task.Run(); err != nil { return fmt.Errorf("Error running DeviceCreate (ActivateDevice) %s", err) } return nil } func CreateSnapDevice(poolName string, deviceId int, baseName string, baseDeviceId int) error { devinfo, _ := GetInfo(baseName) doSuspend := devinfo != nil && devinfo.Exists != 0 if doSuspend { if err := SuspendDevice(baseName); err != nil { return err } } task, err := TaskCreateNamed(DeviceTargetMsg, poolName) if task == nil { if doSuspend { ResumeDevice(baseName) } return err } if err := task.SetSector(0); err != nil { if doSuspend { ResumeDevice(baseName) } return fmt.Errorf("Can't set sector %s", err) } if err := task.SetMessage(fmt.Sprintf("create_snap %d %d", deviceId, baseDeviceId)); err != nil { if doSuspend { ResumeDevice(baseName) } return fmt.Errorf("Can't set message %s", err) } dmSawExist = false // reset before the task is run if err := task.Run(); err != nil { if doSuspend { ResumeDevice(baseName) } // Caller wants to know about ErrDeviceIdExists so that it can try with a different device id. 
if dmSawExist { return ErrDeviceIdExists } else { return fmt.Errorf("Error running DeviceCreate (createSnapDevice) %s", err) } } if doSuspend { if err := ResumeDevice(baseName); err != nil { return err } } return nil } docker-1.6.2/pkg/devicemapper/devmapper_log.go0000644000175000017500000000123312524223634020733 0ustar tianontianon// +build linux package devicemapper import "C" import ( "strings" ) // Due to the way cgo works this has to be in a separate file, as devmapper.go has // definitions in the cgo block, which is incompatible with using "//export" //export DevmapperLogCallback func DevmapperLogCallback(level C.int, file *C.char, line C.int, dm_errno_or_class C.int, message *C.char) { msg := C.GoString(message) if level < 7 { if strings.Contains(msg, "busy") { dmSawBusy = true } if strings.Contains(msg, "File exists") { dmSawExist = true } } if dmLogger != nil { dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dm_errno_or_class), msg) } } docker-1.6.2/pkg/devicemapper/attach_loopback.go0000644000175000017500000000633512524223634021235 0ustar tianontianon// +build linux package devicemapper import ( "fmt" "os" "syscall" log "github.com/Sirupsen/logrus" ) func stringToLoopName(src string) [LoNameSize]uint8 { var dst [LoNameSize]uint8 copy(dst[:], src[:]) return dst } func getNextFreeLoopbackIndex() (int, error) { f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644) if err != nil { return 0, err } defer f.Close() index, err := ioctlLoopCtlGetFree(f.Fd()) if index < 0 { index = 0 } return index, err } func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.File, err error) { // Start looking for a free /dev/loop for { target := fmt.Sprintf("/dev/loop%d", index) index++ fi, err := os.Stat(target) if err != nil { if os.IsNotExist(err) { log.Errorf("There are no more loopback devices available.") } return nil, ErrAttachLoopbackDevice } if fi.Mode()&os.ModeDevice != os.ModeDevice { log.Errorf("Loopback device %s is not a block device.", target) continue } // OpenFile adds O_CLOEXEC loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) if err != nil { log.Errorf("Error opening loopback device: %s", err) return nil, ErrAttachLoopbackDevice } // Try to attach to the loop file if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { loopFile.Close() // If the error is EBUSY, then try the next loopback if err != syscall.EBUSY { log.Errorf("Cannot set up loopback device %s: %s", target, err) return nil, ErrAttachLoopbackDevice } // Otherwise, we keep going with the loop continue } // In case of success, we finished. Break the loop. break } // This can't happen, but let's be sure if loopFile == nil { log.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) return nil, ErrAttachLoopbackDevice } return loopFile, nil } // AttachLoopDevice attaches the given sparse file to the next // available loopback device. It returns an opened *os.File. func AttachLoopDevice(sparseName string) (loop *os.File, err error) { // Try to retrieve the next available loopback device via syscall. // If it fails, we discard the error and start looking for a // loopback from index 0.
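// Overall flow: ask /dev/loop-control for a free index, open the backing file
// read-write, bind it to the first usable /dev/loopN via LOOP_SET_FD, then
// stamp the device with LoopInfo64 (backing-file name plus LO_FLAGS_AUTOCLEAR,
// so the kernel detaches the loop device again on last close).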
startIndex, err := getNextFreeLoopbackIndex() if err != nil { log.Debugf("Error retrieving the next available loopback: %s", err) } // OpenFile adds O_CLOEXEC sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644) if err != nil { log.Errorf("Error opening sparse file %s: %s", sparseName, err) return nil, ErrAttachLoopbackDevice } defer sparseFile.Close() loopFile, err := openNextAvailableLoopback(startIndex, sparseFile) if err != nil { return nil, err } // Set the status of the loopback device loopInfo := &LoopInfo64{ loFileName: stringToLoopName(loopFile.Name()), loOffset: 0, loFlags: LoFlagsAutoClear, } if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil { log.Errorf("Cannot set up loopback device info: %s", err) // If the call failed, then free the loopback device if err := ioctlLoopClrFd(loopFile.Fd()); err != nil { log.Errorf("Error while cleaning up the loopback device") } loopFile.Close() return nil, ErrAttachLoopbackDevice } return loopFile, nil } docker-1.6.2/pkg/devicemapper/log.go0000644000175000017500000000047212524223634016674 0ustar tianontianonpackage devicemapper // definitions from lvm2 lib/log/log.h const ( LogLevelFatal = 2 + iota // _LOG_FATAL LogLevelErr // _LOG_ERR LogLevelWarn // _LOG_WARN LogLevelNotice // _LOG_NOTICE LogLevelInfo // _LOG_INFO LogLevelDebug // _LOG_DEBUG ) docker-1.6.2/pkg/parsers/0000755000175000017500000000000012524223634014574 5ustar tianontianondocker-1.6.2/pkg/parsers/filters/0000755000175000017500000000000012524223634016244 5ustar tianontianondocker-1.6.2/pkg/parsers/filters/parse.go0000644000175000017500000000444212524223634017711 0ustar tianontianonpackage filters import ( "encoding/json" "errors" "regexp" "strings" ) type Args map[string][]string // Parse the argument to the filter flag. Like // // `docker ps -f 'created=today' -f 'image.name=ubuntu*'` // // If prev map is provided, then it is appended to, and returned. By default a new // map is created. 
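//
// A short usage sketch (illustrative, mirroring the flags in parse_test.go):
//
//	args, _ := ParseFlag("created=today", nil)
//	args, _ = ParseFlag("image.name=ubuntu*", args)
//	// args now holds {"created": ["today"], "image.name": ["ubuntu*"]}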
func ParseFlag(arg string, prev Args) (Args, error) { var filters Args = prev if prev == nil { filters = Args{} } if len(arg) == 0 { return filters, nil } if !strings.Contains(arg, "=") { return filters, ErrorBadFormat } f := strings.SplitN(arg, "=", 2) name := strings.ToLower(strings.TrimSpace(f[0])) value := strings.TrimSpace(f[1]) filters[name] = append(filters[name], value) return filters, nil } var ErrorBadFormat = errors.New("bad format of filter (expected name=value)") // packs the Args into a string for easy transport from client to server func ToParam(a Args) (string, error) { // this way we don't URL encode {}, just empty space if len(a) == 0 { return "", nil } buf, err := json.Marshal(a) if err != nil { return "", err } return string(buf), nil } // unpacks the filter Args func FromParam(p string) (Args, error) { args := Args{} if len(p) == 0 { return args, nil } err := json.Unmarshal([]byte(p), &args) if err != nil { return nil, err } return args, nil } func (filters Args) MatchKVList(field string, sources map[string]string) bool { fieldValues := filters[field] // do not filter if there is no filter set or cannot determine filter if len(fieldValues) == 0 { return true } if sources == nil || len(sources) == 0 { return false } outer: for _, name2match := range fieldValues { testKV := strings.SplitN(name2match, "=", 2) for k, v := range sources { if len(testKV) == 1 { if k == testKV[0] { continue outer } } else if k == testKV[0] && v == testKV[1] { continue outer } } return false } return true } func (filters Args) Match(field, source string) bool { fieldValues := filters[field] // do not filter if there is no filter set or cannot determine filter if len(fieldValues) == 0 { return true } for _, name2match := range fieldValues { match, err := regexp.MatchString(name2match, source) if err != nil { continue } if match { return true } } return false } docker-1.6.2/pkg/parsers/filters/parse_test.go0000644000175000017500000000304312524223634020744 0ustar tianontianonpackage filters import ( "sort" "testing" ) func TestParseArgs(t *testing.T) { // equivalent of `docker ps -f 'created=today' -f 'image.name=ubuntu*' -f 'image.name=*untu'` flagArgs := []string{ "created=today", "image.name=ubuntu*", "image.name=*untu", } var ( args = Args{} err error ) for i := range flagArgs { args, err = ParseFlag(flagArgs[i], args) if err != nil { t.Errorf("failed to parse %s: %s", flagArgs[i], err) } } if len(args["created"]) != 1 { t.Errorf("failed to set this arg") } if len(args["image.name"]) != 2 { t.Errorf("the args should have collapsed") } } func TestParam(t *testing.T) { a := Args{ "created": []string{"today"}, "image.name": []string{"ubuntu*", "*untu"}, } v, err := ToParam(a) if err != nil { t.Errorf("failed to marshal the filters: %s", err) } v1, err := FromParam(v) if err != nil { t.Errorf("%s", err) } for key, vals := range v1 { if _, ok := a[key]; !ok { t.Errorf("could not find key %s in original set", key) } sort.Strings(vals) sort.Strings(a[key]) if len(vals) != len(a[key]) { t.Errorf("value lengths ought to match") continue } for i := range vals { if vals[i] != a[key][i] { t.Errorf("expected %s, but got %s", a[key][i], vals[i]) } } } } func TestEmpty(t *testing.T) { a := Args{} v, err := ToParam(a) if err != nil { t.Errorf("failed to marshal the filters: %s", err) } v1, err := FromParam(v) if err != nil { t.Errorf("%s", err) } if len(a) != len(v1) { t.Errorf("these should both be empty sets") } } docker-1.6.2/pkg/parsers/parsers.go0000644000175000017500000000711712524223634016610 0ustar
tianontianonpackage parsers import ( "fmt" "strconv" "strings" ) // FIXME: Change this not to receive default value as parameter func ParseHost(defaultTCPAddr, defaultUnixAddr, addr string) (string, error) { addr = strings.TrimSpace(addr) if addr == "" { addr = fmt.Sprintf("unix://%s", defaultUnixAddr) } addrParts := strings.Split(addr, "://") if len(addrParts) == 1 { addrParts = []string{"tcp", addrParts[0]} } switch addrParts[0] { case "tcp": return ParseTCPAddr(addrParts[1], defaultTCPAddr) case "unix": return ParseUnixAddr(addrParts[1], defaultUnixAddr) case "fd": return addr, nil default: return "", fmt.Errorf("Invalid bind address format: %s", addr) } } func ParseUnixAddr(addr string, defaultAddr string) (string, error) { addr = strings.TrimPrefix(addr, "unix://") if strings.Contains(addr, "://") { return "", fmt.Errorf("Invalid proto, expected unix: %s", addr) } if addr == "" { addr = defaultAddr } return fmt.Sprintf("unix://%s", addr), nil } func ParseTCPAddr(addr string, defaultAddr string) (string, error) { addr = strings.TrimPrefix(addr, "tcp://") if strings.Contains(addr, "://") || addr == "" { return "", fmt.Errorf("Invalid proto, expected tcp: %s", addr) } hostParts := strings.Split(addr, ":") if len(hostParts) != 2 { return "", fmt.Errorf("Invalid bind address format: %s", addr) } host := hostParts[0] if host == "" { host = defaultAddr } p, err := strconv.Atoi(hostParts[1]) if err != nil && p == 0 { return "", fmt.Errorf("Invalid bind address format: %s", addr) } return fmt.Sprintf("tcp://%s:%d", host, p), nil } // Get a repos name and returns the right reposName + tag|digest // The tag can be confusing because of a port in a repository name. // Ex: localhost.localdomain:5000/samalba/hipache:latest // Digest ex: localhost:5000/foo/bar@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb func ParseRepositoryTag(repos string) (string, string) { n := strings.Index(repos, "@") if n >= 0 { parts := strings.Split(repos, "@") return parts[0], parts[1] } n = strings.LastIndex(repos, ":") if n < 0 { return repos, "" } if tag := repos[n+1:]; !strings.Contains(tag, "/") { return repos[:n], tag } return repos, "" } func PartParser(template, data string) (map[string]string, error) { // ip:public:private var ( templateParts = strings.Split(template, ":") parts = strings.Split(data, ":") out = make(map[string]string, len(templateParts)) ) if len(parts) != len(templateParts) { return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) } for i, t := range templateParts { value := "" if len(parts) > i { value = parts[i] } out[t] = value } return out, nil } func ParseKeyValueOpt(opt string) (string, string, error) { parts := strings.SplitN(opt, "=", 2) if len(parts) != 2 { return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) } return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil } func ParsePortRange(ports string) (uint64, uint64, error) { if ports == "" { return 0, 0, fmt.Errorf("Empty string specified for ports.") } if !strings.Contains(ports, "-") { start, err := strconv.ParseUint(ports, 10, 16) end := start return start, end, err } parts := strings.Split(ports, "-") start, err := strconv.ParseUint(parts[0], 10, 16) if err != nil { return 0, 0, err } end, err := strconv.ParseUint(parts[1], 10, 16) if err != nil { return 0, 0, err } if end < start { return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) } return start, end, nil } docker-1.6.2/pkg/parsers/parsers_test.go0000644000175000017500000001424312524223634017645 0ustar tianontianonpackage parsers import ( "strings" "testing" ) func TestParseHost(t *testing.T) { var ( defaultHttpHost = "127.0.0.1" defaultUnix = "/var/run/docker.sock" ) if addr, err := ParseHost(defaultHttpHost, defaultUnix, "0.0.0.0"); err == nil { t.Errorf("tcp 0.0.0.0 address expected error return, but err == nil, got %s", addr) } if addr, err := ParseHost(defaultHttpHost, defaultUnix, "tcp://"); err == nil { t.Errorf("default tcp:// address expected error return, but err == nil, got %s", addr) } if addr, err := ParseHost(defaultHttpHost, defaultUnix, "0.0.0.1:5555"); err != nil || addr != "tcp://0.0.0.1:5555" { t.Errorf("0.0.0.1:5555 -> expected tcp://0.0.0.1:5555, got %s", addr) } if addr, err := ParseHost(defaultHttpHost, defaultUnix, ":6666"); err != nil || addr != "tcp://127.0.0.1:6666" { t.Errorf(":6666 -> expected tcp://127.0.0.1:6666, got %s", addr) } if addr, err := ParseHost(defaultHttpHost, defaultUnix, "tcp://:7777"); err != nil || addr != "tcp://127.0.0.1:7777" { t.Errorf("tcp://:7777 -> expected tcp://127.0.0.1:7777, got %s", addr) } if addr, err := ParseHost(defaultHttpHost, defaultUnix, ""); err != nil || addr != "unix:///var/run/docker.sock" { t.Errorf("empty argument -> expected unix:///var/run/docker.sock, got %s", addr) } if addr, err := ParseHost(defaultHttpHost, defaultUnix, "unix:///var/run/docker.sock"); err != nil || addr != "unix:///var/run/docker.sock" { t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr) } if addr, err := ParseHost(defaultHttpHost, defaultUnix, "unix://"); err != nil || addr != "unix:///var/run/docker.sock" { t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr) } if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1"); err == nil { t.Errorf("udp protocol address expected error return, but err == nil. Got %s", addr) } if addr, err := ParseHost(defaultHttpHost, defaultUnix, "udp://127.0.0.1:2375"); err == nil { t.Errorf("udp protocol address expected error return, but err == nil. 
Got %s", addr) } } func TestParseRepositoryTag(t *testing.T) { if repo, tag := ParseRepositoryTag("root"); repo != "root" || tag != "" { t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "", repo, tag) } if repo, tag := ParseRepositoryTag("root:tag"); repo != "root" || tag != "tag" { t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "tag", repo, tag) } if repo, digest := ParseRepositoryTag("root@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "root" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "root", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) } if repo, tag := ParseRepositoryTag("user/repo"); repo != "user/repo" || tag != "" { t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "", repo, tag) } if repo, tag := ParseRepositoryTag("user/repo:tag"); repo != "user/repo" || tag != "tag" { t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "tag", repo, tag) } if repo, digest := ParseRepositoryTag("user/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "user/repo" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "user/repo", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) } if repo, tag := ParseRepositoryTag("url:5000/repo"); repo != "url:5000/repo" || tag != "" { t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "", repo, tag) } if repo, tag := ParseRepositoryTag("url:5000/repo:tag"); repo != "url:5000/repo" || tag != "tag" { t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "tag", repo, tag) } if repo, digest := ParseRepositoryTag("url:5000/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "url:5000/repo" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "url:5000/repo", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) } } func TestParsePortMapping(t *testing.T) { data, err := PartParser("ip:public:private", "192.168.1.1:80:8080") if err != nil { t.Fatal(err) } if len(data) != 3 { t.FailNow() } if data["ip"] != "192.168.1.1" { t.Fail() } if data["public"] != "80" { t.Fail() } if data["private"] != "8080" { t.Fail() } } func TestParsePortRange(t *testing.T) { if start, end, err := ParsePortRange("8000-8080"); err != nil || start != 8000 || end != 8080 { t.Fatalf("Error: %s or Expecting {start,end} values {8000,8080} but found {%d,%d}.", err, start, end) } } func TestParsePortRangeIncorrectRange(t *testing.T) { if _, _, err := ParsePortRange("9000-8080"); err == nil || !strings.Contains(err.Error(), "Invalid range specified for the Port") { t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) } } func TestParsePortRangeIncorrectEndRange(t *testing.T) { if _, _, err := ParsePortRange("8000-a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) } if _, _, err := ParsePortRange("8000-30a"); err == nil || !strings.Contains(err.Error(), "invalid 
syntax") { t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) } } func TestParsePortRangeIncorrectStartRange(t *testing.T) { if _, _, err := ParsePortRange("a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) } if _, _, err := ParsePortRange("30a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) } } docker-1.6.2/pkg/parsers/operatingsystem/0000755000175000017500000000000012524223634020031 5ustar tianontianondocker-1.6.2/pkg/parsers/operatingsystem/operatingsystem.go0000644000175000017500000000150512524223634023616 0ustar tianontianonpackage operatingsystem import ( "bytes" "errors" "io/ioutil" ) var ( // file to use to detect if the daemon is running in a container proc1Cgroup = "/proc/1/cgroup" // file to check to determine Operating System etcOsRelease = "/etc/os-release" ) func GetOperatingSystem() (string, error) { b, err := ioutil.ReadFile(etcOsRelease) if err != nil { return "", err } if i := bytes.Index(b, []byte("PRETTY_NAME")); i >= 0 { b = b[i+13:] return string(b[:bytes.IndexByte(b, '"')]), nil } return "", errors.New("PRETTY_NAME not found") } func IsContainerized() (bool, error) { b, err := ioutil.ReadFile(proc1Cgroup) if err != nil { return false, err } for _, line := range bytes.Split(b, []byte{'\n'}) { if len(line) > 0 && !bytes.HasSuffix(line, []byte{'/'}) { return true, nil } } return false, nil } docker-1.6.2/pkg/parsers/operatingsystem/operatingsystem_test.go0000644000175000017500000000624712524223634024665 0ustar tianontianonpackage operatingsystem import ( "io/ioutil" "os" "path/filepath" "testing" ) func TestGetOperatingSystem(t *testing.T) { var ( backup = etcOsRelease ubuntuTrusty = []byte(`NAME="Ubuntu" VERSION="14.04, Trusty Tahr" ID=ubuntu ID_LIKE=debian PRETTY_NAME="Ubuntu 14.04 LTS" VERSION_ID="14.04" HOME_URL="http://www.ubuntu.com/" SUPPORT_URL="http://help.ubuntu.com/" BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`) gentoo = []byte(`NAME=Gentoo ID=gentoo PRETTY_NAME="Gentoo/Linux" ANSI_COLOR="1;32" HOME_URL="http://www.gentoo.org/" SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" BUG_REPORT_URL="https://bugs.gentoo.org/" `) noPrettyName = []byte(`NAME="Ubuntu" VERSION="14.04, Trusty Tahr" ID=ubuntu ID_LIKE=debian VERSION_ID="14.04" HOME_URL="http://www.ubuntu.com/" SUPPORT_URL="http://help.ubuntu.com/" BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`) ) dir := os.TempDir() etcOsRelease = filepath.Join(dir, "etcOsRelease") defer func() { os.Remove(etcOsRelease) etcOsRelease = backup }() for expect, osRelease := range map[string][]byte{ "Ubuntu 14.04 LTS": ubuntuTrusty, "Gentoo/Linux": gentoo, "": noPrettyName, } { if err := ioutil.WriteFile(etcOsRelease, osRelease, 0600); err != nil { t.Fatalf("failed to write to %s: %v", etcOsRelease, err) } s, err := GetOperatingSystem() if s != expect { if expect == "" { t.Fatalf("Expected error 'PRETTY_NAME not found', but got %v", err) } else { t.Fatalf("Expected '%s', but got '%s'. 
Err=%v", expect, s, err) } } } } func TestIsContainerized(t *testing.T) { var ( backup = proc1Cgroup nonContainerizedProc1Cgroup = []byte(`14:name=systemd:/ 13:hugetlb:/ 12:net_prio:/ 11:perf_event:/ 10:bfqio:/ 9:blkio:/ 8:net_cls:/ 7:freezer:/ 6:devices:/ 5:memory:/ 4:cpuacct:/ 3:cpu:/ 2:cpuset:/ `) containerizedProc1Cgroup = []byte(`9:perf_event:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d 8:blkio:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d 7:net_cls:/ 6:freezer:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d 5:devices:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d 4:memory:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d 3:cpuacct:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d 2:cpu:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d 1:cpuset:/`) ) dir := os.TempDir() proc1Cgroup = filepath.Join(dir, "proc1Cgroup") defer func() { os.Remove(proc1Cgroup) proc1Cgroup = backup }() if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroup, 0600); err != nil { t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) } inContainer, err := IsContainerized() if err != nil { t.Fatal(err) } if inContainer { t.Fatal("Wrongly assuming containerized") } if err := ioutil.WriteFile(proc1Cgroup, containerizedProc1Cgroup, 0600); err != nil { t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) } inContainer, err = IsContainerized() if err != nil { t.Fatal(err) } if !inContainer { t.Fatal("Wrongly assuming non-containerized") } } docker-1.6.2/pkg/parsers/kernel/0000755000175000017500000000000012524223634016054 5ustar tianontianondocker-1.6.2/pkg/parsers/kernel/kernel.go0000644000175000017500000000337412524223634017672 0ustar tianontianonpackage kernel import ( "bytes" "errors" "fmt" ) type KernelVersionInfo struct { Kernel int Major int Minor int Flavor string } func (k *KernelVersionInfo) String() string { return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) } // Compare two KernelVersionInfo struct. // Returns -1 if a < b, 0 if a == b, 1 it a > b func CompareKernelVersion(a, b *KernelVersionInfo) int { if a.Kernel < b.Kernel { return -1 } else if a.Kernel > b.Kernel { return 1 } if a.Major < b.Major { return -1 } else if a.Major > b.Major { return 1 } if a.Minor < b.Minor { return -1 } else if a.Minor > b.Minor { return 1 } return 0 } func GetKernelVersion() (*KernelVersionInfo, error) { var ( err error ) uts, err := uname() if err != nil { return nil, err } release := make([]byte, len(uts.Release)) i := 0 for _, c := range uts.Release { release[i] = byte(c) i++ } // Remove the \x00 from the release for Atoi to parse correctly release = release[:bytes.IndexByte(release, 0)] return ParseRelease(string(release)) } func ParseRelease(release string) (*KernelVersionInfo, error) { var ( kernel, major, minor, parsed int flavor, partial string ) // Ignore error from Sscanf to allow an empty flavor. Instead, just // make sure we got all the version numbers. 
parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) if parsed < 2 { return nil, errors.New("Can't parse kernel version " + release) } // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) if parsed < 1 { flavor = partial } return &KernelVersionInfo{ Kernel: kernel, Major: major, Minor: minor, Flavor: flavor, }, nil } docker-1.6.2/pkg/parsers/kernel/uname_linux.go0000644000175000017500000000033612524223634020731 0ustar tianontianonpackage kernel import ( "syscall" ) type Utsname syscall.Utsname func uname() (*syscall.Utsname, error) { uts := &syscall.Utsname{} if err := syscall.Uname(uts); err != nil { return nil, err } return uts, nil } docker-1.6.2/pkg/parsers/kernel/uname_unsupported.go0000644000175000017500000000032512524223634022160 0ustar tianontianon// +build !linux package kernel import ( "errors" ) type Utsname struct { Release [65]byte } func uname() (*Utsname, error) { return nil, errors.New("Kernel version detection is available only on linux") } docker-1.6.2/pkg/parsers/kernel/kernel_test.go0000644000175000017500000000432512524223634020726 0ustar tianontianonpackage kernel import ( "testing" ) func assertParseRelease(t *testing.T, release string, b *KernelVersionInfo, result int) { var ( a *KernelVersionInfo ) a, _ = ParseRelease(release) if r := CompareKernelVersion(a, b); r != result { t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result) } if a.Flavor != b.Flavor { t.Fatalf("Unexpected parsed kernel flavor. Found %s, expected %s", a.Flavor, b.Flavor) } } func TestParseRelease(t *testing.T) { assertParseRelease(t, "3.8.0", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) assertParseRelease(t, "3.8.0-19-generic", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0) assertParseRelease(t, "3.12.8tag", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0) assertParseRelease(t, "3.12-1-amd64", &KernelVersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0) } func assertKernelVersion(t *testing.T, a, b *KernelVersionInfo, result int) { if r := CompareKernelVersion(a, b); r != result { t.Fatalf("Unexpected kernel version comparison result. 
Found %d, expected %d", r, result) } } func TestCompareKernelVersion(t *testing.T) { assertKernelVersion(t, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) assertKernelVersion(t, &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0}, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, -1) assertKernelVersion(t, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0}, 1) assertKernelVersion(t, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) assertKernelVersion(t, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 5}, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 1) assertKernelVersion(t, &KernelVersionInfo{Kernel: 3, Major: 0, Minor: 20}, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, -1) } docker-1.6.2/pkg/tarsum/0000755000175000017500000000000012524223634014430 5ustar tianontianondocker-1.6.2/pkg/tarsum/fileinfosums.go0000644000175000017500000000560012524223634017463 0ustar tianontianonpackage tarsum import "sort" // This info will be accessed through interface so the actual name and sum cannot be meddled with type FileInfoSumInterface interface { // File name Name() string // Checksum of this particular file and its headers Sum() string // Position of file in the tar Pos() int64 } type fileInfoSum struct { name string sum string pos int64 } func (fis fileInfoSum) Name() string { return fis.name } func (fis fileInfoSum) Sum() string { return fis.sum } func (fis fileInfoSum) Pos() int64 { return fis.pos } type FileInfoSums []FileInfoSumInterface // GetFile returns the first FileInfoSumInterface with a matching name func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface { for i := range fis { if fis[i].Name() == name { return fis[i] } } return nil } // GetAllFile returns a FileInfoSums with all matching names func (fis FileInfoSums) GetAllFile(name string) FileInfoSums { f := FileInfoSums{} for i := range fis { if fis[i].Name() == name { f = append(f, fis[i]) } } return f } func contains(s []string, e string) bool { for _, a := range s { if a == e { return true } } return false } func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) { seen := make(map[string]int, len(fis)) // allocate early; no need to grow this map. for i := range fis { f := fis[i] if _, ok := seen[f.Name()]; ok { dups = append(dups, f) } else { seen[f.Name()] = 0 } } return dups } func (fis FileInfoSums) Len() int { return len(fis) } func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] } func (fis FileInfoSums) SortByPos() { sort.Sort(byPos{fis}) } func (fis FileInfoSums) SortByNames() { sort.Sort(byName{fis}) } func (fis FileInfoSums) SortBySums() { dups := fis.GetDuplicatePaths() if len(dups) > 0 { sort.Sort(bySum{fis, dups}) } else { sort.Sort(bySum{fis, nil}) } }
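// Illustrative note on the sorting helpers below: when duplicate paths exist, SortBySums
// orders the colliding entries by position instead of by sum, e.g. (a sketch mirroring the package tests):
//
//	fis := FileInfoSums{fileInfoSum{name: "dup1", sum: "deadbeef1", pos: 5}, fileInfoSum{name: "dup1", sum: "deadbeef0", pos: 4}}
//	fis.SortBySums() // the pos 4 entry now precedes the pos 5 entry, regardless of sum ordering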
// byName is a sort.Sort helper for sorting by file names. // If names are the same, order them by their appearance in the tar archive type byName struct{ FileInfoSums } func (bn byName) Less(i, j int) bool { if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() { return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos() } return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name() } // bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive type bySum struct { FileInfoSums dups FileInfoSums } func (bs bySum) Less(i, j int) bool { if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() { return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos() } return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum() } // byPos is a sort.Sort helper for sorting the fileinfos by their original position in the tar archive type byPos struct{ FileInfoSums } func (bp byPos) Less(i, j int) bool { return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos() } docker-1.6.2/pkg/tarsum/writercloser.go0000644000175000017500000000040412524223634017501 0ustar tianontianonpackage tarsum import ( "io" ) type writeCloseFlusher interface { io.WriteCloser Flush() error } type nopCloseFlusher struct { io.Writer } func (n *nopCloseFlusher) Close() error { return nil } func (n *nopCloseFlusher) Flush() error { return nil } docker-1.6.2/pkg/tarsum/versioning.go0000644000175000017500000001003112524223634017135 0ustar tianontianonpackage tarsum import ( "errors" "sort" "strconv" "strings" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) // versioning of the TarSum algorithm // based on the prefix of the hash used // i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" type Version int // Prefix of "tarsum" const ( Version0 Version = iota Version1 // NOTE: this variable will be either the latest or an unsettled next-version of the TarSum calculation VersionDev ) // VersionLabelForChecksum returns the label for the given tarsum // checksum, i.e., everything before the first `+` character in // the string or an empty string if no label separator is found. func VersionLabelForChecksum(checksum string) string { // Checksums are in the form: {versionLabel}+{hashID}:{hex} sepIndex := strings.Index(checksum, "+") if sepIndex < 0 { return "" } return checksum[:sepIndex] } // Get a list of all known tarsum Version func GetVersions() []Version { v := []Version{} for k := range tarSumVersions { v = append(v, k) } return v } var ( tarSumVersions = map[Version]string{ Version0: "tarsum", Version1: "tarsum.v1", VersionDev: "tarsum.dev", } tarSumVersionsByName = map[string]Version{ "tarsum": Version0, "tarsum.v1": Version1, "tarsum.dev": VersionDev, } ) func (tsv Version) String() string { return tarSumVersions[tsv] } // GetVersionFromTarsum returns the Version from the provided string func GetVersionFromTarsum(tarsum string) (Version, error) { tsv := tarsum if strings.Contains(tarsum, "+") { tsv = strings.SplitN(tarsum, "+", 2)[0] } for v, s := range tarSumVersions { if s == tsv { return v, nil } } return -1, ErrNotVersion } // Errors that may be returned by functions in this package var ( ErrNotVersion = errors.New("string does not include a TarSum Version") ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") ) // tarHeaderSelector is the interface which different versions // of tarsum should use for selecting and ordering tar headers // for each item in the archive.
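//
// Illustrative sketch of the contract (the field values shown are examples):
// a selector maps one tar header to an ordered key/value list, e.g.
//
//	v0TarHeaderSelect(&tar.Header{Name: "a.txt", Mode: 0644}) // → [][2]string{{"name", "a.txt"}, {"mode", "420"}, ...}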
type tarHeaderSelector interface { selectHeaders(h *tar.Header) (orderedHeaders [][2]string) } type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string) func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) { return f(h) } func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { return [][2]string{ {"name", h.Name}, {"mode", strconv.Itoa(int(h.Mode))}, {"uid", strconv.Itoa(h.Uid)}, {"gid", strconv.Itoa(h.Gid)}, {"size", strconv.Itoa(int(h.Size))}, {"mtime", strconv.Itoa(int(h.ModTime.UTC().Unix()))}, {"typeflag", string([]byte{h.Typeflag})}, {"linkname", h.Linkname}, {"uname", h.Uname}, {"gname", h.Gname}, {"devmajor", strconv.Itoa(int(h.Devmajor))}, {"devminor", strconv.Itoa(int(h.Devminor))}, } } func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { // Get extended attributes. The slice is length 0 with capacity len(h.Xattrs), // so that appending below does not leave empty keys at the front. xAttrKeys := make([]string, 0, len(h.Xattrs)) for k := range h.Xattrs { xAttrKeys = append(xAttrKeys, k) } sort.Strings(xAttrKeys) // Make the slice with enough capacity to hold the 11 basic headers // we want from the v0 selector plus however many xattrs we have. orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) // Copy all headers from v0 excluding the 'mtime' header (the 5th element). v0headers := v0TarHeaderSelect(h) orderedHeaders = append(orderedHeaders, v0headers[0:5]...) orderedHeaders = append(orderedHeaders, v0headers[6:]...) // Finally, append the sorted xattrs. for _, k := range xAttrKeys { orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) } return } var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{ Version0: v0TarHeaderSelect, Version1: v1TarHeaderSelect, VersionDev: v1TarHeaderSelect, } func getTarHeaderSelector(v Version) (tarHeaderSelector, error) { headerSelector, ok := registeredHeaderSelectors[v] if !ok { return nil, ErrVersionNotImplemented } return headerSelector, nil }
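// Illustrative usage of the helpers above (a sketch; the checksum value is a placeholder):
//
//	v, _ := GetVersionFromTarsum("tarsum.v1+sha256:deadbeef") // → Version1
//	hs, _ := getTarHeaderSelector(v)                          // → the v1 selector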
"tarsum+sha256:e86f81a4d552f13039b1396ed03ca968ea9717581f9577ef1876ea6ff9b38c98"}, { // Tests next version of TarSum when xattrs are present filename: "testdata/xattr/layer.tar", jsonfile: "testdata/xattr/json", version: VersionDev, tarsum: "tarsum.dev+sha256:6235cd3a2afb7501bac541772a3d61a3634e95bc90bb39a4676e2cb98d08390d"}, { filename: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar", jsonfile: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json", tarsum: "tarsum+sha256:ac672ee85da9ab7f9667ae3c32841d3e42f33cc52c273c23341dabba1c8b0c8b"}, { options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) tarsum: "tarsum+sha256:8bf12d7e67c51ee2e8306cba569398b1b9f419969521a12ffb9d8875e8836738"}, { // this tar has two files with the same path filename: "testdata/collision/collision-0.tar", tarsum: "tarsum+sha256:08653904a68d3ab5c59e65ef58c49c1581caa3c34744f8d354b3f575ea04424a"}, { // this tar has the same two files (with the same path), but reversed order. ensuring is has different hash than above filename: "testdata/collision/collision-1.tar", tarsum: "tarsum+sha256:b51c13fbefe158b5ce420d2b930eef54c5cd55c50a2ee4abdddea8fa9f081e0d"}, { // this tar has newer of collider-0.tar, ensuring is has different hash filename: "testdata/collision/collision-2.tar", tarsum: "tarsum+sha256:381547080919bb82691e995508ae20ed33ce0f6948d41cafbeb70ce20c73ee8e"}, { // this tar has newer of collider-1.tar, ensuring is has different hash filename: "testdata/collision/collision-3.tar", tarsum: "tarsum+sha256:f886e431c08143164a676805205979cd8fa535dfcef714db5515650eea5a7c0f"}, { options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) tarsum: "tarsum+md5:0d7529ec7a8360155b48134b8e599f53", hash: md5THash, }, { options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) tarsum: "tarsum+sha1:f1fee39c5925807ff75ef1925e7a23be444ba4df", hash: sha1Hash, }, { options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) tarsum: "tarsum+sha224:6319390c0b061d639085d8748b14cd55f697cf9313805218b21cf61c", hash: sha224Hash, }, { options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) tarsum: "tarsum+sha384:a578ce3ce29a2ae03b8ed7c26f47d0f75b4fc849557c62454be4b5ffd66ba021e713b48ce71e947b43aab57afd5a7636", hash: sha384Hash, }, { options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) tarsum: "tarsum+sha512:e9bfb90ca5a4dfc93c46ee061a5cf9837de6d2fdf82544d6460d3147290aecfabf7b5e415b9b6e72db9b8941f149d5d69fb17a394cbfaf2eac523bd9eae21855", hash: sha512Hash, }, } type sizedOptions struct { num int64 size int64 isRand bool realFile bool } // make a tar: // * num is the number of files the tar should have // * size is the bytes per file // * isRand is whether the contents of the files should be a random chunk (otherwise it's all zeros) // * realFile will write to a TempFile, instead of an in memory buffer func sizedTar(opts sizedOptions) io.Reader { var ( fh io.ReadWriter err error ) if opts.realFile { fh, err = ioutil.TempFile("", "tarsum") if err != nil { return nil } } else { fh = bytes.NewBuffer([]byte{}) } tarW := tar.NewWriter(fh) defer tarW.Close() for i := int64(0); i < opts.num; i++ { err := tarW.WriteHeader(&tar.Header{ Name: fmt.Sprintf("/testdata%d", i), Mode: 0755, Uid: 0, Gid: 0, Size: opts.size, }) if err != nil { return nil } var rBuf []byte if opts.isRand { rBuf = make([]byte, 8) _, err = rand.Read(rBuf) if err != nil { return nil } } else 
{ rBuf = []byte{0, 0, 0, 0, 0, 0, 0, 0} } for i := int64(0); i < opts.size/int64(8); i++ { tarW.Write(rBuf) } } return fh } func emptyTarSum(gzip bool) (TarSum, error) { reader, writer := io.Pipe() tarWriter := tar.NewWriter(writer) // Immediately close tarWriter and write-end of the // Pipe in a separate goroutine so we don't block. go func() { tarWriter.Close() writer.Close() }() return NewTarSum(reader, !gzip, Version0) } // TestEmptyTar tests that tarsum does not fail to read an empty tar // and correctly returns the hex digest of an empty hash. func TestEmptyTar(t *testing.T) { // Test without gzip. ts, err := emptyTarSum(false) if err != nil { t.Fatal(err) } zeroBlock := make([]byte, 1024) buf := new(bytes.Buffer) n, err := io.Copy(buf, ts) if err != nil { t.Fatal(err) } if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) { t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n) } expectedSum := ts.Version().String() + "+sha256:" + hex.EncodeToString(sha256.New().Sum(nil)) resultSum := ts.Sum(nil) if resultSum != expectedSum { t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) } // Test with gzip. ts, err = emptyTarSum(true) if err != nil { t.Fatal(err) } buf.Reset() n, err = io.Copy(buf, ts) if err != nil { t.Fatal(err) } bufgz := new(bytes.Buffer) gz := gzip.NewWriter(bufgz) n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock)) gz.Close() gzBytes := bufgz.Bytes() if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), gzBytes) { t.Fatalf("tarSum did not write the correct number of gzipped-zeroed bytes: %d", n) } resultSum = ts.Sum(nil) if resultSum != expectedSum { t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) } // Test without ever actually writing anything. if ts, err = NewTarSum(bytes.NewReader([]byte{}), true, Version0); err != nil { t.Fatal(err) } resultSum = ts.Sum(nil) if resultSum != expectedSum { t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) } } var ( md5THash = NewTHash("md5", md5.New) sha1Hash = NewTHash("sha1", sha1.New) sha224Hash = NewTHash("sha224", sha256.New224) sha384Hash = NewTHash("sha384", sha512.New384) sha512Hash = NewTHash("sha512", sha512.New) ) func TestTarSums(t *testing.T) { for _, layer := range testLayers { var ( fh io.Reader err error ) if len(layer.filename) > 0 { fh, err = os.Open(layer.filename) if err != nil { t.Errorf("failed to open %s: %s", layer.filename, err) continue } } else if layer.options != nil { fh = sizedTar(*layer.options) } else { // What else is there to test? t.Errorf("what to do with %#v", layer) continue } if file, ok := fh.(*os.File); ok { defer file.Close() } var ts TarSum if layer.hash == nil { // double negatives! 
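// (clarifying note: NewTarSum's second parameter means "disable compression", so for a
// gzipped test layer, layer.gzip == true maps to a false second argument here)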
ts, err = NewTarSum(fh, !layer.gzip, layer.version) } else { ts, err = NewTarSumHash(fh, !layer.gzip, layer.version, layer.hash) } if err != nil { t.Errorf("%q :: %q", err, layer.filename) continue } // Read variable number of bytes to test dynamic buffer dBuf := make([]byte, 1) _, err = ts.Read(dBuf) if err != nil { t.Errorf("failed to read 1B from %s: %s", layer.filename, err) continue } dBuf = make([]byte, 16*1024) _, err = ts.Read(dBuf) if err != nil { t.Errorf("failed to read 16KB from %s: %s", layer.filename, err) continue } // Read and discard remaining bytes _, err = io.Copy(ioutil.Discard, ts) if err != nil { t.Errorf("failed to copy from %s: %s", layer.filename, err) continue } var gotSum string if len(layer.jsonfile) > 0 { jfh, err := os.Open(layer.jsonfile) if err != nil { t.Errorf("failed to open %s: %s", layer.jsonfile, err) continue } buf, err := ioutil.ReadAll(jfh) if err != nil { t.Errorf("failed to readAll %s: %s", layer.jsonfile, err) continue } gotSum = ts.Sum(buf) } else { gotSum = ts.Sum(nil) } if layer.tarsum != gotSum { t.Errorf("expecting [%s], but got [%s]", layer.tarsum, gotSum) } } } func TestIteration(t *testing.T) { headerTests := []struct { expectedSum string // TODO(vbatts) it would be nice to get individual sums of each version Version hdr *tar.Header data []byte }{ { "tarsum+sha256:626c4a2e9a467d65c33ae81f7f3dedd4de8ccaee72af73223c4bc4718cbc7bbd", Version0, &tar.Header{ Name: "file.txt", Size: 0, Typeflag: tar.TypeReg, Devminor: 0, Devmajor: 0, }, []byte(""), }, { "tarsum.dev+sha256:6ffd43a1573a9913325b4918e124ee982a99c0f3cba90fc032a65f5e20bdd465", VersionDev, &tar.Header{ Name: "file.txt", Size: 0, Typeflag: tar.TypeReg, Devminor: 0, Devmajor: 0, }, []byte(""), }, { "tarsum.dev+sha256:b38166c059e11fb77bef30bf16fba7584446e80fcc156ff46d47e36c5305d8ef", VersionDev, &tar.Header{ Name: "another.txt", Uid: 1000, Gid: 1000, Uname: "slartibartfast", Gname: "users", Size: 4, Typeflag: tar.TypeReg, Devminor: 0, Devmajor: 0, }, []byte("test"), }, { "tarsum.dev+sha256:4cc2e71ac5d31833ab2be9b4f7842a14ce595ec96a37af4ed08f87bc374228cd", VersionDev, &tar.Header{ Name: "xattrs.txt", Uid: 1000, Gid: 1000, Uname: "slartibartfast", Gname: "users", Size: 4, Typeflag: tar.TypeReg, Xattrs: map[string]string{ "user.key1": "value1", "user.key2": "value2", }, }, []byte("test"), }, { "tarsum.dev+sha256:65f4284fa32c0d4112dd93c3637697805866415b570587e4fd266af241503760", VersionDev, &tar.Header{ Name: "xattrs.txt", Uid: 1000, Gid: 1000, Uname: "slartibartfast", Gname: "users", Size: 4, Typeflag: tar.TypeReg, Xattrs: map[string]string{ "user.KEY1": "value1", // adding different case to ensure different sum "user.key2": "value2", }, }, []byte("test"), }, { "tarsum+sha256:c12bb6f1303a9ddbf4576c52da74973c00d14c109bcfa76b708d5da1154a07fa", Version0, &tar.Header{ Name: "xattrs.txt", Uid: 1000, Gid: 1000, Uname: "slartibartfast", Gname: "users", Size: 4, Typeflag: tar.TypeReg, Xattrs: map[string]string{ "user.NOT": "CALCULATED", }, }, []byte("test"), }, } for _, htest := range headerTests { s, err := renderSumForHeader(htest.version, htest.hdr, htest.data) if err != nil { t.Fatal(err) } if s != htest.expectedSum { t.Errorf("expected sum: %q, got: %q", htest.expectedSum, s) } } } func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) { buf := bytes.NewBuffer(nil) // first build our test tar tw := tar.NewWriter(buf) if err := tw.WriteHeader(h); err != nil { return "", err } if _, err := tw.Write(data); err != nil { return "", err } tw.Close() ts, err := NewTarSum(buf, 
true, v) if err != nil { return "", err } tr := tar.NewReader(ts) for { hdr, err := tr.Next() if hdr == nil || err == io.EOF { // Signals the end of the archive. break } if err != nil { return "", err } if _, err = io.Copy(ioutil.Discard, tr); err != nil { return "", err } } return ts.Sum(nil), nil } func Benchmark9kTar(b *testing.B) { buf := bytes.NewBuffer([]byte{}) fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") if err != nil { b.Error(err) return } n, err := io.Copy(buf, fh) fh.Close() reader := bytes.NewReader(buf.Bytes()) b.SetBytes(n) b.ResetTimer() for i := 0; i < b.N; i++ { reader.Seek(0, 0) ts, err := NewTarSum(reader, true, Version0) if err != nil { b.Error(err) return } io.Copy(ioutil.Discard, ts) ts.Sum(nil) } } func Benchmark9kTarGzip(b *testing.B) { buf := bytes.NewBuffer([]byte{}) fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") if err != nil { b.Error(err) return } n, err := io.Copy(buf, fh) fh.Close() reader := bytes.NewReader(buf.Bytes()) b.SetBytes(n) b.ResetTimer() for i := 0; i < b.N; i++ { reader.Seek(0, 0) ts, err := NewTarSum(reader, false, Version0) if err != nil { b.Error(err) return } io.Copy(ioutil.Discard, ts) ts.Sum(nil) } } // this is a single big file in the tar archive func Benchmark1mbSingleFileTar(b *testing.B) { benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, false) } // this is a single big file in the tar archive func Benchmark1mbSingleFileTarGzip(b *testing.B) { benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, true) } // this is 1024 1k files in the tar archive func Benchmark1kFilesTar(b *testing.B) { benchmarkTar(b, sizedOptions{1024, 1024, true, true}, false) } // this is 1024 1k files in the tar archive func Benchmark1kFilesTarGzip(b *testing.B) { benchmarkTar(b, sizedOptions{1024, 1024, true, true}, true) } func benchmarkTar(b *testing.B, opts sizedOptions, isGzip bool) { var fh *os.File tarReader := sizedTar(opts) if br, ok := tarReader.(*os.File); ok { fh = br } defer os.Remove(fh.Name()) defer fh.Close() b.SetBytes(opts.size * opts.num) b.ResetTimer() for i := 0; i < b.N; i++ { ts, err := NewTarSum(fh, !isGzip, Version0) if err != nil { b.Error(err) return } io.Copy(ioutil.Discard, ts) ts.Sum(nil) fh.Seek(0, 0) } } docker-1.6.2/pkg/tarsum/versioning_test.go0000644000175000017500000000225312524223634020203 0ustar tianontianonpackage tarsum import ( "testing" ) func TestVersion(t *testing.T) { expected := "tarsum" var v Version if v.String() != expected { t.Errorf("expected %q, got %q", expected, v.String()) } expected = "tarsum.v1" v = 1 if v.String() != expected { t.Errorf("expected %q, got %q", expected, v.String()) } expected = "tarsum.dev" v = 2 if v.String() != expected { t.Errorf("expected %q, got %q", expected, v.String()) } } func TestGetVersion(t *testing.T) { testSet := []struct { Str string Expected Version }{ {"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", Version0}, {"tarsum+sha256", Version0}, {"tarsum", Version0}, {"tarsum.dev", VersionDev}, {"tarsum.dev+sha256:deadbeef", VersionDev}, } for _, ts := range testSet { v, err := GetVersionFromTarsum(ts.Str) if err != nil { t.Fatalf("%q : %s", err, ts.Str) } if v != ts.Expected { t.Errorf("expected %d (%q), got %d (%q)", ts.Expected, ts.Expected, v, v) } } // test one that does not exist, to ensure it errors str := "weak+md5:abcdeabcde" _, err := GetVersionFromTarsum(str) if err != ErrNotVersion { t.Fatalf("%q : %s", 
err, str) } } docker-1.6.2/pkg/tarsum/tarsum.go0000644000175000017500000001537412524223634016304 0ustar tianontianonpackage tarsum import ( "bytes" "compress/gzip" "crypto" "crypto/sha256" "encoding/hex" "errors" "fmt" "hash" "io" "strings" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) const ( buf8K = 8 * 1024 buf16K = 16 * 1024 buf32K = 32 * 1024 ) // NewTarSum creates a new interface for calculating a fixed time checksum of a // tar archive. // // This is used for calculating checksums of layers of an image, in some cases // including the byte payload of the image's json metadata as well, and for // calculating the checksums for buildcache. func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { return NewTarSumHash(r, dc, v, DefaultTHash) } // Create a new TarSum, providing a THash to use rather than the DefaultTHash func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) { headerSelector, err := getTarHeaderSelector(v) if err != nil { return nil, err } ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash} err = ts.initTarSum() return ts, err } // Create a new TarSum using the provided TarSum version+hash label. func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) { parts := strings.SplitN(label, "+", 2) if len(parts) != 2 { return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}") } versionName, hashName := parts[0], parts[1] version, ok := tarSumVersionsByName[versionName] if !ok { return nil, fmt.Errorf("unknown TarSum version name: %q", versionName) } hashConfig, ok := standardHashConfigs[hashName] if !ok { return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName) } tHash := NewTHash(hashConfig.name, hashConfig.hash.New) return NewTarSumHash(r, disableCompression, version, tHash) } // TarSum is the generic interface for calculating fixed time // checksums of a tar archive type TarSum interface { io.Reader GetSums() FileInfoSums Sum([]byte) string Version() Version Hash() THash } // tarSum struct is the structure for a TarSum checksum calculation type tarSum struct { io.Reader tarR *tar.Reader tarW *tar.Writer writer writeCloseFlusher bufTar *bytes.Buffer bufWriter *bytes.Buffer bufData []byte h hash.Hash tHash THash sums FileInfoSums fileCounter int64 currentFile string finished bool first bool DisableCompression bool // false by default. When false, the output is gzip compressed. tarSumVersion Version // this field is not exported so it cannot be mutated during use headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive } func (ts tarSum) Hash() THash { return ts.tHash } func (ts tarSum) Version() Version { return ts.tarSumVersion } // A hash.Hash type generator and its name type THash interface { Hash() hash.Hash Name() string } // Convenience method for creating a THash func NewTHash(name string, h func() hash.Hash) THash { return simpleTHash{n: name, h: h} } type tHashConfig struct { name string hash crypto.Hash } var ( // NOTE: DO NOT include MD5 or SHA1, which are considered insecure.
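// (illustrative note: these configs back NewTarSumForLabel above; e.g. the label
// "tarsum.v1+sha256" resolves to Version1 paired with the sha256 entry below)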
standardHashConfigs = map[string]tHashConfig{ "sha256": {name: "sha256", hash: crypto.SHA256}, "sha512": {name: "sha512", hash: crypto.SHA512}, } ) // TarSum default is "sha256" var DefaultTHash = NewTHash("sha256", sha256.New) type simpleTHash struct { n string h func() hash.Hash } func (sth simpleTHash) Name() string { return sth.n } func (sth simpleTHash) Hash() hash.Hash { return sth.h() } func (ts *tarSum) encodeHeader(h *tar.Header) error { for _, elem := range ts.headerSelector.selectHeaders(h) { if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { return err } } return nil } func (ts *tarSum) initTarSum() error { ts.bufTar = bytes.NewBuffer([]byte{}) ts.bufWriter = bytes.NewBuffer([]byte{}) ts.tarR = tar.NewReader(ts.Reader) ts.tarW = tar.NewWriter(ts.bufTar) if !ts.DisableCompression { ts.writer = gzip.NewWriter(ts.bufWriter) } else { ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} } if ts.tHash == nil { ts.tHash = DefaultTHash } ts.h = ts.tHash.Hash() ts.h.Reset() ts.first = true ts.sums = FileInfoSums{} return nil } func (ts *tarSum) Read(buf []byte) (int, error) { if ts.finished { return ts.bufWriter.Read(buf) } if len(ts.bufData) < len(buf) { switch { case len(buf) <= buf8K: ts.bufData = make([]byte, buf8K) case len(buf) <= buf16K: ts.bufData = make([]byte, buf16K) case len(buf) <= buf32K: ts.bufData = make([]byte, buf32K) default: ts.bufData = make([]byte, len(buf)) } } buf2 := ts.bufData[:len(buf)] n, err := ts.tarR.Read(buf2) if err != nil { if err == io.EOF { if _, err := ts.h.Write(buf2[:n]); err != nil { return 0, err } if !ts.first { ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) ts.fileCounter++ ts.h.Reset() } else { ts.first = false } currentHeader, err := ts.tarR.Next() if err != nil { if err == io.EOF { if err := ts.tarW.Close(); err != nil { return 0, err } if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { return 0, err } if err := ts.writer.Close(); err != nil { return 0, err } ts.finished = true return n, nil } return n, err } ts.currentFile = strings.TrimSuffix(strings.TrimPrefix(currentHeader.Name, "./"), "/") if err := ts.encodeHeader(currentHeader); err != nil { return 0, err } if err := ts.tarW.WriteHeader(currentHeader); err != nil { return 0, err } if _, err := ts.tarW.Write(buf2[:n]); err != nil { return 0, err } ts.tarW.Flush() if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { return 0, err } ts.writer.Flush() return ts.bufWriter.Read(buf) } return n, err } // Filling the hash buffer if _, err = ts.h.Write(buf2[:n]); err != nil { return 0, err } // Filling the tar writer if _, err = ts.tarW.Write(buf2[:n]); err != nil { return 0, err } ts.tarW.Flush() // Filling the output writer if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { return 0, err } ts.writer.Flush() return ts.bufWriter.Read(buf) } func (ts *tarSum) Sum(extra []byte) string { ts.sums.SortBySums() h := ts.tHash.Hash() if extra != nil { h.Write(extra) } for _, fis := range ts.sums { h.Write([]byte(fis.Sum())) } checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil)) return checksum } func (ts *tarSum) GetSums() FileInfoSums { return ts.sums }
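// Illustrative usage of this package (a sketch based on the package tests; error
// handling is elided, and the digest shown is a placeholder):
//
//	ts, _ := NewTarSum(tarStream, true, Version1) // true disables gzip re-compression of the output
//	io.Copy(ioutil.Discard, ts)                   // reading the stream drives the checksum calculation
//	sum := ts.Sum(nil)                            // e.g. "tarsum.v1+sha256:<hex digest>"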
docker-1.6.2/pkg/tarsum/builder_context.go0000644000175000017500000000116112524223634020150 0ustar tianontianonpackage tarsum // This interface extends TarSum by adding the Remove method. In general // there was concern about adding this method to TarSum itself, so instead // it is being added just to "BuilderContext", which will then only be used // during the .dockerignore file processing - see builder/evaluator.go type BuilderContext interface { TarSum Remove(string) } func (bc *tarSum) Remove(filename string) { for i, fis := range bc.sums { if fis.Name() == filename { bc.sums = append(bc.sums[:i], bc.sums[i+1:]...) // Note, we don't just return because there could be // more than one with this name } } } docker-1.6.2/pkg/tarsum/fileinfosums_test.go0000644000175000017500000000237012524223634020523 0ustar tianontianonpackage tarsum import "testing" func newFileInfoSums() FileInfoSums { return FileInfoSums{ fileInfoSum{name: "file3", sum: "2abcdef1234567890", pos: 2}, fileInfoSum{name: "dup1", sum: "deadbeef1", pos: 5}, fileInfoSum{name: "file1", sum: "0abcdef1234567890", pos: 0}, fileInfoSum{name: "file4", sum: "3abcdef1234567890", pos: 3}, fileInfoSum{name: "dup1", sum: "deadbeef0", pos: 4}, fileInfoSum{name: "file2", sum: "1abcdef1234567890", pos: 1}, } } func TestSortFileInfoSums(t *testing.T) { dups := newFileInfoSums().GetAllFile("dup1") if len(dups) != 2 { t.Errorf("expected length 2, got %d", len(dups)) } dups.SortByNames() if dups[0].Pos() != 4 { t.Errorf("sorted dups should be ordered by position. Expected 4, got %d", dups[0].Pos()) } fis := newFileInfoSums() expected := "0abcdef1234567890" fis.SortBySums() got := fis[0].Sum() if got != expected { t.Errorf("Expected %q, got %q", expected, got) } fis = newFileInfoSums() expected = "dup1" fis.SortByNames() gotFis := fis[0] if gotFis.Name() != expected { t.Errorf("Expected %q, got %q", expected, gotFis.Name()) } // since a duplicate is first, ensure it is ordered first by position too if gotFis.Pos() != 4 { t.Errorf("Expected %d, got %d", 4, gotFis.Pos()) } } docker-1.6.2/pkg/tarsum/tarsum_spec.md0000644000175000017500000002111712524223634017301 0ustar tianontianonpage_title: TarSum checksum specification
page_description: Documentation for algorithms used in the TarSum checksum calculation
page_keywords: docker, checksum, validation, tarsum

# TarSum Checksum Specification

## Abstract

This document describes the algorithms used in performing the TarSum checksum calculation on filesystem layers, the need for this method over existing methods, and the versioning of this calculation.

## Introduction

The transportation of filesystems, regarding Docker, is done with tar(1) archives. There are a variety of tar serialization formats [2], and a key concern here is ensuring a repeatable checksum given a set of inputs from a generic tar archive. Types of transportation include distribution to and from a registry endpoint, saving and loading through commands or Docker daemon APIs, transferring the build context from client to Docker daemon, and committing the filesystem of a container to become an image.

As tar archives are used for transit, but not preserved in many situations, the focus of the algorithm is to ensure the integrity of the preserved filesystem while maintaining a deterministic accountability. This neither constrains the ordering or manipulation of the files during the creation or unpacking of the archive, nor includes additional metadata state about the file system attributes.

## Intended Audience

This document outlines the methods used for consistent checksum calculation for filesystems transported via tar archives. Auditing these methodologies is an open and iterative process.
This document should accommodate the review of source code. Ultimately, this document should be the starting point of further refinements to the algorithm and its future versions.

## Concept

The checksum mechanism must ensure the integrity and assurance of the filesystem payload.

## Checksum Algorithm Profile

A checksum mechanism must define the following operations and attributes:

* Associated hashing cipher - used to checksum each file payload and attribute information.
* Checksum list - each file of the filesystem archive has its checksum calculated from the payload and attributes of the file. The final checksum is calculated from this list, with specific ordering.
* Version - as the algorithm adapts to requirements, there are behaviors of the algorithm to manage by versioning.
* Archive being calculated - the tar archive having its checksum calculated

## Elements of TarSum checksum

The calculated sum output is a text string. The elements included in the output of the calculated sum comprise the information needed for validation of the sum (TarSum version and hashing cipher used) and the expected checksum in hexadecimal form.

There are two delimiters used:

* '+' separates TarSum version from hashing cipher
* ':' separates calculation mechanics from expected hash

Example:

```
"tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e"
|_version_|_cipher__|_______________________expected_sum_______________________|
|_calculation_mechanics_|
```

## Versioning

Versioning was introduced [0] to accommodate needed differences in calculation, and the ability to maintain reverse compatibility. The general algorithm is described further in the 'Calculation' section below.

### Version0

This is the initial version of TarSum. Its element in the TarSum checksum string is `tarsum`.

### Version1

Its element in the TarSum checksum is `tarsum.v1`. The notable changes in this version:

* Exclusion of file `mtime` from the file information headers, in each file checksum calculation
* Inclusion of extended attribute (`xattrs`, also seen as `SCHILY.xattr.` prefixed Pax tar file info headers) keys and values in each file checksum calculation

### VersionDev

*Do not use unless validating refinements to the checksum algorithm*

Its element in the TarSum checksum is `tarsum.dev`. This is a floating placeholder for a next version and grounds for testing changes. The methods used for calculation are subject to change without notice, and this version is for testing and not for production use.

## Ciphers

The official default and standard hashing cipher used in the calculation mechanic is `sha256`. This refers to the SHA256 hash algorithm as defined in FIPS 180-4.

Though the TarSum algorithm itself is not exclusively bound to the single hashing cipher `sha256`, support for alternate hashing ciphers was later added [1]. Use cases for an alternate cipher could include future-proofing the TarSum checksum format and using faster cipher hashes for tar filesystem checksums.

## Calculation

### Requirement

As mentioned earlier, the calculation is such that it takes into consideration the lifecycle of the tar archive; the tar archive is not an immutable, permanent artifact. Otherwise, options like relying on a known hashing cipher checksum of the archive itself would be reliable enough. The tar archive of the filesystem is used as a transportation medium for Docker images, and the archive is discarded once its contents are extracted. Therefore, for consistent validation, items such as the order of files in the tar archive and timestamps are subject to change once an image is received.

### Process

The method is typically iterative due to reading tar info headers from the archive stream, though this is not a strict requirement.

#### Files

Each file in the tar archive has its contents (headers and body) checksummed individually using the designated associated hashing cipher. The ordered headers of the file are written to the checksum calculation first, and then the payload of the file body. The resulting checksum of the file is appended to the list of file sums. The sum is encoded as a string of the hexadecimal digest. Additionally, the file name and position in the archive are kept as a reference for special ordering.

#### Headers

The following headers are read, in this order (with the corresponding representation of each value):

* 'name' - string
* 'mode' - string of the base10 integer
* 'uid' - string of the integer
* 'gid' - string of the integer
* 'size' - string of the integer
* 'mtime' (_Version0 only_) - string of the integer seconds since 1970-01-01 00:00:00 UTC
* 'typeflag' - string of the char
* 'linkname' - string
* 'uname' - string
* 'gname' - string
* 'devmajor' - string of the integer
* 'devminor' - string of the integer

For Version1 and later, the extended attribute headers ("SCHILY.xattr." prefixed pax headers) are included after the above list. These xattr key/values are first sorted by the keys.

#### Header Format

The ordered headers are written to the hash in the format of "{.key}{.value}" with no newline.

#### Body

After the ordered headers of the file have been added to the checksum for the file, the body of the file is written to the hash.

#### List of file sums

The list of file sums is sorted by the string of the hexadecimal digest.

If there are two files in the tar with matching paths, the order of occurrence for that path is reflected for the sums of the corresponding file header and body.

#### Final Checksum

Begin with a fresh or initial state of the associated hash cipher. If there is additional payload to include in the TarSum calculation for the archive, it is written first. Then each checksum from the ordered list of file sums is written to the hash. The resulting digest is formatted per the Elements of TarSum checksum, including the TarSum version, the associated hash cipher and the hexadecimal encoded checksum digest.
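#### Example (non-normative)

The following minimal sketch, which assumes the `pkg/tarsum` Go package of this repository as the implementation, shows how a caller would drive a Version1 calculation over a tar archive; the command-line handling is illustrative and not part of this specification:

```
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"

	"github.com/docker/docker/pkg/tarsum"
)

func main() {
	f, err := os.Open(os.Args[1]) // path to a tar archive
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()

	// Version1 selects the v1 header ordering (no mtime, xattrs included).
	// The second argument only disables gzip re-compression of the output
	// stream; it has no effect on the resulting checksum.
	ts, err := tarsum.NewTarSum(f, true, tarsum.Version1)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// The checksum is accumulated as a side effect of reading the stream.
	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Prints e.g. "tarsum.v1+sha256:<hex digest>".
	fmt.Println(ts.Sum(nil))
}
```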
## Security Considerations

The initial version of TarSum has undergone one update that could invalidate handcrafted tar archives. The tar archive format supports appending of files with same names as prior files in the archive. The latter file will clobber the prior file of the same path. Due to this, the algorithm now accounts for files with matching paths, and orders the list of file sums accordingly [3].

## Footnotes

* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0
* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e
* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29
* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31

## Acknowledgements

Joffrey F (shin-) and Guillaume J. Charmes (creack) on the initial work of the TarSum calculation.
docker-1.6.2/pkg/tarsum/testdata/0000755000175000017500000000000012524223634016241 5ustar tianontianondocker-1.6.2/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/0000755000175000017500000000000012524223634026520 5ustar tianontianon././@LongLink0000644000000000000000000000014700000000000011605 Lustar rootrootdocker-1.6.2/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/jsondocker-1.6.2/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/js0000644000175000017500000000107512524223634027062 0ustar tianontianon{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}././@LongLink0000644000000000000000000000015400000000000011603 Lustar rootrootdocker-1.6.2/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tardocker-1.6.2/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/la0000644000175000017500000000300012524223634027030 0ustar tianontianon./0040755000000000000000000000000012156431635007413 5ustar0000000000000000docker-1.6.2/pkg/tarsum/testdata/collision/0000755000175000017500000000000012524223634020234 5ustar tianontianondocker-1.6.2/pkg/tarsum/testdata/collision/collision-2.tar0000644000175000017500000002400012524223634023072 0ustar tianontianonfile0000644000175000017500000000000612402144551011302 0ustar vbattsvbattsHOWDY docker-1.6.2/pkg/tarsum/testdata/collision/collision-1.tar0000644000175000017500000002400012524223634023071 0ustar tianontianonfile0000644000175000017500000000000612402144551011302 0ustar vbattsvbattsHOWDY file0000644000175000017500000000000612402144462011303 0ustar vbattsvbattsfarts docker-1.6.2/pkg/tarsum/testdata/collision/collision-3.tar0000644000175000017500000002400012524223634023073 0ustar tianontianonfile0000644000175000017500000000000612402144462011303 0ustar vbattsvbattsfarts docker-1.6.2/pkg/tarsum/testdata/collision/collision-0.tar0000644000175000017500000002400012524223634023070 0ustar tianontianonfile0000644000175000017500000000000612402144462011303 0ustar vbattsvbattsfarts file0000644000175000017500000000000612402144551011302 0ustar vbattsvbattsHOWDY docker-1.6.2/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/0000755000175000017500000000000012524223634027052 5ustar tianontianon././@LongLink0000644000000000000000000000014700000000000011605 Lustar rootrootdocker-1.6.2/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/jsondocker-1.6.2/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/js0000644000175000017500000000264712524223634027422 0ustar 
tianontianon{"id":"46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457","parent":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","created":"2014-04-07T02:45:52.610504484Z","container":"e0f07f8d72cae171a3dcc35859960e7e956e0628bce6fedc4122bf55b2c287c7","container_config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","sed -ri 's/^(%wheel.*)(ALL)$/\\1NOPASSWD: \\2/' /etc/sudoers"],"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.9.1-dev","config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":3425}././@LongLink0000644000000000000000000000015400000000000011603 Lustar rootrootdocker-1.6.2/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tardocker-1.6.2/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/la0000644000175000017500000002200012524223634027363 0ustar tianontianondev/0040755000000000000000000000000012320410536010022 5ustar0000000000000000dev/core0120777000000000000000000000000012320410536012741 2/proc/kcoreustar0000000000000000dev/stderr0120777000000000000000000000000012320410536013674 2/proc/self/fd/2ustar0000000000000000dev/stdout0120777000000000000000000000000012320410536013712 2/proc/self/fd/1ustar0000000000000000dev/fd0120777000000000000000000000000012320410536012621 2/proc/self/fdustar0000000000000000dev/ptmx0120777000000000000000000000000012320410536012452 2pts/ptmxustar0000000000000000dev/stdin0120777000000000000000000000000012320410536013510 2/proc/self/fd/0ustar0000000000000000etc/0040755000000000000000000000000012320410536010017 5ustar0000000000000000etc/sudoers0100440000000000000000000000642412320410536011423 0ustar0000000000000000## Sudoers allows particular users to run various commands as ## the root user, without needing the root password. ## ## Examples are provided at the bottom of the file for collections ## of related commands, which can then be delegated out to particular ## users or groups. ## ## This file must be edited with the 'visudo' command. ## Host Aliases ## Groups of machines. You may prefer to use hostnames (perhaps using ## wildcards for entire domains) or IP addresses instead. # Host_Alias FILESERVERS = fs1, fs2 # Host_Alias MAILSERVERS = smtp, smtp2 ## User Aliases ## These aren't often necessary, as you can use regular groups ## (ie, from files, LDAP, NIS, etc) in this file - just use %groupname ## rather than USERALIAS # User_Alias ADMINS = jsmith, mikem ## Command Aliases ## These are groups of related commands... 
## Networking # Cmnd_Alias NETWORKING = /sbin/route, /sbin/ifconfig, /bin/ping, /sbin/dhclient, /usr/bin/net, /sbin/iptables, /usr/bin/rfcomm, /usr/bin/wvdial, /sbin/iwconfig, /sbin/mii-tool ## Installation and management of software # Cmnd_Alias SOFTWARE = /bin/rpm, /usr/bin/up2date, /usr/bin/yum ## Services # Cmnd_Alias SERVICES = /sbin/service, /sbin/chkconfig ## Updating the locate database # Cmnd_Alias LOCATE = /usr/bin/updatedb ## Storage # Cmnd_Alias STORAGE = /sbin/fdisk, /sbin/sfdisk, /sbin/parted, /sbin/partprobe, /bin/mount, /bin/umount ## Delegating permissions # Cmnd_Alias DELEGATING = /usr/sbin/visudo, /bin/chown, /bin/chmod, /bin/chgrp ## Processes # Cmnd_Alias PROCESSES = /bin/nice, /bin/kill, /usr/bin/kill, /usr/bin/killall ## Drivers # Cmnd_Alias DRIVERS = /sbin/modprobe # Defaults specification # # Disable "ssh hostname sudo ", because it will show the password in clear. # You have to run "ssh -t hostname sudo ". # Defaults requiretty Defaults env_reset Defaults env_keep = "COLORS DISPLAY HOSTNAME HISTSIZE INPUTRC KDEDIR LS_COLORS" Defaults env_keep += "MAIL PS1 PS2 QTDIR USERNAME LANG LC_ADDRESS LC_CTYPE" Defaults env_keep += "LC_COLLATE LC_IDENTIFICATION LC_MEASUREMENT LC_MESSAGES" Defaults env_keep += "LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER LC_TELEPHONE" Defaults env_keep += "LC_TIME LC_ALL LANGUAGE LINGUAS _XKB_CHARSET XAUTHORITY" Defaults secure_path = /sbin:/bin:/usr/sbin:/usr/bin ## Next comes the main part: which users can run what software on ## which machines (the sudoers file can be shared between multiple ## systems). ## Syntax: ## ## user MACHINE=COMMANDS ## ## The COMMANDS section may have other options added to it. ## ## Allow root to run any commands anywhere root ALL=(ALL) ALL ## Allows members of the 'sys' group to run networking, software, ## service management apps and more. 
# %sys ALL = NETWORKING, SOFTWARE, SERVICES, STORAGE, DELEGATING, PROCESSES, LOCATE, DRIVERS ## Allows people in group wheel to run all commands %wheel ALL=(ALL) NOPASSWD: ALL ## Same thing without a password # %wheel ALL=(ALL) NOPASSWD: ALL ## Allows members of the users group to mount and unmount the ## cdrom as root # %users ALL=/sbin/mount /mnt/cdrom, /sbin/umount /mnt/cdrom ## Allows members of the users group to shutdown this system # %users localhost=/sbin/shutdown -h now ## Read drop-in files from /etc/sudoers.d (the # here does not mean a comment) #includedir /etc/sudoers.d docker-1.6.2/pkg/tarsum/testdata/xattr/0000755000175000017500000000000012524223634017403 5ustar tianontianondocker-1.6.2/pkg/tarsum/testdata/xattr/json0000644000175000017500000000270412524223634020302 0ustar tianontianon{"id":"4439c3c7f847954100b42b267e7e5529cac1d6934db082f65795c5ca2e594d93","parent":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","created":"2014-05-16T17:19:44.091534414Z","container":"5f92fb06cc58f357f0cde41394e2bbbb664e663974b2ac1693ab07b7a306749b","container_config":{"Hostname":"9565c6517a0e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","setcap 'cap_setgid,cap_setuid+ep' ./file \u0026\u0026 getcap ./file"],"Image":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.11.1-dev","config":{"Hostname":"9565c6517a0e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":0}docker-1.6.2/pkg/tarsum/testdata/xattr/layer.tar0000644000175000017500000000500012524223634021222 0ustar tianontianonPaxHeaders.12099/file0000000000000000000000000000007112335444166012715 xustar000000000000000057 SCHILY.xattr.security.capability=À file0100644000000000000000000000000012335444166010105 0ustar0000000000000000docker-1.6.2/pkg/ioutils/0000755000175000017500000000000012524223634014605 5ustar tianontianondocker-1.6.2/pkg/ioutils/readers.go0000644000175000017500000001222012524223634016556 0ustar tianontianonpackage ioutils import ( "bytes" "crypto/rand" "io" "math/big" "sync" "time" ) type readCloserWrapper struct { io.Reader closer func() error } func (r *readCloserWrapper) Close() error { return r.closer() } func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { return &readCloserWrapper{ Reader: r, closer: closer, } } type readerErrWrapper struct { reader io.Reader closer func() } func (r *readerErrWrapper) Read(p []byte) (int, error) { n, err := r.reader.Read(p) if err != nil { r.closer() } return n, err } func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { return &readerErrWrapper{ reader: r, closer: closer, } } // bufReader allows the underlying reader to continue to produce // output by 
pre-emptively reading from the wrapped reader. // This is achieved by buffering this data in bufReader's // expanding buffer. type bufReader struct { sync.Mutex buf *bytes.Buffer reader io.Reader err error wait sync.Cond drainBuf []byte reuseBuf []byte maxReuse int64 resetTimeout time.Duration bufLenResetThreshold int64 maxReadDataReset int64 } func NewBufReader(r io.Reader) *bufReader { var timeout int if randVal, err := rand.Int(rand.Reader, big.NewInt(120)); err == nil { timeout = int(randVal.Int64()) + 180 } else { timeout = 300 } reader := &bufReader{ buf: &bytes.Buffer{}, drainBuf: make([]byte, 1024), reuseBuf: make([]byte, 4096), maxReuse: 1000, resetTimeout: time.Second * time.Duration(timeout), bufLenResetThreshold: 100 * 1024, maxReadDataReset: 10 * 1024 * 1024, reader: r, } reader.wait.L = &reader.Mutex go reader.drain() return reader } func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer *bytes.Buffer) *bufReader { reader := &bufReader{ buf: buffer, drainBuf: drainBuffer, reader: r, } reader.wait.L = &reader.Mutex go reader.drain() return reader } func (r *bufReader) drain() { var ( duration time.Duration lastReset time.Time now time.Time reset bool bufLen int64 dataSinceReset int64 maxBufLen int64 reuseBufLen int64 reuseCount int64 ) reuseBufLen = int64(len(r.reuseBuf)) lastReset = time.Now() for { n, err := r.reader.Read(r.drainBuf) dataSinceReset += int64(n) r.Lock() bufLen = int64(r.buf.Len()) if bufLen > maxBufLen { maxBufLen = bufLen } // Avoid unbounded growth of the buffer over time. // This has been discovered to be the only non-intrusive // solution to the unbounded growth of the buffer. // Alternative solutions such as compression, multiple // buffers, channels and other similar pieces of code // were reducing throughput, overall Docker performance // or simply crashed Docker. // This solution releases the buffer when specific // conditions are met to avoid the continuous resizing // of the buffer for long lived containers. // // Move data to the front of the buffer if it's // smaller than what reuseBuf can store if bufLen > 0 && reuseBufLen >= bufLen { n, _ := r.buf.Read(r.reuseBuf) r.buf.Write(r.reuseBuf[0:n]) // Take action if the buffer has been reused too many // times and if there's data in the buffer. // The timeout is also used as means to avoid doing // these operations more often or less often than // required. // The various conditions try to detect heavy activity // in the buffer which might be indicators of heavy // growth of the buffer. } else if reuseCount >= r.maxReuse && bufLen > 0 { now = time.Now() duration = now.Sub(lastReset) timeoutReached := duration >= r.resetTimeout // The timeout has been reached and the // buffered data couldn't be moved to the front // of the buffer, so the buffer gets reset. if timeoutReached && bufLen > reuseBufLen { reset = true } // The amount of buffered data is too high now, // reset the buffer. if timeoutReached && maxBufLen >= r.bufLenResetThreshold { reset = true } // Reset the buffer if a certain amount of // data has gone through the buffer since the // last reset. if timeoutReached && dataSinceReset >= r.maxReadDataReset { reset = true } // The buffered data is moved to a fresh buffer, // swap the old buffer with the new one and // reset all counters. 
if reset { newbuf := &bytes.Buffer{} newbuf.ReadFrom(r.buf) r.buf = newbuf lastReset = now reset = false dataSinceReset = 0 maxBufLen = 0 reuseCount = 0 } } if err != nil { r.err = err } else { r.buf.Write(r.drainBuf[0:n]) } reuseCount++ r.wait.Signal() r.Unlock() if err != nil { break } } } func (r *bufReader) Read(p []byte) (n int, err error) { r.Lock() defer r.Unlock() for { n, err = r.buf.Read(p) if n > 0 { return n, err } if r.err != nil { return 0, r.err } r.wait.Wait() } } func (r *bufReader) Close() error { closer, ok := r.reader.(io.ReadCloser) if !ok { return nil } return closer.Close() } docker-1.6.2/pkg/ioutils/readers_test.go0000644000175000017500000000355612524223634017631 0ustar tianontianonpackage ioutils import ( "bytes" "io" "io/ioutil" "testing" ) func TestBufReader(t *testing.T) { reader, writer := io.Pipe() bufreader := NewBufReader(reader) // Write everything down to a Pipe // Usually, a pipe should block but because of the buffered reader, // the writes will go through done := make(chan bool) go func() { writer.Write([]byte("hello world")) writer.Close() done <- true }() // Drain the reader *after* everything has been written, just to verify // it is indeed buffering <-done output, err := ioutil.ReadAll(bufreader) if err != nil { t.Fatal(err) } if !bytes.Equal(output, []byte("hello world")) { t.Error(string(output)) } } type repeatedReader struct { readCount int maxReads int data []byte } func newRepeatedReader(max int, data []byte) *repeatedReader { return &repeatedReader{0, max, data} } func (r *repeatedReader) Read(p []byte) (int, error) { if r.readCount >= r.maxReads { return 0, io.EOF } r.readCount++ n := copy(p, r.data) return n, nil } func testWithData(data []byte, reads int) { reader := newRepeatedReader(reads, data) bufReader := NewBufReader(reader) io.Copy(ioutil.Discard, bufReader) } func Benchmark1M10BytesReads(b *testing.B) { reads := 1000000 readSize := int64(10) data := make([]byte, readSize) b.SetBytes(readSize * int64(reads)) b.ResetTimer() for i := 0; i < b.N; i++ { testWithData(data, reads) } } func Benchmark1M1024BytesReads(b *testing.B) { reads := 1000000 readSize := int64(1024) data := make([]byte, readSize) b.SetBytes(readSize * int64(reads)) b.ResetTimer() for i := 0; i < b.N; i++ { testWithData(data, reads) } } func Benchmark10k32KBytesReads(b *testing.B) { reads := 10000 readSize := int64(32 * 1024) data := make([]byte, readSize) b.SetBytes(readSize * int64(reads)) b.ResetTimer() for i := 0; i < b.N; i++ { testWithData(data, reads) } } docker-1.6.2/pkg/ioutils/writers.go0000644000175000017500000000120612524223634016632 0ustar tianontianonpackage ioutils import "io" type NopWriter struct{} func (*NopWriter) Write(buf []byte) (int, error) { return len(buf), nil } type nopWriteCloser struct { io.Writer } func (w *nopWriteCloser) Close() error { return nil } func NopWriteCloser(w io.Writer) io.WriteCloser { return &nopWriteCloser{w} } type NopFlusher struct{} func (f *NopFlusher) Flush() {} type writeCloserWrapper struct { io.Writer closer func() error } func (r *writeCloserWrapper) Close() error { return r.closer() } func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { return &writeCloserWrapper{ Writer: r, closer: closer, } } docker-1.6.2/pkg/system/0000755000175000017500000000000012524223634014441 5ustar tianontianondocker-1.6.2/pkg/system/lstat_test.go0000644000175000017500000000074512524223634017164 0ustar tianontianonpackage system import ( "os" "testing" ) func TestLstat(t *testing.T) { file, invalid, _, dir := 
prepareFiles(t) defer os.RemoveAll(dir) statFile, err := Lstat(file) if err != nil { t.Fatal(err) } if statFile == nil { t.Fatal("returned empty stat for existing file") } statInvalid, err := Lstat(invalid) if err == nil { t.Fatal("did not return error for non-existing file") } if statInvalid != nil { t.Fatal("returned non-nil stat for non-existing file") } } docker-1.6.2/pkg/system/umask_windows.go0000644000175000017500000000024512524223634017663 0ustar tianontianon// +build windows package system func Umask(newmask int) (oldmask int, err error) { // should not be called on cli code path return 0, ErrNotSupportedPlatform } docker-1.6.2/pkg/system/mknod_windows.go0000644000175000017500000000044312524223634017653 0ustar tianontianon// +build windows package system func Mknod(path string, mode uint32, dev int) error { // should not be called on cli code path return ErrNotSupportedPlatform } func Mkdev(major int64, minor int64) uint32 { panic("Mkdev not implemented on windows, should not be called on cli code") } docker-1.6.2/pkg/system/meminfo_linux_test.go0000644000175000017500000000141412524223634020700 0ustar tianontianonpackage system import ( "strings" "testing" "github.com/docker/docker/pkg/units" ) func TestMemInfo(t *testing.T) { const input = ` MemTotal: 1 kB MemFree: 2 kB SwapTotal: 3 kB SwapFree: 4 kB Malformed1: Malformed2: 1 Malformed3: 2 MB Malformed4: X kB ` meminfo, err := parseMemInfo(strings.NewReader(input)) if err != nil { t.Fatal(err) } if meminfo.MemTotal != 1*units.KiB { t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal) } if meminfo.MemFree != 2*units.KiB { t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree) } if meminfo.SwapTotal != 3*units.KiB { t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal) } if meminfo.SwapFree != 4*units.KiB { t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree) } } docker-1.6.2/pkg/system/errors.go0000644000175000017500000000020212524223634016276 0ustar tianontianonpackage system import ( "errors" ) var ( ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") ) docker-1.6.2/pkg/system/umask.go0000644000175000017500000000022412524223634016106 0ustar tianontianon// +build !windows package system import ( "syscall" ) func Umask(newmask int) (oldmask int, err error) { return syscall.Umask(newmask), nil } docker-1.6.2/pkg/system/stat_unsupported.go0000644000175000017500000000037712524223634020422 0ustar tianontianon// +build !linux,!windows package system import ( "syscall" ) func fromStatT(s *syscall.Stat_t) (*Stat_t, error) { return &Stat_t{size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), mtim: s.Mtimespec}, nil } docker-1.6.2/pkg/system/utimes_test.go0000644000175000017500000000253612524223634017343 0ustar tianontianonpackage system import ( "io/ioutil" "os" "path/filepath" "syscall" "testing" ) func prepareFiles(t *testing.T) (string, string, string, string) { dir, err := ioutil.TempDir("", "docker-system-test") if err != nil { t.Fatal(err) } file := filepath.Join(dir, "exist") if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { t.Fatal(err) } invalid := filepath.Join(dir, "doesnt-exist") symlink := filepath.Join(dir, "symlink") if err := os.Symlink(file, symlink); err != nil { t.Fatal(err) } return file, invalid, symlink, dir } func TestLUtimesNano(t *testing.T) { file, invalid, symlink, dir := prepareFiles(t) defer os.RemoveAll(dir) before, err := os.Stat(file) if err != nil { t.Fatal(err) } ts := []syscall.Timespec{{0, 0}, {0, 0}} if err := 
LUtimesNano(symlink, ts); err != nil { t.Fatal(err) } symlinkInfo, err := os.Lstat(symlink) if err != nil { t.Fatal(err) } if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() { t.Fatal("The modification time of the symlink should be different") } fileInfo, err := os.Stat(file) if err != nil { t.Fatal(err) } if before.ModTime().Unix() != fileInfo.ModTime().Unix() { t.Fatal("The modification time of the file should be same") } if err := LUtimesNano(invalid, ts); err == nil { t.Fatal("Doesn't return an error on a non-existing file") } } docker-1.6.2/pkg/system/lstat_windows.go0000644000175000017500000000023712524223634017673 0ustar tianontianon// +build windows package system func Lstat(path string) (*Stat_t, error) { // should not be called on cli code path return nil, ErrNotSupportedPlatform } docker-1.6.2/pkg/system/utimes_freebsd.go0000644000175000017500000000076212524223634017775 0ustar tianontianonpackage system import ( "syscall" "unsafe" ) func LUtimesNano(path string, ts []syscall.Timespec) error { var _path *byte _path, err := syscall.BytePtrFromString(path) if err != nil { return err } if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS { return err } return nil } func UtimesNano(path string, ts []syscall.Timespec) error { return syscall.UtimesNano(path, ts) } docker-1.6.2/pkg/system/stat_windows.go0000644000175000017500000000052112524223634017513 0ustar tianontianon// +build windows package system import ( "errors" "syscall" ) func fromStatT(s *syscall.Win32FileAttributeData) (*Stat_t, error) { return nil, errors.New("fromStatT should not be called on windows path") } func Stat(path string) (*Stat_t, error) { // should not be called on cli code path return nil, ErrNotSupportedPlatform } docker-1.6.2/pkg/system/utimes_linux.go0000644000175000017500000000121312524223634017512 0ustar tianontianonpackage system import ( "syscall" "unsafe" ) func LUtimesNano(path string, ts []syscall.Timespec) error { // These are not currently available in syscall AT_FDCWD := -100 AT_SYMLINK_NOFOLLOW := 0x100 var _path *byte _path, err := syscall.BytePtrFromString(path) if err != nil { return err } if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(AT_FDCWD), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(AT_SYMLINK_NOFOLLOW), 0, 0); err != 0 && err != syscall.ENOSYS { return err } return nil } func UtimesNano(path string, ts []syscall.Timespec) error { return syscall.UtimesNano(path, ts) } docker-1.6.2/pkg/system/xattrs_unsupported.go0000644000175000017500000000036012524223634020764 0ustar tianontianon// +build !linux package system func Lgetxattr(path string, attr string) ([]byte, error) { return nil, ErrNotSupportedPlatform } func Lsetxattr(path string, attr string, data []byte, flags int) error { return ErrNotSupportedPlatform } docker-1.6.2/pkg/system/stat_linux.go0000644000175000017500000000055512524223634017167 0ustar tianontianonpackage system import ( "syscall" ) func fromStatT(s *syscall.Stat_t) (*Stat_t, error) { return &Stat_t{size: s.Size, mode: s.Mode, uid: s.Uid, gid: s.Gid, rdev: s.Rdev, mtim: s.Mtim}, nil } func Stat(path string) (*Stat_t, error) { s := &syscall.Stat_t{} err := syscall.Stat(path, s) if err != nil { return nil, err } return fromStatT(s) } docker-1.6.2/pkg/system/stat_test.go0000644000175000017500000000110312524223634016775 0ustar tianontianonpackage system import ( "os" "syscall" "testing" ) func TestFromStatT(t 
*testing.T) { file, _, _, dir := prepareFiles(t) defer os.RemoveAll(dir) stat := &syscall.Stat_t{} err := syscall.Lstat(file, stat) s, err := fromStatT(stat) if err != nil { t.Fatal(err) } if stat.Mode != s.Mode() { t.Fatal("got invalid mode") } if stat.Uid != s.Uid() { t.Fatal("got invalid uid") } if stat.Gid != s.Gid() { t.Fatal("got invalid gid") } if stat.Rdev != s.Rdev() { t.Fatal("got invalid rdev") } if stat.Mtim != s.Mtim() { t.Fatal("got invalid mtim") } } docker-1.6.2/pkg/system/xattrs_linux.go0000644000175000017500000000312112524223634017531 0ustar tianontianonpackage system import ( "syscall" "unsafe" ) // Returns a nil slice and nil error if the xattr is not set func Lgetxattr(path string, attr string) ([]byte, error) { pathBytes, err := syscall.BytePtrFromString(path) if err != nil { return nil, err } attrBytes, err := syscall.BytePtrFromString(attr) if err != nil { return nil, err } dest := make([]byte, 128) destBytes := unsafe.Pointer(&dest[0]) sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) if errno == syscall.ENODATA { return nil, nil } if errno == syscall.ERANGE { dest = make([]byte, sz) destBytes := unsafe.Pointer(&dest[0]) sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) } if errno != 0 { return nil, errno } return dest[:sz], nil } var _zero uintptr func Lsetxattr(path string, attr string, data []byte, flags int) error { pathBytes, err := syscall.BytePtrFromString(path) if err != nil { return err } attrBytes, err := syscall.BytePtrFromString(attr) if err != nil { return err } var dataBytes unsafe.Pointer if len(data) > 0 { dataBytes = unsafe.Pointer(&data[0]) } else { dataBytes = unsafe.Pointer(&_zero) } _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) if errno != 0 { return errno } return nil } docker-1.6.2/pkg/system/utimes_darwin.go0000644000175000017500000000034512524223634017644 0ustar tianontianonpackage system import "syscall" func LUtimesNano(path string, ts []syscall.Timespec) error { return ErrNotSupportedPlatform } func UtimesNano(path string, ts []syscall.Timespec) error { return syscall.UtimesNano(path, ts) } docker-1.6.2/pkg/system/stat.go0000644000175000017500000000102312524223634015737 0ustar tianontianonpackage system import ( "syscall" ) type Stat_t struct { mode uint32 uid uint32 gid uint32 rdev uint64 size int64 mtim syscall.Timespec } func (s Stat_t) Mode() uint32 { return s.mode } func (s Stat_t) Uid() uint32 { return s.uid } func (s Stat_t) Gid() uint32 { return s.gid } func (s Stat_t) Rdev() uint64 { return s.rdev } func (s Stat_t) Size() int64 { return s.size } func (s Stat_t) Mtim() syscall.Timespec { return s.mtim } func (s Stat_t) GetLastModification() syscall.Timespec { return s.Mtim() } docker-1.6.2/pkg/system/meminfo_unsupported.go0000644000175000017500000000016012524223634021067 0ustar tianontianon// +build !linux package system func ReadMemInfo() (*MemInfo, error) { return nil, ErrNotSupportedPlatform } docker-1.6.2/pkg/system/utimes_unsupported.go0000644000175000017500000000040312524223634020743 0ustar tianontianon// +build !linux,!freebsd,!darwin package system import "syscall" func LUtimesNano(path string, ts []syscall.Timespec) error { return 
ErrNotSupportedPlatform } func UtimesNano(path string, ts []syscall.Timespec) error { return ErrNotSupportedPlatform } docker-1.6.2/pkg/system/lstat.go0000644000175000017500000000033212524223634016115 0ustar tianontianon// +build !windows package system import ( "syscall" ) func Lstat(path string) (*Stat_t, error) { s := &syscall.Stat_t{} err := syscall.Lstat(path, s) if err != nil { return nil, err } return fromStatT(s) } docker-1.6.2/pkg/system/mknod.go0000644000175000017500000000076512524223634016110 0ustar tianontianon// +build !windows package system import ( "syscall" ) func Mknod(path string, mode uint32, dev int) error { return syscall.Mknod(path, mode, dev) } // Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. // They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, // then the top 12 bits of the minor func Mkdev(major int64, minor int64) uint32 { return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) } docker-1.6.2/pkg/system/meminfo.go0000644000175000017500000000060012524223634016416 0ustar tianontianonpackage system // MemInfo contains memory statistics of the host system. type MemInfo struct { // Total usable RAM (i.e. physical RAM minus a few reserved bits and the // kernel binary code). MemTotal int64 // Amount of free memory. MemFree int64 // Total amount of swap space available. SwapTotal int64 // Amount of swap space that is currently unused. SwapFree int64 } docker-1.6.2/pkg/system/meminfo_linux.go0000644000175000017500000000236712524223634017651 0ustar tianontianonpackage system import ( "bufio" "errors" "io" "os" "strconv" "strings" "github.com/docker/docker/pkg/units" ) var ( ErrMalformed = errors.New("malformed file") ) // Retrieve memory statistics of the host system and parse them into a MemInfo // type. func ReadMemInfo() (*MemInfo, error) { file, err := os.Open("/proc/meminfo") if err != nil { return nil, err } defer file.Close() return parseMemInfo(file) } func parseMemInfo(reader io.Reader) (*MemInfo, error) { meminfo := &MemInfo{} scanner := bufio.NewScanner(reader) for scanner.Scan() { // Expected format: ["MemTotal:", "1234", "kB"] parts := strings.Fields(scanner.Text()) // Sanity checks: Skip malformed entries. if len(parts) < 3 || parts[2] != "kB" { continue } // Convert to bytes. size, err := strconv.Atoi(parts[1]) if err != nil { continue } bytes := int64(size) * units.KiB switch parts[0] { case "MemTotal:": meminfo.MemTotal = bytes case "MemFree:": meminfo.MemFree = bytes case "SwapTotal:": meminfo.SwapTotal = bytes case "SwapFree:": meminfo.SwapFree = bytes } } // Handle errors that may have occurred during the reading of the file. if err := scanner.Err(); err != nil { return nil, err } return meminfo, nil } docker-1.6.2/pkg/symlink/0000755000175000017500000000000012524223634014603 5ustar tianontianondocker-1.6.2/pkg/symlink/fs.go0000644000175000017500000001041012524223634015536 0ustar tianontianon// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE.BSD file. // This code is a modified version of path/filepath/symlink.go from the Go standard library. 
package symlink import ( "bytes" "errors" "os" "path/filepath" "strings" ) // FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an absolute path func FollowSymlinkInScope(path, root string) (string, error) { path, err := filepath.Abs(path) if err != nil { return "", err } root, err = filepath.Abs(root) if err != nil { return "", err } return evalSymlinksInScope(path, root) } // evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return // a result guaranteed to be contained within the scope `root`, at the time of the call. // Symlinks in `root` are not evaluated and left as-is. // Errors encountered while attempting to evaluate symlinks in path will be returned. // Non-existing paths are valid and do not constitute an error. // `path` has to contain `root` as a prefix, or else an error will be returned. // Trying to break out from `root` does not constitute an error. // // Example: // If /foo/bar -> /outside, // FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/oustide" // // IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks // are created and not to create subsequently, additional symlinks that could potentially make a // previously-safe path, unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo") // would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should // no longer be considered safely contained in "/foo". func evalSymlinksInScope(path, root string) (string, error) { root = filepath.Clean(root) if path == root { return path, nil } if !strings.HasPrefix(path, root) { return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) } const maxIter = 255 originalPath := path // given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c" path = path[len(root):] if root == string(filepath.Separator) { path = string(filepath.Separator) + path } if !strings.HasPrefix(path, string(filepath.Separator)) { return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) } path = filepath.Clean(path) // consume path by taking each frontmost path element, // expanding it if it's a symlink, and appending it to b var b bytes.Buffer // b here will always be considered to be the "current absolute path inside // root" when we append paths to it, we also append a slash and use // filepath.Clean after the loop to trim the trailing slash for n := 0; path != ""; n++ { if n > maxIter { return "", errors.New("evalSymlinksInScope: too many links in " + originalPath) } // find next path component, p i := strings.IndexRune(path, filepath.Separator) var p string if i == -1 { p, path = path, "" } else { p, path = path[:i], path[i+1:] } if p == "" { continue } // this takes a b.String() like "b/../" and a p like "c" and turns it // into "/b/../c" which then gets filepath.Cleaned into "/c" and then // root gets prepended and we Clean again (to remove any trailing slash // if the first Clean gave us just "/") cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p) if cleanP == string(filepath.Separator) { // never Lstat "/" itself b.Reset() continue } fullP := filepath.Clean(root + cleanP) fi, err := os.Lstat(fullP) if os.IsNotExist(err) { // if p does not exist, accept it b.WriteString(p) b.WriteRune(filepath.Separator) continue } if err != nil { return "", err } if fi.Mode()&os.ModeSymlink == 0 { b.WriteString(p + string(filepath.Separator)) continue } // it's a 
symlink, put it at the front of path dest, err := os.Readlink(fullP) if err != nil { return "", err } if filepath.IsAbs(dest) { b.Reset() } path = dest + string(filepath.Separator) + path } // see note above on "fullP := ..." for why this is double-cleaned and // what's happening here return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil } docker-1.6.2/pkg/symlink/README.md0000644000175000017500000000047312524223634016066 0ustar tianontianonPackage symlink implements EvalSymlinksInScope which is an extension of filepath.EvalSymlinks from the [Go standard library](https://golang.org/pkg/path/filepath). The code from filepath.EvalSymlinks has been adapted in fs.go. Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go. docker-1.6.2/pkg/symlink/fs_test.go0000644000175000017500000002520012524223634016600 0ustar tianontianon// Licensed under the Apache License, Version 2.0; See LICENSE.APACHE package symlink import ( "fmt" "io/ioutil" "os" "path/filepath" "testing" ) type dirOrLink struct { path string target string } func makeFs(tmpdir string, fs []dirOrLink) error { for _, s := range fs { s.path = filepath.Join(tmpdir, s.path) if s.target == "" { os.MkdirAll(s.path, 0755) continue } if err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil { return err } if err := os.Symlink(s.target, s.path); err != nil && !os.IsExist(err) { return err } } return nil } func testSymlink(tmpdir, path, expected, scope string) error { rewrite, err := FollowSymlinkInScope(filepath.Join(tmpdir, path), filepath.Join(tmpdir, scope)) if err != nil { return err } expected, err = filepath.Abs(filepath.Join(tmpdir, expected)) if err != nil { return err } if expected != rewrite { return fmt.Errorf("Expected %q got %q", expected, rewrite) } return nil } func TestFollowSymlinkAbsolute(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkAbsolute") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "testdata/fs/a/d/c/data", "testdata/b/c/data", "testdata"); err != nil { t.Fatal(err) } } func TestFollowSymlinkRelativePath(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/i", target: "a"}}); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "testdata/fs/i", "testdata/fs/a", "testdata"); err != nil { t.Fatal(err) } } func TestFollowSymlinkSkipSymlinksOutsideScope(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSkipSymlinksOutsideScope") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err := makeFs(tmpdir, []dirOrLink{ {path: "linkdir", target: "realdir"}, {path: "linkdir/foo/bar"}, }); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "linkdir/foo/bar", "linkdir/foo/bar", "linkdir/foo"); err != nil { t.Fatal(err) } } func TestFollowSymlinkInvalidScopePathPair(t *testing.T) { if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil { t.Fatal("expected an error") } } func TestFollowSymlinkLastLink(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkLastLink") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "testdata/fs/a/d", "testdata/b", 
"testdata"); err != nil { t.Fatal(err) } } func TestFollowSymlinkRelativeLinkChangeScope(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChangeScope") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/e", target: "../b"}}); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "testdata/fs/a/e/c/data", "testdata/fs/b/c/data", "testdata"); err != nil { t.Fatal(err) } // avoid letting allowing symlink e lead us to ../b // normalize to the "testdata/fs/a" if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil { t.Fatal(err) } } func TestFollowSymlinkDeepRelativeLinkChangeScope(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDeepRelativeLinkChangeScope") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/f", target: "../../../../test"}}); err != nil { t.Fatal(err) } // avoid letting symlink f lead us out of the "testdata" scope // we don't normalize because symlink f is in scope and there is no // information leak if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/test", "testdata"); err != nil { t.Fatal(err) } // avoid letting symlink f lead us out of the "testdata/fs" scope // we don't normalize because symlink f is in scope and there is no // information leak if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/fs/test", "testdata/fs"); err != nil { t.Fatal(err) } } func TestFollowSymlinkRelativeLinkChain(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChain") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) // avoid letting symlink g (pointed at by symlink h) take out of scope // TODO: we should probably normalize to scope here because ../[....]/root // is out of scope and we leak information if err := makeFs(tmpdir, []dirOrLink{ {path: "testdata/fs/b/h", target: "../g"}, {path: "testdata/fs/g", target: "../../../../../../../../../../../../root"}, }); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "testdata/fs/b/h", "testdata/root", "testdata"); err != nil { t.Fatal(err) } } func TestFollowSymlinkBreakoutPath(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutPath") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) // avoid letting symlink -> ../directory/file escape from scope // normalize to "testdata/fs/j" if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/j/k", target: "../i/a"}}); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "testdata/fs/j/k", "testdata/fs/j/i/a", "testdata/fs/j"); err != nil { t.Fatal(err) } } func TestFollowSymlinkToRoot(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkToRoot") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) // make sure we don't allow escaping to / // normalize to dir if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/"}}); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "foo", "", ""); err != nil { t.Fatal(err) } } func TestFollowSymlinkSlashDotdot(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSlashDotdot") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) tmpdir = filepath.Join(tmpdir, "dir", "subdir") // make sure we don't allow escaping to / // normalize to dir if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/../../"}}); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "foo", "", ""); err != nil { 
t.Fatal(err) } } func TestFollowSymlinkDotdot(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDotdot") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) tmpdir = filepath.Join(tmpdir, "dir", "subdir") // make sure we stay in scope without leaking information // this also checks for escaping to / // normalize to dir if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "../../"}}); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "foo", "", ""); err != nil { t.Fatal(err) } } func TestFollowSymlinkRelativePath2(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath2") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err := makeFs(tmpdir, []dirOrLink{{path: "bar/foo", target: "baz/target"}}); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "bar/foo", "bar/baz/target", ""); err != nil { t.Fatal(err) } } func TestFollowSymlinkScopeLink(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkScopeLink") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err := makeFs(tmpdir, []dirOrLink{ {path: "root2"}, {path: "root", target: "root2"}, {path: "root2/foo", target: "../bar"}, }); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "root/foo", "root/bar", "root"); err != nil { t.Fatal(err) } } func TestFollowSymlinkRootScope(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRootScope") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) expected, err := filepath.EvalSymlinks(tmpdir) if err != nil { t.Fatal(err) } rewrite, err := FollowSymlinkInScope(tmpdir, "/") if err != nil { t.Fatal(err) } if rewrite != expected { t.Fatalf("expected %q got %q", expected, rewrite) } } func TestFollowSymlinkEmpty(t *testing.T) { res, err := FollowSymlinkInScope("", "") if err != nil { t.Fatal(err) } wd, err := os.Getwd() if err != nil { t.Fatal(err) } if res != wd { t.Fatalf("expected %q got %q", wd, res) } } func TestFollowSymlinkCircular(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkCircular") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err := makeFs(tmpdir, []dirOrLink{{path: "root/foo", target: "foo"}}); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { t.Fatal("expected an error for foo -> foo") } if err := makeFs(tmpdir, []dirOrLink{ {path: "root/bar", target: "baz"}, {path: "root/baz", target: "../bak"}, {path: "root/bak", target: "/bar"}, }); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { t.Fatal("expected an error for bar -> baz -> bak -> bar") } } func TestFollowSymlinkComplexChainWithTargetPathsContainingLinks(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkComplexChainWithTargetPathsContainingLinks") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err := makeFs(tmpdir, []dirOrLink{ {path: "root2"}, {path: "root", target: "root2"}, {path: "root/a", target: "r/s"}, {path: "root/r", target: "../root/t"}, {path: "root/root/t/s/b", target: "/../u"}, {path: "root/u/c", target: "."}, {path: "root/u/x/y", target: "../v"}, {path: "root/u/v", target: "/../w"}, }); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "root/a/b/c/x/y/z", "root/w/z", "root"); err != nil { t.Fatal(err) } } func TestFollowSymlinkBreakoutNonExistent(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutNonExistent") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err := makeFs(tmpdir, 
[]dirOrLink{ {path: "root/slash", target: "/"}, {path: "root/sym", target: "/idontexist/../slash"}, }); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "root/sym/file", "root/file", "root"); err != nil { t.Fatal(err) } } func TestFollowSymlinkNoLexicalCleaning(t *testing.T) { tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNoLexicalCleaning") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) if err := makeFs(tmpdir, []dirOrLink{ {path: "root/sym", target: "/foo/bar"}, {path: "root/hello", target: "/sym/../baz"}, }); err != nil { t.Fatal(err) } if err := testSymlink(tmpdir, "root/hello", "root/foo/baz", "root"); err != nil { t.Fatal(err) } } docker-1.6.2/pkg/symlink/LICENSE.APACHE0000644000175000017500000002501312524223634016531 0ustar tianontianon Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS Copyright 2014-2015 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
docker-1.6.2/pkg/symlink/LICENSE.BSD0000644000175000017500000000272512524223634016225 0ustar tianontianonCopyright (c) 2014-2015 The Docker & Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
docker-1.6.2/pkg/systemd/0000755000175000017500000000000012524223634014605 5ustar tianontianondocker-1.6.2/pkg/systemd/listendfd.go0000644000175000017500000000153512524223634017114 0ustar tianontianonpackage systemd import ( "errors" "net" "strconv" "github.com/coreos/go-systemd/activation" ) // ListenFD returns the specified socket activated files as a slice of // net.Listeners or all of the activated files if "*" is given. func ListenFD(addr string) ([]net.Listener, error) { // socket activation listeners, err := activation.Listeners(false) if err != nil { return nil, err } if len(listeners) == 0 { return nil, errors.New("No sockets found") } // default to all fds just like unix:// and tcp:// if addr == "" || addr == "*" { return listeners, nil } // Parse the fd number defensively: a malformed or out-of-range value must return an error instead of panicking with an invalid slice index. fdNum, err := strconv.Atoi(addr) if err != nil { return nil, errors.New("provided address is neither '*' nor an fd number") } fdOffset := fdNum - 3 if fdOffset < 0 || len(listeners) < fdOffset+1 { return nil, errors.New("Too few socket activated files passed in") } return []net.Listener{listeners[fdOffset]}, nil } 
docker-1.6.2/pkg/systemd/booted.go0000644000175000017500000000031112524223634016403 0ustar tianontianonpackage systemd import ( "os" ) // Conversion to Go of systemd's sd_booted() func SdBooted() bool { s, err := os.Stat("/run/systemd/system") if err != nil { return false } return s.IsDir() } 
docker-1.6.2/pkg/systemd/sd_notify.go0000644000175000017500000000102612524223634017131 0ustar tianontianonpackage systemd import ( "errors" "net" "os" ) var SdNotifyNoSocket = errors.New("No socket")
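// Illustrative usage sketch, not part of the original tree: wiring the two
// helpers above together so a daemon prefers systemd socket activation and
// falls back to a plain TCP listener. The function name and the fallback
// address ":2375" are assumptions made for this example.
func exampleListener() (net.Listener, error) {
	if SdBooted() {
		// "*" returns every fd handed over by systemd; take the first one.
		if ls, err := ListenFD("*"); err == nil && len(ls) > 0 {
			return ls[0], nil
		}
	}
	return net.Listen("tcp", ":2375")
}
// Send a message to the init daemon.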
It is common to ignore the error. func SdNotify(state string) error { socketAddr := &net.UnixAddr{ Name: os.Getenv("NOTIFY_SOCKET"), Net: "unixgram", } if socketAddr.Name == "" { return SdNotifyNoSocket } conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr) if err != nil { return err } _, err = conn.Write([]byte(state)) if err != nil { return err } return nil } docker-1.6.2/pkg/timeoutconn/0000755000175000017500000000000012524223634015461 5ustar tianontianondocker-1.6.2/pkg/timeoutconn/timeoutconn.go0000644000175000017500000000071312524223634020355 0ustar tianontianonpackage timeoutconn import ( "net" "time" ) func New(netConn net.Conn, timeout time.Duration) net.Conn { return &conn{netConn, timeout} } // A net.Conn that sets a deadline for every Read or Write operation type conn struct { net.Conn timeout time.Duration } func (c *conn) Read(b []byte) (int, error) { if c.timeout > 0 { err := c.Conn.SetReadDeadline(time.Now().Add(c.timeout)) if err != nil { return 0, err } } return c.Conn.Read(b) } docker-1.6.2/pkg/timeoutconn/timeoutconn_test.go0000644000175000017500000000142212524223634021412 0ustar tianontianonpackage timeoutconn import ( "bufio" "fmt" "net" "net/http" "net/http/httptest" "testing" "time" ) func TestRead(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, "hello") })) defer ts.Close() conn, err := net.Dial("tcp", ts.URL[7:]) if err != nil { t.Fatalf("failed to create connection to %q: %v", ts.URL, err) } tconn := New(conn, 1*time.Second) if _, err = bufio.NewReader(tconn).ReadString('\n'); err == nil { t.Fatalf("expected timeout error, got none") } if _, err := fmt.Fprintf(tconn, "GET / HTTP/1.0\r\n\r\n"); err != nil { t.Errorf("unexpected error: %v", err) } if _, err = bufio.NewReader(tconn).ReadString('\n'); err != nil { t.Errorf("unexpected error: %v", err) } } docker-1.6.2/pkg/tailfile/0000755000175000017500000000000012524223634014706 5ustar tianontianondocker-1.6.2/pkg/tailfile/tailfile_test.go0000644000175000017500000000567612524223634020103 0ustar tianontianonpackage tailfile import ( "io/ioutil" "os" "testing" ) func TestTailFile(t *testing.T) { f, err := ioutil.TempFile("", "tail-test") if err != nil { t.Fatal(err) } defer f.Close() defer os.RemoveAll(f.Name()) testFile := []byte(`first line second line third line fourth line fifth line next first line next second line next third line next fourth line next fifth line last first line next first line next second line next third line next fourth line next fifth line next first line next second line next third line next fourth line next fifth line last second line last third line last fourth line last fifth line truncated line`) if _, err := f.Write(testFile); err != nil { t.Fatal(err) } if _, err := f.Seek(0, os.SEEK_SET); err != nil { t.Fatal(err) } expected := []string{"last fourth line", "last fifth line"} res, err := TailFile(f, 2) if err != nil { t.Fatal(err) } for i, l := range res { t.Logf("%s", l) if expected[i] != string(l) { t.Fatalf("Expected line %s, got %s", expected[i], l) } } } func TestTailFileManyLines(t *testing.T) { f, err := ioutil.TempFile("", "tail-test") if err != nil { t.Fatal(err) } defer f.Close() defer os.RemoveAll(f.Name()) testFile := []byte(`first line second line truncated line`) if _, err := f.Write(testFile); err != nil { t.Fatal(err) } if _, err := f.Seek(0, os.SEEK_SET); err != nil { t.Fatal(err) } expected := []string{"first line", "second line"} res, err := TailFile(f, 10000) if err != nil { t.Fatal(err) } 
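// (Descriptive note, not in the original test: asking TailFile for more lines
// than the file holds, 10000 here, returns every complete line; the final
// "truncated line" has no trailing newline, so it is not counted.)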
for i, l := range res { t.Logf("%s", l) if expected[i] != string(l) { t.Fatalf("Expected line %s, got %s", expected[i], l) } } } func TestTailEmptyFile(t *testing.T) { f, err := ioutil.TempFile("", "tail-test") if err != nil { t.Fatal(err) } defer f.Close() defer os.RemoveAll(f.Name()) res, err := TailFile(f, 10000) if err != nil { t.Fatal(err) } if len(res) != 0 { t.Fatal("Must be empty slice from empty file") } } func TestTailNegativeN(t *testing.T) { f, err := ioutil.TempFile("", "tail-test") if err != nil { t.Fatal(err) } defer f.Close() defer os.RemoveAll(f.Name()) testFile := []byte(`first line second line truncated line`) if _, err := f.Write(testFile); err != nil { t.Fatal(err) } if _, err := f.Seek(0, os.SEEK_SET); err != nil { t.Fatal(err) } if _, err := TailFile(f, -1); err != ErrNonPositiveLinesNumber { t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err) } if _, err := TailFile(f, 0); err != ErrNonPositiveLinesNumber { t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err) } } func BenchmarkTail(b *testing.B) { f, err := ioutil.TempFile("", "tail-test") if err != nil { b.Fatal(err) } defer f.Close() defer os.RemoveAll(f.Name()) for i := 0; i < 10000; i++ { if _, err := f.Write([]byte("tailfile pretty interesting line\n")); err != nil { b.Fatal(err) } } b.ResetTimer() for i := 0; i < b.N; i++ { if _, err := TailFile(f, 1000); err != nil { b.Fatal(err) } } } docker-1.6.2/pkg/tailfile/tailfile.go0000644000175000017500000000231212524223634017024 0ustar tianontianonpackage tailfile import ( "bytes" "errors" "os" ) const blockSize = 1024 var eol = []byte("\n") var ErrNonPositiveLinesNumber = errors.New("Lines number must be positive") //TailFile returns last n lines of file f func TailFile(f *os.File, n int) ([][]byte, error) { if n <= 0 { return nil, ErrNonPositiveLinesNumber } size, err := f.Seek(0, os.SEEK_END) if err != nil { return nil, err } block := -1 var data []byte var cnt int for { var b []byte step := int64(block * blockSize) left := size + step // how many bytes to beginning if left < 0 { if _, err := f.Seek(0, os.SEEK_SET); err != nil { return nil, err } b = make([]byte, blockSize+left) if _, err := f.Read(b); err != nil { return nil, err } data = append(b, data...) break } else { b = make([]byte, blockSize) if _, err := f.Seek(step, os.SEEK_END); err != nil { return nil, err } if _, err := f.Read(b); err != nil { return nil, err } data = append(b, data...) } cnt += bytes.Count(b, eol) if cnt > n { break } block-- } lines := bytes.Split(data, eol) if n < len(lines) { return lines[len(lines)-n-1 : len(lines)-1], nil } return lines[:len(lines)-1], nil } docker-1.6.2/pkg/fileutils/0000755000175000017500000000000012524223634015115 5ustar tianontianondocker-1.6.2/pkg/fileutils/fileutils.go0000644000175000017500000000124712524223634017450 0ustar tianontianonpackage fileutils import ( log "github.com/Sirupsen/logrus" "path/filepath" ) // Matches returns true if relFilePath matches any of the patterns func Matches(relFilePath string, patterns []string) (bool, error) { for _, exclude := range patterns { matched, err := filepath.Match(exclude, relFilePath) if err != nil { log.Errorf("Error matching: %s (pattern: %s)", relFilePath, exclude) return false, err } if matched { if filepath.Clean(relFilePath) == "." 
{ log.Errorf("Can't exclude whole path, excluding pattern: %s", exclude) continue } log.Debugf("Skipping excluded path: %s", relFilePath) return true, nil } } return false, nil } 
docker-1.6.2/pkg/reexec/0000755000175000017500000000000012524223634014370 5ustar tianontianondocker-1.6.2/pkg/reexec/README.md0000644000175000017500000000044112524223634015646 0ustar tianontianon## reexec The `reexec` package facilitates the busybox-style re-exec of the docker binary that is required because of the forking limitations of Go. Handlers can be registered under a name, and the argv[0] of the re-exec'd binary is used to find and execute the matching initializer. 
docker-1.6.2/pkg/reexec/command_unsupported.go0000644000175000017500000000016012524223634021002 0ustar tianontianon// +build !linux package reexec import ( "os/exec" ) func Command(args ...string) *exec.Cmd { return nil } 
docker-1.6.2/pkg/reexec/reexec.go0000644000175000017500000000214712524223634016176 0ustar tianontianonpackage reexec import ( "fmt" "os" "os/exec" "path/filepath" ) var registeredInitializers = make(map[string]func()) // Register adds an initialization func under the specified name func Register(name string, initializer func()) { if _, exists := registeredInitializers[name]; exists { panic(fmt.Sprintf("reexec func already registered under name %q", name)) } registeredInitializers[name] = initializer } // Init is called as the first part of the exec process and returns true if an // initialization function was called. func Init() bool { initializer, exists := registeredInitializers[os.Args[0]] if exists { initializer() return true } return false } // Self returns the path to the current process's binary func Self() string { name := os.Args[0] if filepath.Base(name) == name { if lp, err := exec.LookPath(name); err == nil { return lp } } // handle conversion of relative paths to absolute if absName, err := filepath.Abs(name); err == nil { return absName } // if we couldn't get an absolute name, return the original // (NOTE: Go only errors on Abs() if os.Getwd fails) return name } 
docker-1.6.2/pkg/reexec/command_linux.go0000644000175000017500000000035312524223634017555 0ustar tianontianon// +build linux package reexec import ( "os/exec" "syscall" ) func Command(args ...string) *exec.Cmd { return &exec.Cmd{ Path: Self(), Args: args, SysProcAttr: &syscall.SysProcAttr{ Pdeathsig: syscall.SIGTERM, }, } } 
docker-1.6.2/pkg/broadcastwriter/0000755000175000017500000000000012524223634016314 5ustar tianontianondocker-1.6.2/pkg/broadcastwriter/broadcastwriter.go0000644000175000017500000000450412524223634022045 0ustar tianontianonpackage broadcastwriter import ( "bytes" "io" "sync" "time" log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/jsonlog" ) // BroadcastWriter broadcasts writes to multiple io.WriteClosers, grouped by stream. type BroadcastWriter struct { sync.Mutex buf *bytes.Buffer jsLogBuf *bytes.Buffer streams map[string](map[io.WriteCloser]struct{}) } // AddWriter adds a new io.WriteCloser for the given stream. // If stream is "", then all writes proceed as is. Otherwise every line from // the input will be packed into a serialized jsonlog.JSONLog. func (w *BroadcastWriter) AddWriter(writer io.WriteCloser, stream string) { w.Lock() if _, ok := w.streams[stream]; !ok { w.streams[stream] = make(map[io.WriteCloser]struct{}) } w.streams[stream][writer] = struct{}{} w.Unlock() }
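// Illustrative usage sketch, not part of the original file: fanning one write
// out to a raw sink and a JSON-encoded per-stream sink. The function and
// parameter names are invented for this example.
func exampleFanOut(raw, jsonSink io.WriteCloser) {
	w := New()
	w.AddWriter(raw, "")            // "" stream: bytes pass through unchanged
	w.AddWriter(jsonSink, "stdout") // named stream: line-wise jsonlog.JSONLog records
	w.Write([]byte("hello\n"))      // broadcast one log line to both sinks
	w.Clean()                       // close and drop all registered writers
}
// Write writes bytes to all writers. Failed writers will be evicted during
// this call.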
func (w *BroadcastWriter) Write(p []byte) (n int, err error) { created := time.Now().UTC() w.Lock() if writers, ok := w.streams[""]; ok { for sw := range writers { if n, err := sw.Write(p); err != nil || n != len(p) { // On error, evict the writer delete(writers, sw) } } } if w.jsLogBuf == nil { w.jsLogBuf = new(bytes.Buffer) w.jsLogBuf.Grow(1024) } w.buf.Write(p) for { line, err := w.buf.ReadString('\n') if err != nil { w.buf.WriteString(line) break } for stream, writers := range w.streams { if stream == "" { continue } jsonLog := jsonlog.JSONLog{Log: line, Stream: stream, Created: created} err = jsonLog.MarshalJSONBuf(w.jsLogBuf) if err != nil { log.Errorf("Error making JSON log line: %s", err) continue } w.jsLogBuf.WriteByte('\n') b := w.jsLogBuf.Bytes() for sw := range writers { if _, err := sw.Write(b); err != nil { delete(writers, sw) } } } w.jsLogBuf.Reset() } w.jsLogBuf.Reset() w.Unlock() return len(p), nil } // Clean closes and removes all writers. Last non-eol-terminated part of data // will be saved. func (w *BroadcastWriter) Clean() error { w.Lock() for _, writers := range w.streams { for w := range writers { w.Close() } } w.streams = make(map[string](map[io.WriteCloser]struct{})) w.Unlock() return nil } func New() *BroadcastWriter { return &BroadcastWriter{ streams: make(map[string](map[io.WriteCloser]struct{})), buf: bytes.NewBuffer(nil), } } docker-1.6.2/pkg/broadcastwriter/broadcastwriter_test.go0000644000175000017500000000607412524223634023110 0ustar tianontianonpackage broadcastwriter import ( "bytes" "errors" "testing" ) type dummyWriter struct { buffer bytes.Buffer failOnWrite bool } func (dw *dummyWriter) Write(p []byte) (n int, err error) { if dw.failOnWrite { return 0, errors.New("Fake fail") } return dw.buffer.Write(p) } func (dw *dummyWriter) String() string { return dw.buffer.String() } func (dw *dummyWriter) Close() error { return nil } func TestBroadcastWriter(t *testing.T) { writer := New() // Test 1: Both bufferA and bufferB should contain "foo" bufferA := &dummyWriter{} writer.AddWriter(bufferA, "") bufferB := &dummyWriter{} writer.AddWriter(bufferB, "") writer.Write([]byte("foo")) if bufferA.String() != "foo" { t.Errorf("Buffer contains %v", bufferA.String()) } if bufferB.String() != "foo" { t.Errorf("Buffer contains %v", bufferB.String()) } // Test2: bufferA and bufferB should contain "foobar", // while bufferC should only contain "bar" bufferC := &dummyWriter{} writer.AddWriter(bufferC, "") writer.Write([]byte("bar")) if bufferA.String() != "foobar" { t.Errorf("Buffer contains %v", bufferA.String()) } if bufferB.String() != "foobar" { t.Errorf("Buffer contains %v", bufferB.String()) } if bufferC.String() != "bar" { t.Errorf("Buffer contains %v", bufferC.String()) } // Test3: Test eviction on failure bufferA.failOnWrite = true writer.Write([]byte("fail")) if bufferA.String() != "foobar" { t.Errorf("Buffer contains %v", bufferA.String()) } if bufferC.String() != "barfail" { t.Errorf("Buffer contains %v", bufferC.String()) } // Even though we reset the flag, no more writes should go in there bufferA.failOnWrite = false writer.Write([]byte("test")) if bufferA.String() != "foobar" { t.Errorf("Buffer contains %v", bufferA.String()) } if bufferC.String() != "barfailtest" { t.Errorf("Buffer contains %v", bufferC.String()) } writer.Clean() } type devNullCloser int func (d devNullCloser) Close() error { return nil } func (d devNullCloser) Write(buf []byte) (int, error) { return len(buf), nil } // This test checks for races. 
It is only useful when run with the race detector. func TestRaceBroadcastWriter(t *testing.T) { writer := New() c := make(chan bool) go func() { writer.AddWriter(devNullCloser(0), "") c <- true }() writer.Write([]byte("hello")) <-c } func BenchmarkBroadcastWriter(b *testing.B) { writer := New() setUpWriter := func() { for i := 0; i < 100; i++ { writer.AddWriter(devNullCloser(0), "stdout") writer.AddWriter(devNullCloser(0), "stderr") writer.AddWriter(devNullCloser(0), "") } } testLine := "Line that thinks that it is log line from docker" var buf bytes.Buffer for i := 0; i < 100; i++ { buf.Write([]byte(testLine + "\n")) } // line without eol buf.Write([]byte(testLine)) testText := buf.Bytes() b.SetBytes(int64(5 * len(testText))) b.ResetTimer() for i := 0; i < b.N; i++ { b.StopTimer() setUpWriter() b.StartTimer() for j := 0; j < 5; j++ { if _, err := writer.Write(testText); err != nil { b.Fatal(err) } } b.StopTimer() writer.Clean() b.StartTimer() } } 
docker-1.6.2/pkg/chrootarchive/0000755000175000017500000000000012524223634015755 5ustar tianontianondocker-1.6.2/pkg/chrootarchive/archive.go0000644000175000017500000000565112524223634017734 0ustar tianontianonpackage chrootarchive import ( "encoding/json" "flag" "fmt" "io" "os" "path/filepath" "runtime" "syscall" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/reexec" ) var chrootArchiver = &archive.Archiver{Untar: Untar} func chroot(path string) error { if err := syscall.Chroot(path); err != nil { return err } return syscall.Chdir("/") } func untar() { runtime.LockOSThread() flag.Parse() var options *archive.TarOptions if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { fatal(err) } if err := chroot(flag.Arg(0)); err != nil { fatal(err) } if err := archive.Unpack(os.Stdin, "/", options); err != nil { fatal(err) } // fully consume stdin in case it is zero padded flush(os.Stdin) os.Exit(0) } func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } if options == nil { options = &archive.TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } dest = filepath.Clean(dest) if _, err := os.Stat(dest); os.IsNotExist(err) { if err := os.MkdirAll(dest, 0777); err != nil { return err } } // We can't pass the exclude list directly on the command line because we can easily overrun the kernel's maximum argument list length when the full image list is passed (e.g. when this is used by `docker load`). Instead the options are JSON-marshalled and passed in the environment, which has a significantly larger size limit. data, err := json.Marshal(options) if err != nil { return fmt.Errorf("Untar json encode: %v", err) } decompressedArchive, err := archive.DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() cmd := reexec.Command("docker-untar", dest) cmd.Stdin = decompressedArchive cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) out, err := cmd.CombinedOutput() if err != nil { return fmt.Errorf("Untar %s %s", err, out) } return nil } func TarUntar(src, dst string) error { return chrootArchiver.TarUntar(src, dst) }
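// Illustrative sketch, not part of the original tree: what the OPT environment
// round trip above boils down to. The exclude pattern is an example value only.
func exampleOptRoundTrip() error {
	opts := &archive.TarOptions{ExcludePatterns: []string{"tmp/*"}}
	data, err := json.Marshal(opts) // parent side: serialize the options
	if err != nil {
		return err
	}
	var decoded *archive.TarOptions // child side (docker-untar): decode them back
	return json.Unmarshal(data, &decoded)
}
// CopyWithTar creates a tar archive of filesystem path `src`, and unpacks it at
// filesystem path `dst`. The archive is streamed directly with fixed buffering
// and no intermediary disk IO.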
func CopyWithTar(src, dst string) error { return chrootArchiver.CopyWithTar(src, dst) } // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. // // If `dst` ends with a trailing slash '/', the final destination path // will be `dst/base(src)`. func CopyFileWithTar(src, dst string) (err error) { return chrootArchiver.CopyFileWithTar(src, dst) } // UntarPath is a convenience function which looks for an archive // at filesystem path `src`, and unpacks it at `dst`. func UntarPath(src, dst string) error { return chrootArchiver.UntarPath(src, dst) } docker-1.6.2/pkg/chrootarchive/diff.go0000644000175000017500000000340512524223634017216 0ustar tianontianonpackage chrootarchive import ( "bytes" "encoding/json" "flag" "fmt" "io/ioutil" "os" "path/filepath" "runtime" "syscall" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/reexec" ) type applyLayerResponse struct { LayerSize int64 `json:"layerSize"` } func applyLayer() { runtime.LockOSThread() flag.Parse() if err := chroot(flag.Arg(0)); err != nil { fatal(err) } // We need to be able to set any perms oldmask := syscall.Umask(0) defer syscall.Umask(oldmask) tmpDir, err := ioutil.TempDir("/", "temp-docker-extract") if err != nil { fatal(err) } os.Setenv("TMPDIR", tmpDir) size, err := archive.UnpackLayer("/", os.Stdin) os.RemoveAll(tmpDir) if err != nil { fatal(err) } encoder := json.NewEncoder(os.Stdout) if err := encoder.Encode(applyLayerResponse{size}); err != nil { fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) } flush(os.Stdout) flush(os.Stdin) os.Exit(0) } func ApplyLayer(dest string, layer archive.ArchiveReader) (size int64, err error) { dest = filepath.Clean(dest) decompressed, err := archive.DecompressStream(layer) if err != nil { return 0, err } defer decompressed.Close() cmd := reexec.Command("docker-applyLayer", dest) cmd.Stdin = decompressed outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) cmd.Stdout, cmd.Stderr = outBuf, errBuf if err = cmd.Run(); err != nil { return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) } // Stdout should be a valid JSON struct representing an applyLayerResponse. 
response := applyLayerResponse{} decoder := json.NewDecoder(outBuf) if err = decoder.Decode(&response); err != nil { return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) } return response.LayerSize, nil } docker-1.6.2/pkg/chrootarchive/init.go0000644000175000017500000000063212524223634017250 0ustar tianontianonpackage chrootarchive import ( "fmt" "io" "io/ioutil" "os" "github.com/docker/docker/pkg/reexec" ) func init() { reexec.Register("docker-untar", untar) reexec.Register("docker-applyLayer", applyLayer) } func fatal(err error) { fmt.Fprint(os.Stderr, err) os.Exit(1) } // flush consumes all the bytes from the reader discarding // any errors func flush(r io.Reader) { io.Copy(ioutil.Discard, r) } docker-1.6.2/pkg/chrootarchive/archive_test.go0000644000175000017500000000577312524223634021000 0ustar tianontianonpackage chrootarchive import ( "io" "io/ioutil" "os" "path/filepath" "testing" "time" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/reexec" ) func init() { reexec.Init() } func TestChrootTarUntar(t *testing.T) { tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntar") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") if err := os.MkdirAll(src, 0700); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(src, "lolo"), []byte("hello lolo"), 0644); err != nil { t.Fatal(err) } stream, err := archive.Tar(src, archive.Uncompressed) if err != nil { t.Fatal(err) } dest := filepath.Join(tmpdir, "src") if err := os.MkdirAll(dest, 0700); err != nil { t.Fatal(err) } if err := Untar(stream, dest, &archive.TarOptions{ExcludePatterns: []string{"lolo"}}); err != nil { t.Fatal(err) } } type slowEmptyTarReader struct { size int offset int chunkSize int } // Read is a slow reader of an empty tar (like the output of "tar c --files-from /dev/null") func (s *slowEmptyTarReader) Read(p []byte) (int, error) { time.Sleep(100 * time.Millisecond) count := s.chunkSize if len(p) < s.chunkSize { count = len(p) } for i := 0; i < count; i++ { p[i] = 0 } s.offset += count if s.offset > s.size { return count, io.EOF } return count, nil } func TestChrootUntarEmptyArchiveFromSlowReader(t *testing.T) { tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchiveFromSlowReader") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) dest := filepath.Join(tmpdir, "dest") if err := os.MkdirAll(dest, 0700); err != nil { t.Fatal(err) } stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} if err := Untar(stream, dest, nil); err != nil { t.Fatal(err) } } func TestChrootApplyEmptyArchiveFromSlowReader(t *testing.T) { tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyEmptyArchiveFromSlowReader") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) dest := filepath.Join(tmpdir, "dest") if err := os.MkdirAll(dest, 0700); err != nil { t.Fatal(err) } stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} if _, err := ApplyLayer(dest, stream); err != nil { t.Fatal(err) } } func TestChrootApplyDotDotFile(t *testing.T) { tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyDotDotFile") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") if err := os.MkdirAll(src, 0700); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(src, "..gitme"), []byte(""), 0644); err != nil { t.Fatal(err) } stream, err := archive.Tar(src, 
archive.Uncompressed) if err != nil { t.Fatal(err) } dest := filepath.Join(tmpdir, "dest") if err := os.MkdirAll(dest, 0700); err != nil { t.Fatal(err) } if _, err := ApplyLayer(dest, stream); err != nil { t.Fatal(err) } } docker-1.6.2/pkg/archive/0000755000175000017500000000000012524223634014536 5ustar tianontianondocker-1.6.2/pkg/archive/archive.go0000644000175000017500000005352712524223634016522 0ustar tianontianonpackage archive import ( "bufio" "bytes" "compress/bzip2" "compress/gzip" "errors" "fmt" "io" "io/ioutil" "os" "os/exec" "path" "path/filepath" "strings" "syscall" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/system" ) type ( Archive io.ReadCloser ArchiveReader io.Reader Compression int TarOptions struct { IncludeFiles []string ExcludePatterns []string Compression Compression NoLchown bool Name string } // Archiver allows the reuse of most utility functions of this package // with a pluggable Untar function. Archiver struct { Untar func(io.Reader, string, *TarOptions) error } // breakoutError is used to differentiate errors related to breaking out // When testing archive breakout in the unit tests, this error is expected // in order for the test to pass. breakoutError error ) var ( ErrNotImplemented = errors.New("Function not implemented") defaultArchiver = &Archiver{Untar} ) const ( Uncompressed Compression = iota Bzip2 Gzip Xz ) func IsArchive(header []byte) bool { compression := DetectCompression(header) if compression != Uncompressed { return true } r := tar.NewReader(bytes.NewBuffer(header)) _, err := r.Next() return err == nil } func DetectCompression(source []byte) Compression { for compression, m := range map[Compression][]byte{ Bzip2: {0x42, 0x5A, 0x68}, Gzip: {0x1F, 0x8B, 0x08}, Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, } { if len(source) < len(m) { log.Debugf("Len too short") continue } if bytes.Compare(m, source[:len(m)]) == 0 { return compression } } return Uncompressed } func xzDecompress(archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} return CmdStream(exec.Command(args[0], args[1:]...), archive) } func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) if err != nil { return nil, err } compression := DetectCompression(bs) switch compression { case Uncompressed: readBufWrapper := p.NewReadCloserWrapper(buf, buf) return readBufWrapper, nil case Gzip: gzReader, err := gzip.NewReader(buf) if err != nil { return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) return readBufWrapper, nil case Bzip2: bz2Reader := bzip2.NewReader(buf) readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) return readBufWrapper, nil case Xz: xzReader, err := xzDecompress(buf) if err != nil { return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) return readBufWrapper, nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) switch compression { case Uncompressed: writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) return writeBufWrapper, nil case Gzip: gzWriter := gzip.NewWriter(dest) writeBufWrapper := p.NewWriteCloserWrapper(buf, 
gzWriter) return writeBufWrapper, nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all. However, this is not a problem as docker only currently generates gzipped tars return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } func (compression *Compression) Extension() string { switch *compression { case Uncompressed: return "tar" case Bzip2: return "tar.bz2" case Gzip: return "tar.gz" case Xz: return "tar.xz" } return "" } type tarAppender struct { TarWriter *tar.Writer Buffer *bufio.Writer // for hardlink mapping SeenFiles map[uint64]string } // canonicalTarName provides a platform-independent and consistent posix-style // path for files and directories to be archived regardless of the platform. func canonicalTarName(name string, isDir bool) (string, error) { name, err := CanonicalTarNameForPath(name) if err != nil { return "", err } // suffix with '/' for directories if isDir && !strings.HasSuffix(name, "/") { name += "/" } return name, nil } func (ta *tarAppender) addTarFile(path, name string) error { fi, err := os.Lstat(path) if err != nil { return err } link := "" if fi.Mode()&os.ModeSymlink != 0 { if link, err = os.Readlink(path); err != nil { return err } } hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return err } hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) name, err = canonicalTarName(name, fi.IsDir()) if err != nil { return fmt.Errorf("tar: cannot canonicalize path: %v", err) } hdr.Name = name nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) if err != nil { return err } // if it's a regular file and has more than 1 link, it's hardlinked, so set the type flag accordingly if fi.Mode().IsRegular() && nlink > 1 { // a hard link should point to a name that it links to, and that linked name should appear first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { hdr.Typeflag = tar.TypeLink hdr.Linkname = oldpath hdr.Size = 0 // This must be here for the writer math to add up! } else { ta.SeenFiles[inode] = name } } capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { hdr.Xattrs = make(map[string]string) hdr.Xattrs["security.capability"] = string(capability) } if err := ta.TarWriter.WriteHeader(hdr); err != nil { return err } if hdr.Typeflag == tar.TypeReg { file, err := os.Open(path) if err != nil { return err } ta.Buffer.Reset(ta.TarWriter) defer ta.Buffer.Reset(nil) _, err = io.Copy(ta.Buffer, file) file.Close() if err != nil { return err } err = ta.Buffer.Flush() if err != nil { return err } } return nil } func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool) error { // hdr.Mode is in linux format, which we can use for syscalls, but for os.Foo() calls we need the mode converted to os.FileMode, so use hdrInfo.Mode() (they differ for e.g. setuid bits) hdrInfo := hdr.FileInfo() switch hdr.Typeflag { case tar.TypeDir: // Create directory unless it exists as a directory already.
// In that case we just want to merge the two if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { return err } } case tar.TypeReg, tar.TypeRegA: // Source is regular file file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) if err != nil { return err } if _, err := io.Copy(file, reader); err != nil { file.Close() return err } file.Close() case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: mode := uint32(hdr.Mode & 07777) switch hdr.Typeflag { case tar.TypeBlock: mode |= syscall.S_IFBLK case tar.TypeChar: mode |= syscall.S_IFCHR case tar.TypeFifo: mode |= syscall.S_IFIFO } if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { return err } case tar.TypeLink: targetPath := filepath.Join(extractDir, hdr.Linkname) // check for hardlink breakout if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) } if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: // path -> hdr.Linkname = targetPath // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because that symlink would first have to be created, which would be caught earlier, at this very check: if !strings.HasPrefix(targetPath, extractDir) { return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } case tar.TypeXGlobalHeader: log.Debugf("PAX Global Extended Headers found and ignored") return nil default: return fmt.Errorf("Unhandled tar header type %d", hdr.Typeflag) } if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil && Lchown { return err } for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { return err } } // There is no LChmod, so ignore mode for symlink. Also, this must happen after chown, as that can modify the file mode if hdr.Typeflag != tar.TypeSymlink { if err := os.Chmod(path, hdrInfo.Mode()); err != nil { return err } } ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} // syscall.UtimesNano doesn't support a NOFOLLOW flag at the moment, so symlinks need LUtimesNano instead if hdr.Typeflag != tar.TypeSymlink { if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } } else { if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } } return nil } // Tar creates an archive from the directory at `path`, and returns it as a stream of bytes. func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } func escapeName(name string) string { escaped := make([]byte, 0) for i, c := range []byte(name) { if i == 0 && c == '/' { continue } // all printable chars except "-" which is 0x2d if (0x20 <= c && c <= 0x7E) && c != 0x2d { escaped = append(escaped, c) } else { escaped = append(escaped, fmt.Sprintf("\\%03o", c)...) } } return string(escaped) }
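// Illustrative sketch, not part of the original tree: a typical TarWithOptions
// call. The paths and patterns are example values only.
func exampleTarWithOptions() (io.ReadCloser, error) {
	return TarWithOptions("/var/lib/app", &TarOptions{
		Compression:     Gzip,
		IncludeFiles:    []string{"."},            // archive everything under the root...
		ExcludePatterns: []string{"*.log", "tmp"}, // ...except logs and the tmp dir
	})
}
// TarWithOptions creates an archive from the directory at `path`, only including
// files whose relative paths are included in `options.IncludeFiles` (if non-nil)
// or not in `options.ExcludePatterns`.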
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) if err != nil { return nil, err } go func() { ta := &tarAppender{ TarWriter: tar.NewWriter(compressWriter), Buffer: pools.BufioWriter32KPool.Get(nil), SeenFiles: make(map[uint64]string), } // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) // In general we log errors here but ignore them, because during e.g. a diff operation the container can continue mutating the filesystem and we can see transient errors from this if options.IncludeFiles == nil { options.IncludeFiles = []string{"."} } seen := make(map[string]bool) var renamedRelFilePath string // For when tar.Options.Name is set for _, include := range options.IncludeFiles { filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error { if err != nil { log.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } relFilePath, err := filepath.Rel(srcPath, filePath) if err != nil || (relFilePath == "." && f.IsDir()) { // Error getting relative path OR we are looking at the root path. Skip in both situations. return nil } skip := false // If "include" is an exact match for the current file then even if there's an "excludePatterns" pattern that matches it, don't skip it. IOW, assume an explicit 'include' is asking for that file no matter what - which is true for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { skip, err = fileutils.Matches(relFilePath, options.ExcludePatterns) if err != nil { log.Debugf("Error matching %s: %v", relFilePath, err) return err } } if skip { if f.IsDir() { return filepath.SkipDir } return nil } if seen[relFilePath] { return nil } seen[relFilePath] = true // Rename the base resource if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) { renamedRelFilePath = relFilePath } // Set this to make sure the items underneath also get renamed if options.Name != "" { relFilePath = strings.Replace(relFilePath, renamedRelFilePath, options.Name, 1) } if err := ta.addTarFile(filePath, relFilePath); err != nil { log.Debugf("Can't add file %s to tar: %s", filePath, err) } return nil }) } // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { log.Debugf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { log.Debugf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { log.Debugf("Can't close pipe writer: %s", err) } }() return pipeReader, nil } func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) trBuf := pools.BufioReader32KPool.Get(nil) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header
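// Illustrative aside, not from the original tree: the breakout check in the
// loop below reduces to filepath.Rel plus a "../" prefix test. With example
// values:
//
//	dest := "/var/lib/docker/unpack"
//	path := filepath.Join(dest, "../../etc/passwd") // "/var/lib/etc/passwd"
//	rel, _ := filepath.Rel(dest, path)              // "../../etc/passwd"
//	strings.HasPrefix(rel, "../")                   // true -> breakoutError
//
// Iterate through the files in the archive.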
loop: for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return err } // Normalize name, for safety and for a simple is-root check // This keeps "../" as-is, but normalizes "/../" to "/" hdr.Name = filepath.Clean(hdr.Name) for _, exclude := range options.ExcludePatterns { if strings.HasPrefix(hdr.Name, exclude) { continue loop } } if !strings.HasSuffix(hdr.Name, "/") { // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = os.MkdirAll(parentPath, 0777) if err != nil { return err } } } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return err } if strings.HasPrefix(rel, "../") { return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } // If the path exists we almost always just want to remove and replace it. The only exception is when it is a directory *and* the file from the layer is also a directory. Then we want to merge them (i.e. just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if fi.IsDir() && hdr.Name == "." { continue } if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return err } } } trBuf.Reset(tr) if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown); err != nil { return err } // Directory mtimes must be handled at the end to keep file creation inside them from modifying the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} if err := syscall.UtimesNano(path, ts); err != nil { return err } } return nil } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: identity (uncompressed), gzip, bzip2, xz. // FIXME: specify behavior when target path exists vs. doesn't exist. func Untar(archive io.Reader, dest string, options *TarOptions) error { if archive == nil { return fmt.Errorf("Empty archive") } dest = filepath.Clean(dest) if options == nil { options = &TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } decompressedArchive, err := DecompressStream(archive) if err != nil { return err } defer decompressedArchive.Close() return Unpack(decompressedArchive, dest, options) } func (archiver *Archiver) TarUntar(src, dst string) error { log.Debugf("TarUntar(%s %s)", src, dst) archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err } defer archive.Close() return archiver.Untar(archive, dst, nil) } // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. If either Tar or Untar fails, TarUntar aborts and returns the error. func TarUntar(src, dst string) error { return defaultArchiver.TarUntar(src, dst) } func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) if err != nil { return err } defer archive.Close() if err := archiver.Untar(archive, dst, nil); err != nil { return err } return nil }
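// Illustrative sketch, not part of the original tree: unpacking a compressed
// tarball with Untar. The paths and exclude pattern are example values.
func exampleUntar() error {
	f, err := os.Open("/tmp/layer.tar.gz")
	if err != nil {
		return err
	}
	defer f.Close()
	// DecompressStream (called inside Untar) sniffs the gzip header for us.
	return Untar(f, "/tmp/out", &TarOptions{ExcludePatterns: []string{"dev/"}})
}
// UntarPath is a convenience function which looks for an archive at filesystem
// path `src`, and unpacks it at `dst`.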
func UntarPath(src, dst string) error { return defaultArchiver.UntarPath(src, dst) } func (archiver *Archiver) CopyWithTar(src, dst string) error { srcSt, err := os.Stat(src) if err != nil { return err } if !srcSt.IsDir() { return archiver.CopyFileWithTar(src, dst) } // Create dst, copy src's content into it log.Debugf("Creating dest directory: %s", dst) if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) { return err } log.Debugf("Calling TarUntar(%s, %s)", src, dst) return archiver.TarUntar(src, dst) } // CopyWithTar creates a tar archive of filesystem path `src`, and unpacks it at filesystem path `dst`. The archive is streamed directly with fixed buffering and no intermediary disk IO. func CopyWithTar(src, dst string) error { return defaultArchiver.CopyWithTar(src, dst) } func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { log.Debugf("CopyFileWithTar(%s, %s)", src, dst) srcSt, err := os.Stat(src) if err != nil { return err } if srcSt.IsDir() { return fmt.Errorf("Can't copy a directory") } // Clean up the trailing / if dst[len(dst)-1] == '/' { dst = path.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) { return err } r, w := io.Pipe() errC := promise.Go(func() error { defer w.Close() srcF, err := os.Open(src) if err != nil { return err } defer srcF.Close() hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) tw := tar.NewWriter(w) defer tw.Close() if err := tw.WriteHeader(hdr); err != nil { return err } if _, err := io.Copy(tw, srcF); err != nil { return err } return nil }) defer func() { // check the goroutine's error, not the outer err, so a tar failure is reported if er := <-errC; er != nil { err = er } }() return archiver.Untar(r, filepath.Dir(dst), nil) } // CopyFileWithTar emulates the behavior of the 'cp' command-line for a single file. It copies a regular file from path `src` to path `dst`, and preserves all its metadata. // // If `dst` ends with a trailing slash '/', the final destination path will be `dst/base(src)`. func CopyFileWithTar(src, dst string) (err error) { return defaultArchiver.CopyFileWithTar(src, dst) }
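// Illustrative sketch, not part of the original tree: CmdStream used the same
// way xzDecompress uses it above; "gzip -d -c" is just an example command.
func exampleCmdStream(in io.Reader) (io.ReadCloser, error) {
	// The returned ReadCloser yields the command's stdout; if the command
	// fails, anything it wrote to stderr is folded into the error.
	return CmdStream(exec.Command("gzip", "-d", "-c"), in)
}
// CmdStream executes a command, and returns its stdout as a stream. If the
// command fails to run or doesn't complete successfully, an error will be
// returned, including anything written on stderr.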
func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { if input != nil { stdin, err := cmd.StdinPipe() if err != nil { return nil, err } // Write stdin if any go func() { io.Copy(stdin, input) stdin.Close() }() } stdout, err := cmd.StdoutPipe() if err != nil { return nil, err } stderr, err := cmd.StderrPipe() if err != nil { return nil, err } pipeR, pipeW := io.Pipe() errChan := make(chan []byte) // Collect stderr, we will use it in case of an error go func() { errText, e := ioutil.ReadAll(stderr) if e != nil { errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")") } errChan <- errText }() // Copy stdout to the returned pipe go func() { _, err := io.Copy(pipeW, stdout) if err != nil { pipeW.CloseWithError(err) } errText := <-errChan if err := cmd.Wait(); err != nil { pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText)) } else { pipeW.Close() } }() // Run the command and return the pipe if err := cmd.Start(); err != nil { return nil, err } return pipeR, nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents // of that file as an archive. The archive can only be read once - as soon as reading completes, // the file will be deleted. func NewTempArchive(src Archive, dir string) (*TempArchive, error) { f, err := ioutil.TempFile(dir, "") if err != nil { return nil, err } if _, err := io.Copy(f, src); err != nil { return nil, err } if err = f.Sync(); err != nil { return nil, err } if _, err := f.Seek(0, 0); err != nil { return nil, err } st, err := f.Stat() if err != nil { return nil, err } size := st.Size() return &TempArchive{File: f, Size: size}, nil } type TempArchive struct { *os.File Size int64 // Pre-computed from Stat().Size() as a convenience read int64 closed bool } // Close closes the underlying file if it's still open, or does a no-op // to allow callers to try to close the TempArchive multiple times safely. func (archive *TempArchive) Close() error { if archive.closed { return nil } archive.closed = true return archive.File.Close() } func (archive *TempArchive) Read(data []byte) (int, error) { n, err := archive.File.Read(data) archive.read += int64(n) if err != nil || archive.read == archive.Size { archive.Close() os.Remove(archive.File.Name()) } return n, err } docker-1.6.2/pkg/archive/diff.go0000644000175000017500000001125712524223634016003 0ustar tianontianonpackage archive import ( "fmt" "io" "io/ioutil" "os" "path/filepath" "strings" "syscall" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" ) func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) { tr := tar.NewReader(layer) trBuf := pools.BufioReader32KPool.Get(tr) defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header aufsTempdir := "" aufsHardlinks := make(map[string]*tar.Header) // Iterate through the files in the archive. for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { return 0, err } size += hdr.Size // Normalize name, for safety and for a simple is-root check hdr.Name = filepath.Clean(hdr.Name) if !strings.HasSuffix(hdr.Name, "/") { // Not the root directory, ensure that the parent directory exists. // This happened in some tests where an image had a tarfile without any // parent directories. 
parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { err = os.MkdirAll(parentPath, 0600) if err != nil { return 0, err } } } // Skip AUFS metadata dirs if strings.HasPrefix(hdr.Name, ".wh..wh.") { // Regular files inside /.wh..wh.plnk can be used as hardlink targets. We don't want this directory, but we need the files in it so that such hardlinks can be resolved. if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg { basename := filepath.Base(hdr.Name) aufsHardlinks[basename] = hdr if aufsTempdir == "" { if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { return 0, err } defer os.RemoveAll(aufsTempdir) } if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true); err != nil { return 0, err } } continue } path := filepath.Join(dest, hdr.Name) rel, err := filepath.Rel(dest, path) if err != nil { return 0, err } if strings.HasPrefix(rel, "../") { return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) } base := filepath.Base(path) if strings.HasPrefix(base, ".wh.") { originalBase := base[len(".wh."):] originalPath := filepath.Join(filepath.Dir(path), originalBase) if err := os.RemoveAll(originalPath); err != nil { return 0, err } } else { // If the path exists we almost always just want to remove and replace it. The only exception is when it is a directory *and* the file from the layer is also a directory. Then we want to merge them (i.e. just apply the metadata from the layer). if fi, err := os.Lstat(path); err == nil { if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { if err := os.RemoveAll(path); err != nil { return 0, err } } } trBuf.Reset(tr) srcData := io.Reader(trBuf) srcHdr := hdr // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so we manually retarget these into the temporary files we extracted them into if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") { linkBasename := filepath.Base(hdr.Linkname) srcHdr = aufsHardlinks[linkBasename] if srcHdr == nil { return 0, fmt.Errorf("Invalid aufs hardlink") } tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) if err != nil { return 0, err } defer tmpFile.Close() srcData = tmpFile } if err := createTarFile(path, dest, srcHdr, srcData, true); err != nil { return 0, err } // Directory mtimes must be handled at the end to keep file creation inside them from modifying the directory mtime if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } } } for _, hdr := range dirs { path := filepath.Join(dest, hdr.Name) ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} if err := syscall.UtimesNano(path, ts); err != nil { return 0, err } } return size, nil }
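// Illustrative sketch, not part of the original tree: applying a layer diff to
// a root filesystem and reporting its unpacked size. The paths are examples.
func exampleApplyLayer() error {
	layer, err := os.Open("/tmp/layer.tar") // may also be gzip/bzip2/xz compressed
	if err != nil {
		return err
	}
	defer layer.Close()
	size, err := ApplyLayer("/tmp/rootfs", layer)
	if err != nil {
		return err
	}
	fmt.Printf("unpacked %d bytes\n", size)
	return nil
}
// ApplyLayer parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`. Returns the size in bytes of the contents
// of the layer.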
func ApplyLayer(dest string, layer ArchiveReader) (int64, error) { dest = filepath.Clean(dest) // We need to be able to set any perms oldmask, err := system.Umask(0) if err != nil { return 0, err } defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform layer, err = DecompressStream(layer) if err != nil { return 0, err } return UnpackLayer(dest, layer) } 
docker-1.6.2/pkg/archive/time_unsupported.go0000644000175000017500000000035012524223634020471 0ustar tianontianon// +build !linux package archive import ( "syscall" "time" ) func timeToTimespec(time time.Time) (ts syscall.Timespec) { nsec := int64(0) if !time.IsZero() { nsec = time.UnixNano() } return syscall.NsecToTimespec(nsec) } 
docker-1.6.2/pkg/archive/README.md0000644000175000017500000000010412524223634016010 0ustar tianontianonThis code provides helper functions for dealing with archive files. 
docker-1.6.2/pkg/archive/archive_unix_test.go0000644000175000017500000000251012524223634020606 0ustar tianontianon// +build !windows package archive import ( "os" "testing" ) func TestCanonicalTarNameForPath(t *testing.T) { cases := []struct{ in, expected string }{ {"foo", "foo"}, {"foo/bar", "foo/bar"}, {"foo/dir/", "foo/dir/"}, } for _, v := range cases { if out, err := CanonicalTarNameForPath(v.in); err != nil { t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) } else if out != v.expected { t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) } } } func TestCanonicalTarName(t *testing.T) { cases := []struct { in string isDir bool expected string }{ {"foo", false, "foo"}, {"foo", true, "foo/"}, {"foo/bar", false, "foo/bar"}, {"foo/bar", true, "foo/bar/"}, } for _, v := range cases { if out, err := canonicalTarName(v.in, v.isDir); err != nil { t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) } else if out != v.expected { t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) } } } func TestChmodTarEntry(t *testing.T) { cases := []struct { in, expected os.FileMode }{ {0000, 0000}, {0777, 0777}, {0644, 0644}, {0755, 0755}, {0444, 0444}, } for _, v := range cases { if out := chmodTarEntry(v.in); out != v.expected { t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out) } } } 
docker-1.6.2/pkg/archive/wrap.go0000644000175000017500000000274112524223634016042 0ustar tianontianonpackage archive import ( "bytes" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io/ioutil" ) // Generate generates a new archive from the content provided as input. // // `files` is a sequence of path/content pairs. A new file is added to the archive for each pair. // If the last pair is incomplete, the file is created with empty content. For example: // // Generate("foo.txt", "hello world", "emptyfile") // // The above call will return an archive with 2 files: // * ./foo.txt with content "hello world" // * ./emptyfile with empty content // // FIXME: stream content instead of buffering // FIXME: specify permissions and other archive metadata func Generate(input ...string) (Archive, error) { files := parseStringPairs(input...)
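// (Illustrative aside, not from the original file: parseStringPairs("foo.txt",
// "hello world", "emptyfile") yields the pairs ["foo.txt", "hello world"] and
// ["emptyfile", ""]; the odd trailing name gets empty content, exactly as the
// doc comment above describes.)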
buf := new(bytes.Buffer) tw := tar.NewWriter(buf) for _, file := range files { name, content := file[0], file[1] hdr := &tar.Header{ Name: name, Size: int64(len(content)), } if err := tw.WriteHeader(hdr); err != nil { return nil, err } if _, err := tw.Write([]byte(content)); err != nil { return nil, err } } if err := tw.Close(); err != nil { return nil, err } return ioutil.NopCloser(buf), nil } func parseStringPairs(input ...string) (output [][2]string) { output = make([][2]string, 0, len(input)/2+1) for i := 0; i < len(input); i += 2 { var pair [2]string pair[0] = input[i] if i+1 < len(input) { pair[1] = input[i+1] } output = append(output, pair) } return } 
docker-1.6.2/pkg/archive/changes_test.go0000644000175000017500000001542312524223634017541 0ustar tianontianonpackage archive import ( "io/ioutil" "os" "os/exec" "path" "sort" "testing" "time" ) func max(x, y int) int { if x >= y { return x } return y } func copyDir(src, dst string) error { cmd := exec.Command("cp", "-a", src, dst) if err := cmd.Run(); err != nil { return err } return nil } type FileType uint32 const ( Regular FileType = iota Dir Symlink ) type FileData struct { filetype FileType path string contents string permissions os.FileMode } func createSampleDir(t *testing.T, root string) { files := []FileData{ {Regular, "file1", "file1\n", 0600}, {Regular, "file2", "file2\n", 0666}, {Regular, "file3", "file3\n", 0404}, {Regular, "file4", "file4\n", 0600}, {Regular, "file5", "file5\n", 0600}, {Regular, "file6", "file6\n", 0600}, {Regular, "file7", "file7\n", 0600}, {Dir, "dir1", "", 0740}, {Regular, "dir1/file1-1", "file1-1\n", 01444}, {Regular, "dir1/file1-2", "file1-2\n", 0666}, {Dir, "dir2", "", 0700}, {Regular, "dir2/file2-1", "file2-1\n", 0666}, {Regular, "dir2/file2-2", "file2-2\n", 0666}, {Dir, "dir3", "", 0700}, {Regular, "dir3/file3-1", "file3-1\n", 0666}, {Regular, "dir3/file3-2", "file3-2\n", 0666}, {Dir, "dir4", "", 0700}, {Regular, "dir4/file3-1", "file4-1\n", 0666}, {Regular, "dir4/file3-2", "file4-2\n", 0666}, {Symlink, "symlink1", "target1", 0666}, {Symlink, "symlink2", "target2", 0666}, } now := time.Now() for _, info := range files { p := path.Join(root, info.path) if info.filetype == Dir { if err := os.MkdirAll(p, info.permissions); err != nil { t.Fatal(err) } } else if info.filetype == Regular { if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil { t.Fatal(err) } } else if info.filetype == Symlink { if err := os.Symlink(info.contents, p); err != nil { t.Fatal(err) } } if info.filetype != Symlink { // Set a consistent ctime, atime for all files and dirs if err := os.Chtimes(p, now, now); err != nil { t.Fatal(err) } } } } // Create a directory, copy it, and make sure we report no changes between the two func TestChangesDirsEmpty(t *testing.T) { src, err := ioutil.TempDir("", "docker-changes-test") if err != nil { t.Fatal(err) } createSampleDir(t, src) dst := src + "-copy" if err := copyDir(src, dst); err != nil { t.Fatal(err) } changes, err := ChangesDirs(dst, src) if err != nil { t.Fatal(err) } if len(changes) != 0 { t.Fatalf("Reported changes for identical dirs: %v", changes) } os.RemoveAll(src) os.RemoveAll(dst) } func mutateSampleDir(t *testing.T, root string) { // Remove a regular file if err := os.RemoveAll(path.Join(root, "file1")); err != nil { t.Fatal(err) } // Remove a directory if err := os.RemoveAll(path.Join(root, "dir1")); err != nil { t.Fatal(err) } // Remove a symlink if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil { t.Fatal(err) } // Rewrite a file if
err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil { t.Fatal(err) } // Replace a file if err := os.RemoveAll(path.Join(root, "file3")); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil { t.Fatal(err) } // Touch file if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { t.Fatal(err) } // Replace file with dir if err := os.RemoveAll(path.Join(root, "file5")); err != nil { t.Fatal(err) } if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil { t.Fatal(err) } // Create new file if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil { t.Fatal(err) } // Create new dir if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil { t.Fatal(err) } // Create a new symlink if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil { t.Fatal(err) } // Change a symlink if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil { t.Fatal(err) } if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil { t.Fatal(err) } // Replace dir with file if err := os.RemoveAll(path.Join(root, "dir2")); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil { t.Fatal(err) } // Touch dir if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { t.Fatal(err) } } func TestChangesDirsMutated(t *testing.T) { src, err := ioutil.TempDir("", "docker-changes-test") if err != nil { t.Fatal(err) } createSampleDir(t, src) dst := src + "-copy" if err := copyDir(src, dst); err != nil { t.Fatal(err) } defer os.RemoveAll(src) defer os.RemoveAll(dst) mutateSampleDir(t, dst) changes, err := ChangesDirs(dst, src) if err != nil { t.Fatal(err) } sort.Sort(changesByPath(changes)) expectedChanges := []Change{ {"/dir1", ChangeDelete}, {"/dir2", ChangeModify}, {"/dirnew", ChangeAdd}, {"/file1", ChangeDelete}, {"/file2", ChangeModify}, {"/file3", ChangeModify}, {"/file4", ChangeModify}, {"/file5", ChangeModify}, {"/filenew", ChangeAdd}, {"/symlink1", ChangeDelete}, {"/symlink2", ChangeModify}, {"/symlinknew", ChangeAdd}, } for i := 0; i < max(len(changes), len(expectedChanges)); i++ { if i >= len(expectedChanges) { t.Fatalf("unexpected change %s\n", changes[i].String()) } if i >= len(changes) { t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) } if changes[i].Path == expectedChanges[i].Path { if changes[i] != expectedChanges[i] { t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) } } else if changes[i].Path < expectedChanges[i].Path { t.Fatalf("unexpected change %s\n", changes[i].String()) } else { t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) } } } func TestApplyLayer(t *testing.T) { src, err := ioutil.TempDir("", "docker-changes-test") if err != nil { t.Fatal(err) } createSampleDir(t, src) defer os.RemoveAll(src) dst := src + "-copy" if err := copyDir(src, dst); err != nil { t.Fatal(err) } mutateSampleDir(t, dst) defer os.RemoveAll(dst) changes, err := ChangesDirs(dst, src) if err != nil { t.Fatal(err) } layer, err := ExportChanges(dst, changes) if err != nil { t.Fatal(err) } layerCopy, err := NewTempArchive(layer, "") if err != nil { t.Fatal(err) } if _, err := ApplyLayer(src, layerCopy); err != nil { 
t.Fatal(err) } changes2, err := ChangesDirs(src, dst) if err != nil { t.Fatal(err) } if len(changes2) != 0 { t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) } } docker-1.6.2/pkg/archive/archive_test.go0000644000175000017500000003566612524223634017565 0ustar tianontianonpackage archive import ( "bytes" "fmt" "io" "io/ioutil" "os" "os/exec" "path" "path/filepath" "strings" "syscall" "testing" "time" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") out, err := CmdStream(cmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } errCh := make(chan error) go func() { _, err := io.Copy(ioutil.Discard, out) errCh <- err }() select { case err := <-errCh: if err != nil { t.Fatalf("Command should not have failed (err=%.100s...)", err) } case <-time.After(5 * time.Second): t.Fatalf("Command did not complete in 5 seconds; probable deadlock") } } func TestCmdStreamBad(t *testing.T) { badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") out, err := CmdStream(badCmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } if output, err := ioutil.ReadAll(out); err == nil { t.Fatalf("Command should have failed") } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { t.Fatalf("Wrong error value (%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestCmdStreamGood(t *testing.T) { cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0") out, err := CmdStream(cmd, nil) if err != nil { t.Fatal(err) } if output, err := ioutil.ReadAll(out); err != nil { t.Fatalf("Command should not have failed (err=%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestTarFiles(t *testing.T) { // try without hardlinks if err := checkNoChanges(1000, false); err != nil { t.Fatal(err) } // try with hardlinks if err := checkNoChanges(1000, true); err != nil { t.Fatal(err) } } func checkNoChanges(fileNum int, hardlinks bool) error { srcDir, err := ioutil.TempDir("", "docker-test-srcDir") if err != nil { return err } defer os.RemoveAll(srcDir) destDir, err := ioutil.TempDir("", "docker-test-destDir") if err != nil { return err } defer os.RemoveAll(destDir) _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) if err != nil { return err } err = TarUntar(srcDir, destDir) if err != nil { return err } changes, err := ChangesDirs(destDir, srcDir) if err != nil { return err } if len(changes) > 0 { return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) } return nil } func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { archive, err := TarWithOptions(origin, options) if err != nil { t.Fatal(err) } defer archive.Close() buf := make([]byte, 10) if _, err := archive.Read(buf); err != nil { return nil, err } wrap := io.MultiReader(bytes.NewReader(buf), archive) detectedCompression := DetectCompression(buf) compression := options.Compression if detectedCompression.Extension() != compression.Extension() { return nil, fmt.Errorf("Wrong compression detected. 
Expected %s, found %s", compression.Extension(), detectedCompression.Extension()) } tmp, err := ioutil.TempDir("", "docker-test-untar") if err != nil { return nil, err } defer os.RemoveAll(tmp) if err := Untar(wrap, tmp, nil); err != nil { return nil, err } if _, err := os.Stat(tmp); err != nil { return nil, err } return ChangesDirs(origin, tmp) } func TestTarUntar(t *testing.T) { origin, err := ioutil.TempDir("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { t.Fatal(err) } for _, c := range []Compression{ Uncompressed, Gzip, } { changes, err := tarUntar(t, origin, &TarOptions{ Compression: c, ExcludePatterns: []string{"3"}, }) if err != nil { t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } if len(changes) != 1 || changes[0].Path != "/3" { t.Fatalf("Unexpected differences after tarUntar: %v", changes) } } } func TestTarWithOptions(t *testing.T) { origin, err := ioutil.TempDir("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } cases := []struct { opts *TarOptions numChanges int }{ {&TarOptions{IncludeFiles: []string{"1"}}, 1}, {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, } for _, testCase := range cases { changes, err := tarUntar(t, origin, testCase.opts) if err != nil { t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) } if len(changes) != testCase.numChanges { t.Errorf("Expected %d changes, got %d for %+v:", testCase.numChanges, len(changes), testCase.opts) } } } // Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz // use PAX Global Extended Headers. // Failing to handle them would prevent such archives from being unpacked during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true) if err != nil { t.Fatal(err) } } // Some tar archives have both GNU-specific (huge uid) and ustar-specific (long name) entries. // Not supposed to happen (should use PAX instead of ustar for long names) but it does and it should still work. func TestUntarUstarGnuConflict(t *testing.T) { f, err := os.Open("testdata/broken.tar") if err != nil { t.Fatal(err) } found := false tr := tar.NewReader(f) // Iterate through the files in the archive. 
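// (Added note, not in the original source: the loop below is the real assertion; tr.Next() must parse past the mixed GNU/ustar headers in testdata/broken.tar without error before the long-named entry is reached.)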
for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { t.Fatal(err) } if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { found = true break } } if !found { t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") } } func TestTarWithHardLink(t *testing.T) { origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := os.Link(path.Join(origin, "1"), path.Join(origin, "2")); err != nil { t.Fatal(err) } var i1, i2 uint64 if i1, err = getNlink(path.Join(origin, "1")); err != nil { t.Fatal(err) } // sanity check that we can hardlink if i1 != 2 { t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1) } dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") if err != nil { t.Fatal(err) } defer os.RemoveAll(dest) // we'll do this in two steps to isolate failures fh, err := Tar(origin, Uncompressed) if err != nil { t.Fatal(err) } // ensure we can read the whole thing with no error, before writing back out buf, err := ioutil.ReadAll(fh) if err != nil { t.Fatal(err) } bRdr := bytes.NewReader(buf) err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) if err != nil { t.Fatal(err) } if i1, err = getInode(path.Join(dest, "1")); err != nil { t.Fatal(err) } if i2, err = getInode(path.Join(dest, "2")); err != nil { t.Fatal(err) } if i1 != i2 { t.Errorf("expected matching inodes, but got %d and %d", i1, i2) } } func getNlink(path string) (uint64, error) { stat, err := os.Stat(path) if err != nil { return 0, err } statT, ok := stat.Sys().(*syscall.Stat_t) if !ok { return 0, fmt.Errorf("expected type *syscall.Stat_t, got %T", stat.Sys()) } return statT.Nlink, nil } func getInode(path string) (uint64, error) { stat, err := os.Stat(path) if err != nil { return 0, err } statT, ok := stat.Sys().(*syscall.Stat_t) if !ok { return 0, fmt.Errorf("expected type *syscall.Stat_t, got %T", stat.Sys()) } return statT.Ino, nil } func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } if makeLinks { if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil } func BenchmarkTarUntar(b *testing.B) { origin, err := ioutil.TempDir("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := path.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, false) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := TarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func BenchmarkTarUntarWithLinks(b *testing.B) { origin, err := ioutil.TempDir("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") if err != nil { 
b.Fatal(err) } target := path.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, true) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := TarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func TestUntarInvalidFilenames(t *testing.T) { for i, headers := range [][]*tar.Header{ { { Name: "../victim/dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, { { // Note the leading slash Name: "/../victim/slash-dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarInvalidHardlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeLink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeLink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (hardlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try reading victim/hello (hardlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try removing victim directory (hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } } func TestUntarInvalidSymlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeSymlink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeSymlink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (symlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try reading victim/hello (symlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try removing victim directory (symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try writing to victim/newdir/newfile with a symlink in the path { // this header needs to be before the next one, or else there is an error Name: "dir/loophole", Typeflag: tar.TypeSymlink, Linkname: "../../victim", Mode: 0755, }, { Name: "dir/loophole/newdir/newfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestTempArchiveCloseMultipleTimes(t *testing.T) { reader := ioutil.NopCloser(strings.NewReader("hello")) tempArchive, err := NewTempArchive(reader, "") buf := make([]byte, 10) n, err := tempArchive.Read(buf) if n != 5 { t.Fatalf("Expected to read 5 bytes. Read %d instead", n) } for i := 0; i < 3; i++ { if err = tempArchive.Close(); err != nil { t.Fatalf("i=%d. Unexpected error closing temp archive: %v", i, err) } } } docker-1.6.2/pkg/archive/archive_windows_test.go0000644000175000017500000000307012524223634021317 0ustar tianontianon// +build windows package archive import ( "os" "testing" ) func TestCanonicalTarNameForPath(t *testing.T) { cases := []struct { in, expected string shouldFail bool }{ {"foo", "foo", false}, {"foo/bar", "___", true}, // unix-styled windows path must fail {`foo\bar`, "foo/bar", false}, } for _, v := range cases { if out, err := CanonicalTarNameForPath(v.in); err != nil && !v.shouldFail { t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) } else if v.shouldFail && err == nil { t.Fatalf("canonical path call should have pailed with error. in=%s out=%s", v.in, out) } else if !v.shouldFail && out != v.expected { t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) } } } func TestCanonicalTarName(t *testing.T) { cases := []struct { in string isDir bool expected string }{ {"foo", false, "foo"}, {"foo", true, "foo/"}, {`foo\bar`, false, "foo/bar"}, {`foo\bar`, true, "foo/bar/"}, } for _, v := range cases { if out, err := canonicalTarName(v.in, v.isDir); err != nil { t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) } else if out != v.expected { t.Fatalf("wrong canonical tar name. 
expected:%s got:%s", v.expected, out) } } } func TestChmodTarEntry(t *testing.T) { cases := []struct { in, expected os.FileMode }{ {0000, 0111}, {0777, 0755}, {0644, 0755}, {0755, 0755}, {0444, 0555}, } for _, v := range cases { if out := chmodTarEntry(v.in); out != v.expected { t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out) } } } docker-1.6.2/pkg/archive/changes_posix_test.go0000644000175000017500000000547312524223634020767 0ustar tianontianonpackage archive import ( "archive/tar" "fmt" "io" "io/ioutil" "os" "path" "sort" "testing" ) func TestHardLinkOrder(t *testing.T) { names := []string{"file1.txt", "file2.txt", "file3.txt"} msg := []byte("Hey y'all") // Create dir src, err := ioutil.TempDir("", "docker-hardlink-test-src-") if err != nil { t.Fatal(err) } //defer os.RemoveAll(src) for _, name := range names { func() { fh, err := os.Create(path.Join(src, name)) if err != nil { t.Fatal(err) } defer fh.Close() if _, err = fh.Write(msg); err != nil { t.Fatal(err) } }() } // Create dest, with changes that includes hardlinks dest, err := ioutil.TempDir("", "docker-hardlink-test-dest-") if err != nil { t.Fatal(err) } os.RemoveAll(dest) // we just want the name, at first if err := copyDir(src, dest); err != nil { t.Fatal(err) } defer os.RemoveAll(dest) for _, name := range names { for i := 0; i < 5; i++ { if err := os.Link(path.Join(dest, name), path.Join(dest, fmt.Sprintf("%s.link%d", name, i))); err != nil { t.Fatal(err) } } } // get changes changes, err := ChangesDirs(dest, src) if err != nil { t.Fatal(err) } // sort sort.Sort(changesByPath(changes)) // ExportChanges ar, err := ExportChanges(dest, changes) if err != nil { t.Fatal(err) } hdrs, err := walkHeaders(ar) if err != nil { t.Fatal(err) } // reverse sort sort.Sort(sort.Reverse(changesByPath(changes))) // ExportChanges arRev, err := ExportChanges(dest, changes) if err != nil { t.Fatal(err) } hdrsRev, err := walkHeaders(arRev) if err != nil { t.Fatal(err) } // line up the two sets sort.Sort(tarHeaders(hdrs)) sort.Sort(tarHeaders(hdrsRev)) // compare Size and LinkName for i := range hdrs { if hdrs[i].Name != hdrsRev[i].Name { t.Errorf("headers - expected name %q; but got %q", hdrs[i].Name, hdrsRev[i].Name) } if hdrs[i].Size != hdrsRev[i].Size { t.Errorf("headers - %q expected size %d; but got %d", hdrs[i].Name, hdrs[i].Size, hdrsRev[i].Size) } if hdrs[i].Typeflag != hdrsRev[i].Typeflag { t.Errorf("headers - %q expected type %d; but got %d", hdrs[i].Name, hdrs[i].Typeflag, hdrsRev[i].Typeflag) } if hdrs[i].Linkname != hdrsRev[i].Linkname { t.Errorf("headers - %q expected linkname %q; but got %q", hdrs[i].Name, hdrs[i].Linkname, hdrsRev[i].Linkname) } } } type tarHeaders []tar.Header func (th tarHeaders) Len() int { return len(th) } func (th tarHeaders) Swap(i, j int) { th[j], th[i] = th[i], th[j] } func (th tarHeaders) Less(i, j int) bool { return th[i].Name < th[j].Name } func walkHeaders(r io.Reader) ([]tar.Header, error) { t := tar.NewReader(r) headers := []tar.Header{} for { hdr, err := t.Next() if err != nil { if err == io.EOF { break } return headers, err } headers = append(headers, *hdr) } return headers, nil } docker-1.6.2/pkg/archive/changes.go0000644000175000017500000002526312524223634016505 0ustar tianontianonpackage archive import ( "bytes" "fmt" "io" "os" "path/filepath" "sort" "strings" "syscall" "time" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" ) type ChangeType int 
const ( ChangeModify ChangeType = iota ChangeAdd ChangeDelete ) type Change struct { Path string Kind ChangeType } func (change *Change) String() string { var kind string switch change.Kind { case ChangeModify: kind = "C" case ChangeAdd: kind = "A" case ChangeDelete: kind = "D" } return fmt.Sprintf("%s %s", kind, change.Path) } // for sort.Sort type changesByPath []Change func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } func (c changesByPath) Len() int { return len(c) } func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } // GNU tar and the Go tar writer don't have sub-second mtime // precision, which is problematic when we apply changes via tar // files. We handle this by comparing for exact times, *or* the same // second count where either a or b has exactly 0 nanoseconds func sameFsTime(a, b time.Time) bool { return a == b || (a.Unix() == b.Unix() && (a.Nanosecond() == 0 || b.Nanosecond() == 0)) } func sameFsTimeSpec(a, b syscall.Timespec) bool { return a.Sec == b.Sec && (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) } // Changes walks the path rw and determines changes for the files in the path, // with respect to the parent layers func Changes(layers []string, rw string) ([]Change, error) { var changes []Change err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { if err != nil { return err } // Rebase path path, err = filepath.Rel(rw, path) if err != nil { return err } path = filepath.Join("/", path) // Skip root if path == "/" { return nil } // Skip AUFS metadata if matched, err := filepath.Match("/.wh..wh.*", path); err != nil || matched { return err } change := Change{ Path: path, } // Find out what kind of modification happened file := filepath.Base(path) // If there is a whiteout, then the file was removed if strings.HasPrefix(file, ".wh.") { originalFile := file[len(".wh."):] change.Path = filepath.Join(filepath.Dir(path), originalFile) change.Kind = ChangeDelete } else { // Otherwise, the file was added change.Kind = ChangeAdd // ...Unless it already existed in a top layer, in which case, it's a modification for _, layer := range layers { stat, err := os.Stat(filepath.Join(layer, path)) if err != nil && !os.IsNotExist(err) { return err } if err == nil { // The file existed in the top layer, so that's a modification // However, if it's a directory, maybe it wasn't actually modified. 
// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar if stat.IsDir() && f.IsDir() { if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { // Both directories are the same, don't record the change return nil } } change.Kind = ChangeModify break } } } // Record change changes = append(changes, change) return nil }) if err != nil && !os.IsNotExist(err) { return nil, err } return changes, nil } type FileInfo struct { parent *FileInfo name string stat *system.Stat_t children map[string]*FileInfo capability []byte added bool } func (root *FileInfo) LookUp(path string) *FileInfo { parent := root if path == "/" { return root } pathElements := strings.Split(path, "/") for _, elem := range pathElements { if elem != "" { child := parent.children[elem] if child == nil { return nil } parent = child } } return parent } func (info *FileInfo) path() string { if info.parent == nil { return "/" } return filepath.Join(info.parent.path(), info.name) } func (info *FileInfo) isDir() bool { return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR == syscall.S_IFDIR } func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { sizeAtEntry := len(*changes) if oldInfo == nil { // add change := Change{ Path: info.path(), Kind: ChangeAdd, } *changes = append(*changes, change) info.added = true } // We make a copy so we can modify it to detect additions // also, we only recurse on the old dir if the new info is a directory // otherwise any previous delete/change is considered recursive oldChildren := make(map[string]*FileInfo) if oldInfo != nil && info.isDir() { for k, v := range oldInfo.children { oldChildren[k] = v } } for name, newChild := range info.children { oldChild := oldChildren[name] if oldChild != nil { // change? oldStat := oldChild.stat newStat := newChild.stat // Note: We can't compare inode or ctime or blocksize here, because these change // when copying a file into a container. However, that is not generally a problem // because any content change will change mtime, and any status change should // be visible when actually comparing the stat fields. The only time this // breaks down is if some code intentionally hides a change by setting // back mtime if oldStat.Mode() != newStat.Mode() || oldStat.Uid() != newStat.Uid() || oldStat.Gid() != newStat.Gid() || oldStat.Rdev() != newStat.Rdev() || // Don't look at size for dirs, it's not a good measure of change (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR && (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) || !bytes.Equal(oldChild.capability, newChild.capability) { change := Change{ Path: newChild.path(), Kind: ChangeModify, } *changes = append(*changes, change) newChild.added = true } // Remove from copy so we can detect deletions delete(oldChildren, name) } newChild.addChanges(oldChild, changes) } for _, oldChild := range oldChildren { // delete change := Change{ Path: oldChild.path(), Kind: ChangeDelete, } *changes = append(*changes, change) } // If there were changes inside this directory, we need to add it, even if the directory // itself wasn't changed. This is needed to properly save and restore filesystem permissions. 
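// For example (added, illustrative): if only /foo/bar/baz changed, /foo and
// /foo/bar are still emitted as ChangeModify entries, inserted just before
// their children in the resulting change list.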
if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != "/" { change := Change{ Path: info.path(), Kind: ChangeModify, } // Let's insert the directory entry before the recently added entries located inside this dir *changes = append(*changes, change) // just to resize the slice, will be overwritten copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) (*changes)[sizeAtEntry] = change } } func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { var changes []Change info.addChanges(oldInfo, &changes) return changes } func newRootFileInfo() *FileInfo { root := &FileInfo{ name: "/", children: make(map[string]*FileInfo), } return root } func collectFileInfo(sourceDir string) (*FileInfo, error) { root := newRootFileInfo() err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { if err != nil { return err } // Rebase path relPath, err := filepath.Rel(sourceDir, path) if err != nil { return err } relPath = filepath.Join("/", relPath) if relPath == "/" { return nil } parent := root.LookUp(filepath.Dir(relPath)) if parent == nil { return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) } info := &FileInfo{ name: filepath.Base(relPath), children: make(map[string]*FileInfo), parent: parent, } s, err := system.Lstat(path) if err != nil { return err } info.stat = s info.capability, _ = system.Lgetxattr(path, "security.capability") parent.children[info.name] = info return nil }) if err != nil { return nil, err } return root, nil } // ChangesDirs compares two directories and generates an array of Change objects describing the changes. // If oldDir is "", then all files in newDir will be Add-Changes. func ChangesDirs(newDir, oldDir string) ([]Change, error) { var ( oldRoot, newRoot *FileInfo err1, err2 error errs = make(chan error, 2) ) go func() { if oldDir != "" { oldRoot, err1 = collectFileInfo(oldDir) } errs <- err1 }() go func() { newRoot, err2 = collectFileInfo(newDir) errs <- err2 }() // block until both routines have returned for i := 0; i < 2; i++ { if err := <-errs; err != nil { return nil, err } } return newRoot.Changes(oldRoot), nil } // ChangesSize calculates the size in bytes of the provided changes, based on newDir. func ChangesSize(newDir string, changes []Change) int64 { var size int64 for _, change := range changes { if change.Kind == ChangeModify || change.Kind == ChangeAdd { file := filepath.Join(newDir, change.Path) fileInfo, _ := os.Lstat(file) if fileInfo != nil && !fileInfo.IsDir() { size += fileInfo.Size() } } } return size } // ExportChanges produces an Archive from the provided changes, relative to dir. func ExportChanges(dir string, changes []Change) (Archive, error) { reader, writer := io.Pipe() go func() { ta := &tarAppender{ TarWriter: tar.NewWriter(writer), Buffer: pools.BufioWriter32KPool.Get(nil), SeenFiles: make(map[uint64]string), } // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) sort.Sort(changesByPath(changes)) // In general we log errors here but ignore them because // during e.g. 
a diff operation the container can continue // mutating the filesystem and we can see transient errors // from this for _, change := range changes { if change.Kind == ChangeDelete { whiteOutDir := filepath.Dir(change.Path) whiteOutBase := filepath.Base(change.Path) whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase) timestamp := time.Now() hdr := &tar.Header{ Name: whiteOut[1:], Size: 0, ModTime: timestamp, AccessTime: timestamp, ChangeTime: timestamp, } if err := ta.TarWriter.WriteHeader(hdr); err != nil { log.Debugf("Can't write whiteout header: %s", err) } } else { path := filepath.Join(dir, change.Path) if err := ta.addTarFile(path, change.Path[1:]); err != nil { log.Debugf("Can't add file %s to tar: %s", path, err) } } } // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { log.Debugf("Can't close layer: %s", err) } if err := writer.Close(); err != nil { log.Debugf("failed to close Changes writer: %s", err) } }() return reader, nil } docker-1.6.2/pkg/archive/diff_test.go0000644000175000017500000000771612524223634017047 0ustar tianontianonpackage archive import ( "testing" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) func TestApplyLayerInvalidFilenames(t *testing.T) { for i, headers := range [][]*tar.Header{ { { Name: "../victim/dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, { { // Note the leading slash Name: "/../victim/slash-dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestApplyLayerInvalidHardlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeLink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeLink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (hardlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try reading victim/hello (hardlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try removing victim directory (hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. 
%v", i, err) } } } func TestApplyLayerInvalidSymlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeSymlink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeSymlink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (symlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try reading victim/hello (symlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try removing victim directory (symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } docker-1.6.2/pkg/archive/time_linux.go0000644000175000017500000000041412524223634017241 0ustar tianontianonpackage archive import ( "syscall" "time" ) func timeToTimespec(time time.Time) (ts syscall.Timespec) { if time.IsZero() { // Return UTIME_OMIT special value ts.Sec = 0 ts.Nsec = ((1 << 30) - 2) return } return syscall.NsecToTimespec(time.UnixNano()) } docker-1.6.2/pkg/archive/archive_unix.go0000644000175000017500000000252012524223634017550 0ustar tianontianon// +build !windows package archive import ( "errors" "os" "syscall" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) // canonicalTarNameForPath returns platform-specific filepath // to canonical posix-style path for tar archival. p is relative // path. func CanonicalTarNameForPath(p string) (string, error) { return p, nil // already unix-style } // chmodTarEntry is used to adjust the file permissions used in tar header based // on the platform the archival is done. 
func chmodTarEntry(perm os.FileMode) os.FileMode { return perm // noop for unix as golang APIs provide perm bits correctly } func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) { s, ok := stat.(*syscall.Stat_t) if !ok { err = errors.New("cannot convert stat value to syscall.Stat_t") return } nlink = uint32(s.Nlink) inode = uint64(s.Ino) // Currently go does not fill in the major/minor numbers if s.Mode&syscall.S_IFBLK == syscall.S_IFBLK || s.Mode&syscall.S_IFCHR == syscall.S_IFCHR { hdr.Devmajor = int64(major(uint64(s.Rdev))) hdr.Devminor = int64(minor(uint64(s.Rdev))) } return } func major(device uint64) uint64 { return (device >> 8) & 0xfff } func minor(device uint64) uint64 { return (device & 0xff) | ((device >> 12) & 0xfff00) } docker-1.6.2/pkg/archive/archive_windows.go0000644000175000017500000000231012524223634020256 0ustar tianontianon// +build windows package archive import ( "fmt" "os" "strings" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) // CanonicalTarNameForPath converts a platform-specific filepath // to a canonical posix-style path for tar archival. p is a relative // path. func CanonicalTarNameForPath(p string) (string, error) { // windows: convert windows style relative path with backslashes // into forward slashes. since windows does not allow '/' or '\' // in file names, it is mostly safe to replace; however, we must // check just in case if strings.Contains(p, "/") { return "", fmt.Errorf("windows path contains forward slash: %s", p) } return strings.Replace(p, string(os.PathSeparator), "/", -1), nil } // chmodTarEntry is used to adjust the file permissions used in tar header based // on the platform the archival is done. func chmodTarEntry(perm os.FileMode) os.FileMode { perm &= 0755 // Add the x bit: make everything +x from windows perm |= 0111 return perm } func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) { // do nothing. 
no notion of Rdev, Inode, Nlink in stat on Windows return } docker-1.6.2/pkg/archive/testdata/0000755000175000017500000000000012524223634016347 5ustar tianontianondocker-1.6.2/pkg/archive/testdata/broken.tar0000644000175000017500000003300012524223634020333 0ustar tianontianonroot/0040700000000000000000000000000012332704605010223 5ustar0000000000000000root/.cpanm/0040755000000000000000000000000012332704605011411 5ustar0000000000000000root/.cpanm/work/0040755000000000000000000000000012332704605012373 5ustar0000000000000000root/.cpanm/work/1395823785.24209/0040755000000000000000000000000012332704605014154 5ustar0000000000000000root/.cpanm/work/1395823785.24209/File-Find-Rule-0.33/0040755000000000000000000000000012332704605017177 5ustar0000000000000000root/.cpanm/work/1395823785.24209/File-Find-Rule-0.33/META.yml0100644€1çÓ€-­æ0000000112211635626623021652 0ustar 00000000000000--- #YAML:1.0 name: File-Find-Rule version: 0.33 abstract: ~ author: [] license: unknown distribution_type: module configure_requires: ExtUtils::MakeMaker: 0 build_requires: ExtUtils::MakeMaker: 0 requires: File::Find: 0 File::Spec: 0 Number::Compare: 0 Test::More: 0 Text::Glob: 0.07 no_index: directory: - t - inc generated_by: ExtUtils::MakeMaker version 6.57_05 meta-spec: url: http://module-build.sourceforge.net/META-spec-v1.4.html version: 1.4 root/.cpanm/work/1395823785.24209/Plack-1.0030/0040755000000000000000000000000012332704605015665 5ustar 00000000000000root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/0040755000000000000000000000000012332704605016575 5ustar 00000000000000root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/0040755000000000000000000000000012332704605017433 5ustar 00000000000000Plack::Middleware::LighttpdScriptNameFix.3pm0100644000000000000000000001400512314512430027460 0ustar 00000000000000root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3.\" Automatically generated by Pod::Man 2.27 (Pod::Simple 3.28) .\" .\" Standard preamble: .\" ======================================================================== .de Sp \" Vertical space (when we can't use .PP) .if t .sp .5v .if n .sp .. .de Vb \" Begin verbatim text .ft CW .nf .ne \\$1 .. .de Ve \" End verbatim text .ft R .fi .. .\" Set up some character translations and predefined strings. \*(-- will .\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left .\" double quote, and \*(R" will give a right double quote. \*(C+ will .\" give a nicer C++. Capital omega is used to do unbreakable dashes and .\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff, .\" nothing in troff, for use with C<>. .tr \(*W- .ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p' .ie n \{\ . ds -- \(*W- . ds PI pi . if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch . if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch . ds L" "" . ds R" "" . ds C` "" . ds C' "" 'br\} .el\{\ . ds -- \|\(em\| . ds PI \(*p . ds L" `` . ds R" '' . ds C` . ds C' 'br\} .\" .\" Escape single quotes in literal strings from groff's Unicode transform. .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" .\" If the F register is turned on, we'll generate index entries on stderr for .\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index .\" entries marked with X<> in POD. Of course, you'll have to process the .\" output yourself in some meaningful fashion. .\" .\" Avoid warning from groff about undefined register 'F'. .de IX .. .nr rF 0 .if \n(.g .if rF .nr rF 1 .if (\n(rF:(\n(.g==0)) \{ . if \nF \{ . de IX . 
tm Index:\\$1\t\\n%\t"\\$2" .. . if !\nF==2 \{ . nr % 0 . nr F 2 . \} . \} .\} .rr rF .\" .\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2). .\" Fear. Run. Save yourself. No user-serviceable parts. . \" fudge factors for nroff and troff .if n \{\ . ds #H 0 . ds #V .8m . ds #F .3m . ds #[ \f1 . ds #] \fP .\} .if t \{\ . ds #H ((1u-(\\\\n(.fu%2u))*.13m) . ds #V .6m . ds #F 0 . ds #[ \& . ds #] \& .\} . \" simple accents for nroff and troff .if n \{\ . ds ' \& . ds ` \& . ds ^ \& . ds , \& . ds ~ ~ . ds / .\} .if t \{\ . ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u" . ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u' . ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u' . ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u' . ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u' . ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u' .\} . \" troff and (daisy-wheel) nroff accents .ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V' .ds 8 \h'\*(#H'\(*b\h'-\*(#H' .ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#] .ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H' .ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u' .ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#] .ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#] .ds ae a\h'-(\w'a'u*4/10)'e .ds Ae A\h'-(\w'A'u*4/10)'E . \" corrections for vroff .if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u' .if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u' . \" for low resolution devices (crt and lpr) .if \n(.H>23 .if \n(.V>19 \ \{\ . ds : e . ds 8 ss . ds o a . ds d- d\h'-1'\(ga . ds D- D\h'-1'\(hy . ds th \o'bp' . ds Th \o'LP' . ds ae ae . ds Ae AE .\} .rm #[ #] #H #V #F C .\" ======================================================================== .\" .IX Title "Plack::Middleware::LighttpdScriptNameFix 3pm" .TH Plack::Middleware::LighttpdScriptNameFix 3pm "2013-11-23" "perl v5.18.2" "User Contributed Perl Documentation" .\" For nroff, turn off justification. Always turn off hyphenation; it makes .\" way too many mistakes in technical documents. .if n .ad l .nh .SH "NAME" Plack::Middleware::LighttpdScriptNameFix \- fixes wrong SCRIPT_NAME and PATH_INFO that lighttpd sets .SH "SYNOPSIS" .IX Header "SYNOPSIS" .Vb 2 \& # in your app.psgi \& use Plack::Builder; \& \& builder { \& enable "LighttpdScriptNameFix"; \& $app; \& }; \& \& # Or from the command line \& plackup \-s FCGI \-e \*(Aqenable "LighttpdScriptNameFix"\*(Aq /path/to/app.psgi .Ve .SH "DESCRIPTION" .IX Header "DESCRIPTION" This middleware fixes wrong \f(CW\*(C`SCRIPT_NAME\*(C'\fR and \f(CW\*(C`PATH_INFO\*(C'\fR set by lighttpd when you mount your app under the root path (\*(L"/\*(R"). If you use lighttpd 1.4.23 or later you can instead enable \f(CW\*(C`fix\-root\-scriptname\*(C'\fR flag inside \f(CW\*(C`fastcgi.server\*(C'\fR instead of using this middleware. .SH "CONFIGURATION" .IX Header "CONFIGURATION" .IP "script_name" 4 .IX Item "script_name" Even with \f(CW\*(C`fix\-root\-scriptname\*(C'\fR, lighttpd \fIstill\fR sets weird \&\f(CW\*(C`SCRIPT_NAME\*(C'\fR and \f(CW\*(C`PATH_INFO\*(C'\fR if you mount your application at \f(CW""\fR or something that ends with \f(CW\*(C`/\*(C'\fR. Setting \f(CW\*(C`script_name\*(C'\fR option tells the middleware how to reconstruct the new correct \f(CW\*(C`SCRIPT_NAME\*(C'\fR and \&\f(CW\*(C`PATH_INFO\*(C'\fR. 
.Sp If you mount the app under \f(CW\*(C`/something/\*(C'\fR, you should set: .Sp .Vb 1 \& enable "LighttpdScriptNameFix", script_name => "/something"; .Ve .Sp and when a request for \f(CW\*(C`/something/a/b?param=1\*(C'\fR comes, \f(CW\*(C`SCRIPT_NAME\*(C'\fR becomes \f(CW\*(C`/something\*(C'\fR and \f(CW\*(C`PATH_INFO\*(C'\fR becomes \f(CW\*(C`/a/b\*(C'\fR. .Sp \&\f(CW\*(C`script_name\*(C'\fR option is set to empty by default, which means all the request path is set to \f(CW\*(C`PATH_INFO\*(C'\fR and it behaves like your fastcgi application is mounted in the root path. .SH "AUTHORS" .IX Header "AUTHORS" Yury Zavarin .PP Tatsuhiko Miyagawa .SH "SEE ALSO" .IX Header "SEE ALSO" Plack::Handler::FCGI docker-1.6.2/pkg/archive/utils_test.go0000644000175000017500000001140012524223634017260 0ustar tianontianonpackage archive import ( "bytes" "fmt" "io" "io/ioutil" "os" "path/filepath" "time" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) var testUntarFns = map[string]func(string, io.Reader) error{ "untar": func(dest string, r io.Reader) error { return Untar(r, dest, nil) }, "applylayer": func(dest string, r io.Reader) error { _, err := ApplyLayer(dest, ArchiveReader(r)) return err }, } // testBreakout is a helper function that, within the provided `tmpdir` directory, // creates a `victim` folder with a generated `hello` file in it. // `untar` extracts to a directory named `dest`, the tar file created from `headers`. // // Here are the tested scenarios: // - removed `victim` folder (write) // - removed files from `victim` folder (write) // - new files in `victim` folder (write) // - modified files in `victim` folder (write) // - file in `dest` with same content as `victim/hello` (read) // // When using testBreakout make sure you cover one of the scenarios listed above. func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error { tmpdir, err := ioutil.TempDir("", tmpdir) if err != nil { return err } defer os.RemoveAll(tmpdir) dest := filepath.Join(tmpdir, "dest") if err := os.Mkdir(dest, 0755); err != nil { return err } victim := filepath.Join(tmpdir, "victim") if err := os.Mkdir(victim, 0755); err != nil { return err } hello := filepath.Join(victim, "hello") helloData, err := time.Now().MarshalText() if err != nil { return err } if err := ioutil.WriteFile(hello, helloData, 0644); err != nil { return err } helloStat, err := os.Stat(hello) if err != nil { return err } reader, writer := io.Pipe() go func() { t := tar.NewWriter(writer) for _, hdr := range headers { t.WriteHeader(hdr) } t.Close() }() untar := testUntarFns[untarFn] if untar == nil { return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn) } if err := untar(dest, reader); err != nil { if _, ok := err.(breakoutError); !ok { // If untar returns an error unrelated to an archive breakout, // then consider this an unexpected error and abort. return err } // Here, untar detected the breakout. // Let's move on verifying that indeed there was no breakout. fmt.Printf("breakoutError: %v\n", err) } // Check victim folder f, err := os.Open(victim) if err != nil { // codepath taken if victim folder was removed return fmt.Errorf("archive breakout: error reading %q: %v", victim, err) } defer f.Close() // Check contents of victim folder // // We are only interested in getting 2 files from the victim folder, because if all is well // we expect only one result, the `hello` file. 
If there is a second result, it cannot // hold the same name `hello` and we assume that a new file got created in the victim folder. // That is enough to detect an archive breakout. names, err := f.Readdirnames(2) if err != nil { // codepath taken if victim is not a folder return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err) } for _, name := range names { if name != "hello" { // codepath taken if new file was created in victim folder return fmt.Errorf("archive breakout: new file %q", name) } } // Check victim/hello f, err = os.Open(hello) if err != nil { // codepath taken if read permissions were removed return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err) } defer f.Close() b, err := ioutil.ReadAll(f) if err != nil { return err } fi, err := f.Stat() if err != nil { return err } if helloStat.IsDir() != fi.IsDir() || // TODO: cannot check for fi.ModTime() change helloStat.Mode() != fi.Mode() || helloStat.Size() != fi.Size() || !bytes.Equal(helloData, b) { // codepath taken if hello has been modified return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v.", hello, helloData, b, helloStat, fi) } // Check that nothing in dest/ has the same content as victim/hello. // Since victim/hello was generated with time.Now(), it is safe to assume // that any file whose content matches exactly victim/hello, managed somehow // to access victim/hello. return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error { if info.IsDir() { if err != nil { // skip directory if error return filepath.SkipDir } // enter directory return nil } if err != nil { // skip file if error return nil } b, err := ioutil.ReadFile(path) if err != nil { // Houston, we have a problem. Aborting (space)walk. return err } if bytes.Equal(helloData, b) { return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path) } return nil }) } docker-1.6.2/pkg/archive/example_changes.go0000644000175000017500000000434412524223634020215 0ustar tianontianon// +build ignore // Simple tool to create an archive stream from an old and new directory // // By default it will stream the comparison of two temporary directories with junk files package main import ( "flag" "fmt" "io" "io/ioutil" "os" "path" "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/archive" ) var ( flDebug = flag.Bool("D", false, "debugging output") flNewDir = flag.String("newdir", "", "") flOldDir = flag.String("olddir", "", "") log = logrus.New() ) func main() { flag.Usage = func() { fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)") fmt.Printf("%s [OPTIONS]\n", os.Args[0]) flag.PrintDefaults() } flag.Parse() log.Out = os.Stderr if (len(os.Getenv("DEBUG")) > 0) || *flDebug { logrus.SetLevel(logrus.DebugLevel) } var newDir, oldDir string if len(*flNewDir) == 0 { var err error newDir, err = ioutil.TempDir("", "docker-test-newDir") if err != nil { log.Fatal(err) } defer os.RemoveAll(newDir) if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { log.Fatal(err) } } else { newDir = *flNewDir } if len(*flOldDir) == 0 { var err error oldDir, err = ioutil.TempDir("", "docker-test-oldDir") if err != nil { log.Fatal(err) } defer os.RemoveAll(oldDir) } else { oldDir = *flOldDir } changes, err := archive.ChangesDirs(newDir, oldDir) if err != nil { log.Fatal(err) } a, err := archive.ExportChanges(newDir, changes) if err != nil { log.Fatal(err) } defer a.Close() i, err := io.Copy(os.Stdout, a) if err != nil && err != io.EOF { log.Fatal(err) } fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) } func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } if makeLinks { if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil } docker-1.6.2/pkg/timeutils/0000755000175000017500000000000012524223634015134 5ustar tianontianondocker-1.6.2/pkg/timeutils/json.go0000644000175000017500000000144012524223634016433 0ustar tianontianonpackage timeutils import ( "errors" "time" ) const ( // RFC3339NanoFixed is our own version of RFC3339Nano because we want one // that pads the nanoseconds part with zeros to ensure // the timestamps are aligned in the logs. RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" // JSONFormat is the format used by FastMarshalJSON JSONFormat = `"` + time.RFC3339Nano + `"` ) // FastMarshalJSON avoids one of the extra allocations that // time.MarshalJSON makes. func FastMarshalJSON(t time.Time) (string, error) { if y := t.Year(); y < 0 || y >= 10000 { // RFC 3339 is clear that years are 4 digits exactly. // See golang.org/issue/4556#c15 for more discussion. 
return "", errors.New("time.MarshalJSON: year outside of range [0,9999]") } return t.Format(JSONFormat), nil } docker-1.6.2/pkg/homedir/0000755000175000017500000000000012524223634014544 5ustar tianontianondocker-1.6.2/pkg/homedir/homedir_test.go0000644000175000017500000000062512524223634017564 0ustar tianontianonpackage homedir import ( "path/filepath" "testing" ) func TestGet(t *testing.T) { home := Get() if home == "" { t.Fatal("returned home directory is empty") } if !filepath.IsAbs(home) { t.Fatalf("returned path is not absolute: %s", home) } } func TestGetShortcutString(t *testing.T) { shortcut := GetShortcutString() if shortcut == "" { t.Fatal("returned shortcut string is empty") } } docker-1.6.2/pkg/homedir/homedir.go0000644000175000017500000000167112524223634016527 0ustar tianontianonpackage homedir import ( "os" "runtime" "github.com/docker/libcontainer/user" ) // Key returns the env var name for the user's home dir based on // the platform being run on func Key() string { if runtime.GOOS == "windows" { return "USERPROFILE" } return "HOME" } // Get returns the home directory of the current user with the help of // environment variables depending on the target operating system. // Returned path should be used with "path/filepath" to form new paths. func Get() string { home := os.Getenv(Key()) if home == "" && runtime.GOOS != "windows" { if u, err := user.CurrentUser(); err == nil { return u.Home } } return home } // GetShortcutString returns the string that is shortcut to user's home directory // in the native shell of the platform running on. func GetShortcutString() string { if runtime.GOOS == "windows" { return "%USERPROFILE%" // be careful while using in format functions } return "~" } docker-1.6.2/pkg/graphdb/0000755000175000017500000000000012524223634014524 5ustar tianontianondocker-1.6.2/pkg/graphdb/sort_test.go0000644000175000017500000000074112524223634017103 0ustar tianontianonpackage graphdb import ( "testing" ) func TestSort(t *testing.T) { paths := []string{ "/", "/myreallylongname", "/app/db", } sortByDepth(paths) if len(paths) != 3 { t.Fatalf("Expected 3 parts got %d", len(paths)) } if paths[0] != "/app/db" { t.Fatalf("Expected /app/db got %s", paths[0]) } if paths[1] != "/myreallylongname" { t.Fatalf("Expected /myreallylongname got %s", paths[1]) } if paths[2] != "/" { t.Fatalf("Expected / got %s", paths[2]) } } docker-1.6.2/pkg/graphdb/conn_unsupported.go0000644000175000017500000000016212524223634020457 0ustar tianontianon// +build !cgo package graphdb func NewSqliteConn(root string) (*Database, error) { panic("Not implemented") } docker-1.6.2/pkg/graphdb/graphdb.go0000644000175000017500000002615112524223634016467 0ustar tianontianonpackage graphdb import ( "database/sql" "fmt" "path" "strings" "sync" ) const ( createEntityTable = ` CREATE TABLE IF NOT EXISTS entity ( id text NOT NULL PRIMARY KEY );` createEdgeTable = ` CREATE TABLE IF NOT EXISTS edge ( "entity_id" text NOT NULL, "parent_id" text NULL, "name" text NOT NULL, CONSTRAINT "parent_fk" FOREIGN KEY ("parent_id") REFERENCES "entity" ("id"), CONSTRAINT "entity_fk" FOREIGN KEY ("entity_id") REFERENCES "entity" ("id") ); ` createEdgeIndices = ` CREATE UNIQUE INDEX IF NOT EXISTS "name_parent_ix" ON "edge" (parent_id, name); ` ) // Entity with a unique id type Entity struct { id string } // An Edge connects two entities together type Edge struct { EntityID string Name string ParentID string } type Entities map[string]*Entity type Edges []*Edge type WalkFunc func(fullPath string, entity *Entity) error // Graph 
database for storing entities and their relationships type Database struct { conn *sql.DB mux sync.RWMutex } func IsNonUniqueNameError(err error) bool { str := err.Error() // sqlite 3.7.17-1ubuntu1 returns: // Set failure: Abort due to constraint violation: columns parent_id, name are not unique if strings.HasSuffix(str, "name are not unique") { return true } // sqlite-3.8.3-1.fc20 returns: // Set failure: Abort due to constraint violation: UNIQUE constraint failed: edge.parent_id, edge.name if strings.Contains(str, "UNIQUE constraint failed") && strings.Contains(str, "edge.name") { return true } // sqlite-3.6.20-1.el6 returns: // Set failure: Abort due to constraint violation: constraint failed if strings.HasSuffix(str, "constraint failed") { return true } return false } // Create a new graph database initialized with a root entity func NewDatabase(conn *sql.DB) (*Database, error) { if conn == nil { return nil, fmt.Errorf("Database connection cannot be nil") } db := &Database{conn: conn} // Create root entities tx, err := conn.Begin() if err != nil { return nil, err } if _, err := tx.Exec(createEntityTable); err != nil { return nil, err } if _, err := tx.Exec(createEdgeTable); err != nil { return nil, err } if _, err := tx.Exec(createEdgeIndices); err != nil { return nil, err } if _, err := tx.Exec("DELETE FROM entity where id = ?", "0"); err != nil { tx.Rollback() return nil, err } if _, err := tx.Exec("INSERT INTO entity (id) VALUES (?);", "0"); err != nil { tx.Rollback() return nil, err } if _, err := tx.Exec("DELETE FROM edge where entity_id=? and name=?", "0", "/"); err != nil { tx.Rollback() return nil, err } if _, err := tx.Exec("INSERT INTO edge (entity_id, name) VALUES(?,?);", "0", "/"); err != nil { tx.Rollback() return nil, err } if err := tx.Commit(); err != nil { return nil, err } return db, nil } // Close the underlying connection to the database func (db *Database) Close() error { return db.conn.Close() } // Set the entity id for a given path func (db *Database) Set(fullPath, id string) (*Entity, error) { db.mux.Lock() defer db.mux.Unlock() tx, err := db.conn.Begin() if err != nil { return nil, err } var entityID string if err := tx.QueryRow("SELECT id FROM entity WHERE id = ?;", id).Scan(&entityID); err != nil { if err == sql.ErrNoRows { if _, err := tx.Exec("INSERT INTO entity (id) VALUES(?);", id); err != nil { tx.Rollback() return nil, err } } else { tx.Rollback() return nil, err } } e := &Entity{id} parentPath, name := splitPath(fullPath) if err := db.setEdge(parentPath, name, e, tx); err != nil { tx.Rollback() return nil, err } if err := tx.Commit(); err != nil { return nil, err } return e, nil } // Return true if a name already exists in the database func (db *Database) Exists(name string) bool { db.mux.RLock() defer db.mux.RUnlock() e, err := db.get(name) if err != nil { return false } return e != nil } func (db *Database) setEdge(parentPath, name string, e *Entity, tx *sql.Tx) error { parent, err := db.get(parentPath) if err != nil { return err } if parent.id == e.id { return fmt.Errorf("Cannot set self as child") } if _, err := tx.Exec("INSERT INTO edge (parent_id, name, entity_id) VALUES (?,?,?);", parent.id, name, e.id); err != nil { return err } return nil } // Return the root "/" entity for the database func (db *Database) RootEntity() *Entity { return &Entity{ id: "0", } } // Return the entity for a given path func (db *Database) Get(name string) *Entity { db.mux.RLock() defer db.mux.RUnlock() e, err := db.get(name) if err != nil { return nil } return e } 
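// A minimal usage sketch (hypothetical, not part of this package): it assumes a
// sqlite-backed database obtained from NewSqliteConn and shows how paths map to
// entity ids.
//
//	db, err := NewSqliteConn("/tmp/graph.db")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer db.Close()
//	if _, err := db.Set("/webapp", "1"); err != nil { // entity "1" reachable at /webapp
//		log.Fatal(err)
//	}
//	if e := db.Get("/webapp"); e != nil {
//		fmt.Println(e.ID()) // prints "1"
//	}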
func (db *Database) get(name string) (*Entity, error) { e := db.RootEntity() // We always know the root name so return it if // it is requested if name == "/" { return e, nil } parts := split(name) for i := 1; i < len(parts); i++ { p := parts[i] if p == "" { continue } next := db.child(e, p) if next == nil { return nil, fmt.Errorf("Cannot find child for %s", name) } e = next } return e, nil } // List all entities starting from the given name // The key will be the full path of the entity func (db *Database) List(name string, depth int) Entities { db.mux.RLock() defer db.mux.RUnlock() out := Entities{} e, err := db.get(name) if err != nil { return out } children, err := db.children(e, name, depth, nil) if err != nil { return out } for _, c := range children { out[c.FullPath] = c.Entity } return out } // Walk through the child graph of an entity, calling walkFunc for each child entity. // It is safe for walkFunc to call graph functions. func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error { children, err := db.Children(name, depth) if err != nil { return err } // Note: the database lock must not be held while calling walkFunc for _, c := range children { if err := walkFunc(c.FullPath, c.Entity); err != nil { return err } } return nil } // Return the children of the specified entity func (db *Database) Children(name string, depth int) ([]WalkMeta, error) { db.mux.RLock() defer db.mux.RUnlock() e, err := db.get(name) if err != nil { return nil, err } return db.children(e, name, depth, nil) } // Return the parents of a specified entity func (db *Database) Parents(name string) ([]string, error) { db.mux.RLock() defer db.mux.RUnlock() e, err := db.get(name) if err != nil { return nil, err } return db.parents(e) } // Return the reference count for a specified id func (db *Database) Refs(id string) int { db.mux.RLock() defer db.mux.RUnlock() var count int if err := db.conn.QueryRow("SELECT COUNT(*) FROM edge WHERE entity_id = ?;", id).Scan(&count); err != nil { return 0 } return count } // Return all the id's path references func (db *Database) RefPaths(id string) Edges { db.mux.RLock() defer db.mux.RUnlock() refs := Edges{} rows, err := db.conn.Query("SELECT name, parent_id FROM edge WHERE entity_id = ?;", id) if err != nil { return refs } defer rows.Close() for rows.Next() { var name string var parentID string if err := rows.Scan(&name, &parentID); err != nil { return refs } refs = append(refs, &Edge{ EntityID: id, Name: name, ParentID: parentID, }) } return refs } // Delete the reference to an entity at a given path func (db *Database) Delete(name string) error { db.mux.Lock() defer db.mux.Unlock() if name == "/" { return fmt.Errorf("Cannot delete root entity") } parentPath, n := splitPath(name) parent, err := db.get(parentPath) if err != nil { return err } if _, err := db.conn.Exec("DELETE FROM edge WHERE parent_id = ?
AND name = ?;", parent.id, n); err != nil { return err } return nil } // Remove the entity with the specified id // Walk the graph to make sure all references to the entity // are removed and return the number of references removed func (db *Database) Purge(id string) (int, error) { db.mux.Lock() defer db.mux.Unlock() tx, err := db.conn.Begin() if err != nil { return -1, err } // Delete all edges rows, err := tx.Exec("DELETE FROM edge WHERE entity_id = ?;", id) if err != nil { tx.Rollback() return -1, err } changes, err := rows.RowsAffected() if err != nil { return -1, err } // Delete entity if _, err := tx.Exec("DELETE FROM entity where id = ?;", id); err != nil { tx.Rollback() return -1, err } if err := tx.Commit(); err != nil { return -1, err } return int(changes), nil } // Rename an edge for a given path func (db *Database) Rename(currentName, newName string) error { db.mux.Lock() defer db.mux.Unlock() parentPath, name := splitPath(currentName) newParentPath, newEdgeName := splitPath(newName) if parentPath != newParentPath { return fmt.Errorf("Cannot rename when root paths do not match %s != %s", parentPath, newParentPath) } parent, err := db.get(parentPath) if err != nil { return err } rows, err := db.conn.Exec("UPDATE edge SET name = ? WHERE parent_id = ? AND name = ?;", newEdgeName, parent.id, name) if err != nil { return err } i, err := rows.RowsAffected() if err != nil { return err } if i == 0 { return fmt.Errorf("Cannot locate edge for %s %s", parent.id, name) } return nil } type WalkMeta struct { Parent *Entity Entity *Entity FullPath string Edge *Edge } func (db *Database) children(e *Entity, name string, depth int, entities []WalkMeta) ([]WalkMeta, error) { if e == nil { return entities, nil } rows, err := db.conn.Query("SELECT entity_id, name FROM edge where parent_id = ?;", e.id) if err != nil { return nil, err } defer rows.Close() for rows.Next() { var entityID, entityName string if err := rows.Scan(&entityID, &entityName); err != nil { return nil, err } child := &Entity{entityID} edge := &Edge{ ParentID: e.id, Name: entityName, EntityID: child.id, } meta := WalkMeta{ Parent: e, Entity: child, FullPath: path.Join(name, edge.Name), Edge: edge, } entities = append(entities, meta) if depth != 0 { nDepth := depth if depth != -1 { nDepth -= 1 } entities, err = db.children(child, meta.FullPath, nDepth, entities) if err != nil { return nil, err } } } return entities, nil } func (db *Database) parents(e *Entity) (parents []string, err error) { if e == nil { return parents, nil } rows, err := db.conn.Query("SELECT parent_id FROM edge where entity_id = ?;", e.id) if err != nil { return nil, err } defer rows.Close() for rows.Next() { var parentID string if err := rows.Scan(&parentID); err != nil { return nil, err } parents = append(parents, parentID) } return parents, nil } // Return the entity based on the parent path and name func (db *Database) child(parent *Entity, name string) *Entity { var id string if err := db.conn.QueryRow("SELECT entity_id FROM edge WHERE parent_id = ? 
AND name = ?;", parent.id, name).Scan(&id); err != nil { return nil } return &Entity{id} } // Return the id used to reference this entity func (e *Entity) ID() string { return e.id } // Return the paths sorted by depth func (e Entities) Paths() []string { out := make([]string, len(e)) var i int for k := range e { out[i] = k i++ } sortByDepth(out) return out } docker-1.6.2/pkg/graphdb/sort.go0000644000175000017500000000073612524223634016050 0ustar tianontianonpackage graphdb import "sort" type pathSorter struct { paths []string by func(i, j string) bool } func sortByDepth(paths []string) { s := &pathSorter{paths, func(i, j string) bool { return PathDepth(i) > PathDepth(j) }} sort.Sort(s) } func (s *pathSorter) Len() int { return len(s.paths) } func (s *pathSorter) Swap(i, j int) { s.paths[i], s.paths[j] = s.paths[j], s.paths[i] } func (s *pathSorter) Less(i, j int) bool { return s.by(s.paths[i], s.paths[j]) } docker-1.6.2/pkg/graphdb/conn_sqlite3.go0000644000175000017500000000043112524223634017452 0ustar tianontianon// +build cgo package graphdb import ( "database/sql" _ "code.google.com/p/gosqlite/sqlite3" // registers sqlite ) func NewSqliteConn(root string) (*Database, error) { conn, err := sql.Open("sqlite3", root) if err != nil { return nil, err } return NewDatabase(conn) } docker-1.6.2/pkg/graphdb/utils.go0000644000175000017500000000076512524223634016223 0ustar tianontianonpackage graphdb import ( "path" "strings" ) // Split p on / func split(p string) []string { return strings.Split(p, "/") } // Returns the depth or number of / in a given path func PathDepth(p string) int { parts := split(p) if len(parts) == 2 && parts[1] == "" { return 1 } return len(parts) } func splitPath(p string) (parent, name string) { if p[0] != '/' { p = "/" + p } parent, name = path.Split(p) l := len(parent) if parent[l-1] == '/' { parent = parent[:l-1] } return } docker-1.6.2/pkg/graphdb/graphdb_test.go0000644000175000017500000002722312524223634017527 0ustar tianontianonpackage graphdb import ( "database/sql" "fmt" "os" "path" "strconv" "testing" _ "code.google.com/p/gosqlite/sqlite3" ) func newTestDb(t *testing.T) (*Database, string) { p := path.Join(os.TempDir(), "sqlite.db") conn, err := sql.Open("sqlite3", p) db, err := NewDatabase(conn) if err != nil { t.Fatal(err) } return db, p } func destroyTestDb(dbPath string) { os.Remove(dbPath) } func TestNewDatabase(t *testing.T) { db, dbpath := newTestDb(t) if db == nil { t.Fatal("Database should not be nil") } db.Close() defer destroyTestDb(dbpath) } func TestCreateRootEntity(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) root := db.RootEntity() if root == nil { t.Fatal("Root entity should not be nil") } } func TestGetRootEntity(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) e := db.Get("/") if e == nil { t.Fatal("Entity should not be nil") } if e.ID() != "0" { t.Fatalf("Enity id should be 0, got %s", e.ID()) } } func TestSetEntityWithDifferentName(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/test", "1") if _, err := db.Set("/other", "1"); err != nil { t.Fatal(err) } } func TestSetDuplicateEntity(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) if _, err := db.Set("/foo", "42"); err != nil { t.Fatal(err) } if _, err := db.Set("/foo", "43"); err == nil { t.Fatalf("Creating an entry with a duplciate path did not cause an error") } } func TestCreateChild(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) child, err := db.Set("/db", "1") if 
err != nil { t.Fatal(err) } if child == nil { t.Fatal("Child should not be nil") } if child.ID() != "1" { t.Fail() } } func TestParents(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) for i := 1; i < 6; i++ { a := strconv.Itoa(i) if _, err := db.Set("/"+a, a); err != nil { t.Fatal(err) } } for i := 6; i < 11; i++ { a := strconv.Itoa(i) p := strconv.Itoa(i - 5) key := fmt.Sprintf("/%s/%s", p, a) if _, err := db.Set(key, a); err != nil { t.Fatal(err) } parents, err := db.Parents(key) if err != nil { t.Fatal(err) } if len(parents) != 1 { t.Fatalf("Expected 1 parent for %s got %d", key, len(parents)) } if parents[0] != p { t.Fatalf("ID %s received, %s expected", parents[0], p) } } } func TestChildren(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) str := "/" for i := 1; i < 6; i++ { a := strconv.Itoa(i) if _, err := db.Set(str+a, a); err != nil { t.Fatal(err) } str = str + a + "/" } str = "/" for i := 10; i < 30; i++ { // 20 entities a := strconv.Itoa(i) if _, err := db.Set(str+a, a); err != nil { t.Fatal(err) } str = str + a + "/" } entries, err := db.Children("/", 5) if err != nil { t.Fatal(err) } if len(entries) != 11 { t.Fatalf("Expect 11 entries for / got %d", len(entries)) } entries, err = db.Children("/", 20) if err != nil { t.Fatal(err) } if len(entries) != 25 { t.Fatalf("Expect 25 entries for / got %d", len(entries)) } } func TestListAllRootChildren(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) for i := 1; i < 6; i++ { a := strconv.Itoa(i) if _, err := db.Set("/"+a, a); err != nil { t.Fatal(err) } } entries := db.List("/", -1) if len(entries) != 5 { t.Fatalf("Expect 5 entries for / got %d", len(entries)) } } func TestListAllSubChildren(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) _, err := db.Set("/webapp", "1") if err != nil { t.Fatal(err) } child2, err := db.Set("/db", "2") if err != nil { t.Fatal(err) } child4, err := db.Set("/logs", "4") if err != nil { t.Fatal(err) } if _, err := db.Set("/db/logs", child4.ID()); err != nil { t.Fatal(err) } child3, err := db.Set("/sentry", "3") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/db", child2.ID()); err != nil { t.Fatal(err) } entries := db.List("/webapp", 1) if len(entries) != 3 { t.Fatalf("Expect 3 entries for / got %d", len(entries)) } entries = db.List("/webapp", 0) if len(entries) != 2 { t.Fatalf("Expect 2 entries for / got %d", len(entries)) } } func TestAddSelfAsChild(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) child, err := db.Set("/test", "1") if err != nil { t.Fatal(err) } if _, err := db.Set("/test/other", child.ID()); err == nil { t.Fatal("Error should not be nil") } } func TestAddChildToNonExistentRoot(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) if _, err := db.Set("/myapp", "1"); err != nil { t.Fatal(err) } if _, err := db.Set("/myapp/proxy/db", "2"); err == nil { t.Fatal("Error should not be nil") } } func TestWalkAll(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) _, err := db.Set("/webapp", "1") if err != nil { t.Fatal(err) } child2, err := db.Set("/db", "2") if err != nil { t.Fatal(err) } child4, err := db.Set("/db/logs", "4") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/logs", child4.ID()); err != nil { t.Fatal(err) } child3, err := db.Set("/sentry", "3") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/sentry", child3.ID()); err
!= nil { t.Fatal(err) } if _, err := db.Set("/webapp/db", child2.ID()); err != nil { t.Fatal(err) } child5, err := db.Set("/gograph", "5") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { t.Fatal(err) } if err := db.Walk("/", func(p string, e *Entity) error { t.Logf("Path: %s Entity: %s", p, e.ID()) return nil }, -1); err != nil { t.Fatal(err) } } func TestGetEntityByPath(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) _, err := db.Set("/webapp", "1") if err != nil { t.Fatal(err) } child2, err := db.Set("/db", "2") if err != nil { t.Fatal(err) } child4, err := db.Set("/logs", "4") if err != nil { t.Fatal(err) } if _, err := db.Set("/db/logs", child4.ID()); err != nil { t.Fatal(err) } child3, err := db.Set("/sentry", "3") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/db", child2.ID()); err != nil { t.Fatal(err) } child5, err := db.Set("/gograph", "5") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { t.Fatal(err) } entity := db.Get("/webapp/db/logs") if entity == nil { t.Fatal("Entity should not be nil") } if entity.ID() != "4" { t.Fatalf("Expected to get entity with id 4, got %s", entity.ID()) } } func TestEnitiesPaths(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) _, err := db.Set("/webapp", "1") if err != nil { t.Fatal(err) } child2, err := db.Set("/db", "2") if err != nil { t.Fatal(err) } child4, err := db.Set("/logs", "4") if err != nil { t.Fatal(err) } if _, err := db.Set("/db/logs", child4.ID()); err != nil { t.Fatal(err) } child3, err := db.Set("/sentry", "3") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/db", child2.ID()); err != nil { t.Fatal(err) } child5, err := db.Set("/gograph", "5") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { t.Fatal(err) } out := db.List("/", -1) for _, p := range out.Paths() { t.Log(p) } } func TestDeleteRootEntity(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) if err := db.Delete("/"); err == nil { t.Fatal("Error should not be nil") } } func TestDeleteEntity(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) _, err := db.Set("/webapp", "1") if err != nil { t.Fatal(err) } child2, err := db.Set("/db", "2") if err != nil { t.Fatal(err) } child4, err := db.Set("/logs", "4") if err != nil { t.Fatal(err) } if _, err := db.Set("/db/logs", child4.ID()); err != nil { t.Fatal(err) } child3, err := db.Set("/sentry", "3") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/db", child2.ID()); err != nil { t.Fatal(err) } child5, err := db.Set("/gograph", "5") if err != nil { t.Fatal(err) } if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { t.Fatal(err) } if err := db.Delete("/webapp/sentry"); err != nil { t.Fatal(err) } entity := db.Get("/webapp/sentry") if entity != nil { t.Fatal("Entity /webapp/sentry should be nil") } } func TestCountRefs(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/webapp", "1") if db.Refs("1") != 1 { t.Fatal("Expect reference count to be 1") } db.Set("/db", "2") db.Set("/webapp/db", "2") if db.Refs("2") != 2 { t.Fatal("Expect reference count to be 2") 
} } func TestPurgeId(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/webapp", "1") if db.Refs("1") != 1 { t.Fatal("Expect reference count to be 1") } db.Set("/db", "2") db.Set("/webapp/db", "2") count, err := db.Purge("2") if err != nil { t.Fatal(err) } if count != 2 { t.Fatal("Expected 2 references to be removed") } } func TestRename(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/webapp", "1") if db.Refs("1") != 1 { t.Fatal("Expect reference count to be 1") } db.Set("/db", "2") db.Set("/webapp/db", "2") if db.Get("/webapp/db") == nil { t.Fatal("Cannot find entity at path /webapp/db") } if err := db.Rename("/webapp/db", "/webapp/newdb"); err != nil { t.Fatal(err) } if db.Get("/webapp/db") != nil { t.Fatal("Entity should not exist at /webapp/db") } if db.Get("/webapp/newdb") == nil { t.Fatal("Cannot find entity at path /webapp/newdb") } } func TestCreateMultipleNames(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/db", "1") if _, err := db.Set("/myapp", "1"); err != nil { t.Fatal(err) } db.Walk("/", func(p string, e *Entity) error { t.Logf("%s\n", p) return nil }, -1) } func TestRefPaths(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/webapp", "1") db.Set("/db", "2") db.Set("/webapp/db", "2") refs := db.RefPaths("2") if len(refs) != 2 { t.Fatalf("Expected reference count to be 2, got %d", len(refs)) } } func TestExistsTrue(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/testing", "1") if !db.Exists("/testing") { t.Fatalf("/testing should exist") } } func TestExistsFalse(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/toerhe", "1") if db.Exists("/testing") { t.Fatalf("/testing should not exist") } } func TestGetNameWithTrailingSlash(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) db.Set("/todo", "1") e := db.Get("/todo/") if e == nil { t.Fatalf("Entity should not be nil") } } func TestConcurrentWrites(t *testing.T) { db, dbpath := newTestDb(t) defer destroyTestDb(dbpath) errs := make(chan error, 2) save := func(name string, id string) { if _, err := db.Set(fmt.Sprintf("/%s", name), id); err != nil { errs <- err return } errs <- nil } purge := func(id string) { if _, err := db.Purge(id); err != nil { errs <- err return } errs <- nil } save("1", "1") go purge("1") go save("2", "2") any := false for i := 0; i < 3; i++ { if err := <-errs; err != nil { any = true t.Log(err) } } if any { t.Fail() } } docker-1.6.2/pkg/networkfs/0000755000175000017500000000000012524223634015137 5ustar tianontianondocker-1.6.2/pkg/networkfs/resolvconf/0000755000175000017500000000000012524223634017317 5ustar tianontianondocker-1.6.2/pkg/networkfs/resolvconf/resolvconf_test.go0000644000175000017500000001704612524223634023075 0ustar tianontianonpackage resolvconf import ( "bytes" "io/ioutil" "os" "testing" ) func TestGet(t *testing.T) { resolvConfUtils, err := Get() if err != nil { t.Fatal(err) } resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf") if err != nil { t.Fatal(err) } if string(resolvConfUtils) != string(resolvConfSystem) { t.Fatalf("/etc/resolv.conf and GetResolvConf have different content.") } } func TestGetNameservers(t *testing.T) { for resolv, result := range map[string][]string{` nameserver 1.2.3.4 nameserver 40.3.200.10 search example.com`: {"1.2.3.4", "40.3.200.10"}, `search example.com`: {}, `nameserver 1.2.3.4 search example.com nameserver 4.30.20.100`: {"1.2.3.4", "4.30.20.100"}, ``: {}, `
nameserver 1.2.3.4 `: {"1.2.3.4"}, `search example.com nameserver 1.2.3.4 #nameserver 4.3.2.1`: {"1.2.3.4"}, `search example.com nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4"}, } { test := GetNameservers([]byte(resolv)) if !strSlicesEqual(test, result) { t.Fatalf("Wrong nameserver string {%s} should be %v. Input: %s", test, result, resolv) } } } func TestGetNameserversAsCIDR(t *testing.T) { for resolv, result := range map[string][]string{` nameserver 1.2.3.4 nameserver 40.3.200.10 search example.com`: {"1.2.3.4/32", "40.3.200.10/32"}, `search example.com`: {}, `nameserver 1.2.3.4 search example.com nameserver 4.30.20.100`: {"1.2.3.4/32", "4.30.20.100/32"}, ``: {}, ` nameserver 1.2.3.4 `: {"1.2.3.4/32"}, `search example.com nameserver 1.2.3.4 #nameserver 4.3.2.1`: {"1.2.3.4/32"}, `search example.com nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4/32"}, } { test := GetNameserversAsCIDR([]byte(resolv)) if !strSlicesEqual(test, result) { t.Fatalf("Wrong nameserver string {%s} should be %v. Input: %s", test, result, resolv) } } } func TestGetSearchDomains(t *testing.T) { for resolv, result := range map[string][]string{ `search example.com`: {"example.com"}, `search example.com # ignored`: {"example.com"}, ` search example.com `: {"example.com"}, ` search example.com # ignored`: {"example.com"}, `search foo.example.com example.com`: {"foo.example.com", "example.com"}, ` search foo.example.com example.com `: {"foo.example.com", "example.com"}, ` search foo.example.com example.com # ignored`: {"foo.example.com", "example.com"}, ``: {}, `# ignored`: {}, `nameserver 1.2.3.4 search foo.example.com example.com`: {"foo.example.com", "example.com"}, `nameserver 1.2.3.4 search dup1.example.com dup2.example.com search foo.example.com example.com`: {"foo.example.com", "example.com"}, `nameserver 1.2.3.4 search foo.example.com example.com nameserver 4.30.20.100`: {"foo.example.com", "example.com"}, } { test := GetSearchDomains([]byte(resolv)) if !strSlicesEqual(test, result) { t.Fatalf("Wrong search domain string {%s} should be %v. 
Input: %s", test, result, resolv) } } } func strSlicesEqual(a, b []string) bool { if len(a) != len(b) { return false } for i, v := range a { if v != b[i] { return false } } return true } func TestBuild(t *testing.T) { file, err := ioutil.TempFile("", "") if err != nil { t.Fatal(err) } defer os.Remove(file.Name()) err = Build(file.Name(), []string{"ns1", "ns2", "ns3"}, []string{"search1"}) if err != nil { t.Fatal(err) } content, err := ioutil.ReadFile(file.Name()) if err != nil { t.Fatal(err) } if expected := "nameserver ns1\nnameserver ns2\nnameserver ns3\nsearch search1\n"; !bytes.Contains(content, []byte(expected)) { t.Fatalf("Expected to find '%s' got '%s'", expected, content) } } func TestBuildWithZeroLengthDomainSearch(t *testing.T) { file, err := ioutil.TempFile("", "") if err != nil { t.Fatal(err) } defer os.Remove(file.Name()) err = Build(file.Name(), []string{"ns1", "ns2", "ns3"}, []string{"."}) if err != nil { t.Fatal(err) } content, err := ioutil.ReadFile(file.Name()) if err != nil { t.Fatal(err) } if expected := "nameserver ns1\nnameserver ns2\nnameserver ns3\n"; !bytes.Contains(content, []byte(expected)) { t.Fatalf("Expected to find '%s' got '%s'", expected, content) } if notExpected := "search ."; bytes.Contains(content, []byte(notExpected)) { t.Fatalf("Expected to not find '%s' got '%s'", notExpected, content) } } func TestFilterResolvDns(t *testing.T) { ns0 := "nameserver 10.16.60.14\nnameserver 10.16.60.21\n" if result, _ := FilterResolvDns([]byte(ns0), false); result != nil { if ns0 != string(result) { t.Fatalf("Failed No Localhost: expected \n<%s> got \n<%s>", ns0, string(result)) } } ns1 := "nameserver 10.16.60.14\nnameserver 10.16.60.21\nnameserver 127.0.0.1\n" if result, _ := FilterResolvDns([]byte(ns1), false); result != nil { if ns0 != string(result) { t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result)) } } ns1 = "nameserver 10.16.60.14\nnameserver 127.0.0.1\nnameserver 10.16.60.21\n" if result, _ := FilterResolvDns([]byte(ns1), false); result != nil { if ns0 != string(result) { t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result)) } } ns1 = "nameserver 127.0.1.1\nnameserver 10.16.60.14\nnameserver 10.16.60.21\n" if result, _ := FilterResolvDns([]byte(ns1), false); result != nil { if ns0 != string(result) { t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result)) } } ns1 = "nameserver ::1\nnameserver 10.16.60.14\nnameserver 127.0.2.1\nnameserver 10.16.60.21\n" if result, _ := FilterResolvDns([]byte(ns1), false); result != nil { if ns0 != string(result) { t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result)) } } ns1 = "nameserver 10.16.60.14\nnameserver ::1\nnameserver 10.16.60.21\nnameserver ::1" if result, _ := FilterResolvDns([]byte(ns1), false); result != nil { if ns0 != string(result) { t.Fatalf("Failed Localhost: expected \n<%s> got \n<%s>", ns0, string(result)) } } // with IPv6 disabled (false param), the IPv6 nameserver should be removed ns1 = "nameserver 10.16.60.14\nnameserver 2002:dead:beef::1\nnameserver 10.16.60.21\nnameserver ::1" if result, _ := FilterResolvDns([]byte(ns1), false); result != nil { if ns0 != string(result) { t.Fatalf("Failed Localhost+IPv6 off: expected \n<%s> got \n<%s>", ns0, string(result)) } } // with IPv6 enabled, the IPv6 nameserver should be preserved ns0 = "nameserver 10.16.60.14\nnameserver 2002:dead:beef::1\nnameserver 10.16.60.21\n" ns1 = "nameserver 10.16.60.14\nnameserver 2002:dead:beef::1\nnameserver 10.16.60.21\nnameserver ::1" 
if result, _ := FilterResolvDns([]byte(ns1), true); result != nil { if ns0 != string(result) { t.Fatalf("Failed Localhost+IPv6 on: expected \n<%s> got \n<%s>", ns0, string(result)) } } // with IPv6 enabled, and no non-localhost servers, Google defaults (both IPv4+IPv6) should be added ns0 = "\nnameserver 8.8.8.8\nnameserver 8.8.4.4\nnameserver 2001:4860:4860::8888\nnameserver 2001:4860:4860::8844" ns1 = "nameserver 127.0.0.1\nnameserver ::1\nnameserver 127.0.2.1" if result, _ := FilterResolvDns([]byte(ns1), true); result != nil { if ns0 != string(result) { t.Fatalf("Failed no Localhost+IPv6 enabled: expected \n<%s> got \n<%s>", ns0, string(result)) } } // with IPv6 disabled, and no non-localhost servers, Google defaults (only IPv4) should be added ns0 = "\nnameserver 8.8.8.8\nnameserver 8.8.4.4" ns1 = "nameserver 127.0.0.1\nnameserver ::1\nnameserver 127.0.2.1" if result, _ := FilterResolvDns([]byte(ns1), false); result != nil { if ns0 != string(result) { t.Fatalf("Failed no Localhost+IPv6 enabled: expected \n<%s> got \n<%s>", ns0, string(result)) } } } docker-1.6.2/pkg/networkfs/resolvconf/resolvconf.go0000644000175000017500000001464512524223634022040 0ustar tianontianonpackage resolvconf import ( "bytes" "io/ioutil" "regexp" "strings" "sync" log "github.com/Sirupsen/logrus" "github.com/docker/docker/utils" ) var ( // Note: the default IPv4 & IPv6 resolvers are set to Google's Public DNS defaultIPv4Dns = []string{"nameserver 8.8.8.8", "nameserver 8.8.4.4"} defaultIPv6Dns = []string{"nameserver 2001:4860:4860::8888", "nameserver 2001:4860:4860::8844"} ipv4NumBlock = `(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)` ipv4Address = `(` + ipv4NumBlock + `\.){3}` + ipv4NumBlock // This is not an IPv6 address verifier as it will accept a super-set of IPv6, and also // will *not match* IPv4-Embedded IPv6 Addresses (RFC6052), but that and other variants // -- e.g. other link-local types -- either won't work in containers or are unnecessary. // For readability and sufficiency for Docker purposes this seemed more reasonable than a // 1000+ character regexp with exact and complete IPv6 validation ipv6Address = `([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{0,4})` ipLocalhost = `((127\.([0-9]{1,3}.){2}[0-9]{1,3})|(::1))` localhostIPRegexp = regexp.MustCompile(ipLocalhost) localhostNSRegexp = regexp.MustCompile(`(?m)^nameserver\s+` + ipLocalhost + `\s*\n*`) nsIPv6Regexp = regexp.MustCompile(`(?m)^nameserver\s+` + ipv6Address + `\s*\n*`) nsRegexp = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `)|(` + ipv6Address + `))\s*$`) searchRegexp = regexp.MustCompile(`^\s*search\s*(([^\s]+\s*)*)$`) ) var lastModified struct { sync.Mutex sha256 string contents []byte } func Get() ([]byte, error) { resolv, err := ioutil.ReadFile("/etc/resolv.conf") if err != nil { return nil, err } return resolv, nil } // Retrieves the host /etc/resolv.conf file, checks against the last hash // and, if modified since last check, returns the bytes and new hash. 
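// A hypothetical polling loop built on top of it (the interval and logging are
// illustrative, not part of this package):
//
//	for range time.Tick(5 * time.Second) {
//		data, hash, err := GetIfChanged()
//		if err != nil || data == nil {
//			continue // unreadable, or unchanged since the last call
//		}
//		log.Infof("host resolv.conf changed (sha256: %s)", hash)
//		// propagate data to running containers here
//	}
//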
// This feature is used by the resolv.conf updater for containers func GetIfChanged() ([]byte, string, error) { lastModified.Lock() defer lastModified.Unlock() resolv, err := ioutil.ReadFile("/etc/resolv.conf") if err != nil { return nil, "", err } newHash, err := utils.HashData(bytes.NewReader(resolv)) if err != nil { return nil, "", err } if lastModified.sha256 != newHash { lastModified.sha256 = newHash lastModified.contents = resolv return resolv, newHash, nil } // nothing changed, so return no data return nil, "", nil } // retrieve the last used contents and hash of the host resolv.conf // Used by containers updating on restart func GetLastModified() ([]byte, string) { lastModified.Lock() defer lastModified.Unlock() return lastModified.contents, lastModified.sha256 } // FilterResolvDns has two main jobs: // 1. It looks for localhost (127.*|::1) entries in the provided // resolv.conf, removing local nameserver entries, and, if the resulting // cleaned config has no defined nameservers left, adds default DNS entries // 2. Given the caller provides the enable/disable state of IPv6, the filter // code will remove all IPv6 nameservers if it is not enabled for containers // // It also returns a boolean to notify the caller if changes were made at all func FilterResolvDns(resolvConf []byte, ipv6Enabled bool) ([]byte, bool) { changed := false cleanedResolvConf := localhostNSRegexp.ReplaceAll(resolvConf, []byte{}) // if IPv6 is not enabled, also clean out any IPv6 address nameserver if !ipv6Enabled { cleanedResolvConf = nsIPv6Regexp.ReplaceAll(cleanedResolvConf, []byte{}) } // if the resulting resolvConf has no more nameservers defined, add appropriate // default DNS servers for IPv4 and (optionally) IPv6 if len(GetNameservers(cleanedResolvConf)) == 0 { log.Infof("No non-localhost DNS nameservers are left in resolv.conf. Using default external servers : %v", defaultIPv4Dns) dns := defaultIPv4Dns if ipv6Enabled { log.Infof("IPv6 enabled; Adding default IPv6 external servers : %v", defaultIPv6Dns) dns = append(dns, defaultIPv6Dns...) } cleanedResolvConf = append(cleanedResolvConf, []byte("\n"+strings.Join(dns, "\n"))...) } if !bytes.Equal(resolvConf, cleanedResolvConf) { changed = true } return cleanedResolvConf, changed } // getLines parses input into lines and strips away comments. func getLines(input []byte, commentMarker []byte) [][]byte { lines := bytes.Split(input, []byte("\n")) var output [][]byte for _, currentLine := range lines { var commentIndex = bytes.Index(currentLine, commentMarker) if commentIndex == -1 { output = append(output, currentLine) } else { output = append(output, currentLine[:commentIndex]) } } return output } // returns true if the IP string matches the localhost IP regular expression. 
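// For example:
//
//	IsLocalhost("127.0.0.1") // true
//	IsLocalhost("::1")       // true
//	IsLocalhost("10.0.0.1")  // false
//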
// Used for determining if nameserver settings are being passed which are // localhost addresses func IsLocalhost(ip string) bool { return localhostIPRegexp.MatchString(ip) } // GetNameservers returns nameservers (if any) listed in /etc/resolv.conf func GetNameservers(resolvConf []byte) []string { nameservers := []string{} for _, line := range getLines(resolvConf, []byte("#")) { var ns = nsRegexp.FindSubmatch(line) if len(ns) > 0 { nameservers = append(nameservers, string(ns[1])) } } return nameservers } // GetNameserversAsCIDR returns nameservers (if any) listed in // /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32") // This function's output is intended for net.ParseCIDR func GetNameserversAsCIDR(resolvConf []byte) []string { nameservers := []string{} for _, nameserver := range GetNameservers(resolvConf) { nameservers = append(nameservers, nameserver+"/32") } return nameservers } // GetSearchDomains returns search domains (if any) listed in /etc/resolv.conf // If more than one search line is encountered, only the contents of the last // one is returned. func GetSearchDomains(resolvConf []byte) []string { domains := []string{} for _, line := range getLines(resolvConf, []byte("#")) { match := searchRegexp.FindSubmatch(line) if match == nil { continue } domains = strings.Fields(string(match[1])) } return domains } func Build(path string, dns, dnsSearch []string) error { content := bytes.NewBuffer(nil) for _, dns := range dns { if _, err := content.WriteString("nameserver " + dns + "\n"); err != nil { return err } } if len(dnsSearch) > 0 { if searchString := strings.Join(dnsSearch, " "); strings.Trim(searchString, " ") != "." { if _, err := content.WriteString("search " + searchString + "\n"); err != nil { return err } } } return ioutil.WriteFile(path, content.Bytes(), 0644) } docker-1.6.2/pkg/networkfs/etchosts/0000755000175000017500000000000012524223634016773 5ustar tianontianondocker-1.6.2/pkg/networkfs/etchosts/etchosts_test.go0000644000175000017500000000603712524223634022223 0ustar tianontianonpackage etchosts import ( "bytes" "io/ioutil" "os" "testing" ) func TestBuildDefault(t *testing.T) { file, err := ioutil.TempFile("", "") if err != nil { t.Fatal(err) } defer os.Remove(file.Name()) // check that /etc/hosts has consistent ordering for i := 0; i <= 5; i++ { err = Build(file.Name(), "", "", "", nil) if err != nil { t.Fatal(err) } content, err := ioutil.ReadFile(file.Name()) if err != nil { t.Fatal(err) } expected := "127.0.0.1\tlocalhost\n::1\tlocalhost ip6-localhost ip6-loopback\nfe00::0\tip6-localnet\nff00::0\tip6-mcastprefix\nff02::1\tip6-allnodes\nff02::2\tip6-allrouters\n" if expected != string(content) { t.Fatalf("Expected to find '%s' got '%s'", expected, content) } } } func TestBuildHostnameDomainname(t *testing.T) { file, err := ioutil.TempFile("", "") if err != nil { t.Fatal(err) } defer os.Remove(file.Name()) err = Build(file.Name(), "10.11.12.13", "testhostname", "testdomainname", nil) if err != nil { t.Fatal(err) } content, err := ioutil.ReadFile(file.Name()) if err != nil { t.Fatal(err) } if expected := "10.11.12.13\ttesthostname.testdomainname testhostname\n"; !bytes.Contains(content, []byte(expected)) { t.Fatalf("Expected to find '%s' got '%s'", expected, content) } } func TestBuildHostname(t *testing.T) { file, err := ioutil.TempFile("", "") if err != nil { t.Fatal(err) } defer os.Remove(file.Name()) err = Build(file.Name(), "10.11.12.13", "testhostname", "", nil) if err != nil { t.Fatal(err) } content, err := ioutil.ReadFile(file.Name()) if err != nil { 
t.Fatal(err) } if expected := "10.11.12.13\ttesthostname\n"; !bytes.Contains(content, []byte(expected)) { t.Fatalf("Expected to find '%s' got '%s'", expected, content) } } func TestBuildNoIP(t *testing.T) { file, err := ioutil.TempFile("", "") if err != nil { t.Fatal(err) } defer os.Remove(file.Name()) err = Build(file.Name(), "", "testhostname", "", nil) if err != nil { t.Fatal(err) } content, err := ioutil.ReadFile(file.Name()) if err != nil { t.Fatal(err) } if expected := ""; !bytes.Contains(content, []byte(expected)) { t.Fatalf("Expected to find '%s' got '%s'", expected, content) } } func TestUpdate(t *testing.T) { file, err := ioutil.TempFile("", "") if err != nil { t.Fatal(err) } defer os.Remove(file.Name()) if err := Build(file.Name(), "10.11.12.13", "testhostname", "testdomainname", nil); err != nil { t.Fatal(err) } content, err := ioutil.ReadFile(file.Name()) if err != nil { t.Fatal(err) } if expected := "10.11.12.13\ttesthostname.testdomainname testhostname\n"; !bytes.Contains(content, []byte(expected)) { t.Fatalf("Expected to find '%s' got '%s'", expected, content) } if err := Update(file.Name(), "1.1.1.1", "testhostname"); err != nil { t.Fatal(err) } content, err = ioutil.ReadFile(file.Name()) if err != nil { t.Fatal(err) } if expected := "1.1.1.1\ttesthostname.testdomainname testhostname\n"; !bytes.Contains(content, []byte(expected)) { t.Fatalf("Expected to find '%s' got '%s'", expected, content) } } docker-1.6.2/pkg/networkfs/etchosts/etchosts.go0000644000175000017500000000272612524223634021165 0ustar tianontianonpackage etchosts import ( "bytes" "fmt" "io" "io/ioutil" "regexp" ) type Record struct { Hosts string IP string } func (r Record) WriteTo(w io.Writer) (int64, error) { n, err := fmt.Fprintf(w, "%s\t%s\n", r.IP, r.Hosts) return int64(n), err } var defaultContent = []Record{ {Hosts: "localhost", IP: "127.0.0.1"}, {Hosts: "localhost ip6-localhost ip6-loopback", IP: "::1"}, {Hosts: "ip6-localnet", IP: "fe00::0"}, {Hosts: "ip6-mcastprefix", IP: "ff00::0"}, {Hosts: "ip6-allnodes", IP: "ff02::1"}, {Hosts: "ip6-allrouters", IP: "ff02::2"}, } func Build(path, IP, hostname, domainname string, extraContent []Record) error { content := bytes.NewBuffer(nil) if IP != "" { var mainRec Record mainRec.IP = IP if domainname != "" { mainRec.Hosts = fmt.Sprintf("%s.%s %s", hostname, domainname, hostname) } else { mainRec.Hosts = hostname } if _, err := mainRec.WriteTo(content); err != nil { return err } } for _, r := range defaultContent { if _, err := r.WriteTo(content); err != nil { return err } } for _, r := range extraContent { if _, err := r.WriteTo(content); err != nil { return err } } return ioutil.WriteFile(path, content.Bytes(), 0644) } func Update(path, IP, hostname string) error { old, err := ioutil.ReadFile(path) if err != nil { return err } var re = regexp.MustCompile(fmt.Sprintf("(\\S*)(\\t%s)", regexp.QuoteMeta(hostname))) return ioutil.WriteFile(path, re.ReplaceAll(old, []byte(IP+"$2")), 0644) } docker-1.6.2/pkg/progressreader/0000755000175000017500000000000012524223634016144 5ustar tianontianondocker-1.6.2/pkg/progressreader/progressreader.go0000644000175000017500000000332512524223634021525 0ustar tianontianonpackage progressreader import ( "io" ) type StreamFormatter interface { FormatProg(string, string, interface{}) []byte FormatStatus(string, string, ...interface{}) []byte FormatError(error) []byte } type PR_JSONProgress interface { GetCurrent() int GetTotal() int } type JSONProg struct { Current int Total int } func (j *JSONProg) GetCurrent() int { return j.Current 
} func (j *JSONProg) GetTotal() int { return j.Total } // Reader with progress bar type Config struct { In io.ReadCloser // Stream to read from Out io.Writer // Where to send progress bar to Formatter StreamFormatter Size int Current int LastUpdate int NewLines bool ID string Action string } func New(newReader Config) *Config { return &newReader } func (config *Config) Read(p []byte) (n int, err error) { read, err := config.In.Read(p) config.Current += read updateEvery := 1024 * 512 //512kB if config.Size > 0 { // Update progress for every 1% read if 1% < 512kB if increment := int(0.01 * float64(config.Size)); increment < updateEvery { updateEvery = increment } } if config.Current-config.LastUpdate > updateEvery || err != nil { config.Out.Write(config.Formatter.FormatProg(config.ID, config.Action, &JSONProg{Current: config.Current, Total: config.Size})) config.LastUpdate = config.Current } // Send newline when complete if config.NewLines && err != nil && read == 0 { config.Out.Write(config.Formatter.FormatStatus("", "")) } return read, err } func (config *Config) Close() error { config.Current = config.Size config.Out.Write(config.Formatter.FormatProg(config.ID, config.Action, &JSONProg{Current: config.Current, Total: config.Size})) return config.In.Close() } docker-1.6.2/pkg/signal/0000755000175000017500000000000012524223634014372 5ustar tianontianondocker-1.6.2/pkg/signal/signal_darwin.go0000644000175000017500000000171412524223634017545 0ustar tianontianonpackage signal import ( "syscall" ) var SignalMap = map[string]syscall.Signal{ "ABRT": syscall.SIGABRT, "ALRM": syscall.SIGALRM, "BUS": syscall.SIGBUS, "CHLD": syscall.SIGCHLD, "CONT": syscall.SIGCONT, "EMT": syscall.SIGEMT, "FPE": syscall.SIGFPE, "HUP": syscall.SIGHUP, "ILL": syscall.SIGILL, "INFO": syscall.SIGINFO, "INT": syscall.SIGINT, "IO": syscall.SIGIO, "IOT": syscall.SIGIOT, "KILL": syscall.SIGKILL, "PIPE": syscall.SIGPIPE, "PROF": syscall.SIGPROF, "QUIT": syscall.SIGQUIT, "SEGV": syscall.SIGSEGV, "STOP": syscall.SIGSTOP, "SYS": syscall.SIGSYS, "TERM": syscall.SIGTERM, "TRAP": syscall.SIGTRAP, "TSTP": syscall.SIGTSTP, "TTIN": syscall.SIGTTIN, "TTOU": syscall.SIGTTOU, "URG": syscall.SIGURG, "USR1": syscall.SIGUSR1, "USR2": syscall.SIGUSR2, "VTALRM": syscall.SIGVTALRM, "WINCH": syscall.SIGWINCH, "XCPU": syscall.SIGXCPU, "XFSZ": syscall.SIGXFSZ, } docker-1.6.2/pkg/signal/signal_unsupported.go0000644000175000017500000000016612524223634020651 0ustar tianontianon// +build !linux,!darwin,!freebsd package signal import ( "syscall" ) var SignalMap = map[string]syscall.Signal{} docker-1.6.2/pkg/signal/signal_freebsd.go0000644000175000017500000000200212524223634017662 0ustar tianontianonpackage signal import ( "syscall" ) var SignalMap = map[string]syscall.Signal{ "ABRT": syscall.SIGABRT, "ALRM": syscall.SIGALRM, "BUS": syscall.SIGBUS, "CHLD": syscall.SIGCHLD, "CONT": syscall.SIGCONT, "EMT": syscall.SIGEMT, "FPE": syscall.SIGFPE, "HUP": syscall.SIGHUP, "ILL": syscall.SIGILL, "INFO": syscall.SIGINFO, "INT": syscall.SIGINT, "IO": syscall.SIGIO, "IOT": syscall.SIGIOT, "KILL": syscall.SIGKILL, "LWP": syscall.SIGLWP, "PIPE": syscall.SIGPIPE, "PROF": syscall.SIGPROF, "QUIT": syscall.SIGQUIT, "SEGV": syscall.SIGSEGV, "STOP": syscall.SIGSTOP, "SYS": syscall.SIGSYS, "TERM": syscall.SIGTERM, "THR": syscall.SIGTHR, "TRAP": syscall.SIGTRAP, "TSTP": syscall.SIGTSTP, "TTIN": syscall.SIGTTIN, "TTOU": syscall.SIGTTOU, "URG": syscall.SIGURG, "USR1": syscall.SIGUSR1, "USR2": syscall.SIGUSR2, "VTALRM": syscall.SIGVTALRM, "WINCH": syscall.SIGWINCH, "XCPU":
syscall.SIGXCPU, "XFSZ": syscall.SIGXFSZ, } docker-1.6.2/pkg/signal/signal_unix.go0000644000175000017500000000034512524223634017243 0ustar tianontianon// +build !windows package signal import ( "syscall" ) // Signals used in api/client (no windows equivalent, use // invalid signals so they don't get handled) const SIGCHLD = syscall.SIGCHLD const SIGWINCH = syscall.SIGWINCH docker-1.6.2/pkg/signal/signal_linux.go0000644000175000017500000000204312524223634017414 0ustar tianontianonpackage signal import ( "syscall" ) var SignalMap = map[string]syscall.Signal{ "ABRT": syscall.SIGABRT, "ALRM": syscall.SIGALRM, "BUS": syscall.SIGBUS, "CHLD": syscall.SIGCHLD, "CLD": syscall.SIGCLD, "CONT": syscall.SIGCONT, "FPE": syscall.SIGFPE, "HUP": syscall.SIGHUP, "ILL": syscall.SIGILL, "INT": syscall.SIGINT, "IO": syscall.SIGIO, "IOT": syscall.SIGIOT, "KILL": syscall.SIGKILL, "PIPE": syscall.SIGPIPE, "POLL": syscall.SIGPOLL, "PROF": syscall.SIGPROF, "PWR": syscall.SIGPWR, "QUIT": syscall.SIGQUIT, "SEGV": syscall.SIGSEGV, "STKFLT": syscall.SIGSTKFLT, "STOP": syscall.SIGSTOP, "SYS": syscall.SIGSYS, "TERM": syscall.SIGTERM, "TRAP": syscall.SIGTRAP, "TSTP": syscall.SIGTSTP, "TTIN": syscall.SIGTTIN, "TTOU": syscall.SIGTTOU, "UNUSED": syscall.SIGUNUSED, "URG": syscall.SIGURG, "USR1": syscall.SIGUSR1, "USR2": syscall.SIGUSR2, "VTALRM": syscall.SIGVTALRM, "WINCH": syscall.SIGWINCH, "XCPU": syscall.SIGXCPU, "XFSZ": syscall.SIGXFSZ, } docker-1.6.2/pkg/signal/trap.go0000644000175000017500000000302712524223634015671 0ustar tianontianonpackage signal import ( "os" gosignal "os/signal" "sync/atomic" "syscall" log "github.com/Sirupsen/logrus" ) // Trap sets up a simplified signal "trap", appropriate for common // behavior expected from a vanilla unix command-line tool in general // (and the Docker engine in particular). // // * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated. // * If SIGINT or SIGTERM are repeated 3 times before cleanup is complete, then cleanup is // skipped and the process terminated directly. // * If "DEBUG" is set in the environment, SIGQUIT causes an exit without cleanup. // func Trap(cleanup func()) { c := make(chan os.Signal, 1) signals := []os.Signal{os.Interrupt, syscall.SIGTERM} if os.Getenv("DEBUG") == "" { signals = append(signals, syscall.SIGQUIT) } gosignal.Notify(c, signals...) go func() { interruptCount := uint32(0) for sig := range c { go func(sig os.Signal) { log.Infof("Received signal '%v', starting shutdown of docker...", sig) switch sig { case os.Interrupt, syscall.SIGTERM: // If the user really wants to interrupt, let him do so. 
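// The counter makes the behavior explicit: signal #1 starts cleanup and a
// clean exit, signals #2 and #3 are swallowed while cleanup runs, and only
// a fourth signal falls through to the forced os.Exit below.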
if atomic.LoadUint32(&interruptCount) < 3 { // Initiate the cleanup only once if atomic.AddUint32(&interruptCount, 1) == 1 { // Call cleanup handler cleanup() os.Exit(0) } else { return } } else { log.Infof("Force shutdown of docker, interrupting cleanup") } case syscall.SIGQUIT: } os.Exit(128 + int(sig.(syscall.Signal))) }(sig) } }() } docker-1.6.2/pkg/signal/signal_windows.go0000644000175000017500000000035512524223634017753 0ustar tianontianon// +build windows package signal import ( "syscall" ) // Signals used in api/client (no windows equivalent, use // invalid signals so they don't get handled) const SIGCHLD = syscall.Signal(0xff) const SIGWINCH = syscall.Signal(0xff) docker-1.6.2/pkg/signal/signal.go0000644000175000017500000000045312524223634016200 0ustar tianontianonpackage signal import ( "os" "os/signal" ) func CatchAll(sigc chan os.Signal) { handledSigs := []os.Signal{} for _, s := range SignalMap { handledSigs = append(handledSigs, s) } signal.Notify(sigc, handledSigs...) } func StopCatch(sigc chan os.Signal) { signal.Stop(sigc) close(sigc) } docker-1.6.2/pkg/pools/0000755000175000017500000000000012524223634014251 5ustar tianontianondocker-1.6.2/pkg/pools/pools_nopool.go0000644000175000017500000000300312524223634017316 0ustar tianontianon// +build !go1.3 package pools import ( "bufio" "io" "github.com/docker/docker/pkg/ioutils" ) var ( BufioReader32KPool *BufioReaderPool BufioWriter32KPool *BufioWriterPool ) const buffer32K = 32 * 1024 type BufioReaderPool struct { size int } func init() { BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) } func newBufioReaderPoolWithSize(size int) *BufioReaderPool { return &BufioReaderPool{size: size} } func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { return bufio.NewReaderSize(r, bufPool.size) } func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { b.Reset(nil) } func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { return ioutils.NewReadCloserWrapper(r, func() error { if readCloser, ok := r.(io.ReadCloser); ok { return readCloser.Close() } return nil }) } type BufioWriterPool struct { size int } func newBufioWriterPoolWithSize(size int) *BufioWriterPool { return &BufioWriterPool{size: size} } func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { return bufio.NewWriterSize(w, bufPool.size) } func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { b.Reset(nil) } func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { return ioutils.NewWriteCloserWrapper(w, func() error { buf.Flush() if writeCloser, ok := w.(io.WriteCloser); ok { return writeCloser.Close() } return nil }) } docker-1.6.2/pkg/pools/pools.go0000644000175000017500000000607412524223634015743 0ustar tianontianon// +build go1.3 // Package pools provides a collection of pools which provide various // data types with buffers. These can be used to lower the number of // memory allocations and reuse buffers. // // New pools should be added to this package to allow them to be // shared across packages. // // Utility functions which operate on pools should be added to this // package to allow them to be reused. 
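// A minimal usage sketch (r is any io.Reader you already have):
//
//	buf := pools.BufioReader32KPool.Get(r)
//	defer pools.BufioReader32KPool.Put(buf)
//	line, err := buf.ReadString('\n')
//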
package pools import ( "bufio" "io" "sync" "github.com/docker/docker/pkg/ioutils" ) var ( // Pool which returns bufio.Reader with a 32K buffer BufioReader32KPool *BufioReaderPool // Pool which returns bufio.Writer with a 32K buffer BufioWriter32KPool *BufioWriterPool ) const buffer32K = 32 * 1024 type BufioReaderPool struct { pool sync.Pool } func init() { BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) } // newBufioReaderPoolWithSize is unexported because new pools should be // added here to be shared where required. func newBufioReaderPoolWithSize(size int) *BufioReaderPool { pool := sync.Pool{ New: func() interface{} { return bufio.NewReaderSize(nil, size) }, } return &BufioReaderPool{pool: pool} } // Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { buf := bufPool.pool.Get().(*bufio.Reader) buf.Reset(r) return buf } // Put puts the bufio.Reader back into the pool. func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { b.Reset(nil) bufPool.pool.Put(b) } // NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back // into the pool and closes the reader if it's an io.ReadCloser. func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { return ioutils.NewReadCloserWrapper(r, func() error { if readCloser, ok := r.(io.ReadCloser); ok { readCloser.Close() } bufPool.Put(buf) return nil }) } type BufioWriterPool struct { pool sync.Pool } // newBufioWriterPoolWithSize is unexported because new pools should be // added here to be shared where required. func newBufioWriterPoolWithSize(size int) *BufioWriterPool { pool := sync.Pool{ New: func() interface{} { return bufio.NewWriterSize(nil, size) }, } return &BufioWriterPool{pool: pool} } // Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { buf := bufPool.pool.Get().(*bufio.Writer) buf.Reset(w) return buf } // Put puts the bufio.Writer back into the pool. func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { b.Reset(nil) bufPool.pool.Put(b) } // NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back // into the pool and closes the writer if it's an io.Writecloser. 
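// A hypothetical pairing of Get and this wrapper (w is any io.WriteCloser):
//
//	buf := pools.BufioWriter32KPool.Get(w)
//	wc := pools.BufioWriter32KPool.NewWriteCloserWrapper(buf, w)
//	// ... write to buf ...
//	wc.Close() // flushes buf, closes w, and returns buf to the pool
//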
func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { return ioutils.NewWriteCloserWrapper(w, func() error { buf.Flush() if writeCloser, ok := w.(io.WriteCloser); ok { writeCloser.Close() } bufPool.Put(buf) return nil }) } docker-1.6.2/pkg/mount/0000755000175000017500000000000012524223634014257 5ustar tianontianondocker-1.6.2/pkg/mount/mount.go0000644000175000017500000000322112524223634015746 0ustar tianontianonpackage mount import ( "time" ) func GetMounts() ([]*MountInfo, error) { return parseMountTable() } // Looks at /proc/self/mountinfo to determine whether the specified // mountpoint has been mounted func Mounted(mountpoint string) (bool, error) { entries, err := parseMountTable() if err != nil { return false, err } // Search the table for the mountpoint for _, e := range entries { if e.Mountpoint == mountpoint { return true, nil } } return false, nil } // Mount the specified options at the target path only if // the target is not mounted // Options must be specified in fstab style func Mount(device, target, mType, options string) error { flag, _ := parseOptions(options) if flag&REMOUNT != REMOUNT { if mounted, err := Mounted(target); err != nil || mounted { return err } } return ForceMount(device, target, mType, options) } // Mount the specified options at the target path // regardless of whether the target is mounted or not // Options must be specified in fstab style func ForceMount(device, target, mType, options string) error { flag, data := parseOptions(options) if err := mount(device, target, mType, uintptr(flag), data); err != nil { return err } return nil } // Unmount the target only if it is mounted func Unmount(target string) error { if mounted, err := Mounted(target); err != nil || !mounted { return err } return ForceUnmount(target) } // Unmount the target regardless of whether it is mounted or not func ForceUnmount(target string) (err error) { // Simple retry logic for unmount for i := 0; i < 10; i++ { if err = unmount(target, 0); err == nil { return nil } time.Sleep(100 * time.Millisecond) } return } docker-1.6.2/pkg/mount/mounter_freebsd.go0000644000175000017500000000237112524223634017774 0ustar tianontianonpackage mount /* #include <errno.h> #include <stdlib.h> #include <string.h> #include <sys/_iovec.h> #include <sys/mount.h> #include <sys/param.h> */ import "C" import ( "fmt" "strings" "syscall" "unsafe" ) func allocateIOVecs(options []string) []C.struct_iovec { out := make([]C.struct_iovec, len(options)) for i, option := range options { out[i].iov_base = unsafe.Pointer(C.CString(option)) out[i].iov_len = C.size_t(len(option) + 1) } return out } func mount(device, target, mType string, flag uintptr, data string) error { isNullFS := false xs := strings.Split(data, ",") for _, x := range xs { if x == "bind" { isNullFS = true } } options := []string{"fspath", target} if isNullFS { options = append(options, "fstype", "nullfs", "target", device) } else { options = append(options, "fstype", mType, "from", device) } rawOptions := allocateIOVecs(options) for _, rawOption := range rawOptions { defer C.free(rawOption.iov_base) } if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { reason := C.GoString(C.strerror(*C.__error())) return fmt.Errorf("Failed to call nmount: %s", reason) } return nil } func unmount(target string, flag int) error { return syscall.Unmount(target, flag) } docker-1.6.2/pkg/mount/mounter_unsupported.go0000644000175000017500000000035212524223634020747 0ustar tianontianon// +build !linux,!freebsd freebsd,!cgo package mount func mount(device, target, mType string, flag
uintptr, data string) error { panic("Not implemented") } func unmount(target string, flag int) error { panic("Not implemented") } docker-1.6.2/pkg/mount/mount_test.go0000644000175000017500000000472312524223634017015 0ustar tianontianonpackage mount import ( "os" "path" "testing" ) func TestMountOptionsParsing(t *testing.T) { options := "noatime,ro,size=10k" flag, data := parseOptions(options) if data != "size=10k" { t.Fatalf("Expected size=10k got %s", data) } expectedFlag := NOATIME | RDONLY if flag != expectedFlag { t.Fatalf("Expected %d got %d", expectedFlag, flag) } } func TestMounted(t *testing.T) { tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) var ( sourceDir = path.Join(tmp, "source") targetDir = path.Join(tmp, "target") sourcePath = path.Join(sourceDir, "file.txt") targetPath = path.Join(targetDir, "file.txt") ) os.Mkdir(sourceDir, 0777) os.Mkdir(targetDir, 0777) f, err := os.Create(sourcePath) if err != nil { t.Fatal(err) } f.WriteString("hello") f.Close() f, err = os.Create(targetPath) if err != nil { t.Fatal(err) } f.Close() if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { t.Fatal(err) } defer func() { if err := Unmount(targetDir); err != nil { t.Fatal(err) } }() mounted, err := Mounted(targetDir) if err != nil { t.Fatal(err) } if !mounted { t.Fatalf("Expected %s to be mounted", targetDir) } if _, err := os.Stat(targetDir); err != nil { t.Fatal(err) } } func TestMountReadonly(t *testing.T) { tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) var ( sourceDir = path.Join(tmp, "source") targetDir = path.Join(tmp, "target") sourcePath = path.Join(sourceDir, "file.txt") targetPath = path.Join(targetDir, "file.txt") ) os.Mkdir(sourceDir, 0777) os.Mkdir(targetDir, 0777) f, err := os.Create(sourcePath) if err != nil { t.Fatal(err) } f.WriteString("hello") f.Close() f, err = os.Create(targetPath) if err != nil { t.Fatal(err) } f.Close() if err := Mount(sourceDir, targetDir, "none", "bind,ro"); err != nil { t.Fatal(err) } defer func() { if err := Unmount(targetDir); err != nil { t.Fatal(err) } }() f, err = os.OpenFile(targetPath, os.O_RDWR, 0777) if err == nil { t.Fatal("Should not be able to open a ro file as rw") } } func TestGetMounts(t *testing.T) { mounts, err := GetMounts() if err != nil { t.Fatal(err) } root := false for _, entry := range mounts { if entry.Mountpoint == "/" { root = true } } if !root { t.Fatal("/ should at least be mounted") } } docker-1.6.2/pkg/mount/mountinfo.go0000644000175000017500000000024112524223634016621 0ustar tianontianonpackage mount type MountInfo struct { Id, Parent, Major, Minor int Root, Mountpoint, Opts, Optional string Fstype, Source, VfsOpts string } docker-1.6.2/pkg/mount/flags_freebsd.go0000644000175000017500000000103612524223634017374 0ustar tianontianon// +build freebsd,cgo package mount /* #include <sys/mount.h> */ import "C" const ( RDONLY = C.MNT_RDONLY NOSUID = C.MNT_NOSUID NOEXEC = C.MNT_NOEXEC SYNCHRONOUS = C.MNT_SYNCHRONOUS NOATIME = C.MNT_NOATIME BIND = 0 DIRSYNC = 0 MANDLOCK = 0 NODEV = 0 NODIRATIME = 0 UNBINDABLE = 0 RUNBINDABLE = 0 PRIVATE = 0 RPRIVATE = 0 SHARED = 0 RSHARED = 0 SLAVE = 0 RSLAVE = 0 RBIND = 0 RELATIVE = 0 RELATIME = 0 REMOUNT = 0 STRICTATIME = 0 ) docker-1.6.2/pkg/mount/mountinfo_linux.go0000644000175000017500000000476312524223634020055 0ustar tianontianon// +build linux package mount import ( "bufio" "fmt" "io" "os" "strings" ) const ( /* 36 35
98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) (1) mount ID: unique identifier of the mount (may be reused after umount) (2) parent ID: ID of parent (or of self for the top of the mount tree) (3) major:minor: value of st_dev for files on filesystem (4) root: root of the mount within the filesystem (5) mount point: mount point relative to the process's root (6) mount options: per mount options (7) optional fields: zero or more fields of the form "tag[:value]" (8) separator: marks the end of the optional fields (9) filesystem type: name of filesystem of the form "type[.subtype]" (10) mount source: filesystem specific information or "none" (11) super options: per super block options*/ mountinfoFormat = "%d %d %d:%d %s %s %s %s" ) // Parse /proc/self/mountinfo because comparing Dev and ino does not work from bind mounts func parseMountTable() ([]*MountInfo, error) { f, err := os.Open("/proc/self/mountinfo") if err != nil { return nil, err } defer f.Close() return parseInfoFile(f) } func parseInfoFile(r io.Reader) ([]*MountInfo, error) { var ( s = bufio.NewScanner(r) out = []*MountInfo{} ) for s.Scan() { if err := s.Err(); err != nil { return nil, err } var ( p = &MountInfo{} text = s.Text() optionalFields string ) if _, err := fmt.Sscanf(text, mountinfoFormat, &p.Id, &p.Parent, &p.Major, &p.Minor, &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) } // Safe as mountinfo encodes mountpoints with spaces as \040. index := strings.Index(text, " - ") postSeparatorFields := strings.Fields(text[index+3:]) if len(postSeparatorFields) < 3 { return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) } if optionalFields != "-" { p.Optional = optionalFields } p.Fstype = postSeparatorFields[0] p.Source = postSeparatorFields[1] p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") out = append(out, p) } return out, nil } // PidMountInfo collects the mounts for a specific Pid func PidMountInfo(pid int) ([]*MountInfo, error) { f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) if err != nil { return nil, err } defer f.Close() return parseInfoFile(f) } docker-1.6.2/pkg/mount/sharedsubtree_linux_test.go0000644000175000017500000001776212524223634021741 0ustar tianontianon// +build linux package mount import ( "os" "path" "syscall" "testing" ) // nothing is propagated in or out func TestSubtreePrivate(t *testing.T) { tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) var ( sourceDir = path.Join(tmp, "source") targetDir = path.Join(tmp, "target") outside1Dir = path.Join(tmp, "outside1") outside2Dir = path.Join(tmp, "outside2") outside1Path = path.Join(outside1Dir, "file.txt") outside2Path = path.Join(outside2Dir, "file.txt") outside1CheckPath = path.Join(targetDir, "a", "file.txt") outside2CheckPath = path.Join(sourceDir, "b", "file.txt") ) if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { t.Fatal(err) } if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { t.Fatal(err) } if err := os.Mkdir(targetDir, 0777); err != nil { t.Fatal(err) } if err := os.Mkdir(outside1Dir, 0777); err != nil { t.Fatal(err) } if err := os.Mkdir(outside2Dir, 0777); err != nil { t.Fatal(err) } if err := createFile(outside1Path); err != nil { t.Fatal(err) } if err := createFile(outside2Path); err != nil { t.Fatal(err) } // mount the shared directory to a 
target if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { t.Fatal(err) } defer func() { if err := Unmount(targetDir); err != nil { t.Fatal(err) } }() // next, make the target private if err := MakePrivate(targetDir); err != nil { t.Fatal(err) } defer func() { if err := Unmount(targetDir); err != nil { t.Fatal(err) } }() // mount in an outside path to a mounted path inside the _source_ if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { t.Fatal(err) } defer func() { if err := Unmount(path.Join(sourceDir, "a")); err != nil { t.Fatal(err) } }() // check that this file _does_not_ show in the _target_ if _, err := os.Stat(outside1CheckPath); err != nil && !os.IsNotExist(err) { t.Fatal(err) } else if err == nil { t.Fatalf("%q should not be visible, but is", outside1CheckPath) } // next mount outside2Dir into the _target_ if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { t.Fatal(err) } defer func() { if err := Unmount(path.Join(targetDir, "b")); err != nil { t.Fatal(err) } }() // check that this file _does_not_ show in the _source_ if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { t.Fatal(err) } else if err == nil { t.Fatalf("%q should not be visible, but is", outside2CheckPath) } } // Testing that when a target is a shared mount, // then child mounts propagate to the source func TestSubtreeShared(t *testing.T) { tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) var ( sourceDir = path.Join(tmp, "source") targetDir = path.Join(tmp, "target") outsideDir = path.Join(tmp, "outside") outsidePath = path.Join(outsideDir, "file.txt") sourceCheckPath = path.Join(sourceDir, "a", "file.txt") ) if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { t.Fatal(err) } if err := os.Mkdir(targetDir, 0777); err != nil { t.Fatal(err) } if err := os.Mkdir(outsideDir, 0777); err != nil { t.Fatal(err) } if err := createFile(outsidePath); err != nil { t.Fatal(err) } // mount the source as shared if err := MakeShared(sourceDir); err != nil { t.Fatal(err) } defer func() { if err := Unmount(sourceDir); err != nil { t.Fatal(err) } }() // mount the shared directory to a target if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { t.Fatal(err) } defer func() { if err := Unmount(targetDir); err != nil { t.Fatal(err) } }() // mount in an outside path to a mounted path inside the target if err := Mount(outsideDir, path.Join(targetDir, "a"), "none", "bind,rw"); err != nil { t.Fatal(err) } defer func() { if err := Unmount(path.Join(targetDir, "a")); err != nil { t.Fatal(err) } }() // now, check that the file from the outside directory is available in the source directory if _, err := os.Stat(sourceCheckPath); err != nil { t.Fatal(err) } } // testing that mounts to a shared source show up in the slave target, // and that mounts into a slave target do _not_ show up in the shared source func TestSubtreeSharedSlave(t *testing.T) { tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) var ( sourceDir = path.Join(tmp, "source") targetDir = path.Join(tmp, "target") outside1Dir = path.Join(tmp, "outside1") outside2Dir = path.Join(tmp, "outside2") outside1Path = path.Join(outside1Dir, "file.txt") outside2Path = path.Join(outside2Dir, "file.txt") outside1CheckPath = path.Join(targetDir, "a", "file.txt") outside2CheckPath = 
path.Join(sourceDir, "b", "file.txt") ) if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { t.Fatal(err) } if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { t.Fatal(err) } if err := os.Mkdir(targetDir, 0777); err != nil { t.Fatal(err) } if err := os.Mkdir(outside1Dir, 0777); err != nil { t.Fatal(err) } if err := os.Mkdir(outside2Dir, 0777); err != nil { t.Fatal(err) } if err := createFile(outside1Path); err != nil { t.Fatal(err) } if err := createFile(outside2Path); err != nil { t.Fatal(err) } // mount the source as shared if err := MakeShared(sourceDir); err != nil { t.Fatal(err) } defer func() { if err := Unmount(sourceDir); err != nil { t.Fatal(err) } }() // mount the shared directory to a target if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { t.Fatal(err) } defer func() { if err := Unmount(targetDir); err != nil { t.Fatal(err) } }() // next, make the target slave if err := MakeSlave(targetDir); err != nil { t.Fatal(err) } defer func() { if err := Unmount(targetDir); err != nil { t.Fatal(err) } }() // mount in an outside path to a mounted path inside the _source_ if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { t.Fatal(err) } defer func() { if err := Unmount(path.Join(sourceDir, "a")); err != nil { t.Fatal(err) } }() // check that this file _does_ show in the _target_ if _, err := os.Stat(outside1CheckPath); err != nil { t.Fatal(err) } // next mount outside2Dir into the _target_ if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { t.Fatal(err) } defer func() { if err := Unmount(path.Join(targetDir, "b")); err != nil { t.Fatal(err) } }() // check that this file _does_not_ show in the _source_ if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { t.Fatal(err) } else if err == nil { t.Fatalf("%q should not be visible, but is", outside2CheckPath) } } func TestSubtreeUnbindable(t *testing.T) { tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) } defer os.RemoveAll(tmp) var ( sourceDir = path.Join(tmp, "source") targetDir = path.Join(tmp, "target") ) if err := os.MkdirAll(sourceDir, 0777); err != nil { t.Fatal(err) } if err := os.MkdirAll(targetDir, 0777); err != nil { t.Fatal(err) } // next, make the source unbindable if err := MakeUnbindable(sourceDir); err != nil { t.Fatal(err) } defer func() { if err := Unmount(sourceDir); err != nil { t.Fatal(err) } }() // then attempt to mount it to target. 
It should fail if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != syscall.EINVAL { t.Fatal(err) } else if err == nil { t.Fatalf("%q should not have been bindable", sourceDir) } defer func() { if err := Unmount(targetDir); err != nil { t.Fatal(err) } }() } func createFile(path string) error { f, err := os.Create(path) if err != nil { return err } f.WriteString("hello world!") return f.Close() } docker-1.6.2/pkg/mount/mountinfo_unsupported.go0000644000175000017500000000036112524223634021274 0ustar tianontianon// +build !linux,!freebsd freebsd,!cgo package mount import ( "fmt" "runtime" ) func parseMountTable() ([]*MountInfo, error) { return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) } docker-1.6.2/pkg/mount/mountinfo_freebsd.go0000644000175000017500000000164412524223634020323 0ustar tianontianonpackage mount /* #include <sys/param.h> #include <sys/ucred.h> #include <sys/mount.h> */ import "C" import ( "fmt" "reflect" "unsafe" ) // Parse the mount table via getmntinfo(3); FreeBSD has no /proc/self/mountinfo func parseMountTable() ([]*MountInfo, error) { var rawEntries *C.struct_statfs count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) if count == 0 { return nil, fmt.Errorf("Failed to call getmntinfo") } var entries []C.struct_statfs header := (*reflect.SliceHeader)(unsafe.Pointer(&entries)) header.Cap = count header.Len = count header.Data = uintptr(unsafe.Pointer(rawEntries)) var out []*MountInfo for _, entry := range entries { var mountinfo MountInfo mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) out = append(out, &mountinfo) } return out, nil } docker-1.6.2/pkg/mount/flags_linux.go0000644000175000017500000000156312524223634017126 0ustar tianontianonpackage mount import ( "syscall" ) const ( RDONLY = syscall.MS_RDONLY NOSUID = syscall.MS_NOSUID NODEV = syscall.MS_NODEV NOEXEC = syscall.MS_NOEXEC SYNCHRONOUS = syscall.MS_SYNCHRONOUS DIRSYNC = syscall.MS_DIRSYNC REMOUNT = syscall.MS_REMOUNT MANDLOCK = syscall.MS_MANDLOCK NOATIME = syscall.MS_NOATIME NODIRATIME = syscall.MS_NODIRATIME BIND = syscall.MS_BIND RBIND = syscall.MS_BIND | syscall.MS_REC UNBINDABLE = syscall.MS_UNBINDABLE RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC PRIVATE = syscall.MS_PRIVATE RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC SLAVE = syscall.MS_SLAVE RSLAVE = syscall.MS_SLAVE | syscall.MS_REC SHARED = syscall.MS_SHARED RSHARED = syscall.MS_SHARED | syscall.MS_REC RELATIME = syscall.MS_RELATIME STRICTATIME = syscall.MS_STRICTATIME ) docker-1.6.2/pkg/mount/mounter_linux.go0000644000175000017500000000102212524223634017511 0ustar tianontianonpackage mount import ( "syscall" ) func mount(device, target, mType string, flag uintptr, data string) error { if err := syscall.Mount(device, target, mType, flag, data); err != nil { return err } // If we have a bind mount or remount, remount...
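// A read-only bind mount needs this second pass: the Linux kernel ignores
// MS_RDONLY (and most other flags) on the initial MS_BIND call, so the
// read-only attribute only takes effect once the mount is remounted with
// MS_REMOUNT|MS_BIND|MS_RDONLY, which is what flag|syscall.MS_REMOUNT
// below amounts to.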
if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY { return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data) } return nil } func unmount(target string, flag int) error { return syscall.Unmount(target, flag) } docker-1.6.2/pkg/mount/flags_unsupported.go0000644000175000017500000000071012524223634020350 0ustar tianontianon// +build !linux,!freebsd freebsd,!cgo package mount const ( BIND = 0 DIRSYNC = 0 MANDLOCK = 0 NOATIME = 0 NODEV = 0 NODIRATIME = 0 NOEXEC = 0 NOSUID = 0 UNBINDABLE = 0 RUNBINDABLE = 0 PRIVATE = 0 RPRIVATE = 0 SHARED = 0 RSHARED = 0 SLAVE = 0 RSLAVE = 0 RBIND = 0 RELATIME = 0 RELATIVE = 0 REMOUNT = 0 STRICTATIME = 0 SYNCHRONOUS = 0 RDONLY = 0 ) docker-1.6.2/pkg/mount/flags.go0000644000175000017500000000355712524223634015714 0ustar tianontianonpackage mount import ( "strings" ) // Parse fstab type mount options into mount() flags // and device specific data func parseOptions(options string) (int, string) { var ( flag int data []string ) flags := map[string]struct { clear bool flag int }{ "defaults": {false, 0}, "ro": {false, RDONLY}, "rw": {true, RDONLY}, "suid": {true, NOSUID}, "nosuid": {false, NOSUID}, "dev": {true, NODEV}, "nodev": {false, NODEV}, "exec": {true, NOEXEC}, "noexec": {false, NOEXEC}, "sync": {false, SYNCHRONOUS}, "async": {true, SYNCHRONOUS}, "dirsync": {false, DIRSYNC}, "remount": {false, REMOUNT}, "mand": {false, MANDLOCK}, "nomand": {true, MANDLOCK}, "atime": {true, NOATIME}, "noatime": {false, NOATIME}, "diratime": {true, NODIRATIME}, "nodiratime": {false, NODIRATIME}, "bind": {false, BIND}, "rbind": {false, RBIND}, "unbindable": {false, UNBINDABLE}, "runbindable": {false, RUNBINDABLE}, "private": {false, PRIVATE}, "rprivate": {false, RPRIVATE}, "shared": {false, SHARED}, "rshared": {false, RSHARED}, "slave": {false, SLAVE}, "rslave": {false, RSLAVE}, "relatime": {false, RELATIME}, "norelatime": {true, RELATIME}, "strictatime": {false, STRICTATIME}, "nostrictatime": {true, STRICTATIME}, } for _, o := range strings.Split(options, ",") { // If the option does not exist in the flags table or the flag // is not supported on the platform, // then it is a data value for a specific fs type if f, exists := flags[o]; exists && f.flag != 0 { if f.clear { flag &= ^f.flag } else { flag |= f.flag } } else { data = append(data, o) } } return flag, strings.Join(data, ",") } docker-1.6.2/pkg/mount/sharedsubtree_linux.go0000644000175000017500000000220312524223634020662 0ustar tianontianon// +build linux package mount func MakeShared(mountPoint string) error { return ensureMountedAs(mountPoint, "shared") } func MakeRShared(mountPoint string) error { return ensureMountedAs(mountPoint, "rshared") } func MakePrivate(mountPoint string) error { return ensureMountedAs(mountPoint, "private") } func MakeRPrivate(mountPoint string) error { return ensureMountedAs(mountPoint, "rprivate") } func MakeSlave(mountPoint string) error { return ensureMountedAs(mountPoint, "slave") } func MakeRSlave(mountPoint string) error { return ensureMountedAs(mountPoint, "rslave") } func MakeUnbindable(mountPoint string) error { return ensureMountedAs(mountPoint, "unbindable") } func MakeRUnbindable(mountPoint string) error { return ensureMountedAs(mountPoint, "runbindable") } func ensureMountedAs(mountPoint, options string) error { mounted, err := Mounted(mountPoint) if err != nil { return err } if !mounted { if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { return err } } mounted, err = Mounted(mountPoint) if err != nil 
{ return err } return ForceMount("", mountPoint, "none", options) } docker-1.6.2/pkg/mount/mountinfo_linux_test.go0000644000175000017500000017140012524223634021105 0ustar tianontianon// +build linux package mount import ( "bytes" "testing" ) const ( fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw 16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel 17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755 18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw 19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw 20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel 21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000 22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755 23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755 24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw 26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children 27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children 28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children 29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children 30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children 31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children 32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children 33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children 34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children 35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered 36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct 37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel 38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel 39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel 40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw 41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw 42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw 43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw 45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered 46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered 47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered 48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 
/dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered 121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000 124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw 165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered 167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered 171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered 175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered 179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered 183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered 187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered 191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered 195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered 199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered 203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered 207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered 211 35 253:26 / 
/var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered 215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered 219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered 223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered 227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered 231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered 235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered 239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered 243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered 247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered 31 21 0:23 / /DATA/foo_bla_bla rw,relatime - cifs //foo/BLA\040BLA\040BLA/ rw,sec=ntlm,cache=loose,unc=\\foo\BLA BLA BLA,username=my_login,domain=mydomain.com,uid=12345678,forceuid,gid=12345678,forcegid,addr=10.1.30.10,file_mode=0755,dir_mode=0755,nounix,rsize=61440,wsize=65536,actimeo=1` ubuntuMountInfo = `15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw 16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw 17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=1015140k,nr_inodes=253785,mode=755 18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 19 20 0:15 / /run rw,nosuid,noexec,relatime - tmpfs tmpfs rw,size=205044k,mode=755 20 1 253:0 / / rw,relatime - ext4 /dev/disk/by-label/DOROOT rw,errors=remount-ro,data=ordered 21 15 0:16 / 
/sys/fs/cgroup rw,relatime - tmpfs none rw,size=4k,mode=755 22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw 23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw 24 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw 25 19 0:18 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k 26 21 0:19 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset,clone_children 27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw 28 21 0:21 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu 29 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 30 15 0:23 / /sys/fs/pstore rw,relatime - pstore none rw 31 21 0:24 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct 32 21 0:25 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory 33 21 0:26 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices 34 21 0:27 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer 35 21 0:28 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio 36 21 0:29 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event 37 21 0:30 / /sys/fs/cgroup/hugetlb rw,relatime - cgroup cgroup rw,hugetlb 38 21 0:31 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd 39 20 0:32 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=caafa54fdc06525 40 20 0:33 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8-init rw,relatime - aufs none rw,si=caafa54f882b525 41 20 0:34 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8 rw,relatime - aufs none rw,si=caafa54f8829525 42 20 0:35 / /var/lib/docker/aufs/mnt/16f4d7e96dd612903f425bfe856762f291ff2e36a8ecd55a2209b7d7cd81c30b rw,relatime - aufs none rw,si=caafa54f882d525 43 20 0:36 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e-init rw,relatime - aufs none rw,si=caafa54f882f525 44 20 0:37 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e rw,relatime - aufs none rw,si=caafa54f88ba525 45 20 0:38 / /var/lib/docker/aufs/mnt/283f35a910233c756409313be71ecd8fcfef0df57108b8d740b61b3e88860452 rw,relatime - aufs none rw,si=caafa54f88b8525 46 20 0:39 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1-init rw,relatime - aufs none rw,si=caafa54f88be525 47 20 0:40 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1 rw,relatime - aufs none rw,si=caafa54f882c525 48 20 0:41 / /var/lib/docker/aufs/mnt/de2b538c97d6366cc80e8658547c923ea1d042f85580df379846f36a4df7049d rw,relatime - aufs none rw,si=caafa54f85bb525 49 20 0:42 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49-init rw,relatime - aufs none rw,si=caafa54fdc00525 50 20 0:43 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49 rw,relatime - aufs none rw,si=caafa54fbaec525 51 20 0:44 / /var/lib/docker/aufs/mnt/6ac1cace985c9fc9bea32234de8b36dba49bdd5e29a2972b327ff939d78a6274 rw,relatime - aufs none rw,si=caafa54f8e1a525 52 20 0:45 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b-init rw,relatime - aufs none rw,si=caafa54f8e1d525 53 20 0:46 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b 
rw,relatime - aufs none rw,si=caafa54f8e1b525 54 20 0:47 / /var/lib/docker/aufs/mnt/cabb117d997f0f93519185aea58389a9762770b7496ed0b74a3e4a083fa45902 rw,relatime - aufs none rw,si=caafa54f810a525 55 20 0:48 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33-init rw,relatime - aufs none rw,si=caafa54f8529525 56 20 0:49 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33 rw,relatime - aufs none rw,si=caafa54f852f525 57 20 0:50 / /var/lib/docker/aufs/mnt/16a1526fa445b84ce84f89506d219e87fa488a814063baf045d88b02f21166b3 rw,relatime - aufs none rw,si=caafa54f9e1d525 58 20 0:51 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f-init rw,relatime - aufs none rw,si=caafa54f854d525 59 20 0:52 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f rw,relatime - aufs none rw,si=caafa54f854e525 60 20 0:53 / /var/lib/docker/aufs/mnt/e370c3e286bea027917baa0e4d251262681a472a87056e880dfd0513516dffd9 rw,relatime - aufs none rw,si=caafa54f840a525 61 20 0:54 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e-init rw,relatime - aufs none rw,si=caafa54f8408525 62 20 0:55 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e rw,relatime - aufs none rw,si=caafa54f8409525 63 20 0:56 / /var/lib/docker/aufs/mnt/abd0b5ea5d355a67f911475e271924a5388ee60c27185fcd60d095afc4a09dc7 rw,relatime - aufs none rw,si=caafa54f9eb1525 64 20 0:57 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2-init rw,relatime - aufs none rw,si=caafa54f85bf525 65 20 0:58 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2 rw,relatime - aufs none rw,si=caafa54f85b8525 66 20 0:59 / /var/lib/docker/aufs/mnt/912e1bf28b80a09644503924a8a1a4fb8ed10b808ca847bda27a369919aa52fa rw,relatime - aufs none rw,si=caafa54fbaea525 67 20 0:60 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576-init rw,relatime - aufs none rw,si=caafa54f8472525 68 20 0:61 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576 rw,relatime - aufs none rw,si=caafa54f8474525 69 20 0:62 / /var/lib/docker/aufs/mnt/5aaebb79ef3097dfca377889aeb61a0c9d5e3795117d2b08d0751473c671dfb2 rw,relatime - aufs none rw,si=caafa54f8c5e525 70 20 0:63 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2-init rw,relatime - aufs none rw,si=caafa54f8c3b525 71 20 0:64 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2 rw,relatime - aufs none rw,si=caafa54f8c3d525 72 20 0:65 / /var/lib/docker/aufs/mnt/2777f0763da4de93f8bebbe1595cc77f739806a158657b033eca06f827b6028a rw,relatime - aufs none rw,si=caafa54f8c3e525 73 20 0:66 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e-init rw,relatime - aufs none rw,si=caafa54f8c39525 74 20 0:67 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e rw,relatime - aufs none rw,si=caafa54f854f525 75 20 0:68 / /var/lib/docker/aufs/mnt/06400b526ec18b66639c96efc41a84f4ae0b117cb28dafd56be420651b4084a0 rw,relatime - aufs none rw,si=caafa54f840b525 76 20 0:69 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785-init rw,relatime - aufs none rw,si=caafa54fdddf525 77 20 0:70 / 
/var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785 rw,relatime - aufs none rw,si=caafa54f854b525 78 20 0:71 / /var/lib/docker/aufs/mnt/1ff414fa93fd61ec81b0ab7b365a841ff6545accae03cceac702833aaeaf718f rw,relatime - aufs none rw,si=caafa54f8d85525 79 20 0:72 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8-init rw,relatime - aufs none rw,si=caafa54f8da3525 80 20 0:73 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8 rw,relatime - aufs none rw,si=caafa54f8da2525 81 20 0:74 / /var/lib/docker/aufs/mnt/b68b1d4fe4d30016c552398e78b379a39f651661d8e1fa5f2460c24a5e723420 rw,relatime - aufs none rw,si=caafa54f8d81525 82 20 0:75 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739-init rw,relatime - aufs none rw,si=caafa54f8da1525 83 20 0:76 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739 rw,relatime - aufs none rw,si=caafa54f8da0525 84 20 0:77 / /var/lib/docker/aufs/mnt/53e10b0329afc0e0d3322d31efaed4064139dc7027fe6ae445cffd7104bcc94f rw,relatime - aufs none rw,si=caafa54f8c35525 85 20 0:78 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494-init rw,relatime - aufs none rw,si=caafa54f8db8525 86 20 0:79 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494 rw,relatime - aufs none rw,si=caafa54f8dba525 87 20 0:80 / /var/lib/docker/aufs/mnt/90fdd2c03eeaf65311f88f4200e18aef6d2772482712d9aea01cd793c64781b5 rw,relatime - aufs none rw,si=caafa54f8315525 88 20 0:81 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f-init rw,relatime - aufs none rw,si=caafa54f8fc6525 89 20 0:82 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f rw,relatime - aufs none rw,si=caafa54f8468525 90 20 0:83 / /var/lib/docker/aufs/mnt/8cf9a993f50f3305abad3da268c0fc44ff78a1e7bba595ef9de963497496c3f9 rw,relatime - aufs none rw,si=caafa54f8c59525 91 20 0:84 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173-init rw,relatime - aufs none rw,si=caafa54f846a525 92 20 0:85 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173 rw,relatime - aufs none rw,si=caafa54f846b525 93 20 0:86 / /var/lib/docker/aufs/mnt/d8c8288ec920439a48b5796bab5883ee47a019240da65e8d8f33400c31bac5df rw,relatime - aufs none rw,si=caafa54f8dbf525 94 20 0:87 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6-init rw,relatime - aufs none rw,si=caafa54f810f525 95 20 0:88 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6 rw,relatime - aufs none rw,si=caafa54fbae9525 96 20 0:89 / /var/lib/docker/aufs/mnt/befc1c67600df449dddbe796c0d06da7caff1d2bbff64cde1f0ba82d224996b5 rw,relatime - aufs none rw,si=caafa54f8dab525 97 20 0:90 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562-init rw,relatime - aufs none rw,si=caafa54fdc02525 98 20 0:91 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562 rw,relatime - aufs none rw,si=caafa54f9eb0525 99 20 0:92 / /var/lib/docker/aufs/mnt/2a31f10029f04ff9d4381167a9b739609853d7220d55a56cb654779a700ee246 rw,relatime - aufs none rw,si=caafa54f8c37525 100 20 0:93 / 
/var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927-init rw,relatime - aufs none rw,si=caafa54fd173525 101 20 0:94 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927 rw,relatime - aufs none rw,si=caafa54f8108525 102 20 0:95 / /var/lib/docker/aufs/mnt/eaa0f57403a3dc685268f91df3fbcd7a8423cee50e1a9ee5c3e1688d9d676bb4 rw,relatime - aufs none rw,si=caafa54f852d525 103 20 0:96 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b-init rw,relatime - aufs none rw,si=caafa54f8d80525 104 20 0:97 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b rw,relatime - aufs none rw,si=caafa54f8fc3525 105 20 0:98 / /var/lib/docker/aufs/mnt/d1b322ae17613c6adee84e709641a9244ac56675244a89a64dc0075075fcbb83 rw,relatime - aufs none rw,si=caafa54f8c58525 106 20 0:99 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd-init rw,relatime - aufs none rw,si=caafa54f8c63525 107 20 0:100 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd rw,relatime - aufs none rw,si=caafa54f8c67525 108 20 0:101 / /var/lib/docker/aufs/mnt/bc9d2a264158f83a617a069bf17cbbf2a2ba453db7d3951d9dc63cc1558b1c2b rw,relatime - aufs none rw,si=caafa54f8dbe525 109 20 0:102 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99-init rw,relatime - aufs none rw,si=caafa54f9e0d525 110 20 0:103 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99 rw,relatime - aufs none rw,si=caafa54f9e1b525 111 20 0:104 / /var/lib/docker/aufs/mnt/d4dca7b02569c732e740071e1c654d4ad282de5c41edb619af1f0aafa618be26 rw,relatime - aufs none rw,si=caafa54f8dae525 112 20 0:105 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7-init rw,relatime - aufs none rw,si=caafa54f8c5c525 113 20 0:106 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7 rw,relatime - aufs none rw,si=caafa54fd172525 114 20 0:107 / /var/lib/docker/aufs/mnt/e60c57499c0b198a6734f77f660cdbbd950a5b78aa23f470ca4f0cfcc376abef rw,relatime - aufs none rw,si=caafa54909c4525 115 20 0:108 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35-init rw,relatime - aufs none rw,si=caafa54909c3525 116 20 0:109 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35 rw,relatime - aufs none rw,si=caafa54909c7525 117 20 0:110 / /var/lib/docker/aufs/mnt/2997be666d58b9e71469759bcb8bd9608dad0e533a1a7570a896919ba3388825 rw,relatime - aufs none rw,si=caafa54f8557525 118 20 0:111 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93-init rw,relatime - aufs none rw,si=caafa54c6e88525 119 20 0:112 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93 rw,relatime - aufs none rw,si=caafa54c6e8e525 120 20 0:113 / /var/lib/docker/aufs/mnt/a672a1e2f2f051f6e19ed1dfbe80860a2d774174c49f7c476695f5dd1d5b2f67 rw,relatime - aufs none rw,si=caafa54c6e15525 121 20 0:114 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420-init rw,relatime - aufs none rw,si=caafa54f8dad525 122 20 0:115 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420 rw,relatime - aufs none rw,si=caafa54f8d84525 123 20 0:116 / 
/var/lib/docker/aufs/mnt/2abc86007aca46fb4a817a033e2a05ccacae40b78ea4b03f8ea616b9ada40e2e rw,relatime - aufs none rw,si=caafa54c6e8b525 124 20 0:117 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374-init rw,relatime - aufs none rw,si=caafa54c6e8d525 125 20 0:118 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374 rw,relatime - aufs none rw,si=caafa54f8c34525 126 20 0:119 / /var/lib/docker/aufs/mnt/2f95ca1a629cea8363b829faa727dd52896d5561f2c96ddee4f697ea2fc872c2 rw,relatime - aufs none rw,si=caafa54c6e8a525 127 20 0:120 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2-init rw,relatime - aufs none rw,si=caafa54f8e19525 128 20 0:121 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2 rw,relatime - aufs none rw,si=caafa54fa8c6525 129 20 0:122 / /var/lib/docker/aufs/mnt/c1d04dfdf8cccb3676d5a91e84e9b0781ce40623d127d038bcfbe4c761b27401 rw,relatime - aufs none rw,si=caafa54f8c30525 130 20 0:123 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a-init rw,relatime - aufs none rw,si=caafa54c6e1a525 131 20 0:124 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a rw,relatime - aufs none rw,si=caafa54c6e1c525 132 20 0:125 / /var/lib/docker/aufs/mnt/5ae3b6fccb1539fc02d420e86f3e9637bef5b711fed2ca31a2f426c8f5deddbf rw,relatime - aufs none rw,si=caafa54c4fea525 133 20 0:126 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0-init rw,relatime - aufs none rw,si=caafa54c6e1e525 134 20 0:127 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0 rw,relatime - aufs none rw,si=caafa54fa8c0525 135 20 0:128 / /var/lib/docker/aufs/mnt/f382bd5aaccaf2d04a59089ac7cb12ec87efd769fd0c14d623358fbfd2a3f896 rw,relatime - aufs none rw,si=caafa54c4fec525 136 20 0:129 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735-init rw,relatime - aufs none rw,si=caafa54c4fef525 137 20 0:130 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735 rw,relatime - aufs none rw,si=caafa54c4feb525 138 20 0:131 / /var/lib/docker/aufs/mnt/a9c5ee0854dc083b6bf62b7eb1e5291aefbb10702289a446471ce73aba0d5d7d rw,relatime - aufs none rw,si=caafa54909c6525 139 20 0:134 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0-init rw,relatime - aufs none rw,si=caafa54804fe525 140 20 0:135 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0 rw,relatime - aufs none rw,si=caafa54804fa525 141 20 0:136 / /var/lib/docker/aufs/mnt/7ec3277e5c04c907051caf9c9c35889f5fcd6463e5485971b25404566830bb70 rw,relatime - aufs none rw,si=caafa54804f9525 142 20 0:139 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8-init rw,relatime - aufs none rw,si=caafa54c6ef6525 143 20 0:140 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8 rw,relatime - aufs none rw,si=caafa54c6ef5525 144 20 0:356 / /var/lib/docker/aufs/mnt/e6ecde9e2c18cd3c75f424c67b6d89685cfee0fc67abf2cb6bdc0867eb998026 rw,relatime - aufs none rw,si=caafa548068e525` gentooMountinfo = `15 1 8:6 / / rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered 16 15 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw 17 15 0:14 / /run rw,nosuid,nodev,relatime - tmpfs tmpfs 
rw,size=3292172k,mode=755 18 15 0:5 / /dev rw,nosuid,relatime - devtmpfs udev rw,size=10240k,nr_inodes=4106451,mode=755 19 18 0:12 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw 20 18 0:10 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 21 18 0:15 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw 22 15 0:16 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw 23 22 0:7 / /sys/kernel/debug rw,nosuid,nodev,noexec,relatime - debugfs debugfs rw 24 22 0:17 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs cgroup_root rw,size=10240k,mode=755 25 24 0:18 / /sys/fs/cgroup/openrc rw,nosuid,nodev,noexec,relatime - cgroup openrc rw,release_agent=/lib64/rc/sh/cgroup-release-agent.sh,name=openrc 26 24 0:19 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cpuset rw,cpuset,clone_children 27 24 0:20 / /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cpu rw,cpu,clone_children 28 24 0:21 / /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cpuacct rw,cpuacct,clone_children 29 24 0:22 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup memory rw,memory,clone_children 30 24 0:23 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup devices rw,devices,clone_children 31 24 0:24 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup freezer rw,freezer,clone_children 32 24 0:25 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup blkio rw,blkio,clone_children 33 15 8:1 / /boot rw,noatime,nodiratime - vfat /dev/sda1 rw,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro 34 15 8:18 / /mnt/xfs rw,noatime,nodiratime - xfs /dev/sdb2 rw,attr2,inode64,noquota 35 15 0:26 / /tmp rw,relatime - tmpfs tmpfs rw 36 16 0:27 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw 42 15 0:33 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs rpc_pipefs rw 43 16 0:34 / /proc/fs/nfsd rw,nosuid,nodev,noexec,relatime - nfsd nfsd rw 44 15 0:35 / /home/tianon/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=1000,group_id=1000 68 15 0:3336 / /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd rw,relatime - aufs none rw,si=9b4a7640128db39c 85 68 8:6 /var/lib/docker/init/dockerinit-0.7.2-dev//deleted /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerinit rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered 86 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/config.env /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerenv rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered 87 68 8:6 /etc/resolv.conf /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/resolv.conf rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered 88 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hostname /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hostname rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered 89 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hosts /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hosts rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered 38 15 0:3384 / 
/var/lib/docker/aufs/mnt/0292005a9292401bb5197657f2b682d97d8edcb3b72b5e390d2a680139985b55 rw,relatime - aufs none rw,si=9b4a7642b584939c 39 15 0:3385 / /var/lib/docker/aufs/mnt/59db98c889de5f71b70cfb82c40cbe47b64332f0f56042a2987a9e5df6e5e3aa rw,relatime - aufs none rw,si=9b4a7642b584e39c 40 15 0:3386 / /var/lib/docker/aufs/mnt/0545f0f2b6548eb9601d08f35a08f5a0a385407d36027a28f58e06e9f61e0278 rw,relatime - aufs none rw,si=9b4a7642b584b39c 41 15 0:3387 / /var/lib/docker/aufs/mnt/d882cfa16d1aa8fe0331a36e79be3d80b151e49f24fc39a39c3fed1735d5feb5 rw,relatime - aufs none rw,si=9b4a76453040039c 45 15 0:3388 / /var/lib/docker/aufs/mnt/055ca3befcb1626e74f5344b3398724ff05c0de0e20021683d04305c9e70a3f6 rw,relatime - aufs none rw,si=9b4a76453040739c 46 15 0:3389 / /var/lib/docker/aufs/mnt/b899e4567a351745d4285e7f1c18fdece75d877deb3041981cd290be348b7aa6 rw,relatime - aufs none rw,si=9b4a7647def4039c 47 15 0:3390 / /var/lib/docker/aufs/mnt/067ca040292c58954c5129f953219accfae0d40faca26b4d05e76ca76a998f16 rw,relatime - aufs none rw,si=9b4a7647def4239c 48 15 0:3391 / /var/lib/docker/aufs/mnt/8c995e7cb6e5082742daeea720e340b021d288d25d92e0412c03d200df308a11 rw,relatime - aufs none rw,si=9b4a764479c1639c 49 15 0:3392 / /var/lib/docker/aufs/mnt/07cc54dfae5b45300efdacdd53cc72c01b9044956a86ce7bff42d087e426096d rw,relatime - aufs none rw,si=9b4a764479c1739c 50 15 0:3393 / /var/lib/docker/aufs/mnt/0a9c95cf4c589c05b06baa79150b0cc1d8e7102759fe3ce4afaabb8247ca4f85 rw,relatime - aufs none rw,si=9b4a7644059c839c 51 15 0:3394 / /var/lib/docker/aufs/mnt/468fa98cececcf4e226e8370f18f4f848d63faf287fb8321a07f73086441a3a0 rw,relatime - aufs none rw,si=9b4a7644059ca39c 52 15 0:3395 / /var/lib/docker/aufs/mnt/0b826192231c5ce066fffb5beff4397337b5fc19a377aa7c6282c7c0ce7f111f rw,relatime - aufs none rw,si=9b4a764479c1339c 53 15 0:3396 / /var/lib/docker/aufs/mnt/93b8ba1b772fbe79709b909c43ea4b2c30d712e53548f467db1ffdc7a384f196 rw,relatime - aufs none rw,si=9b4a7640798a739c 54 15 0:3397 / /var/lib/docker/aufs/mnt/0c0d0acfb506859b12ef18cdfef9ebed0b43a611482403564224bde9149d373c rw,relatime - aufs none rw,si=9b4a7640798a039c 55 15 0:3398 / /var/lib/docker/aufs/mnt/33648c39ab6c7c74af0243d6d6a81b052e9e25ad1e04b19892eb2dde013e358b rw,relatime - aufs none rw,si=9b4a7644b439b39c 56 15 0:3399 / /var/lib/docker/aufs/mnt/0c12bea97a1c958a3c739fb148536c1c89351d48e885ecda8f0499b5cc44407e rw,relatime - aufs none rw,si=9b4a7640798a239c 57 15 0:3400 / /var/lib/docker/aufs/mnt/ed443988ce125f172d7512e84a4de2627405990fd767a16adefa8ce700c19ce8 rw,relatime - aufs none rw,si=9b4a7644c8ed339c 59 15 0:3402 / /var/lib/docker/aufs/mnt/f61612c324ff3c924d3f7a82fb00a0f8d8f73c248c41897061949e9f5ab7e3b1 rw,relatime - aufs none rw,si=9b4a76442810c39c 60 15 0:3403 / /var/lib/docker/aufs/mnt/0f1ee55c6c4e25027b80de8e64b8b6fb542b3b41aa0caab9261da75752e22bfd rw,relatime - aufs none rw,si=9b4a76442810e39c 61 15 0:3404 / /var/lib/docker/aufs/mnt/956f6cc4af5785cb3ee6963dcbca668219437d9b28f513290b1453ac64a34f97 rw,relatime - aufs none rw,si=9b4a7644303ec39c 62 15 0:3405 / /var/lib/docker/aufs/mnt/1099769158c4b4773e2569e38024e8717e400f87a002c41d8cf47cb81b051ba6 rw,relatime - aufs none rw,si=9b4a7644303ee39c 63 15 0:3406 / /var/lib/docker/aufs/mnt/11890ceb98d4442595b676085cd7b21550ab85c5df841e0fba997ff54e3d522d rw,relatime - aufs none rw,si=9b4a7644303ed39c 64 15 0:3407 / /var/lib/docker/aufs/mnt/acdb90dc378e8ed2420b43a6d291f1c789a081cd1904018780cc038fcd7aae53 rw,relatime - aufs none rw,si=9b4a76434be2139c 65 15 0:3408 / 
/var/lib/docker/aufs/mnt/120e716f19d4714fbe63cc1ed246204f2c1106eefebc6537ba2587d7e7711959 rw,relatime - aufs none rw,si=9b4a76434be2339c 66 15 0:3409 / /var/lib/docker/aufs/mnt/b197b7fffb61d89e0ba1c40de9a9fc0d912e778b3c1bd828cf981ff37c1963bc rw,relatime - aufs none rw,si=9b4a76434be2039c 70 15 0:3412 / /var/lib/docker/aufs/mnt/1434b69d2e1bb18a9f0b96b9cdac30132b2688f5d1379f68a39a5e120c2f93eb rw,relatime - aufs none rw,si=9b4a76434be2639c 71 15 0:3413 / /var/lib/docker/aufs/mnt/16006e83caf33ab5eb0cd6afc92ea2ee8edeff897496b0bb3ec3a75b767374b3 rw,relatime - aufs none rw,si=9b4a7644d790439c 72 15 0:3414 / /var/lib/docker/aufs/mnt/55bfa5f44e94d27f91f79ba901b118b15098449165c87abf1b53ffff147ff164 rw,relatime - aufs none rw,si=9b4a7644d790239c 73 15 0:3415 / /var/lib/docker/aufs/mnt/1912b97a07ab21ccd98a2a27bc779bf3cf364a3138afa3c3e6f7f169a3c3eab5 rw,relatime - aufs none rw,si=9b4a76441822739c 76 15 0:3418 / /var/lib/docker/aufs/mnt/1a7c3292e8879bd91ffd9282e954f643b1db5683093574c248ff14a9609f2f56 rw,relatime - aufs none rw,si=9b4a76438cb7239c 77 15 0:3419 / /var/lib/docker/aufs/mnt/bb1faaf0d076ddba82c2318305a85f490dafa4e8a8640a8db8ed657c439120cc rw,relatime - aufs none rw,si=9b4a76438cb7339c 78 15 0:3420 / /var/lib/docker/aufs/mnt/1ab869f21d2241a73ac840c7f988490313f909ac642eba71d092204fec66dd7c rw,relatime - aufs none rw,si=9b4a76438cb7639c 79 15 0:3421 / /var/lib/docker/aufs/mnt/fd7245b2cfe3890fa5f5b452260e4edf9e7fb7746532ed9d83f7a0d7dbaa610e rw,relatime - aufs none rw,si=9b4a7644bdc0139c 80 15 0:3422 / /var/lib/docker/aufs/mnt/1e5686c5301f26b9b3cd24e322c608913465cc6c5d0dcd7c5e498d1314747d61 rw,relatime - aufs none rw,si=9b4a7644bdc0639c 81 15 0:3423 / /var/lib/docker/aufs/mnt/52edf6ee6e40bfec1e9301a4d4a92ab83d144e2ae4ce5099e99df6138cb844bf rw,relatime - aufs none rw,si=9b4a7644bdc0239c 82 15 0:3424 / /var/lib/docker/aufs/mnt/1ea10fb7085d28cda4904657dff0454e52598d28e1d77e4f2965bbc3666e808f rw,relatime - aufs none rw,si=9b4a76438cb7139c 83 15 0:3425 / /var/lib/docker/aufs/mnt/9c03e98c3593946dbd4087f8d83f9ca262f4a2efdc952ce60690838b9ba6c526 rw,relatime - aufs none rw,si=9b4a76443020639c 84 15 0:3426 / /var/lib/docker/aufs/mnt/220a2344d67437602c6d2cee9a98c46be13f82c2a8063919dd2fad52bf2fb7dd rw,relatime - aufs none rw,si=9b4a76434bff339c 94 15 0:3427 / /var/lib/docker/aufs/mnt/3b32876c5b200312c50baa476ff342248e88c8ea96e6a1032cd53a88738a1cf2 rw,relatime - aufs none rw,si=9b4a76434bff139c 95 15 0:3428 / /var/lib/docker/aufs/mnt/23ee2b8b0d4ae8db6f6d1e168e2c6f79f8a18f953b09f65e0d22cc1e67a3a6fa rw,relatime - aufs none rw,si=9b4a7646c305c39c 96 15 0:3429 / /var/lib/docker/aufs/mnt/e86e6daa70b61b57945fa178222615f3c3d6bcef12c9f28e9f8623d44dc2d429 rw,relatime - aufs none rw,si=9b4a7646c305f39c 97 15 0:3430 / /var/lib/docker/aufs/mnt/2413d07623e80860bb2e9e306fbdee699afd07525785c025c591231e864aa162 rw,relatime - aufs none rw,si=9b4a76434bff039c 98 15 0:3431 / /var/lib/docker/aufs/mnt/adfd622eb22340fc80b429e5564b125668e260bf9068096c46dd59f1386a4b7d rw,relatime - aufs none rw,si=9b4a7646a7a1039c 102 15 0:3435 / /var/lib/docker/aufs/mnt/27cd92e7a91d02e2d6b44d16679a00fb6d169b19b88822891084e7fd1a84882d rw,relatime - aufs none rw,si=9b4a7646f25ec39c 103 15 0:3436 / /var/lib/docker/aufs/mnt/27dfdaf94cfbf45055c748293c37dd68d9140240bff4c646cb09216015914a88 rw,relatime - aufs none rw,si=9b4a7646732f939c 104 15 0:3437 / /var/lib/docker/aufs/mnt/5ed7524aff68dfbf0fc601cbaeac01bab14391850a973dabf3653282a627920f rw,relatime - aufs none rw,si=9b4a7646732f839c 105 15 0:3438 / 
/var/lib/docker/aufs/mnt/2a0d4767e536beb5785b60e071e3ac8e5e812613ab143a9627bee77d0c9ab062 rw,relatime - aufs none rw,si=9b4a7646732fe39c 106 15 0:3439 / /var/lib/docker/aufs/mnt/dea3fc045d9f4ae51ba952450b948a822cf85c39411489ca5224f6d9a8d02bad rw,relatime - aufs none rw,si=9b4a764012ad839c 107 15 0:3440 / /var/lib/docker/aufs/mnt/2d140a787160798da60cb67c21b1210054ad4dafecdcf832f015995b9aa99cfd rw,relatime - aufs none rw,si=9b4a764012add39c 108 15 0:3441 / /var/lib/docker/aufs/mnt/cb190b2a8e984475914430fbad2382e0d20b9b659f8ef83ae8d170cc672e519c rw,relatime - aufs none rw,si=9b4a76454d9c239c 109 15 0:3442 / /var/lib/docker/aufs/mnt/2f4a012d5a7ffd90256a6e9aa479054b3dddbc3c6a343f26dafbf3196890223b rw,relatime - aufs none rw,si=9b4a76454d9c439c 110 15 0:3443 / /var/lib/docker/aufs/mnt/63cc77904b80c4ffbf49cb974c5d8733dc52ad7640d3ae87554b325d7312d87f rw,relatime - aufs none rw,si=9b4a76454d9c339c 111 15 0:3444 / /var/lib/docker/aufs/mnt/30333e872c451482ea2d235ff2192e875bd234006b238ae2bdde3b91a86d7522 rw,relatime - aufs none rw,si=9b4a76422cebf39c 112 15 0:3445 / /var/lib/docker/aufs/mnt/6c54fc1125da3925cae65b5c9a98f3be55b0a2c2666082e5094a4ba71beb5bff rw,relatime - aufs none rw,si=9b4a7646dd5a439c 113 15 0:3446 / /var/lib/docker/aufs/mnt/3087d48cb01cda9d0a83a9ca301e6ea40e8593d18c4921be4794c91a420ab9a3 rw,relatime - aufs none rw,si=9b4a7646dd5a739c 114 15 0:3447 / /var/lib/docker/aufs/mnt/cc2607462a8f55b179a749b144c3fdbb50678e1a4f3065ea04e283e9b1f1d8e2 rw,relatime - aufs none rw,si=9b4a7646dd5a239c 117 15 0:3450 / /var/lib/docker/aufs/mnt/310c5e8392b29e8658a22e08d96d63936633b7e2c38e8d220047928b00a03d24 rw,relatime - aufs none rw,si=9b4a7647932d739c 118 15 0:3451 / /var/lib/docker/aufs/mnt/38a1f0029406ba9c3b6058f2f406d8a1d23c855046cf355c91d87d446fcc1460 rw,relatime - aufs none rw,si=9b4a76445abc939c 119 15 0:3452 / /var/lib/docker/aufs/mnt/42e109ab7914ae997a11ccd860fd18e4d488c50c044c3240423ce15774b8b62e rw,relatime - aufs none rw,si=9b4a76445abca39c 120 15 0:3453 / /var/lib/docker/aufs/mnt/365d832af0402d052b389c1e9c0d353b48487533d20cd4351df8e24ec4e4f9d8 rw,relatime - aufs none rw,si=9b4a7644066aa39c 121 15 0:3454 / /var/lib/docker/aufs/mnt/d3fa8a24d695b6cda9b64f96188f701963d28bef0473343f8b212df1a2cf1d2b rw,relatime - aufs none rw,si=9b4a7644066af39c 122 15 0:3455 / /var/lib/docker/aufs/mnt/37d4f491919abc49a15d0c7a7cc8383f087573525d7d288accd14f0b4af9eae0 rw,relatime - aufs none rw,si=9b4a7644066ad39c 123 15 0:3456 / /var/lib/docker/aufs/mnt/93902707fe12cbdd0068ce73f2baad4b3a299189b1b19cb5f8a2025e106ae3f5 rw,relatime - aufs none rw,si=9b4a76444445f39c 126 15 0:3459 / /var/lib/docker/aufs/mnt/3b49291670a625b9bbb329ffba99bf7fa7abff80cefef040f8b89e2b3aad4f9f rw,relatime - aufs none rw,si=9b4a7640798a339c 127 15 0:3460 / /var/lib/docker/aufs/mnt/8d9c7b943cc8f854f4d0d4ec19f7c16c13b0cc4f67a41472a072648610cecb59 rw,relatime - aufs none rw,si=9b4a76427383039c 128 15 0:3461 / /var/lib/docker/aufs/mnt/3b6c90036526c376307df71d49c9f5fce334c01b926faa6a78186842de74beac rw,relatime - aufs none rw,si=9b4a7644badd439c 130 15 0:3463 / /var/lib/docker/aufs/mnt/7b24158eeddfb5d31b7e932e406ea4899fd728344335ff8e0765e89ddeb351dd rw,relatime - aufs none rw,si=9b4a7644badd539c 131 15 0:3464 / /var/lib/docker/aufs/mnt/3ead6dd5773765c74850cf6c769f21fe65c29d622ffa712664f9f5b80364ce27 rw,relatime - aufs none rw,si=9b4a7642f469939c 132 15 0:3465 / /var/lib/docker/aufs/mnt/3f825573b29547744a37b65597a9d6d15a8350be4429b7038d126a4c9a8e178f rw,relatime - aufs none rw,si=9b4a7642f469c39c 133 15 0:3466 / 
/var/lib/docker/aufs/mnt/f67aaaeb3681e5dcb99a41f847087370bd1c206680cb8c7b6a9819fd6c97a331 rw,relatime - aufs none rw,si=9b4a7647cc25939c 134 15 0:3467 / /var/lib/docker/aufs/mnt/41afe6cfb3c1fc2280b869db07699da88552786e28793f0bc048a265c01bd942 rw,relatime - aufs none rw,si=9b4a7647cc25c39c 135 15 0:3468 / /var/lib/docker/aufs/mnt/b8092ea59da34a40b120e8718c3ae9fa8436996edc4fc50e4b99c72dfd81e1af rw,relatime - aufs none rw,si=9b4a76445abc439c 136 15 0:3469 / /var/lib/docker/aufs/mnt/42c69d2cc179e2684458bb8596a9da6dad182c08eae9b74d5f0e615b399f75a5 rw,relatime - aufs none rw,si=9b4a76455ddbe39c 137 15 0:3470 / /var/lib/docker/aufs/mnt/ea0871954acd2d62a211ac60e05969622044d4c74597870c4f818fbb0c56b09b rw,relatime - aufs none rw,si=9b4a76455ddbf39c 138 15 0:3471 / /var/lib/docker/aufs/mnt/4307906b275ab3fc971786b3841ae3217ac85b6756ddeb7ad4ba09cd044c2597 rw,relatime - aufs none rw,si=9b4a76455ddb839c 139 15 0:3472 / /var/lib/docker/aufs/mnt/4390b872928c53500a5035634f3421622ed6299dc1472b631fc45de9f56dc180 rw,relatime - aufs none rw,si=9b4a76402f2fd39c 140 15 0:3473 / /var/lib/docker/aufs/mnt/6bb41e78863b85e4aa7da89455314855c8c3bda64e52a583bab15dc1fa2e80c2 rw,relatime - aufs none rw,si=9b4a76402f2fa39c 141 15 0:3474 / /var/lib/docker/aufs/mnt/4444f583c2a79c66608f4673a32c9c812154f027045fbd558c2d69920c53f835 rw,relatime - aufs none rw,si=9b4a764479dbd39c 142 15 0:3475 / /var/lib/docker/aufs/mnt/6f11883af4a05ea362e0c54df89058da4859f977efd07b6f539e1f55c1d2a668 rw,relatime - aufs none rw,si=9b4a76402f30b39c 143 15 0:3476 / /var/lib/docker/aufs/mnt/453490dd32e7c2e9ef906f995d8fb3c2753923d1a5e0ba3fd3296e2e4dc238e7 rw,relatime - aufs none rw,si=9b4a76402f30c39c 144 15 0:3477 / /var/lib/docker/aufs/mnt/45e5945735ee102b5e891c91650c57ec4b52bb53017d68f02d50ea8a6e230610 rw,relatime - aufs none rw,si=9b4a76423260739c 147 15 0:3480 / /var/lib/docker/aufs/mnt/4727a64a5553a1125f315b96bed10d3073d6988225a292cce732617c925b56ab rw,relatime - aufs none rw,si=9b4a76443030339c 150 15 0:3483 / /var/lib/docker/aufs/mnt/4e348b5187b9a567059306afc72d42e0ec5c893b0d4abd547526d5f9b6fb4590 rw,relatime - aufs none rw,si=9b4a7644f5d8c39c 151 15 0:3484 / /var/lib/docker/aufs/mnt/4efc616bfbc3f906718b052da22e4335f8e9f91ee9b15866ed3a8029645189ef rw,relatime - aufs none rw,si=9b4a7644f5d8939c 152 15 0:3485 / /var/lib/docker/aufs/mnt/83e730ae9754d5adb853b64735472d98dfa17136b8812ac9cfcd1eba7f4e7d2d rw,relatime - aufs none rw,si=9b4a76469aa7139c 153 15 0:3486 / /var/lib/docker/aufs/mnt/4fc5ba8a5b333be2b7eefacccb626772eeec0ae8a6975112b56c9fb36c0d342f rw,relatime - aufs none rw,si=9b4a7640128dc39c 154 15 0:3487 / /var/lib/docker/aufs/mnt/50200d5edff5dfe8d1ef3c78b0bbd709793ac6e936aa16d74ff66f7ea577b6f9 rw,relatime - aufs none rw,si=9b4a7640128da39c 155 15 0:3488 / /var/lib/docker/aufs/mnt/51e5e51604361448f0b9777f38329f414bc5ba9cf238f26d465ff479bd574b61 rw,relatime - aufs none rw,si=9b4a76444f68939c 156 15 0:3489 / /var/lib/docker/aufs/mnt/52a142149aa98bba83df8766bbb1c629a97b9799944ead90dd206c4bdf0b8385 rw,relatime - aufs none rw,si=9b4a76444f68b39c 157 15 0:3490 / /var/lib/docker/aufs/mnt/52dd21a94a00f58a1ed489312fcfffb91578089c76c5650364476f1d5de031bc rw,relatime - aufs none rw,si=9b4a76444f68f39c 158 15 0:3491 / /var/lib/docker/aufs/mnt/ee562415ddaad353ed22c88d0ca768a0c74bfba6333b6e25c46849ee22d990da rw,relatime - aufs none rw,si=9b4a7640128d839c 159 15 0:3492 / /var/lib/docker/aufs/mnt/db47a9e87173f7554f550c8a01891de79cf12acdd32e01f95c1a527a08bdfb2c rw,relatime - aufs none rw,si=9b4a764405a1d39c 160 15 0:3493 / 
/var/lib/docker/aufs/mnt/55e827bf6d44d930ec0b827c98356eb8b68c3301e2d60d1429aa72e05b4c17df rw,relatime - aufs none rw,si=9b4a764405a1a39c 162 15 0:3495 / /var/lib/docker/aufs/mnt/578dc4e0a87fc37ec081ca098430499a59639c09f6f12a8f48de29828a091aa6 rw,relatime - aufs none rw,si=9b4a76406d7d439c 163 15 0:3496 / /var/lib/docker/aufs/mnt/728cc1cb04fa4bc6f7bf7a90980beda6d8fc0beb71630874c0747b994efb0798 rw,relatime - aufs none rw,si=9b4a76444f20e39c 164 15 0:3497 / /var/lib/docker/aufs/mnt/5850cc4bd9b55aea46c7ad598f1785117607974084ea643580f58ce3222e683a rw,relatime - aufs none rw,si=9b4a7644a824239c 165 15 0:3498 / /var/lib/docker/aufs/mnt/89443b3f766d5a37bc8b84e29da8b84e6a3ea8486d3cf154e2aae1816516e4a8 rw,relatime - aufs none rw,si=9b4a7644a824139c 166 15 0:3499 / /var/lib/docker/aufs/mnt/f5ae8fd5a41a337907d16515bc3162525154b59c32314c695ecd092c3b47943d rw,relatime - aufs none rw,si=9b4a7644a824439c 167 15 0:3500 / /var/lib/docker/aufs/mnt/5a430854f2a03a9e5f7cbc9f3fb46a8ebca526a5b3f435236d8295e5998798f5 rw,relatime - aufs none rw,si=9b4a7647fc82439c 168 15 0:3501 / /var/lib/docker/aufs/mnt/eda16901ae4cead35070c39845cbf1e10bd6b8cb0ffa7879ae2d8a186e460f91 rw,relatime - aufs none rw,si=9b4a76441e0df39c 169 15 0:3502 / /var/lib/docker/aufs/mnt/5a593721430c2a51b119ff86a7e06ea2b37e3b4131f8f1344d402b61b0c8d868 rw,relatime - aufs none rw,si=9b4a764248bad39c 170 15 0:3503 / /var/lib/docker/aufs/mnt/d662ad0a30fbfa902e0962108685b9330597e1ee2abb16dc9462eb5a67fdd23f rw,relatime - aufs none rw,si=9b4a764248bae39c 171 15 0:3504 / /var/lib/docker/aufs/mnt/5bc9de5c79812843fb36eee96bef1ddba812407861f572e33242f4ee10da2c15 rw,relatime - aufs none rw,si=9b4a764248ba839c 172 15 0:3505 / /var/lib/docker/aufs/mnt/5e763de8e9b0f7d58d2e12a341e029ab4efb3b99788b175090d8209e971156c1 rw,relatime - aufs none rw,si=9b4a764248baa39c 173 15 0:3506 / /var/lib/docker/aufs/mnt/b4431dc2739936f1df6387e337f5a0c99cf051900c896bd7fd46a870ce61c873 rw,relatime - aufs none rw,si=9b4a76401263539c 174 15 0:3507 / /var/lib/docker/aufs/mnt/5f37830e5a02561ab8c67ea3113137ba69f67a60e41c05cb0e7a0edaa1925b24 rw,relatime - aufs none rw,si=9b4a76401263639c 184 15 0:3508 / /var/lib/docker/aufs/mnt/62ea10b957e6533538a4633a1e1d678502f50ddcdd354b2ca275c54dd7a7793a rw,relatime - aufs none rw,si=9b4a76401263039c 187 15 0:3509 / /var/lib/docker/aufs/mnt/d56ee9d44195fe390e042fda75ec15af5132adb6d5c69468fa8792f4e54a6953 rw,relatime - aufs none rw,si=9b4a76401263239c 188 15 0:3510 / /var/lib/docker/aufs/mnt/6a300930673174549c2b62f36c933f0332a20735978c007c805a301f897146c5 rw,relatime - aufs none rw,si=9b4a76455d4c539c 189 15 0:3511 / /var/lib/docker/aufs/mnt/64496c45c84d348c24d410015456d101601c30cab4d1998c395591caf7e57a70 rw,relatime - aufs none rw,si=9b4a76455d4c639c 190 15 0:3512 / /var/lib/docker/aufs/mnt/65a6a645883fe97a7422cd5e71ebe0bc17c8e6302a5361edf52e89747387e908 rw,relatime - aufs none rw,si=9b4a76455d4c039c 191 15 0:3513 / /var/lib/docker/aufs/mnt/672be40695f7b6e13b0a3ed9fc996c73727dede3481f58155950fcfad57ed616 rw,relatime - aufs none rw,si=9b4a76455d4c239c 192 15 0:3514 / /var/lib/docker/aufs/mnt/d42438acb2bfb2169e1c0d8e917fc824f7c85d336dadb0b0af36dfe0f001b3ba rw,relatime - aufs none rw,si=9b4a7642bfded39c 193 15 0:3515 / /var/lib/docker/aufs/mnt/b48a54abf26d01cb2ddd908b1ed6034d17397c1341bf0eb2b251a3e5b79be854 rw,relatime - aufs none rw,si=9b4a7642bfdee39c 194 15 0:3516 / /var/lib/docker/aufs/mnt/76f27134491f052bfb87f59092126e53ef875d6851990e59195a9da16a9412f8 rw,relatime - aufs none rw,si=9b4a7642bfde839c 195 15 0:3517 / 
/var/lib/docker/aufs/mnt/6bd626a5462b4f8a8e1cc7d10351326dca97a59b2758e5ea549a4f6350ce8a90 rw,relatime - aufs none rw,si=9b4a7642bfdea39c 196 15 0:3518 / /var/lib/docker/aufs/mnt/f1fe3549dbd6f5ca615e9139d9b53f0c83a3b825565df37628eacc13e70cbd6d rw,relatime - aufs none rw,si=9b4a7642bfdf539c 197 15 0:3519 / /var/lib/docker/aufs/mnt/6d0458c8426a9e93d58d0625737e6122e725c9408488ed9e3e649a9984e15c34 rw,relatime - aufs none rw,si=9b4a7642bfdf639c 198 15 0:3520 / /var/lib/docker/aufs/mnt/6e4c97db83aa82145c9cf2bafc20d500c0b5389643b689e3ae84188c270a48c5 rw,relatime - aufs none rw,si=9b4a7642bfdf039c 199 15 0:3521 / /var/lib/docker/aufs/mnt/eb94d6498f2c5969eaa9fa11ac2934f1ab90ef88e2d002258dca08e5ba74ea27 rw,relatime - aufs none rw,si=9b4a7642bfdf239c 200 15 0:3522 / /var/lib/docker/aufs/mnt/fe3f88f0c511608a2eec5f13a98703aa16e55dbf930309723d8a37101f539fe1 rw,relatime - aufs none rw,si=9b4a7642bfc3539c 201 15 0:3523 / /var/lib/docker/aufs/mnt/6f40c229fb9cad85fabf4b64a2640a5403ec03fe5ac1a57d0609fb8b606b9c83 rw,relatime - aufs none rw,si=9b4a7642bfc3639c 202 15 0:3524 / /var/lib/docker/aufs/mnt/7513e9131f7a8acf58ff15248237feb767c78732ca46e159f4d791e6ef031dbc rw,relatime - aufs none rw,si=9b4a7642bfc3039c 203 15 0:3525 / /var/lib/docker/aufs/mnt/79f48b00aa713cdf809c6bb7c7cb911b66e9a8076c81d6c9d2504139984ea2da rw,relatime - aufs none rw,si=9b4a7642bfc3239c 204 15 0:3526 / /var/lib/docker/aufs/mnt/c3680418350d11358f0a96c676bc5aa74fa00a7c89e629ef5909d3557b060300 rw,relatime - aufs none rw,si=9b4a7642f47cd39c 205 15 0:3527 / /var/lib/docker/aufs/mnt/7a1744dd350d7fcc0cccb6f1757ca4cbe5453f203a5888b0f1014d96ad5a5ef9 rw,relatime - aufs none rw,si=9b4a7642f47ce39c 206 15 0:3528 / /var/lib/docker/aufs/mnt/7fa99662db046be9f03c33c35251afda9ccdc0085636bbba1d90592cec3ff68d rw,relatime - aufs none rw,si=9b4a7642f47c839c 207 15 0:3529 / /var/lib/docker/aufs/mnt/f815021ef20da9c9b056bd1d52d8aaf6e2c0c19f11122fc793eb2b04eb995e35 rw,relatime - aufs none rw,si=9b4a7642f47ca39c 208 15 0:3530 / /var/lib/docker/aufs/mnt/801086ae3110192d601dfcebdba2db92e86ce6b6a9dba6678ea04488e4513669 rw,relatime - aufs none rw,si=9b4a7642dc6dd39c 209 15 0:3531 / /var/lib/docker/aufs/mnt/822ba7db69f21daddda87c01cfbfbf73013fc03a879daf96d16cdde6f9b1fbd6 rw,relatime - aufs none rw,si=9b4a7642dc6de39c 210 15 0:3532 / /var/lib/docker/aufs/mnt/834227c1a950fef8cae3827489129d0dd220541e60c6b731caaa765bf2e6a199 rw,relatime - aufs none rw,si=9b4a7642dc6d839c 211 15 0:3533 / /var/lib/docker/aufs/mnt/83dccbc385299bd1c7cf19326e791b33a544eea7b4cdfb6db70ea94eed4389fb rw,relatime - aufs none rw,si=9b4a7642dc6da39c 212 15 0:3534 / /var/lib/docker/aufs/mnt/f1b8e6f0e7c8928b5dcdab944db89306ebcae3e0b32f9ff40d2daa8329f21600 rw,relatime - aufs none rw,si=9b4a7645a126039c 213 15 0:3535 / /var/lib/docker/aufs/mnt/970efb262c7a020c2404cbcc5b3259efba0d110a786079faeef05bc2952abf3a rw,relatime - aufs none rw,si=9b4a7644c8ed139c 214 15 0:3536 / /var/lib/docker/aufs/mnt/84b6d73af7450f3117a77e15a5ca1255871fea6182cd8e8a7be6bc744be18c2c rw,relatime - aufs none rw,si=9b4a76406559139c 215 15 0:3537 / /var/lib/docker/aufs/mnt/88be2716e026bc681b5e63fe7942068773efbd0b6e901ca7ba441412006a96b6 rw,relatime - aufs none rw,si=9b4a76406559339c 216 15 0:3538 / /var/lib/docker/aufs/mnt/c81939aa166ce50cd8bca5cfbbcc420a78e0318dd5cd7c755209b9166a00a752 rw,relatime - aufs none rw,si=9b4a76406559239c 217 15 0:3539 / /var/lib/docker/aufs/mnt/e0f241645d64b7dc5ff6a8414087cca226be08fb54ce987d1d1f6350c57083aa rw,relatime - aufs none rw,si=9b4a7647cfc0f39c 218 15 0:3540 / 
/var/lib/docker/aufs/mnt/e10e2bf75234ed51d8a6a4bb39e465404fecbe318e54400d3879cdb2b0679c78 rw,relatime - aufs none rw,si=9b4a7647cfc0939c 219 15 0:3541 / /var/lib/docker/aufs/mnt/8f71d74c8cfc3228b82564aa9f09b2e576cff0083ddfb6aa5cb350346063f080 rw,relatime - aufs none rw,si=9b4a7647cfc0a39c 220 15 0:3542 / /var/lib/docker/aufs/mnt/9159f1eba2aef7f5205cc18d015cda7f5933cd29bba3b1b8aed5ccb5824c69ee rw,relatime - aufs none rw,si=9b4a76468cedd39c 221 15 0:3543 / /var/lib/docker/aufs/mnt/932cad71e652e048e500d9fbb5b8ea4fc9a269d42a3134ce527ceef42a2be56b rw,relatime - aufs none rw,si=9b4a76468cede39c 222 15 0:3544 / /var/lib/docker/aufs/mnt/bf1e1b5f529e8943cc0144ee86dbaaa37885c1ddffcef29537e0078ee7dd316a rw,relatime - aufs none rw,si=9b4a76468ced839c 223 15 0:3545 / /var/lib/docker/aufs/mnt/949d93ecf3322e09f858ce81d5f4b434068ec44ff84c375de03104f7b45ee955 rw,relatime - aufs none rw,si=9b4a76468ceda39c 224 15 0:3546 / /var/lib/docker/aufs/mnt/d65c6087f92dc2a3841b5251d2fe9ca07d4c6e5b021597692479740816e4e2a1 rw,relatime - aufs none rw,si=9b4a7645a126239c 225 15 0:3547 / /var/lib/docker/aufs/mnt/98a0153119d0651c193d053d254f6e16a68345a141baa80c87ae487e9d33f290 rw,relatime - aufs none rw,si=9b4a7640787cf39c 226 15 0:3548 / /var/lib/docker/aufs/mnt/99daf7fe5847c017392f6e59aa9706b3dfdd9e6d1ba11dae0f7fffde0a60b5e5 rw,relatime - aufs none rw,si=9b4a7640787c839c 227 15 0:3549 / /var/lib/docker/aufs/mnt/9ad1f2fe8a5599d4e10c5a6effa7f03d932d4e92ee13149031a372087a359079 rw,relatime - aufs none rw,si=9b4a7640787ca39c 228 15 0:3550 / /var/lib/docker/aufs/mnt/c26d64494da782ddac26f8370d86ac93e7c1666d88a7b99110fc86b35ea6a85d rw,relatime - aufs none rw,si=9b4a7642fc6b539c 229 15 0:3551 / /var/lib/docker/aufs/mnt/a49e4a8275133c230ec640997f35f172312eb0ea5bd2bbe10abf34aae98f30eb rw,relatime - aufs none rw,si=9b4a7642fc6b639c 230 15 0:3552 / /var/lib/docker/aufs/mnt/b5e2740c867ed843025f49d84e8d769de9e8e6039b3c8cb0735b5bf358994bc7 rw,relatime - aufs none rw,si=9b4a7642fc6b039c 231 15 0:3553 / /var/lib/docker/aufs/mnt/a826fdcf3a7039b30570054579b65763db605a314275d7aef31b872c13311b4b rw,relatime - aufs none rw,si=9b4a7642fc6b239c 232 15 0:3554 / /var/lib/docker/aufs/mnt/addf3025babf5e43b5a3f4a0da7ad863dda3c01fb8365c58fd8d28bb61dc11bc rw,relatime - aufs none rw,si=9b4a76407871d39c 233 15 0:3555 / /var/lib/docker/aufs/mnt/c5b6c6813ab3e5ebdc6d22cb2a3d3106a62095f2c298be52b07a3b0fa20ff690 rw,relatime - aufs none rw,si=9b4a76407871e39c 234 15 0:3556 / /var/lib/docker/aufs/mnt/af0609eaaf64e2392060cb46f5a9f3d681a219bb4c651d4f015bf573fbe6c4cf rw,relatime - aufs none rw,si=9b4a76407871839c 235 15 0:3557 / /var/lib/docker/aufs/mnt/e7f20e3c37ecad39cd90a97cd3549466d0d106ce4f0a930b8495442634fa4a1f rw,relatime - aufs none rw,si=9b4a76407871a39c 237 15 0:3559 / /var/lib/docker/aufs/mnt/b57a53d440ffd0c1295804fa68cdde35d2fed5409484627e71b9c37e4249fd5c rw,relatime - aufs none rw,si=9b4a76444445a39c 238 15 0:3560 / /var/lib/docker/aufs/mnt/b5e7d7b8f35e47efbba3d80c5d722f5e7bd43e54c824e54b4a4b351714d36d42 rw,relatime - aufs none rw,si=9b4a7647932d439c 239 15 0:3561 / /var/lib/docker/aufs/mnt/f1b136def157e9465640658f277f3347de593c6ae76412a2e79f7002f091cae2 rw,relatime - aufs none rw,si=9b4a76445abcd39c 240 15 0:3562 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=9b4a7644403b339c 241 15 0:3563 / /var/lib/docker/aufs/mnt/b89b140cdbc95063761864e0a23346207fa27ee4c5c63a1ae85c9069a9d9cf1d rw,relatime - aufs none rw,si=9b4a7644aa19739c 242 15 0:3564 / 
/var/lib/docker/aufs/mnt/bc6a69ed51c07f5228f6b4f161c892e6a949c0e7e86a9c3432049d4c0e5cd298 rw,relatime - aufs none rw,si=9b4a7644aa19139c 243 15 0:3565 / /var/lib/docker/aufs/mnt/be4e2ba3f136933e239f7cf3d136f484fb9004f1fbdfee24a62a2c7b0ab30670 rw,relatime - aufs none rw,si=9b4a7644aa19339c 244 15 0:3566 / /var/lib/docker/aufs/mnt/e04ca1a4a5171e30d20f0c92f90a50b8b6f8600af5459c4b4fb25e42e864dfe1 rw,relatime - aufs none rw,si=9b4a7647932d139c 245 15 0:3567 / /var/lib/docker/aufs/mnt/be61576b31db893129aaffcd3dcb5ce35e49c4b71b30c392a78609a45c7323d8 rw,relatime - aufs none rw,si=9b4a7642d85f739c 246 15 0:3568 / /var/lib/docker/aufs/mnt/dda42c191e56becf672327658ab84fcb563322db3764b91c2fefe4aaef04c624 rw,relatime - aufs none rw,si=9b4a7642d85f139c 247 15 0:3569 / /var/lib/docker/aufs/mnt/c0a7995053330f3d88969247a2e72b07e2dd692133f5668a4a35ea3905561072 rw,relatime - aufs none rw,si=9b4a7642d85f339c 249 15 0:3571 / /var/lib/docker/aufs/mnt/c3594b2e5f08c59ff5ed338a1ba1eceeeb1f7fc5d180068338110c00b1eb8502 rw,relatime - aufs none rw,si=9b4a7642738c739c 250 15 0:3572 / /var/lib/docker/aufs/mnt/c58dce03a0ab0a7588393880379dc3bce9f96ec08ed3f99cf1555260ff0031e8 rw,relatime - aufs none rw,si=9b4a7642738c139c 251 15 0:3573 / /var/lib/docker/aufs/mnt/c73e9f1d109c9d14cb36e1c7489df85649be3911116d76c2fd3648ec8fd94e23 rw,relatime - aufs none rw,si=9b4a7642738c339c 252 15 0:3574 / /var/lib/docker/aufs/mnt/c9eef28c344877cd68aa09e543c0710ab2b305a0ff96dbb859bfa7808c3e8d01 rw,relatime - aufs none rw,si=9b4a7642d85f439c 253 15 0:3575 / /var/lib/docker/aufs/mnt/feb67148f548d70cb7484f2aaad2a86051cd6867a561741a2f13b552457d666e rw,relatime - aufs none rw,si=9b4a76468c55739c 254 15 0:3576 / /var/lib/docker/aufs/mnt/cdf1f96c36d35a96041a896bf398ec0f7dc3b0fb0643612a0f4b6ff96e04e1bb rw,relatime - aufs none rw,si=9b4a76468c55139c 255 15 0:3577 / /var/lib/docker/aufs/mnt/ec6e505872353268451ac4bc034c1df00f3bae4a3ea2261c6e48f7bd5417c1b3 rw,relatime - aufs none rw,si=9b4a76468c55339c 256 15 0:3578 / /var/lib/docker/aufs/mnt/d6dc8aca64efd90e0bc10274001882d0efb310d42ccbf5712b99b169053b8b1a rw,relatime - aufs none rw,si=9b4a7642738c439c 257 15 0:3579 / /var/lib/docker/aufs/mnt/d712594e2ff6eaeb895bfd150d694bd1305fb927e7a186b2dab7df2ea95f8f81 rw,relatime - aufs none rw,si=9b4a76401268f39c 259 15 0:3581 / /var/lib/docker/aufs/mnt/dbfa1174cd78cde2d7410eae442af0b416c4a0e6f87ed4ff1e9f169a0029abc0 rw,relatime - aufs none rw,si=9b4a76401268b39c 260 15 0:3582 / /var/lib/docker/aufs/mnt/e883f5a82316d7856fbe93ee8c0af5a920b7079619dd95c4ffd88bbd309d28dd rw,relatime - aufs none rw,si=9b4a76468c55439c 261 15 0:3583 / /var/lib/docker/aufs/mnt/fdec3eff581c4fc2b09f87befa2fa021f3f2d373bea636a87f1fb5b367d6347a rw,relatime - aufs none rw,si=9b4a7644aa1af39c 262 15 0:3584 / /var/lib/docker/aufs/mnt/ef764e26712184653067ecf7afea18a80854c41331ca0f0ef03e1bacf90a6ffc rw,relatime - aufs none rw,si=9b4a7644aa1a939c 263 15 0:3585 / /var/lib/docker/aufs/mnt/f3176b40c41fce8ce6942936359a2001a6f1b5c1bb40ee224186db0789ec2f76 rw,relatime - aufs none rw,si=9b4a7644aa1ab39c 264 15 0:3586 / /var/lib/docker/aufs/mnt/f5daf06785d3565c6dd18ea7d953d9a8b9606107781e63270fe0514508736e6a rw,relatime - aufs none rw,si=9b4a76401268c39c 58 15 0:3587 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8-init rw,relatime - aufs none rw,si=9b4a76444445839c 67 15 0:3588 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8 rw,relatime - aufs none rw,si=9b4a7644badd339c 265 15 0:3610 / 
/var/lib/docker/aufs/mnt/e812472cd2c8c4748d1ef71fac4e77e50d661b9349abe66ce3e23511ed44f414 rw,relatime - aufs none rw,si=9b4a76427937d39c 270 15 0:3615 / /var/lib/docker/aufs/mnt/997636e7c5c9d0d1376a217e295c14c205350b62bc12052804fb5f90abe6f183 rw,relatime - aufs none rw,si=9b4a76406540739c 273 15 0:3618 / /var/lib/docker/aufs/mnt/d5794d080417b6e52e69227c3873e0e4c1ff0d5a845ebe3860ec2f89a47a2a1e rw,relatime - aufs none rw,si=9b4a76454814039c 278 15 0:3623 / /var/lib/docker/aufs/mnt/586bdd48baced671bb19bc4d294ec325f26c55545ae267db426424f157d59c48 rw,relatime - aufs none rw,si=9b4a7644b439f39c 281 15 0:3626 / /var/lib/docker/aufs/mnt/69739d022f89f8586908bbd5edbbdd95ea5256356f177f9ffcc6ef9c0ea752d2 rw,relatime - aufs none rw,si=9b4a7644a0f1b39c 286 15 0:3631 / /var/lib/docker/aufs/mnt/ff28c27d5f894363993622de26d5dd352dba072f219e4691d6498c19bbbc15a9 rw,relatime - aufs none rw,si=9b4a7642265b339c 289 15 0:3634 / /var/lib/docker/aufs/mnt/aa128fe0e64fdede333aa48fd9de39530c91a9244a0f0649a3c411c61e372daa rw,relatime - aufs none rw,si=9b4a764012ada39c 99 15 8:33 / /media/REMOVE\040ME rw,nosuid,nodev,relatime - fuseblk /dev/sdc1 rw,user_id=0,group_id=0,allow_other,blksize=4096` ) func TestParseFedoraMountinfo(t *testing.T) { r := bytes.NewBuffer([]byte(fedoraMountinfo)) _, err := parseInfoFile(r) if err != nil { t.Fatal(err) } } func TestParseUbuntuMountinfo(t *testing.T) { r := bytes.NewBuffer([]byte(ubuntuMountInfo)) _, err := parseInfoFile(r) if err != nil { t.Fatal(err) } } func TestParseGentooMountinfo(t *testing.T) { r := bytes.NewBuffer([]byte(gentooMountinfo)) _, err := parseInfoFile(r) if err != nil { t.Fatal(err) } } func TestParseFedoraMountinfoFields(t *testing.T) { r := bytes.NewBuffer([]byte(fedoraMountinfo)) infos, err := parseInfoFile(r) if err != nil { t.Fatal(err) } expectedLength := 58 if len(infos) != expectedLength { t.Fatalf("Expected %d entries, got %d", expectedLength, len(infos)) } mi := MountInfo{ Id: 15, Parent: 35, Major: 0, Minor: 3, Root: "/", Mountpoint: "/proc", Opts: "rw,nosuid,nodev,noexec,relatime", Optional: "shared:5", Fstype: "proc", Source: "proc", VfsOpts: "rw", } if *infos[0] != mi { t.Fatalf("expected %#v, got %#v", mi, infos[0]) } } docker-1.6.2/pkg/version/0000755000175000017500000000000012524223634014602 5ustar tianontianondocker-1.6.2/pkg/version/version.go0000644000175000017500000000260512524223634016621 0ustar tianontianonpackage version import ( "strconv" "strings" ) // Version provides utility methods for comparing versions. 
type Version string func (v Version) compareTo(other Version) int { var ( currTab = strings.Split(string(v), ".") otherTab = strings.Split(string(other), ".") ) max := len(currTab) if len(otherTab) > max { max = len(otherTab) } for i := 0; i < max; i++ { var currInt, otherInt int if len(currTab) > i { currInt, _ = strconv.Atoi(currTab[i]) } if len(otherTab) > i { otherInt, _ = strconv.Atoi(otherTab[i]) } if currInt > otherInt { return 1 } if otherInt > currInt { return -1 } } return 0 } // LessThan checks if a version is less than another version func (v Version) LessThan(other Version) bool { return v.compareTo(other) == -1 } // LessThanOrEqualTo checks if a version is less than or equal to another func (v Version) LessThanOrEqualTo(other Version) bool { return v.compareTo(other) <= 0 } // GreaterThan checks if a version is greater than another one func (v Version) GreaterThan(other Version) bool { return v.compareTo(other) == 1 } // GreaterThanOrEqualTo checks if a version is greater than or equal to another func (v Version) GreaterThanOrEqualTo(other Version) bool { return v.compareTo(other) >= 0 } // Equal checks if a version is equal to another func (v Version) Equal(other Version) bool { return v.compareTo(other) == 0 } docker-1.6.2/pkg/version/version_test.go0000644000175000017500000000136412524223634017661 0ustar tianontianonpackage version import ( "testing" ) func assertVersion(t *testing.T, a, b string, result int) { if r := Version(a).compareTo(Version(b)); r != result { t.Fatalf("Unexpected version comparison result. Found %d, expected %d", r, result) } } func TestCompareVersion(t *testing.T) { assertVersion(t, "1.12", "1.12", 0) assertVersion(t, "1.0.0", "1", 0) assertVersion(t, "1", "1.0.0", 0) assertVersion(t, "1.05.00.0156", "1.0.221.9289", 1) assertVersion(t, "1", "1.0.1", -1) assertVersion(t, "1.0.1", "1", 1) assertVersion(t, "1.0.1", "1.0.2", -1) assertVersion(t, "1.0.2", "1.0.3", -1) assertVersion(t, "1.0.3", "1.1", -1) assertVersion(t, "1.1", "1.1.1", -1) assertVersion(t, "1.1.1", "1.1.2", -1) assertVersion(t, "1.1.2", "1.2", -1) } docker-1.6.2/pkg/proxy/0000755000175000017500000000000012524223634014276 5ustar tianontianondocker-1.6.2/pkg/proxy/stub_proxy.go0000644000175000017500000000076512524223634017053 0ustar tianontianonpackage proxy import ( "net" ) type StubProxy struct { frontendAddr net.Addr backendAddr net.Addr } func (p *StubProxy) Run() {} func (p *StubProxy) Close() {} func (p *StubProxy) FrontendAddr() net.Addr { return p.frontendAddr } func (p *StubProxy) BackendAddr() net.Addr { return p.backendAddr } func NewStubProxy(frontendAddr, backendAddr net.Addr) (Proxy, error) { return &StubProxy{ frontendAddr: frontendAddr, backendAddr: backendAddr, }, nil } docker-1.6.2/pkg/proxy/network_proxy_test.go0000644000175000017500000001255412524223634020625 0ustar tianontianonpackage proxy import ( "bytes" "fmt" "io" "net" "strings" "testing" "time" ) var testBuf = []byte("Buffalo buffalo Buffalo buffalo buffalo buffalo Buffalo buffalo") var testBufSize = len(testBuf) type EchoServer interface { Run() Close() LocalAddr() net.Addr } type TCPEchoServer struct { listener net.Listener testCtx *testing.T } type UDPEchoServer struct { conn net.PacketConn testCtx *testing.T } func NewEchoServer(t *testing.T, proto, address string) EchoServer { var server EchoServer if strings.HasPrefix(proto, "tcp") { listener, err := net.Listen(proto, address) if err != nil { t.Fatal(err) } server = &TCPEchoServer{listener: listener, testCtx: t} } else { socket, err :=
net.ListenPacket(proto, address) if err != nil { t.Fatal(err) } server = &UDPEchoServer{conn: socket, testCtx: t} } return server } func (server *TCPEchoServer) Run() { go func() { for { client, err := server.listener.Accept() if err != nil { return } go func(client net.Conn) { if _, err := io.Copy(client, client); err != nil { server.testCtx.Logf("can't echo to the client: %v\n", err.Error()) } client.Close() }(client) } }() } func (server *TCPEchoServer) LocalAddr() net.Addr { return server.listener.Addr() } func (server *TCPEchoServer) Close() { server.listener.Close() } func (server *UDPEchoServer) Run() { go func() { readBuf := make([]byte, 1024) for { read, from, err := server.conn.ReadFrom(readBuf) if err != nil { return } for i := 0; i != read; { written, err := server.conn.WriteTo(readBuf[i:read], from) if err != nil { break } i += written } } }() } func (server *UDPEchoServer) LocalAddr() net.Addr { return server.conn.LocalAddr() } func (server *UDPEchoServer) Close() { server.conn.Close() } func testProxyAt(t *testing.T, proto string, proxy Proxy, addr string) { defer proxy.Close() go proxy.Run() client, err := net.Dial(proto, addr) if err != nil { t.Fatalf("Can't connect to the proxy: %v", err) } defer client.Close() client.SetDeadline(time.Now().Add(10 * time.Second)) if _, err = client.Write(testBuf); err != nil { t.Fatal(err) } recvBuf := make([]byte, testBufSize) if _, err = client.Read(recvBuf); err != nil { t.Fatal(err) } if !bytes.Equal(testBuf, recvBuf) { t.Fatal(fmt.Errorf("Expected [%v] but got [%v]", testBuf, recvBuf)) } } func testProxy(t *testing.T, proto string, proxy Proxy) { testProxyAt(t, proto, proxy, proxy.FrontendAddr().String()) } func TestTCP4Proxy(t *testing.T) { backend := NewEchoServer(t, "tcp", "127.0.0.1:0") defer backend.Close() backend.Run() frontendAddr := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) if err != nil { t.Fatal(err) } testProxy(t, "tcp", proxy) } func TestTCP6Proxy(t *testing.T) { backend := NewEchoServer(t, "tcp", "[::1]:0") defer backend.Close() backend.Run() frontendAddr := &net.TCPAddr{IP: net.IPv6loopback, Port: 0} proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) if err != nil { t.Fatal(err) } testProxy(t, "tcp", proxy) } func TestTCPDualStackProxy(t *testing.T) { // If I understand `godoc -src net favoriteAddrFamily` (used by the // net.Listen* functions) correctly this should work, but it doesn't.
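// Concretely: the [::1] listener created below would also have to accept the IPv4-mapped // dial to 127.0.0.1 that testProxyAt issues at the end of this test.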
t.Skip("No support for dual stack yet") backend := NewEchoServer(t, "tcp", "[::1]:0") defer backend.Close() backend.Run() frontendAddr := &net.TCPAddr{IP: net.IPv6loopback, Port: 0} proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) if err != nil { t.Fatal(err) } ipv4ProxyAddr := &net.TCPAddr{ IP: net.IPv4(127, 0, 0, 1), Port: proxy.FrontendAddr().(*net.TCPAddr).Port, } testProxyAt(t, "tcp", proxy, ipv4ProxyAddr.String()) } func TestUDP4Proxy(t *testing.T) { backend := NewEchoServer(t, "udp", "127.0.0.1:0") defer backend.Close() backend.Run() frontendAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) if err != nil { t.Fatal(err) } testProxy(t, "udp", proxy) } func TestUDP6Proxy(t *testing.T) { backend := NewEchoServer(t, "udp", "[::1]:0") defer backend.Close() backend.Run() frontendAddr := &net.UDPAddr{IP: net.IPv6loopback, Port: 0} proxy, err := NewProxy(frontendAddr, backend.LocalAddr()) if err != nil { t.Fatal(err) } testProxy(t, "udp", proxy) } func TestUDPWriteError(t *testing.T) { frontendAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} // Hopefully, this port will be free: */ backendAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 25587} proxy, err := NewProxy(frontendAddr, backendAddr) if err != nil { t.Fatal(err) } defer proxy.Close() go proxy.Run() client, err := net.Dial("udp", "127.0.0.1:25587") if err != nil { t.Fatalf("Can't connect to the proxy: %v", err) } defer client.Close() // Make sure the proxy doesn't stop when there is no actual backend: client.Write(testBuf) client.Write(testBuf) backend := NewEchoServer(t, "udp", "127.0.0.1:25587") defer backend.Close() backend.Run() client.SetDeadline(time.Now().Add(10 * time.Second)) if _, err = client.Write(testBuf); err != nil { t.Fatal(err) } recvBuf := make([]byte, testBufSize) if _, err = client.Read(recvBuf); err != nil { t.Fatal(err) } if !bytes.Equal(testBuf, recvBuf) { t.Fatal(fmt.Errorf("Expected [%v] but got [%v]", testBuf, recvBuf)) } } docker-1.6.2/pkg/proxy/proxy.go0000644000175000017500000000131412524223634016005 0ustar tianontianonpackage proxy import ( "fmt" "net" ) type Proxy interface { // Start forwarding traffic back and forth the front and back-end // addresses. Run() // Stop forwarding traffic and close both ends of the Proxy. Close() // Return the address on which the proxy is listening. FrontendAddr() net.Addr // Return the proxied address. 
BackendAddr() net.Addr } func NewProxy(frontendAddr, backendAddr net.Addr) (Proxy, error) { switch frontendAddr.(type) { case *net.UDPAddr: return NewUDPProxy(frontendAddr.(*net.UDPAddr), backendAddr.(*net.UDPAddr)) case *net.TCPAddr: return NewTCPProxy(frontendAddr.(*net.TCPAddr), backendAddr.(*net.TCPAddr)) default: panic(fmt.Errorf("Unsupported protocol")) } } docker-1.6.2/pkg/proxy/udp_proxy.go0000644000175000017500000000771312524223634016666 0ustar tianontianonpackage proxy import ( "encoding/binary" "net" "strings" "sync" "syscall" "time" log "github.com/Sirupsen/logrus" ) const ( UDPConnTrackTimeout = 90 * time.Second UDPBufSize = 65507 ) // A net.Addr where the IP is split into two fields so you can use it as a key // in a map: type connTrackKey struct { IPHigh uint64 IPLow uint64 Port int } func newConnTrackKey(addr *net.UDPAddr) *connTrackKey { if len(addr.IP) == net.IPv4len { return &connTrackKey{ IPHigh: 0, IPLow: uint64(binary.BigEndian.Uint32(addr.IP)), Port: addr.Port, } } return &connTrackKey{ IPHigh: binary.BigEndian.Uint64(addr.IP[:8]), IPLow: binary.BigEndian.Uint64(addr.IP[8:]), Port: addr.Port, } } type connTrackMap map[connTrackKey]*net.UDPConn type UDPProxy struct { listener *net.UDPConn frontendAddr *net.UDPAddr backendAddr *net.UDPAddr connTrackTable connTrackMap connTrackLock sync.Mutex } func NewUDPProxy(frontendAddr, backendAddr *net.UDPAddr) (*UDPProxy, error) { listener, err := net.ListenUDP("udp", frontendAddr) if err != nil { return nil, err } return &UDPProxy{ listener: listener, frontendAddr: listener.LocalAddr().(*net.UDPAddr), backendAddr: backendAddr, connTrackTable: make(connTrackMap), }, nil } func (proxy *UDPProxy) replyLoop(proxyConn *net.UDPConn, clientAddr *net.UDPAddr, clientKey *connTrackKey) { defer func() { proxy.connTrackLock.Lock() delete(proxy.connTrackTable, *clientKey) proxy.connTrackLock.Unlock() proxyConn.Close() }() readBuf := make([]byte, UDPBufSize) for { proxyConn.SetReadDeadline(time.Now().Add(UDPConnTrackTimeout)) again: read, err := proxyConn.Read(readBuf) if err != nil { if err, ok := err.(*net.OpError); ok && err.Err == syscall.ECONNREFUSED { // This will happen if the last write failed // (e.g. nothing is actually listening on the // proxied port on the container), ignore it // and continue until UDPConnTrackTimeout // expires: goto again } return } for i := 0; i != read; { written, err := proxy.listener.WriteToUDP(readBuf[i:read], clientAddr) if err != nil { return } i += written } } } func (proxy *UDPProxy) Run() { readBuf := make([]byte, UDPBufSize) for { read, from, err := proxy.listener.ReadFromUDP(readBuf) if err != nil { // NOTE: Apparently ReadFrom doesn't return // ECONNREFUSED like Read does (see comment in // UDPProxy.replyLoop) if !isClosedError(err) { log.Printf("Stopping proxy on udp/%v for udp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err) } break } fromKey := newConnTrackKey(from) proxy.connTrackLock.Lock() proxyConn, hit := proxy.connTrackTable[*fromKey] if !hit { proxyConn, err = net.DialUDP("udp", nil, proxy.backendAddr) if err != nil { log.Printf("Can't proxy a datagram to udp/%s: %s\n", proxy.backendAddr, err) proxy.connTrackLock.Unlock() continue } proxy.connTrackTable[*fromKey] = proxyConn go proxy.replyLoop(proxyConn, from, fromKey) } proxy.connTrackLock.Unlock() for i := 0; i != read; { written, err := proxyConn.Write(readBuf[i:read]) if err != nil { log.Printf("Can't proxy a datagram to udp/%s: %s\n", proxy.backendAddr, err) break } i += written } } } func (proxy *UDPProxy) Close() {
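// Closing the listener unblocks the Run loop; the lock below then guards a final sweep // over connTrackTable that closes every tracked backend connection.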
proxy.listener.Close() proxy.connTrackLock.Lock() defer proxy.connTrackLock.Unlock() for _, conn := range proxy.connTrackTable { conn.Close() } } func (proxy *UDPProxy) FrontendAddr() net.Addr { return proxy.frontendAddr } func (proxy *UDPProxy) BackendAddr() net.Addr { return proxy.backendAddr } func isClosedError(err error) bool { /* This comparison is ugly, but unfortunately, net.go doesn't export errClosing. * See: * http://golang.org/src/pkg/net/net.go * https://code.google.com/p/go/issues/detail?id=4337 * https://groups.google.com/forum/#!msg/golang-nuts/0_aaCvBmOcM/SptmDyX1XJMJ */ return strings.HasSuffix(err.Error(), "use of closed network connection") } docker-1.6.2/pkg/proxy/tcp_proxy.go0000644000175000017500000000424212524223634016656 0ustar tianontianonpackage proxy import ( "io" "net" "syscall" log "github.com/Sirupsen/logrus" ) type TCPProxy struct { listener *net.TCPListener frontendAddr *net.TCPAddr backendAddr *net.TCPAddr } func NewTCPProxy(frontendAddr, backendAddr *net.TCPAddr) (*TCPProxy, error) { listener, err := net.ListenTCP("tcp", frontendAddr) if err != nil { return nil, err } // If the port in frontendAddr was 0 then ListenTCP will have picked // a port to listen on, hence the call to Addr to get that actual port: return &TCPProxy{ listener: listener, frontendAddr: listener.Addr().(*net.TCPAddr), backendAddr: backendAddr, }, nil } func (proxy *TCPProxy) clientLoop(client *net.TCPConn, quit chan bool) { backend, err := net.DialTCP("tcp", nil, proxy.backendAddr) if err != nil { log.Printf("Can't forward traffic to backend tcp/%v: %s\n", proxy.backendAddr, err) client.Close() return } event := make(chan int64) var broker = func(to, from *net.TCPConn) { written, err := io.Copy(to, from) if err != nil { // If the socket we are writing to is shutdown with // SHUT_WR, forward it to the other end of the pipe: if err, ok := err.(*net.OpError); ok && err.Err == syscall.EPIPE { from.CloseWrite() } } to.CloseRead() event <- written } go broker(client, backend) go broker(backend, client) var transferred int64 = 0 for i := 0; i < 2; i++ { select { case written := <-event: transferred += written case <-quit: // Interrupt the two brokers and "join" them.
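// Closing both sockets forces each io.Copy inside broker to return, so the remaining // byte counts can be drained from the event channel below.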
client.Close() backend.Close() for ; i < 2; i++ { transferred += <-event } return } } client.Close() backend.Close() } func (proxy *TCPProxy) Run() { quit := make(chan bool) defer close(quit) for { client, err := proxy.listener.Accept() if err != nil { log.Printf("Stopping proxy on tcp/%v for tcp/%v (%s)", proxy.frontendAddr, proxy.backendAddr, err) return } go proxy.clientLoop(client.(*net.TCPConn), quit) } } func (proxy *TCPProxy) Close() { proxy.listener.Close() } func (proxy *TCPProxy) FrontendAddr() net.Addr { return proxy.frontendAddr } func (proxy *TCPProxy) BackendAddr() net.Addr { return proxy.backendAddr } docker-1.6.2/pkg/sysinfo/0000755000175000017500000000000012524223634014607 5ustar tianontianondocker-1.6.2/pkg/sysinfo/sysinfo.go0000644000175000017500000000236112524223634016632 0ustar tianontianonpackage sysinfo import ( "io/ioutil" "os" "path" log "github.com/Sirupsen/logrus" "github.com/docker/libcontainer/cgroups" ) type SysInfo struct { MemoryLimit bool SwapLimit bool IPv4ForwardingDisabled bool AppArmor bool } func New(quiet bool) *SysInfo { sysInfo := &SysInfo{} if cgroupMemoryMountpoint, err := cgroups.FindCgroupMountpoint("memory"); err != nil { if !quiet { log.Warnf("%s", err) } } else { _, err1 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.limit_in_bytes")) _, err2 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.soft_limit_in_bytes")) sysInfo.MemoryLimit = err1 == nil && err2 == nil if !sysInfo.MemoryLimit && !quiet { log.Warnf("Your kernel does not support cgroup memory limit.") } _, err = ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.memsw.limit_in_bytes")) sysInfo.SwapLimit = err == nil if !sysInfo.SwapLimit && !quiet { log.Warnf("Your kernel does not support cgroup swap limit.") } } // Check if AppArmor seems to be enabled on this system. if _, err := os.Stat("/sys/kernel/security/apparmor"); os.IsNotExist(err) { sysInfo.AppArmor = false } else { sysInfo.AppArmor = true } return sysInfo } docker-1.6.2/pkg/stdcopy/0000755000175000017500000000000012524223634014602 5ustar tianontianondocker-1.6.2/pkg/stdcopy/stdcopy_test.go0000644000175000017500000000057412524223634017663 0ustar tianontianonpackage stdcopy import ( "bytes" "io/ioutil" "testing" ) func BenchmarkWrite(b *testing.B) { w := NewStdWriter(ioutil.Discard, Stdout) data := []byte("Test line for testing stdwriter performance\n") data = bytes.Repeat(data, 100) b.SetBytes(int64(len(data))) b.ResetTimer() for i := 0; i < b.N; i++ { if _, err := w.Write(data); err != nil { b.Fatal(err) } } } docker-1.6.2/pkg/stdcopy/stdcopy.go0000644000175000017500000001053412524223634016621 0ustar tianontianonpackage stdcopy import ( "encoding/binary" "errors" "io" log "github.com/Sirupsen/logrus" ) const ( StdWriterPrefixLen = 8 StdWriterFdIndex = 0 StdWriterSizeIndex = 4 ) type StdType [StdWriterPrefixLen]byte var ( Stdin StdType = StdType{0: 0} Stdout StdType = StdType{0: 1} Stderr StdType = StdType{0: 2} ) type StdWriter struct { io.Writer prefix StdType sizeBuf []byte } func (w *StdWriter) Write(buf []byte) (n int, err error) { var n1, n2 int if w == nil || w.Writer == nil { return 0, errors.New("Writer not instantiated") } binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf))) n1, err = w.Writer.Write(w.prefix[:]) if err != nil { n = n1 - StdWriterPrefixLen } else { n2, err = w.Writer.Write(buf) n = n1 + n2 - StdWriterPrefixLen } if n < 0 { n = 0 } return } // NewStdWriter instantiates a new Writer.
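// Each frame it emits is, byte for byte (layout inferred from StdWriter.Write above and // StdCopy below, noted here for clarity): buf[0] = stream id (0=stdin, 1=stdout, 2=stderr), // buf[1:4] = unused padding, buf[4:8] = big-endian uint32 payload length, then the payload itself.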
// Everything written to it will be encapsulated using a custom format, // and written to the underlying `w` stream. // This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. // `t` indicates the id of the stream to encapsulate. // It can be Stdin, Stdout or Stderr. func NewStdWriter(w io.Writer, t StdType) *StdWriter { if len(t) != StdWriterPrefixLen { return nil } return &StdWriter{ Writer: w, prefix: t, sizeBuf: make([]byte, 4), } } var ErrInvalidStdHeader = errors.New("Unrecognized input header") // StdCopy is a modified version of io.Copy. // // StdCopy will demultiplex `src`, assuming that it contains two streams, // previously multiplexed together using a StdWriter instance. // As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. // // StdCopy will read until it hits EOF on `src`. It will then return a nil error. // In other words: if `err` is non-nil, it indicates a real underlying error. // // `written` will hold the total number of bytes written to `dstout` and `dsterr`. func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { var ( buf = make([]byte, 32*1024+StdWriterPrefixLen+1) bufLen = len(buf) nr, nw int er, ew error out io.Writer frameSize int ) for { // Make sure we have at least a full header for nr < StdWriterPrefixLen { var nr2 int nr2, er = src.Read(buf[nr:]) nr += nr2 if er == io.EOF { if nr < StdWriterPrefixLen { log.Debugf("Corrupted prefix: %v", buf[:nr]) return written, nil } break } if er != nil { log.Debugf("Error reading header: %s", er) return 0, er } } // Check the first byte to know where to write switch buf[StdWriterFdIndex] { case 0: fallthrough case 1: // Write on stdout out = dstout case 2: // Write on stderr out = dsterr default: log.Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex]) return 0, ErrInvalidStdHeader } // Retrieve the size of the frame frameSize = int(binary.BigEndian.Uint32(buf[StdWriterSizeIndex : StdWriterSizeIndex+4])) log.Debugf("framesize: %d", frameSize) // Check if the buffer is big enough to read the frame. // Extend it if necessary. if frameSize+StdWriterPrefixLen > bufLen { log.Debugf("Extending buffer cap by %d (was %d)", frameSize+StdWriterPrefixLen-bufLen+1, len(buf)) buf = append(buf, make([]byte, frameSize+StdWriterPrefixLen-bufLen+1)...)
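// append both grows the buffer and preserves the bytes already read; bufLen is // refreshed on the next line so the size checks above keep seeing the new capacity.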
bufLen = len(buf) } // While the number of bytes read is less than the size of the frame + header, we keep reading for nr < frameSize+StdWriterPrefixLen { var nr2 int nr2, er = src.Read(buf[nr:]) nr += nr2 if er == io.EOF { if nr < frameSize+StdWriterPrefixLen { log.Debugf("Corrupted frame: %v", buf[StdWriterPrefixLen:nr]) return written, nil } break } if er != nil { log.Debugf("Error reading frame: %s", er) return 0, er } } // Write the retrieved frame (without header) nw, ew = out.Write(buf[StdWriterPrefixLen : frameSize+StdWriterPrefixLen]) if ew != nil { log.Debugf("Error writing frame: %s", ew) return 0, ew } // If the frame has not been fully written: error if nw != frameSize { log.Debugf("Error Short Write: (%d on %d)", nw, frameSize) return 0, io.ErrShortWrite } written += int64(nw) // Move the rest of the buffer to the beginning copy(buf, buf[frameSize+StdWriterPrefixLen:]) // Move the index nr -= frameSize + StdWriterPrefixLen } } docker-1.6.2/pkg/common/0000755000175000017500000000000012524223634014405 5ustar tianontianondocker-1.6.2/pkg/common/randomid.go0000644000175000017500000000226512524223634016536 0ustar tianontianonpackage common import ( "crypto/rand" "encoding/hex" "io" "strconv" ) // TruncateID returns a shorthand version of a string identifier for convenience. // A collision with other shorthands is very unlikely, but possible. // In case of a collision a lookup with TruncIndex.Get() will fail, and the caller // will need to use a longer prefix, or the full-length Id. func TruncateID(id string) string { shortLen := 12 if len(id) < shortLen { shortLen = len(id) } return id[:shortLen] } // GenerateRandomID returns a unique id func GenerateRandomID() string { for { id := make([]byte, 32) if _, err := io.ReadFull(rand.Reader, id); err != nil { panic(err) // This shouldn't happen } value := hex.EncodeToString(id) // if we try to parse the truncated form as an int and we don't have // an error then the value is all numeric and causes issues when // used as a hostname.
ref #3869 if _, err := strconv.ParseInt(TruncateID(value), 10, 64); err == nil { continue } return value } } func RandomString() string { id := make([]byte, 32) if _, err := io.ReadFull(rand.Reader, id); err != nil { panic(err) // This shouldn't happen } return hex.EncodeToString(id) } docker-1.6.2/pkg/common/randomid_test.go0000644000175000017500000000233512524223634017573 0ustar tianontianonpackage common import ( "testing" ) func TestShortenId(t *testing.T) { id := GenerateRandomID() truncID := TruncateID(id) if len(truncID) != 12 { t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) } } func TestShortenIdEmpty(t *testing.T) { id := "" truncID := TruncateID(id) if len(truncID) > len(id) { t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) } } func TestShortenIdInvalid(t *testing.T) { id := "1234" truncID := TruncateID(id) if len(truncID) != len(id) { t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) } } func TestGenerateRandomID(t *testing.T) { id := GenerateRandomID() if len(id) != 64 { t.Fatalf("Id returned is incorrect: %s", id) } } func TestRandomString(t *testing.T) { id := RandomString() if len(id) != 64 { t.Fatalf("Id returned is incorrect: %s", id) } } func TestRandomStringUniqueness(t *testing.T) { repeats := 25 set := make(map[string]struct{}, repeats) for i := 0; i < repeats; i = i + 1 { id := RandomString() if len(id) != 64 { t.Fatalf("Id returned is incorrect: %s", id) } if _, ok := set[id]; ok { t.Fatalf("Random number is repeated") } set[id] = struct{}{} } } docker-1.6.2/pkg/jsonlog/0000755000175000017500000000000012524223634014570 5ustar tianontianondocker-1.6.2/pkg/jsonlog/jsonlog.go0000644000175000017500000000176412524223634016602 0ustar tianontianonpackage jsonlog import ( "encoding/json" "fmt" "io" "time" log "github.com/Sirupsen/logrus" ) type JSONLog struct { Log string `json:"log,omitempty"` Stream string `json:"stream,omitempty"` Created time.Time `json:"time"` } func (jl *JSONLog) Format(format string) (string, error) { if format == "" { return jl.Log, nil } if format == "json" { m, err := json.Marshal(jl) return string(m), err } return fmt.Sprintf("%s %s", jl.Created.Format(format), jl.Log), nil } func (jl *JSONLog) Reset() { jl.Log = "" jl.Stream = "" jl.Created = time.Time{} } func WriteLog(src io.Reader, dst io.Writer, format string) error { dec := json.NewDecoder(src) l := &JSONLog{} for { if err := dec.Decode(l); err == io.EOF { return nil } else if err != nil { log.Printf("Error streaming logs: %s", err) return err } line, err := l.Format(format) if err != nil { return err } if _, err := io.WriteString(dst, line); err != nil { return err } l.Reset() } } docker-1.6.2/pkg/jsonlog/jsonlog_marshalling.go0000644000175000017500000000761112524223634021160 0ustar tianontianon// This code was initially generated by ffjson // This code was generated via the following steps: // $ go get -u github.com/pquerna/ffjson // $ make BIND_DIR=. shell // $ ffjson pkg/jsonlog/jsonlog.go // $ mv pkg/jsonglog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go // // It has been modified to improve the performance of time marshalling to JSON // and to clean it up. 
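// In short: the generated code now formats the timestamp via timeutils.FastMarshalJSON // instead of calling Created.MarshalJSON; the diff below records the exact edits.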
// Should this code need to be regenerated when the JSONLog struct is changed, // the relevant changes which have been made are: // import ( // "bytes" //- // "unicode/utf8" //+ //+ "github.com/docker/docker/pkg/timeutils" // ) // // func (mj *JSONLog) MarshalJSON() ([]byte, error) { //@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) { // } // return buf.Bytes(), nil // } //+ // func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { //- var err error //- var obj []byte //- var first bool = true //- _ = obj //- _ = err //- _ = first //+ var ( //+ err error //+ timestamp string //+ first bool = true //+ ) // buf.WriteString(`{`) // if len(mj.Log) != 0 { // if first == true { //@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { // buf.WriteString(`,`) // } // buf.WriteString(`"time":`) //- obj, err = mj.Created.MarshalJSON() //+ timestamp, err = timeutils.FastMarshalJSON(mj.Created) // if err != nil { // return err // } //- buf.Write(obj) //+ buf.WriteString(timestamp) // buf.WriteString(`}`) // return nil // } package jsonlog import ( "bytes" "unicode/utf8" "github.com/docker/docker/pkg/timeutils" ) func (mj *JSONLog) MarshalJSON() ([]byte, error) { var buf bytes.Buffer buf.Grow(1024) err := mj.MarshalJSONBuf(&buf) if err != nil { return nil, err } return buf.Bytes(), nil } func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { var ( err error timestamp string first bool = true ) buf.WriteString(`{`) if len(mj.Log) != 0 { if first == true { first = false } else { buf.WriteString(`,`) } buf.WriteString(`"log":`) ffjson_WriteJsonString(buf, mj.Log) } if len(mj.Stream) != 0 { if first == true { first = false } else { buf.WriteString(`,`) } buf.WriteString(`"stream":`) ffjson_WriteJsonString(buf, mj.Stream) } if first == true { first = false } else { buf.WriteString(`,`) } buf.WriteString(`"time":`) timestamp, err = timeutils.FastMarshalJSON(mj.Created) if err != nil { return err } buf.WriteString(timestamp) buf.WriteString(`}`) return nil } func ffjson_WriteJsonString(buf *bytes.Buffer, s string) { const hex = "0123456789abcdef" buf.WriteByte('"') start := 0 for i := 0; i < len(s); { if b := s[i]; b < utf8.RuneSelf { if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { i++ continue } if start < i { buf.WriteString(s[start:i]) } switch b { case '\\', '"': buf.WriteByte('\\') buf.WriteByte(b) case '\n': buf.WriteByte('\\') buf.WriteByte('n') case '\r': buf.WriteByte('\\') buf.WriteByte('r') default: buf.WriteString(`\u00`) buf.WriteByte(hex[b>>4]) buf.WriteByte(hex[b&0xF]) } i++ start = i continue } c, size := utf8.DecodeRuneInString(s[i:]) if c == utf8.RuneError && size == 1 { if start < i { buf.WriteString(s[start:i]) } buf.WriteString(`\ufffd`) i += size start = i continue } if c == '\u2028' || c == '\u2029' { if start < i { buf.WriteString(s[start:i]) } buf.WriteString(`\u202`) buf.WriteByte(hex[c&0xF]) i += size start = i continue } i += size } if start < len(s) { buf.WriteString(s[start:]) } buf.WriteByte('"') } docker-1.6.2/pkg/jsonlog/jsonlog_test.go0000644000175000017500000000300712524223634017631 0ustar tianontianonpackage jsonlog import ( "bytes" "encoding/json" "io/ioutil" "regexp" "strings" "testing" "time" "github.com/docker/docker/pkg/timeutils" ) func TestWriteLog(t *testing.T) { var buf bytes.Buffer e := json.NewEncoder(&buf) testLine := "Line that thinks that it is log line from docker\n" for i := 0; i < 30; i++ { e.Encode(JSONLog{Log: testLine, Stream: "stdout", Created: time.Now()}) } w := 
bytes.NewBuffer(nil) format := timeutils.RFC3339NanoFixed if err := WriteLog(&buf, w, format); err != nil { t.Fatal(err) } res := w.String() t.Logf("Result of WriteLog: %q", res) lines := strings.Split(strings.TrimSpace(res), "\n") if len(lines) != 30 { t.Fatalf("Must be 30 lines but got %d", len(lines)) } // 30+ symbols, five more can come from system timezone logRe := regexp.MustCompile(`.{30,} Line that thinks that it is log line from docker`) for _, l := range lines { if !logRe.MatchString(l) { t.Fatalf("Log line not in expected format: %q", l) } } } func BenchmarkWriteLog(b *testing.B) { var buf bytes.Buffer e := json.NewEncoder(&buf) testLine := "Line that thinks that it is log line from docker\n" for i := 0; i < 30; i++ { e.Encode(JSONLog{Log: testLine, Stream: "stdout", Created: time.Now()}) } r := bytes.NewReader(buf.Bytes()) w := ioutil.Discard format := timeutils.RFC3339NanoFixed b.SetBytes(int64(r.Len())) b.ResetTimer() for i := 0; i < b.N; i++ { if err := WriteLog(r, w, format); err != nil { b.Fatal(err) } b.StopTimer() r.Seek(0, 0) b.StartTimer() } } docker-1.6.2/pkg/term/0000755000175000017500000000000012524223634014064 5ustar tianontianondocker-1.6.2/pkg/term/term_windows.go0000644000175000017500000001006612524223634017137 0ustar tianontianon// +build windows package term import ( "io" "os" "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/term/winconsole" ) // State holds the console mode for the terminal. type State struct { mode uint32 } // Winsize is used for window size. type Winsize struct { Height uint16 Width uint16 x uint16 y uint16 } func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { switch { case os.Getenv("ConEmuANSI") == "ON": // The ConEmu shell emulates ANSI well by default. return os.Stdin, os.Stdout, os.Stderr case os.Getenv("MSYSTEM") != "": // MSYS (mingw) does not emulate ANSI well. return winconsole.WinConsoleStreams() default: return winconsole.WinConsoleStreams() } } // GetFdInfo returns file descriptor and bool indicating whether the file is a terminal. func GetFdInfo(in interface{}) (uintptr, bool) { return winconsole.GetHandleInfo(in) } // GetWinsize retrieves the window size of the terminal connected to the passed file descriptor. func GetWinsize(fd uintptr) (*Winsize, error) { info, err := winconsole.GetConsoleScreenBufferInfo(fd) if err != nil { return nil, err } // TODO(azlinux): Set the pixel width / height of the console (currently unused by any caller) return &Winsize{ Width: uint16(info.Window.Right - info.Window.Left + 1), Height: uint16(info.Window.Bottom - info.Window.Top + 1), x: 0, y: 0}, nil } // SetWinsize sets the size of the given terminal connected to the passed file descriptor. func SetWinsize(fd uintptr, ws *Winsize) error { // TODO(azlinux): Implement SetWinsize logrus.Debugf("[windows] SetWinsize: WARNING -- Unsupported method invoked") return nil } // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal(fd uintptr) bool { return winconsole.IsConsole(fd) } // RestoreTerminal restores the terminal connected to the given file descriptor to a // previous state. func RestoreTerminal(fd uintptr, state *State) error { return winconsole.SetConsoleMode(fd, state.mode) } // SaveState saves the state of the terminal connected to the given file descriptor. func SaveState(fd uintptr) (*State, error) { mode, e := winconsole.GetConsoleMode(fd) if e != nil { return nil, e } return &State{mode}, nil } // DisableEcho disables echo for the terminal connected to the given file descriptor. 
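// Echo is cleared while line input and processed input stay enabled, so the console // still cooks input lines; it just stops printing them back.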
// -- See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx func DisableEcho(fd uintptr, state *State) error { mode := state.mode mode &^= winconsole.ENABLE_ECHO_INPUT mode |= winconsole.ENABLE_PROCESSED_INPUT | winconsole.ENABLE_LINE_INPUT // TODO(azlinux): Core code registers a goroutine to catch os.Interrupt and reset the terminal state. return winconsole.SetConsoleMode(fd, mode) } // SetRawTerminal puts the terminal connected to the given file descriptor into raw // mode and returns the previous state of the terminal so that it can be // restored. func SetRawTerminal(fd uintptr) (*State, error) { state, err := MakeRaw(fd) if err != nil { return nil, err } // TODO(azlinux): Core code registers a goroutine to catch os.Interrupt and reset the terminal state. return state, err } // MakeRaw puts the terminal connected to the given file descriptor into raw // mode and returns the previous state of the terminal so that it can be // restored. func MakeRaw(fd uintptr) (*State, error) { state, err := SaveState(fd) if err != nil { return nil, err } // See // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx mode := state.mode // Disable these modes mode &^= winconsole.ENABLE_ECHO_INPUT mode &^= winconsole.ENABLE_LINE_INPUT mode &^= winconsole.ENABLE_MOUSE_INPUT mode &^= winconsole.ENABLE_WINDOW_INPUT mode &^= winconsole.ENABLE_PROCESSED_INPUT // Enable these modes mode |= winconsole.ENABLE_EXTENDED_FLAGS mode |= winconsole.ENABLE_INSERT_MODE mode |= winconsole.ENABLE_QUICK_EDIT_MODE err = winconsole.SetConsoleMode(fd, mode) if err != nil { return nil, err } return state, nil } docker-1.6.2/pkg/term/winconsole/0000755000175000017500000000000012524223634016244 5ustar tianontianondocker-1.6.2/pkg/term/winconsole/console_windows_test.go0000644000175000017500000002366312524223634023056 0ustar tianontianon// +build windows package winconsole import ( "fmt" "testing" ) func helpsTestParseInt16OrDefault(t *testing.T, expectedValue int16, shouldFail bool, input string, defaultValue int16, format string, args ...string) { value, err := parseInt16OrDefault(input, defaultValue) if nil != err && !shouldFail { t.Errorf("Unexpected error returned %v", err) t.Errorf(format, args) } if nil == err && shouldFail { t.Errorf("Should have failed as expected\n\tReturned value = %d", value) t.Errorf(format, args) } if expectedValue != value { t.Errorf("The value returned does not match expected\n\tExpected:%v\n\tActual:%v", expectedValue, value) t.Errorf(format, args) } } func TestParseInt16OrDefault(t *testing.T) { // empty string helpsTestParseInt16OrDefault(t, 0, false, "", 0, "Empty string returns default") helpsTestParseInt16OrDefault(t, 2, false, "", 2, "Empty string returns default") // normal case helpsTestParseInt16OrDefault(t, 0, false, "0", 0, "0 handled correctly") helpsTestParseInt16OrDefault(t, 111, false, "111", 2, "Normal") helpsTestParseInt16OrDefault(t, 111, false, "+111", 2, "+N") helpsTestParseInt16OrDefault(t, -111, false, "-111", 2, "-N") helpsTestParseInt16OrDefault(t, 0, false, "+0", 11, "+0") helpsTestParseInt16OrDefault(t, 0, false, "-0", 12, "-0") // ill-formed strings helpsTestParseInt16OrDefault(t, 0, true, "abc", 0, "Invalid string") helpsTestParseInt16OrDefault(t, 42, true, "+= 23", 42, "Invalid string") helpsTestParseInt16OrDefault(t, 42, true, "123.45", 42, "float like") } func helpsTestGetNumberOfChars(t *testing.T, expected uint32,
fromCoord COORD, toCoord COORD, screenSize COORD, format string, args ...interface{}) { actual := getNumberOfChars(fromCoord, toCoord, screenSize) mesg := fmt.Sprintf(format, args...) assertTrue(t, expected == actual, fmt.Sprintf("%s Expected=%d, Actual=%d, Parameters = { fromCoord=%+v, toCoord=%+v, screenSize=%+v }", mesg, expected, actual, fromCoord, toCoord, screenSize)) } func TestGetNumberOfChars(t *testing.T) { // Note: the columns and lines are 0 based // The interval is "inclusive", i.e. it contains both the start and end chars // This test only checks the number of characters being written // all four corners maxWindow := COORD{X: 80, Y: 50} leftTop := COORD{X: 0, Y: 0} rightTop := COORD{X: 79, Y: 0} leftBottom := COORD{X: 0, Y: 49} rightBottom := COORD{X: 79, Y: 49} // same position helpsTestGetNumberOfChars(t, 1, COORD{X: 1, Y: 14}, COORD{X: 1, Y: 14}, COORD{X: 80, Y: 50}, "Same position random line") // four corners helpsTestGetNumberOfChars(t, 1, leftTop, leftTop, maxWindow, "Same position- leftTop") helpsTestGetNumberOfChars(t, 1, rightTop, rightTop, maxWindow, "Same position- rightTop") helpsTestGetNumberOfChars(t, 1, leftBottom, leftBottom, maxWindow, "Same position- leftBottom") helpsTestGetNumberOfChars(t, 1, rightBottom, rightBottom, maxWindow, "Same position- rightBottom") // from this char to next char on same line helpsTestGetNumberOfChars(t, 2, COORD{X: 0, Y: 0}, COORD{X: 1, Y: 0}, maxWindow, "Next position on same line") helpsTestGetNumberOfChars(t, 2, COORD{X: 1, Y: 14}, COORD{X: 2, Y: 14}, maxWindow, "Next position on same line") // from this char to next 10 chars on same line helpsTestGetNumberOfChars(t, 11, COORD{X: 0, Y: 0}, COORD{X: 10, Y: 0}, maxWindow, "Next position on same line") helpsTestGetNumberOfChars(t, 11, COORD{X: 1, Y: 14}, COORD{X: 11, Y: 14}, maxWindow, "Next position on same line") helpsTestGetNumberOfChars(t, 5, COORD{X: 3, Y: 11}, COORD{X: 7, Y: 11}, maxWindow, "To and from on same line") helpsTestGetNumberOfChars(t, 8, COORD{X: 0, Y: 34}, COORD{X: 7, Y: 34}, maxWindow, "Start of line to middle") helpsTestGetNumberOfChars(t, 4, COORD{X: 76, Y: 34}, COORD{X: 79, Y: 34}, maxWindow, "Middle to end of line") // multiple lines - 1 helpsTestGetNumberOfChars(t, 81, COORD{X: 0, Y: 0}, COORD{X: 0, Y: 1}, maxWindow, "one line below same X") helpsTestGetNumberOfChars(t, 81, COORD{X: 10, Y: 10}, COORD{X: 10, Y: 11}, maxWindow, "one line below same X") // multiple lines - 2 helpsTestGetNumberOfChars(t, 161, COORD{X: 0, Y: 0}, COORD{X: 0, Y: 2}, maxWindow, "two lines below same X") helpsTestGetNumberOfChars(t, 161, COORD{X: 10, Y: 10}, COORD{X: 10, Y: 12}, maxWindow, "two lines below same X") // multiple lines - 3 helpsTestGetNumberOfChars(t, 241, COORD{X: 0, Y: 0}, COORD{X: 0, Y: 3}, maxWindow, "three lines below same X") helpsTestGetNumberOfChars(t, 241, COORD{X: 10, Y: 10}, COORD{X: 10, Y: 13}, maxWindow, "three lines below same X") // full line helpsTestGetNumberOfChars(t, 80, COORD{X: 0, Y: 0}, COORD{X: 79, Y: 0}, maxWindow, "Full line - first") helpsTestGetNumberOfChars(t, 80, COORD{X: 0, Y: 23}, COORD{X: 79, Y: 23}, maxWindow, "Full line - random") helpsTestGetNumberOfChars(t, 80, COORD{X: 0, Y: 49}, COORD{X: 79, Y: 49}, maxWindow, "Full line - last") // full screen helpsTestGetNumberOfChars(t, 80*50, leftTop, rightBottom, maxWindow, "full screen") helpsTestGetNumberOfChars(t, 80*50-1, COORD{X: 1, Y: 0}, rightBottom, maxWindow, "dropping first char, to end of screen") helpsTestGetNumberOfChars(t, 80*50-2, COORD{X: 2, Y: 0}, rightBottom, maxWindow, "dropping first 
two char to, end of screen") helpsTestGetNumberOfChars(t, 80*50-1, leftTop, COORD{X: 78, Y: 49}, maxWindow, "from start of screen, till last char-1") helpsTestGetNumberOfChars(t, 80*50-2, leftTop, COORD{X: 77, Y: 49}, maxWindow, "from start of screen, till last char-2") helpsTestGetNumberOfChars(t, 80*50-5, COORD{X: 4, Y: 0}, COORD{X: 78, Y: 49}, COORD{X: 80, Y: 50}, "from start of screen+4, till last char-1") helpsTestGetNumberOfChars(t, 80*50-6, COORD{X: 4, Y: 0}, COORD{X: 77, Y: 49}, COORD{X: 80, Y: 50}, "from start of screen+4, till last char-2") } var allForeground = []int16{ ANSI_FOREGROUND_BLACK, ANSI_FOREGROUND_RED, ANSI_FOREGROUND_GREEN, ANSI_FOREGROUND_YELLOW, ANSI_FOREGROUND_BLUE, ANSI_FOREGROUND_MAGENTA, ANSI_FOREGROUND_CYAN, ANSI_FOREGROUND_WHITE, ANSI_FOREGROUND_DEFAULT, } var allBackground = []int16{ ANSI_BACKGROUND_BLACK, ANSI_BACKGROUND_RED, ANSI_BACKGROUND_GREEN, ANSI_BACKGROUND_YELLOW, ANSI_BACKGROUND_BLUE, ANSI_BACKGROUND_MAGENTA, ANSI_BACKGROUND_CYAN, ANSI_BACKGROUND_WHITE, ANSI_BACKGROUND_DEFAULT, } func maskForeground(flag WORD) WORD { return flag & FOREGROUND_MASK_UNSET } func onlyForeground(flag WORD) WORD { return flag & FOREGROUND_MASK_SET } func maskBackground(flag WORD) WORD { return flag & BACKGROUND_MASK_UNSET } func onlyBackground(flag WORD) WORD { return flag & BACKGROUND_MASK_SET } func helpsTestGetWindowsTextAttributeForAnsiValue(t *testing.T, oldValue WORD /*, expected WORD*/, ansi int16, onlyMask WORD, restMask WORD) WORD { actual, err := getWindowsTextAttributeForAnsiValue(oldValue, FOREGROUND_MASK_SET, ansi) assertTrue(t, nil == err, "Should be no error") // assert that other bits are not affected if 0 != oldValue { assertTrue(t, (actual&restMask) == (oldValue&restMask), "The operation should not have affected other bits actual=%X oldValue=%X ansi=%d", actual, oldValue, ansi) } return actual } func TestBackgroundForAnsiValue(t *testing.T) { // Check that nothing else changes // background changes for _, state1 := range allBackground { for _, state2 := range allBackground { flag := WORD(0) flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET) flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET) } } // cummulative bcakground changes for _, state1 := range allBackground { flag := WORD(0) for _, state2 := range allBackground { flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET) flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET) } } // change background after foreground for _, state1 := range allForeground { for _, state2 := range allBackground { flag := WORD(0) flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET) flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET) } } // change background after change cumulative for _, state1 := range allForeground { flag := WORD(0) for _, state2 := range allBackground { flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET) flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET) } } } func TestForegroundForAnsiValue(t *testing.T) { // Check that nothing else changes for _, state1 := range allForeground { for _, state2 := range allForeground { flag := 
WORD(0) flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET) flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET) } } for _, state1 := range allForeground { flag := WORD(0) for _, state2 := range allForeground { flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET) flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET) } } for _, state1 := range allBackground { for _, state2 := range allForeground { flag := WORD(0) flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET) flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET) } } for _, state1 := range allBackground { flag := WORD(0) for _, state2 := range allForeground { flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET) flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET) } } } docker-1.6.2/pkg/term/winconsole/term_emulator_test.go0000644000175000017500000003306512524223634022510 0ustar tianontianonpackage winconsole import ( "bytes" "fmt" "io" "io/ioutil" "testing" ) const ( WRITE_OPERATION = iota COMMAND_OPERATION = iota ) var languages = []string{ "Български", "Català", "Čeština", "Ελληνικά", "Español", "Esperanto", "Euskara", "Français", "Galego", "한국어", "ქართული", "Latviešu", "Lietuvių", "Magyar", "Nederlands", "日本語", "Norsk bokmål", "Norsk nynorsk", "Polski", "Português", "Română", "Русский", "Slovenčina", "Slovenščina", "Српски", "српскохрватски", "Suomi", "Svenska", "ไทย", "Tiếng Việt", "Türkçe", "Українська", "中文", } // Mock terminal handler object type mockTerminal struct { OutputCommandSequence []terminalOperation } // Used for recording the callback data type terminalOperation struct { Operation int Data []byte Str string } func (mt *mockTerminal) record(operation int, data []byte) { op := terminalOperation{ Operation: operation, Data: make([]byte, len(data)), } copy(op.Data, data) op.Str = string(op.Data) mt.OutputCommandSequence = append(mt.OutputCommandSequence, op) } func (mt *mockTerminal) HandleOutputCommand(fd uintptr, command []byte) (n int, err error) { mt.record(COMMAND_OPERATION, command) return len(command), nil } func (mt *mockTerminal) HandleInputSequence(fd uintptr, command []byte) (n int, err error) { return 0, nil } func (mt *mockTerminal) WriteChars(fd uintptr, w io.Writer, p []byte) (n int, err error) { mt.record(WRITE_OPERATION, p) return len(p), nil } func (mt *mockTerminal) ReadChars(fd uintptr, w io.Reader, p []byte) (n int, err error) { return len(p), nil } func assertTrue(t *testing.T, cond bool, format string, args ...interface{}) { if !cond { t.Errorf(format, args...) } } // reflect.DeepEqual does not provide detailed information as to what exactly failed. 
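// For example (hypothetical values), a failing call such as
//
//	assertBytesEqual(t, []byte{1, 2, 3}, []byte{1, 9, 3}, "case %d", 7)
//
// reports the first mismatching index (here 1) together with the string and
// byte-slice views of both inputs, rather than a bare "not equal".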
func assertBytesEqual(t *testing.T, expected, actual []byte, format string, args ...interface{}) { match := true mismatchIndex := 0 if len(expected) == len(actual) { for i := 0; i < len(expected); i++ { if expected[i] != actual[i] { match = false mismatchIndex = i break } } } else { match = false t.Errorf("Lengths don't match Expected=%d Actual=%d", len(expected), len(actual)) } if !match { t.Errorf("Mismatch at index %d ", mismatchIndex) t.Errorf("\tActual String = %s", string(actual)) t.Errorf("\tExpected String = %s", string(expected)) t.Errorf("\tActual = %v", actual) t.Errorf("\tExpected = %v", expected) t.Errorf(format, args...) } } // Just to make sure :) func TestAssertEqualBytes(t *testing.T) { data := []byte{9, 9, 1, 1, 1, 9, 9} assertBytesEqual(t, data, data, "Self") assertBytesEqual(t, data[1:4], data[1:4], "Self") assertBytesEqual(t, []byte{1, 1}, []byte{1, 1}, "Simple match") assertBytesEqual(t, []byte{1, 2, 3}, []byte{1, 2, 3}, "content mismatch") assertBytesEqual(t, []byte{1, 1, 1}, data[2:5], "slice match") } /* func TestAssertEqualBytesNegative(t *testing.T) { AssertBytesEqual(t, []byte{1, 1}, []byte{1}, "Length mismatch") AssertBytesEqual(t, []byte{1, 1}, []byte{1}, "Length mismatch") AssertBytesEqual(t, []byte{1, 2, 3}, []byte{1, 1, 1}, "content mismatch") }*/ // Checks that the calls received match the expected plain text and commands func assertHandlerOutput(t *testing.T, mock *mockTerminal, plainText string, commands ...string) { text := make([]byte, 0, 3*len(plainText)) cmdIndex := 0 for opIndex := 0; opIndex < len(mock.OutputCommandSequence); opIndex++ { op := mock.OutputCommandSequence[opIndex] if op.Operation == WRITE_OPERATION { t.Logf("\nThe data is[%d] == %s", opIndex, string(op.Data)) text = append(text[:], op.Data...) } else { assertTrue(t, mock.OutputCommandSequence[opIndex].Operation == COMMAND_OPERATION, "Operation should be command : %s", fmt.Sprintf("%+v", mock)) assertBytesEqual(t, StringToBytes(commands[cmdIndex]), mock.OutputCommandSequence[opIndex].Data, "Command data should match") cmdIndex++ } } assertBytesEqual(t, StringToBytes(plainText), text, "Command data should match %#v", mock) } func StringToBytes(str string) []byte { bytes := make([]byte, len(str)) copy(bytes[:], str) return bytes } func TestParseAnsiCommand(t *testing.T) { // Note: if the parameter does not exist then the empty value is returned c := parseAnsiCommand(StringToBytes("\x1Bm")) assertTrue(t, c.Command == "m", "Command should be m") assertTrue(t, "" == c.getParam(0), "should return empty string") assertTrue(t, "" == c.getParam(1), "should return empty string") // Escape sequence - ESC[ c = parseAnsiCommand(StringToBytes("\x1B[m")) assertTrue(t, c.Command == "m", "Command should be m") assertTrue(t, "" == c.getParam(0), "should return empty string") assertTrue(t, "" == c.getParam(1), "should return empty string") // Escape sequence With empty parameters- ESC[ c = parseAnsiCommand(StringToBytes("\x1B[;m")) assertTrue(t, c.Command == "m", "Command should be m") assertTrue(t, "" == c.getParam(0), "should return empty string") assertTrue(t, "" == c.getParam(1), "should return empty string") assertTrue(t, "" == c.getParam(2), "should return empty string") // Escape sequence With empty multiple parameters- ESC[ c = parseAnsiCommand(StringToBytes("\x1B[;;m")) assertTrue(t, c.Command == "m", "Command should be m") assertTrue(t, "" == c.getParam(0), "") assertTrue(t, "" == c.getParam(1), "") assertTrue(t, "" == c.getParam(2), "") // Escape sequence With multiple parameters- ESC[ c = parseAnsiCommand(StringToBytes("\x1B[1;2;3m")) 
assertTrue(t, c.Command == "m", "Command should be m") assertTrue(t, "1" == c.getParam(0), "") assertTrue(t, "2" == c.getParam(1), "") assertTrue(t, "3" == c.getParam(2), "") // Escape sequence With multiple parameters- some missing c = parseAnsiCommand(StringToBytes("\x1B[1;;3;;;6m")) assertTrue(t, c.Command == "m", "Command should be m") assertTrue(t, "1" == c.getParam(0), "") assertTrue(t, "" == c.getParam(1), "") assertTrue(t, "3" == c.getParam(2), "") assertTrue(t, "" == c.getParam(3), "") assertTrue(t, "" == c.getParam(4), "") assertTrue(t, "6" == c.getParam(5), "") } func newBufferedMockTerm() (stdOut io.Writer, stdErr io.Writer, stdIn io.ReadCloser, mock *mockTerminal) { var input bytes.Buffer var output bytes.Buffer var err bytes.Buffer mock = &mockTerminal{ OutputCommandSequence: make([]terminalOperation, 0, 256), } stdOut = &terminalWriter{ wrappedWriter: &output, emulator: mock, command: make([]byte, 0, 256), } stdErr = &terminalWriter{ wrappedWriter: &err, emulator: mock, command: make([]byte, 0, 256), } stdIn = &terminalReader{ wrappedReader: ioutil.NopCloser(&input), emulator: mock, command: make([]byte, 0, 256), } return } func TestOutputSimple(t *testing.T) { stdOut, _, _, mock := newBufferedMockTerm() stdOut.Write(StringToBytes("Hello world")) stdOut.Write(StringToBytes("\x1BmHello again")) assertTrue(t, mock.OutputCommandSequence[0].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) assertBytesEqual(t, StringToBytes("Hello world"), mock.OutputCommandSequence[0].Data, "Write data should match") assertTrue(t, mock.OutputCommandSequence[1].Operation == COMMAND_OPERATION, "Operation should be command : %+v", mock) assertBytesEqual(t, StringToBytes("\x1Bm"), mock.OutputCommandSequence[1].Data, "Command data should match") assertTrue(t, mock.OutputCommandSequence[2].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) assertBytesEqual(t, StringToBytes("Hello again"), mock.OutputCommandSequence[2].Data, "Write data should match") } func TestOutputSplitCommand(t *testing.T) { stdOut, _, _, mock := newBufferedMockTerm() stdOut.Write(StringToBytes("Hello world\x1B[1;2;3")) stdOut.Write(StringToBytes("mHello again")) assertTrue(t, mock.OutputCommandSequence[0].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) assertBytesEqual(t, StringToBytes("Hello world"), mock.OutputCommandSequence[0].Data, "Write data should match") assertTrue(t, mock.OutputCommandSequence[1].Operation == COMMAND_OPERATION, "Operation should be command : %+v", mock) assertBytesEqual(t, StringToBytes("\x1B[1;2;3m"), mock.OutputCommandSequence[1].Data, "Command data should match") assertTrue(t, mock.OutputCommandSequence[2].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) assertBytesEqual(t, StringToBytes("Hello again"), mock.OutputCommandSequence[2].Data, "Write data should match") } func TestOutputMultipleCommands(t *testing.T) { stdOut, _, _, mock := newBufferedMockTerm() stdOut.Write(StringToBytes("Hello world")) stdOut.Write(StringToBytes("\x1B[1;2;3m")) stdOut.Write(StringToBytes("\x1B[J")) stdOut.Write(StringToBytes("Hello again")) assertTrue(t, mock.OutputCommandSequence[0].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) assertBytesEqual(t, StringToBytes("Hello world"), mock.OutputCommandSequence[0].Data, "Write data should match") assertTrue(t, mock.OutputCommandSequence[1].Operation == COMMAND_OPERATION, "Operation should be command : %+v", mock) assertBytesEqual(t, StringToBytes("\x1B[1;2;3m"), 
mock.OutputCommandSequence[1].Data, "Command data should match") assertTrue(t, mock.OutputCommandSequence[2].Operation == COMMAND_OPERATION, "Operation should be command : %+v", mock) assertBytesEqual(t, StringToBytes("\x1B[J"), mock.OutputCommandSequence[2].Data, "Command data should match") assertTrue(t, mock.OutputCommandSequence[3].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) assertBytesEqual(t, StringToBytes("Hello again"), mock.OutputCommandSequence[3].Data, "Write data should match") } // Splits the given data in two chunks, makes two writes and checks the split data is parsed correctly // checks output write/command is passed to handler correctly func helpsTestOutputSplitChunksAtIndex(t *testing.T, i int, data []byte) { t.Logf("\ni=%d", i) stdOut, _, _, mock := newBufferedMockTerm() t.Logf("\nWriting chunk[0] == %s", string(data[:i])) t.Logf("\nWriting chunk[1] == %s", string(data[i:])) stdOut.Write(data[:i]) stdOut.Write(data[i:]) assertTrue(t, mock.OutputCommandSequence[0].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) assertBytesEqual(t, data[:i], mock.OutputCommandSequence[0].Data, "Write data should match") assertTrue(t, mock.OutputCommandSequence[1].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) assertBytesEqual(t, data[i:], mock.OutputCommandSequence[1].Data, "Write data should match") } // Splits the given data in three chunks, makes three writes and checks the split data is parsed correctly // checks output write/command is passed to handler correctly func helpsTestOutputSplitThreeChunksAtIndex(t *testing.T, data []byte, i int, j int) { stdOut, _, _, mock := newBufferedMockTerm() t.Logf("\nWriting chunk[0] == %s", string(data[:i])) t.Logf("\nWriting chunk[1] == %s", string(data[i:j])) t.Logf("\nWriting chunk[2] == %s", string(data[j:])) stdOut.Write(data[:i]) stdOut.Write(data[i:j]) stdOut.Write(data[j:]) assertTrue(t, mock.OutputCommandSequence[0].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) assertBytesEqual(t, data[:i], mock.OutputCommandSequence[0].Data, "Write data should match") assertTrue(t, mock.OutputCommandSequence[1].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) assertBytesEqual(t, data[i:j], mock.OutputCommandSequence[1].Data, "Write data should match") assertTrue(t, mock.OutputCommandSequence[2].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) assertBytesEqual(t, data[j:], mock.OutputCommandSequence[2].Data, "Write data should match") } // Splits the output into two parts and tests all such possible pairs func helpsTestOutputSplitChunks(t *testing.T, data []byte) { for i := 1; i < len(data)-1; i++ { helpsTestOutputSplitChunksAtIndex(t, i, data) } } // Splits the output in three parts and tests all such possible triples func helpsTestOutputSplitThreeChunks(t *testing.T, data []byte) { for i := 1; i < len(data)-2; i++ { for j := i + 1; j < len(data)-1; j++ { helpsTestOutputSplitThreeChunksAtIndex(t, data, i, j) } } } func helpsTestOutputSplitCommandsAtIndex(t *testing.T, data []byte, i int, plainText string, commands ...string) { t.Logf("\ni=%d", i) stdOut, _, _, mock := newBufferedMockTerm() stdOut.Write(data[:i]) stdOut.Write(data[i:]) assertHandlerOutput(t, mock, plainText, commands...) } func helpsTestOutputSplitCommands(t *testing.T, data []byte, plainText string, commands ...string) { for i := 1; i < len(data)-1; i++ { helpsTestOutputSplitCommandsAtIndex(t, data, i, plainText, commands...) 
} } func injectCommandAt(data string, i int, command string) string { retValue := make([]byte, 0, len(data)+len(command)) retValue = append(retValue, data[:i]...) retValue = append(retValue, command...) retValue = append(retValue, data[i:]...) return string(retValue) } func TestOutputSplitChunks(t *testing.T) { data := StringToBytes("qwertyuiopasdfghjklzxcvbnm") helpsTestOutputSplitChunks(t, data) helpsTestOutputSplitChunks(t, StringToBytes("BBBBB")) helpsTestOutputSplitThreeChunks(t, StringToBytes("ABCDE")) } func TestOutputSplitChunksIncludingCommands(t *testing.T) { helpsTestOutputSplitCommands(t, StringToBytes("Hello world.\x1B[mHello again."), "Hello world.Hello again.", "\x1B[m") helpsTestOutputSplitCommandsAtIndex(t, StringToBytes("Hello world.\x1B[mHello again."), 2, "Hello world.Hello again.", "\x1B[m") } func TestSplitChunkUnicode(t *testing.T) { for _, l := range languages { data := StringToBytes(l) helpsTestOutputSplitChunks(t, data) helpsTestOutputSplitThreeChunks(t, data) } } docker-1.6.2/pkg/term/winconsole/term_emulator.go0000644000175000017500000001445312524223634021461 0ustar tianontianonpackage winconsole import ( "fmt" "io" "strconv" "strings" ) // http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html const ( ANSI_ESCAPE_PRIMARY = 0x1B ANSI_ESCAPE_SECONDARY = 0x5B ANSI_COMMAND_FIRST = 0x40 ANSI_COMMAND_LAST = 0x7E ANSI_PARAMETER_SEP = ";" ANSI_CMD_G0 = '(' ANSI_CMD_G1 = ')' ANSI_CMD_G2 = '*' ANSI_CMD_G3 = '+' ANSI_CMD_DECPNM = '>' ANSI_CMD_DECPAM = '=' ANSI_CMD_OSC = ']' ANSI_CMD_STR_TERM = '\\' ANSI_BEL = 0x07 KEY_EVENT = 1 ) // Interface implemented by terminal emulators type terminalEmulator interface { HandleOutputCommand(fd uintptr, command []byte) (n int, err error) HandleInputSequence(fd uintptr, command []byte) (n int, err error) WriteChars(fd uintptr, w io.Writer, p []byte) (n int, err error) ReadChars(fd uintptr, w io.Reader, p []byte) (n int, err error) } type terminalWriter struct { wrappedWriter io.Writer emulator terminalEmulator command []byte inSequence bool fd uintptr } type terminalReader struct { wrappedReader io.ReadCloser emulator terminalEmulator command []byte inSequence bool fd uintptr } // http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html func isAnsiCommandChar(b byte) bool { switch { case ANSI_COMMAND_FIRST <= b && b <= ANSI_COMMAND_LAST && b != ANSI_ESCAPE_SECONDARY: return true case b == ANSI_CMD_G1 || b == ANSI_CMD_OSC || b == ANSI_CMD_DECPAM || b == ANSI_CMD_DECPNM: // non-CSI escape sequence terminator return true case b == ANSI_CMD_STR_TERM || b == ANSI_BEL: // String escape sequence terminator return true } return false } func isCharacterSelectionCmdChar(b byte) bool { return (b == ANSI_CMD_G0 || b == ANSI_CMD_G1 || b == ANSI_CMD_G2 || b == ANSI_CMD_G3) } func isXtermOscSequence(command []byte, current byte) bool { return (len(command) >= 2 && command[0] == ANSI_ESCAPE_PRIMARY && command[1] == ANSI_CMD_OSC && current != ANSI_BEL) } // Write writes len(p) bytes from p to the underlying data stream. 
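// Plain byte runs are forwarded to the emulator's WriteChars, while complete
// escape sequences (even when split across several Write calls) are handed to
// HandleOutputCommand. For example, mirroring TestOutputSplitCommand in
// term_emulator_test.go above, writing "Hello world\x1B[1;2;3" followed by
// "mHello again" produces WriteChars("Hello world"),
// HandleOutputCommand("\x1B[1;2;3m"), WriteChars("Hello again").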
// http://golang.org/pkg/io/#Writer func (tw *terminalWriter) Write(p []byte) (n int, err error) { if len(p) == 0 { return 0, nil } if tw.emulator == nil { return tw.wrappedWriter.Write(p) } // Emulate terminal by extracting commands and executing them totalWritten := 0 start := 0 // indicates start of the next chunk end := len(p) for current := 0; current < end; current++ { if tw.inSequence { // inside escape sequence tw.command = append(tw.command, p[current]) if isAnsiCommandChar(p[current]) { if !isXtermOscSequence(tw.command, p[current]) { // found the last command character. // Now we have a complete command. nchar, err := tw.emulator.HandleOutputCommand(tw.fd, tw.command) totalWritten += nchar if err != nil { return totalWritten, err } // clear the command // don't include current character again tw.command = tw.command[:0] start = current + 1 tw.inSequence = false } } } else { if p[current] == ANSI_ESCAPE_PRIMARY { // entering escape sequence tw.inSequence = true // indicates end of "normal sequence", write whatever you have so far if len(p[start:current]) > 0 { nw, err := tw.emulator.WriteChars(tw.fd, tw.wrappedWriter, p[start:current]) totalWritten += nw if err != nil { return totalWritten, err } } // include the current character as part of the next sequence tw.command = append(tw.command, p[current]) } } } // Note: so far only the start of an escape sequence has triggered writing bytes out to the console. The part _after_ the end of the last escape sequence has not been written out yet, so write it out now. if !tw.inSequence { // assumption is that we can't be inside sequence and therefore command should be empty if len(p[start:]) > 0 { nw, err := tw.emulator.WriteChars(tw.fd, tw.wrappedWriter, p[start:]) totalWritten += nw if err != nil { return totalWritten, err } } } return totalWritten, nil } // Read reads up to len(p) bytes into p. // http://golang.org/pkg/io/#Reader func (tr *terminalReader) Read(p []byte) (n int, err error) { // Implementations of Read are discouraged from returning a zero byte count // with a nil error, except when len(p) == 0. 
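// When no emulator is attached the call below is a straight pass-through to
// the wrapped reader; otherwise ReadChars translates buffered console input
// records into ANSI byte sequences (see WindowsTerminal.ReadChars in
// console_windows.go).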
if len(p) == 0 { return 0, nil } if nil == tr.emulator { return tr.readFromWrappedReader(p) } return tr.emulator.ReadChars(tr.fd, tr.wrappedReader, p) } // Close the underlying stream func (tr *terminalReader) Close() (err error) { return tr.wrappedReader.Close() } func (tr *terminalReader) readFromWrappedReader(p []byte) (n int, err error) { return tr.wrappedReader.Read(p) } type ansiCommand struct { CommandBytes []byte Command string Parameters []string IsSpecial bool } func parseAnsiCommand(command []byte) *ansiCommand { if isCharacterSelectionCmdChar(command[1]) { // Is Character Set Selection commands return &ansiCommand{ CommandBytes: command, Command: string(command), IsSpecial: true, } } // last char is command character lastCharIndex := len(command) - 1 retValue := &ansiCommand{ CommandBytes: command, Command: string(command[lastCharIndex]), IsSpecial: false, } // more than a single escape if lastCharIndex != 0 { start := 1 // skip if double char escape sequence if command[0] == ANSI_ESCAPE_PRIMARY && command[1] == ANSI_ESCAPE_SECONDARY { start++ } // convert this to GetNextParam method retValue.Parameters = strings.Split(string(command[start:lastCharIndex]), ANSI_PARAMETER_SEP) } return retValue } func (c *ansiCommand) getParam(index int) string { if len(c.Parameters) > index { return c.Parameters[index] } return "" } func (ac *ansiCommand) String() string { return fmt.Sprintf("0x%v \"%v\" (\"%v\")", bytesToHex(ac.CommandBytes), ac.Command, strings.Join(ac.Parameters, "\",\"")) } func bytesToHex(b []byte) string { hex := make([]string, len(b)) for i, ch := range b { hex[i] = fmt.Sprintf("%X", ch) } return strings.Join(hex, "") } func parseInt16OrDefault(s string, defaultValue int16) (n int16, err error) { if s == "" { return defaultValue, nil } parsedValue, err := strconv.ParseInt(s, 10, 16) if err != nil { return defaultValue, err } return int16(parsedValue), nil } docker-1.6.2/pkg/term/winconsole/console_windows.go0000644000175000017500000010251112524223634022007 0ustar tianontianon// +build windows package winconsole import ( "bytes" "fmt" "io" "os" "strconv" "strings" "sync" "syscall" "unsafe" "github.com/Sirupsen/logrus" ) const ( // Consts for Get/SetConsoleMode function // -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx ENABLE_PROCESSED_INPUT = 0x0001 ENABLE_LINE_INPUT = 0x0002 ENABLE_ECHO_INPUT = 0x0004 ENABLE_WINDOW_INPUT = 0x0008 ENABLE_MOUSE_INPUT = 0x0010 ENABLE_INSERT_MODE = 0x0020 ENABLE_QUICK_EDIT_MODE = 0x0040 ENABLE_EXTENDED_FLAGS = 0x0080 // If parameter is a screen buffer handle, additional values ENABLE_PROCESSED_OUTPUT = 0x0001 ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 //http://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes FOREGROUND_BLUE = 1 FOREGROUND_GREEN = 2 FOREGROUND_RED = 4 FOREGROUND_INTENSITY = 8 FOREGROUND_MASK_SET = 0x000F FOREGROUND_MASK_UNSET = 0xFFF0 BACKGROUND_BLUE = 16 BACKGROUND_GREEN = 32 BACKGROUND_RED = 64 BACKGROUND_INTENSITY = 128 BACKGROUND_MASK_SET = 0x00F0 BACKGROUND_MASK_UNSET = 0xFF0F COMMON_LVB_REVERSE_VIDEO = 0x4000 COMMON_LVB_UNDERSCORE = 0x8000 // http://man7.org/linux/man-pages/man4/console_codes.4.html // ECMA-48 Set Graphics Rendition ANSI_ATTR_RESET = 0 ANSI_ATTR_BOLD = 1 ANSI_ATTR_DIM = 2 ANSI_ATTR_UNDERLINE = 4 ANSI_ATTR_BLINK = 5 ANSI_ATTR_REVERSE = 7 ANSI_ATTR_INVISIBLE = 8 ANSI_ATTR_UNDERLINE_OFF = 24 ANSI_ATTR_BLINK_OFF = 25 ANSI_ATTR_REVERSE_OFF = 27 ANSI_ATTR_INVISIBLE_OFF = 8 ANSI_FOREGROUND_BLACK = 30 ANSI_FOREGROUND_RED = 31 
ANSI_FOREGROUND_GREEN = 32 ANSI_FOREGROUND_YELLOW = 33 ANSI_FOREGROUND_BLUE = 34 ANSI_FOREGROUND_MAGENTA = 35 ANSI_FOREGROUND_CYAN = 36 ANSI_FOREGROUND_WHITE = 37 ANSI_FOREGROUND_DEFAULT = 39 ANSI_BACKGROUND_BLACK = 40 ANSI_BACKGROUND_RED = 41 ANSI_BACKGROUND_GREEN = 42 ANSI_BACKGROUND_YELLOW = 43 ANSI_BACKGROUND_BLUE = 44 ANSI_BACKGROUND_MAGENTA = 45 ANSI_BACKGROUND_CYAN = 46 ANSI_BACKGROUND_WHITE = 47 ANSI_BACKGROUND_DEFAULT = 49 ANSI_MAX_CMD_LENGTH = 256 MAX_INPUT_EVENTS = 128 MAX_INPUT_BUFFER = 1024 DEFAULT_WIDTH = 80 DEFAULT_HEIGHT = 24 ) // http://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx const ( VK_PRIOR = 0x21 // PAGE UP key VK_NEXT = 0x22 // PAGE DOWN key VK_END = 0x23 // END key VK_HOME = 0x24 // HOME key VK_LEFT = 0x25 // LEFT ARROW key VK_UP = 0x26 // UP ARROW key VK_RIGHT = 0x27 // RIGHT ARROW key VK_DOWN = 0x28 // DOWN ARROW key VK_SELECT = 0x29 // SELECT key VK_PRINT = 0x2A // PRINT key VK_EXECUTE = 0x2B // EXECUTE key VK_SNAPSHOT = 0x2C // PRINT SCREEN key VK_INSERT = 0x2D // INS key VK_DELETE = 0x2E // DEL key VK_HELP = 0x2F // HELP key VK_F1 = 0x70 // F1 key VK_F2 = 0x71 // F2 key VK_F3 = 0x72 // F3 key VK_F4 = 0x73 // F4 key VK_F5 = 0x74 // F5 key VK_F6 = 0x75 // F6 key VK_F7 = 0x76 // F7 key VK_F8 = 0x77 // F8 key VK_F9 = 0x78 // F9 key VK_F10 = 0x79 // F10 key VK_F11 = 0x7A // F11 key VK_F12 = 0x7B // F12 key ) var kernel32DLL = syscall.NewLazyDLL("kernel32.dll") var ( setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition") setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") fillConsoleOutputCharacterProc = kernel32DLL.NewProc("FillConsoleOutputCharacterW") writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW") readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW") getNumberOfConsoleInputEventsProc = kernel32DLL.NewProc("GetNumberOfConsoleInputEvents") getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo") setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo") setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo") setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize") ) // types for calling various windows API // see http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093(v=vs.85).aspx type ( SHORT int16 BOOL int32 WORD uint16 WCHAR uint16 DWORD uint32 SMALL_RECT struct { Left SHORT Top SHORT Right SHORT Bottom SHORT } COORD struct { X SHORT Y SHORT } CONSOLE_SCREEN_BUFFER_INFO struct { Size COORD CursorPosition COORD Attributes WORD Window SMALL_RECT MaximumWindowSize COORD } CONSOLE_CURSOR_INFO struct { Size DWORD Visible BOOL } // http://msdn.microsoft.com/en-us/library/windows/desktop/ms684166(v=vs.85).aspx KEY_EVENT_RECORD struct { KeyDown BOOL RepeatCount WORD VirtualKeyCode WORD VirtualScanCode WORD UnicodeChar WCHAR ControlKeyState DWORD } INPUT_RECORD struct { EventType WORD KeyEvent KEY_EVENT_RECORD } CHAR_INFO struct { UnicodeChar WCHAR Attributes WORD } ) // TODO(azlinux): Basic type clean-up // -- Convert all uses of uintptr to syscall.Handle to be consistent with Windows syscall // -- Convert, as appropriate, types to use defined Windows types (e.g., DWORD instead of uint32) // Implements the TerminalEmulator interface type WindowsTerminal struct { outMutex sync.Mutex inMutex sync.Mutex inputBuffer []byte inputSize int inputEvents 
[]INPUT_RECORD screenBufferInfo *CONSOLE_SCREEN_BUFFER_INFO inputEscapeSequence []byte } func getStdHandle(stdhandle int) uintptr { handle, err := syscall.GetStdHandle(stdhandle) if err != nil { panic(fmt.Errorf("could not get standard io handle %d", stdhandle)) } return uintptr(handle) } func WinConsoleStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { handler := &WindowsTerminal{ inputBuffer: make([]byte, MAX_INPUT_BUFFER), inputEscapeSequence: []byte(KEY_ESC_CSI), inputEvents: make([]INPUT_RECORD, MAX_INPUT_EVENTS), } if IsConsole(os.Stdin.Fd()) { stdIn = &terminalReader{ wrappedReader: os.Stdin, emulator: handler, command: make([]byte, 0, ANSI_MAX_CMD_LENGTH), fd: getStdHandle(syscall.STD_INPUT_HANDLE), } } else { stdIn = os.Stdin } if IsConsole(os.Stdout.Fd()) { stdoutHandle := getStdHandle(syscall.STD_OUTPUT_HANDLE) // Save current screen buffer info screenBufferInfo, err := GetConsoleScreenBufferInfo(stdoutHandle) if err != nil { // GetConsoleScreenBufferInfo failing usually means the handle is not backed by a real console; // since this branch is only taken when stdout is a console, treat the failure as fatal. panic("could not get console screen buffer info") } handler.screenBufferInfo = screenBufferInfo buffer = make([]CHAR_INFO, screenBufferInfo.MaximumWindowSize.X*screenBufferInfo.MaximumWindowSize.Y) stdOut = &terminalWriter{ wrappedWriter: os.Stdout, emulator: handler, command: make([]byte, 0, ANSI_MAX_CMD_LENGTH), fd: stdoutHandle, } } else { stdOut = os.Stdout } if IsConsole(os.Stderr.Fd()) { stdErr = &terminalWriter{ wrappedWriter: os.Stderr, emulator: handler, command: make([]byte, 0, ANSI_MAX_CMD_LENGTH), fd: getStdHandle(syscall.STD_ERROR_HANDLE), } } else { stdErr = os.Stderr } return stdIn, stdOut, stdErr } // GetHandleInfo returns file descriptor and bool indicating whether the file is a console. func GetHandleInfo(in interface{}) (uintptr, bool) { var inFd uintptr var isTerminalIn bool switch t := in.(type) { case *terminalReader: in = t.wrappedReader case *terminalWriter: in = t.wrappedWriter } if file, ok := in.(*os.File); ok { inFd = file.Fd() isTerminalIn = IsConsole(inFd) } return inFd, isTerminalIn } func getError(r1, r2 uintptr, lastErr error) error { // If the function fails, the return value is zero. if r1 == 0 { if lastErr != nil { return lastErr } return syscall.EINVAL } return nil } // GetConsoleMode gets the console mode for given file descriptor // http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx func GetConsoleMode(handle uintptr) (uint32, error) { var mode uint32 err := syscall.GetConsoleMode(syscall.Handle(handle), &mode) return mode, err } // SetConsoleMode sets the console mode for given file descriptor // http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx func SetConsoleMode(handle uintptr, mode uint32) error { return getError(setConsoleModeProc.Call(handle, uintptr(mode), 0)) } // SetCursorVisible sets the cursor visibility // http://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx func SetCursorVisible(handle uintptr, isVisible BOOL) (bool, error) { var cursorInfo *CONSOLE_CURSOR_INFO = &CONSOLE_CURSOR_INFO{} if err := getError(getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)); err != nil { return false, err } cursorInfo.Visible = isVisible if err := getError(setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)); err != nil { return false, err } return true, nil } // SetWindowSize sets the size of the console window. 
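// Note that SMALL_RECT coordinates are inclusive: a window of width columns by
// height rows is described as {Left: 0, Top: 0, Right: width - 1, Bottom: height - 1},
// which is why the implementation below subtracts one from each dimension.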
func SetWindowSize(handle uintptr, width, height, max SHORT) (bool, error) { window := SMALL_RECT{Left: 0, Top: 0, Right: width - 1, Bottom: height - 1} coord := COORD{X: width - 1, Y: max} if err := getError(setConsoleWindowInfoProc.Call(handle, uintptr(1), uintptr(unsafe.Pointer(&window)))); err != nil { return false, err } if err := getError(setConsoleScreenBufferSizeProc.Call(handle, marshal(coord))); err != nil { return false, err } return true, nil } // GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. // http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { var info CONSOLE_SCREEN_BUFFER_INFO if err := getError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)); err != nil { return nil, err } return &info, nil } // setConsoleTextAttribute sets the attributes of characters written to the // console screen buffer by the WriteFile or WriteConsole function, // http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx func setConsoleTextAttribute(handle uintptr, attribute WORD) error { return getError(setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0)) } func writeConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) (bool, error) { if err := getError(writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), marshal(bufferSize), marshal(bufferCoord), uintptr(unsafe.Pointer(writeRegion)))); err != nil { return false, err } return true, nil } // http://msdn.microsoft.com/en-us/library/windows/desktop/ms682663(v=vs.85).aspx func fillConsoleOutputCharacter(handle uintptr, fillChar byte, length uint32, writeCord COORD) (bool, error) { out := int64(0) if err := getError(fillConsoleOutputCharacterProc.Call(handle, uintptr(fillChar), uintptr(length), marshal(writeCord), uintptr(unsafe.Pointer(&out)))); err != nil { return false, err } return true, nil } // Gets the number of space characters to write for "clearing" the section of terminal func getNumberOfChars(fromCoord COORD, toCoord COORD, screenSize COORD) uint32 { // must be valid cursor position if fromCoord.X < 0 || fromCoord.Y < 0 || toCoord.X < 0 || toCoord.Y < 0 { return 0 } if fromCoord.X >= screenSize.X || fromCoord.Y >= screenSize.Y || toCoord.X >= screenSize.X || toCoord.Y >= screenSize.Y { return 0 } // can't be backwards if fromCoord.Y > toCoord.Y { return 0 } // same line if fromCoord.Y == toCoord.Y { return uint32(toCoord.X-fromCoord.X) + 1 } // spans more than one line if fromCoord.Y < toCoord.Y { // from start till end of line for first line + from start of line till end retValue := uint32(screenSize.X-fromCoord.X) + uint32(toCoord.X) + 1 // don't count first and last line linesBetween := toCoord.Y - fromCoord.Y - 1 if linesBetween > 0 { retValue = retValue + uint32(linesBetween*screenSize.X) } return retValue } return 0 } var buffer []CHAR_INFO func clearDisplayRect(handle uintptr, attributes WORD, fromCoord COORD, toCoord COORD) (uint32, error) { var writeRegion SMALL_RECT writeRegion.Left = fromCoord.X writeRegion.Top = fromCoord.Y writeRegion.Right = toCoord.X writeRegion.Bottom = toCoord.Y // allocate and initialize buffer width := toCoord.X - fromCoord.X + 1 height := toCoord.Y - fromCoord.Y + 1 size := uint32(width) * uint32(height) if size > 0 { buffer := make([]CHAR_INFO, size) for i := range buffer { buffer[i] = CHAR_INFO{WCHAR(' 
'), attributes} } // Write to buffer r, err := writeConsoleOutput(handle, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, &writeRegion) if !r { if err != nil { return 0, err } return 0, syscall.EINVAL } } return uint32(size), nil } func clearDisplayRange(handle uintptr, attributes WORD, fromCoord COORD, toCoord COORD) (uint32, error) { nw := uint32(0) // start and end on same line if fromCoord.Y == toCoord.Y { return clearDisplayRect(handle, attributes, fromCoord, toCoord) } // TODO(azlinux): if full screen, optimize // spans more than one line if fromCoord.Y < toCoord.Y { // from start position till end of line for first line n, err := clearDisplayRect(handle, attributes, fromCoord, COORD{X: toCoord.X, Y: fromCoord.Y}) if err != nil { return nw, err } nw += n // lines between linesBetween := toCoord.Y - fromCoord.Y - 1 if linesBetween > 0 { n, err = clearDisplayRect(handle, attributes, COORD{X: 0, Y: fromCoord.Y + 1}, COORD{X: toCoord.X, Y: toCoord.Y - 1}) if err != nil { return nw, err } nw += n } // lines at end n, err = clearDisplayRect(handle, attributes, COORD{X: 0, Y: toCoord.Y}, toCoord) if err != nil { return nw, err } nw += n } return nw, nil } // setConsoleCursorPosition sets the console cursor position // Note The X and Y are zero based // If relative is true then the new position is relative to current one func setConsoleCursorPosition(handle uintptr, isRelative bool, column int16, line int16) error { screenBufferInfo, err := GetConsoleScreenBufferInfo(handle) if err != nil { return err } var position COORD if isRelative { position.X = screenBufferInfo.CursorPosition.X + SHORT(column) position.Y = screenBufferInfo.CursorPosition.Y + SHORT(line) } else { position.X = SHORT(column) position.Y = SHORT(line) } return getError(setConsoleCursorPositionProc.Call(handle, marshal(position), 0)) } // http://msdn.microsoft.com/en-us/library/windows/desktop/ms683207(v=vs.85).aspx func getNumberOfConsoleInputEvents(handle uintptr) (uint16, error) { var n DWORD if err := getError(getNumberOfConsoleInputEventsProc.Call(handle, uintptr(unsafe.Pointer(&n)))); err != nil { return 0, err } return uint16(n), nil } //http://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx func readConsoleInputKey(handle uintptr, inputBuffer []INPUT_RECORD) (int, error) { var nr DWORD if err := getError(readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&inputBuffer[0])), uintptr(len(inputBuffer)), uintptr(unsafe.Pointer(&nr)))); err != nil { return 0, err } return int(nr), nil } func getWindowsTextAttributeForAnsiValue(originalFlag WORD, defaultValue WORD, ansiValue int16) (WORD, error) { flag := WORD(originalFlag) if flag == 0 { flag = defaultValue } switch ansiValue { case ANSI_ATTR_RESET: flag &^= COMMON_LVB_UNDERSCORE flag &^= BACKGROUND_INTENSITY flag = flag | FOREGROUND_INTENSITY case ANSI_ATTR_INVISIBLE: // TODO: how do you reset reverse? 
case ANSI_ATTR_UNDERLINE: flag = flag | COMMON_LVB_UNDERSCORE case ANSI_ATTR_BLINK: // seems like background intensity is blink flag = flag | BACKGROUND_INTENSITY case ANSI_ATTR_UNDERLINE_OFF: flag &^= COMMON_LVB_UNDERSCORE case ANSI_ATTR_BLINK_OFF: // seems like background intensity is blink flag &^= BACKGROUND_INTENSITY case ANSI_ATTR_BOLD: flag = flag | FOREGROUND_INTENSITY case ANSI_ATTR_DIM: flag &^= FOREGROUND_INTENSITY case ANSI_ATTR_REVERSE, ANSI_ATTR_REVERSE_OFF: // swap foreground and background bits foreground := flag & FOREGROUND_MASK_SET background := flag & BACKGROUND_MASK_SET flag = (flag & BACKGROUND_MASK_UNSET & FOREGROUND_MASK_UNSET) | (foreground << 4) | (background >> 4) // FOREGROUND case ANSI_FOREGROUND_DEFAULT: flag = (flag & FOREGROUND_MASK_UNSET) | (defaultValue & FOREGROUND_MASK_SET) case ANSI_FOREGROUND_BLACK: flag = flag ^ (FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE) case ANSI_FOREGROUND_RED: flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_RED case ANSI_FOREGROUND_GREEN: flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_GREEN case ANSI_FOREGROUND_YELLOW: flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_RED | FOREGROUND_GREEN case ANSI_FOREGROUND_BLUE: flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_BLUE case ANSI_FOREGROUND_MAGENTA: flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_RED | FOREGROUND_BLUE case ANSI_FOREGROUND_CYAN: flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_GREEN | FOREGROUND_BLUE case ANSI_FOREGROUND_WHITE: flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE // Background case ANSI_BACKGROUND_DEFAULT: // Black with no intensity flag = (flag & BACKGROUND_MASK_UNSET) | (defaultValue & BACKGROUND_MASK_SET) case ANSI_BACKGROUND_BLACK: flag = (flag & BACKGROUND_MASK_UNSET) case ANSI_BACKGROUND_RED: flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_RED case ANSI_BACKGROUND_GREEN: flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_GREEN case ANSI_BACKGROUND_YELLOW: flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_RED | BACKGROUND_GREEN case ANSI_BACKGROUND_BLUE: flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_BLUE case ANSI_BACKGROUND_MAGENTA: flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_RED | BACKGROUND_BLUE case ANSI_BACKGROUND_CYAN: flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_GREEN | BACKGROUND_BLUE case ANSI_BACKGROUND_WHITE: flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE } return flag, nil } // HandleOutputCommand interprets the ANSI commands and then makes the appropriate Win32 calls func (term *WindowsTerminal) HandleOutputCommand(handle uintptr, command []byte) (n int, err error) { // always consider all the bytes in command processed n = len(command) parsedCommand := parseAnsiCommand(command) logrus.Debugf("[windows] HandleOutputCommand: %v", parsedCommand) // console settings changes need to happen in atomic way term.outMutex.Lock() defer term.outMutex.Unlock() switch parsedCommand.Command { case "m": // [Value;...;Valuem // Set Graphics Mode: // Calls the graphics functions specified by the following values. // These specified functions remain active until the next occurrence of this escape sequence. // Graphics mode changes the colors and attributes of text (such as bold and underline) displayed on the screen. 
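// For example, the sequence ESC[1;34m arrives here with Parameters ["1", "34"]:
// 1 (bold) ORs FOREGROUND_INTENSITY into the attribute word and 34 (blue)
// replaces the foreground bits with FOREGROUND_BLUE; each value is folded in
// through getWindowsTextAttributeForAnsiValue before the final
// setConsoleTextAttribute call below.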
screenBufferInfo, err := GetConsoleScreenBufferInfo(handle) if err != nil { return n, err } flag := screenBufferInfo.Attributes for _, e := range parsedCommand.Parameters { value, _ := strconv.ParseInt(e, 10, 16) // base 10, 16 bit if value == ANSI_ATTR_RESET { flag = term.screenBufferInfo.Attributes // reset } else { flag, err = getWindowsTextAttributeForAnsiValue(flag, term.screenBufferInfo.Attributes, int16(value)) if err != nil { return n, err } } } if err := setConsoleTextAttribute(handle, flag); err != nil { return n, err } case "H", "f": // [line;columnH // [line;columnf // Moves the cursor to the specified position (coordinates). // If you do not specify a position, the cursor moves to the home position at the upper-left corner of the screen (line 0, column 0). screenBufferInfo, err := GetConsoleScreenBufferInfo(handle) if err != nil { return n, err } line, err := parseInt16OrDefault(parsedCommand.getParam(0), 1) if err != nil { return n, err } if line > int16(screenBufferInfo.Window.Bottom) { line = int16(screenBufferInfo.Window.Bottom) + 1 } column, err := parseInt16OrDefault(parsedCommand.getParam(1), 1) if err != nil { return n, err } if column > int16(screenBufferInfo.Window.Right) { column = int16(screenBufferInfo.Window.Right) + 1 } // The numbers are not 0 based, but 1 based logrus.Debugf("[windows] HandleOutputCommand: Moving cursor to (%v,%v)", column-1, line-1) if err := setConsoleCursorPosition(handle, false, column-1, line-1); err != nil { return n, err } case "A": // [valueA // Moves the cursor up by the specified number of lines without changing columns. // If the cursor is already on the top line, ignores this sequence. value, err := parseInt16OrDefault(parsedCommand.getParam(0), 1) if err != nil { return n, err } if err := setConsoleCursorPosition(handle, true, 0, -value); err != nil { return n, err } case "B": // [valueB // Moves the cursor down by the specified number of lines without changing columns. // If the cursor is already on the bottom line, ignores this sequence. value, err := parseInt16OrDefault(parsedCommand.getParam(0), 1) if err != nil { return n, err } if err := setConsoleCursorPosition(handle, true, 0, value); err != nil { return n, err } case "C": // [valueC // Moves the cursor forward by the specified number of columns without changing lines. // If the cursor is already in the rightmost column, ignores this sequence. value, err := parseInt16OrDefault(parsedCommand.getParam(0), 1) if err != nil { return n, err } if err := setConsoleCursorPosition(handle, true, value, 0); err != nil { return n, err } case "D": // [valueD // Moves the cursor back by the specified number of columns without changing lines. // If the cursor is already in the leftmost column, ignores this sequence. value, err := parseInt16OrDefault(parsedCommand.getParam(0), 1) if err != nil { return n, err } if err := setConsoleCursorPosition(handle, true, -value, 0); err != nil { return n, err } case "J": // [J Erases from the cursor to the end of the screen, including the cursor position. // [1J Erases from the beginning of the screen to the cursor, including the cursor position. // [2J Erases the complete display. The cursor does not move. // Clears the screen and moves the cursor to the home position (line 0, column 0). 
value, err := parseInt16OrDefault(parsedCommand.getParam(0), 0) if err != nil { return n, err } var start COORD var cursor COORD var end COORD screenBufferInfo, err := GetConsoleScreenBufferInfo(handle) if err != nil { return n, err } switch value { case 0: start = screenBufferInfo.CursorPosition // end of the buffer end.X = screenBufferInfo.Size.X - 1 end.Y = screenBufferInfo.Size.Y - 1 // cursor cursor = screenBufferInfo.CursorPosition case 1: // start of the screen start.X = 0 start.Y = 0 // end of the screen end = screenBufferInfo.CursorPosition // cursor cursor = screenBufferInfo.CursorPosition case 2: // start of the screen start.X = 0 start.Y = 0 // end of the buffer end.X = screenBufferInfo.Size.X - 1 end.Y = screenBufferInfo.Size.Y - 1 // cursor cursor.X = 0 cursor.Y = 0 } if _, err := clearDisplayRange(uintptr(handle), term.screenBufferInfo.Attributes, start, end); err != nil { return n, err } // note that these coordinates are already 0 based if err := setConsoleCursorPosition(handle, false, int16(cursor.X), int16(cursor.Y)); err != nil { return n, err } case "K": // [K // Clears all characters from the cursor position to the end of the line (including the character at the cursor position). // [K Erases from the cursor to the end of the line, including the cursor position. // [1K Erases from the beginning of the line to the cursor, including the cursor position. // [2K Erases the complete line. value, err := parseInt16OrDefault(parsedCommand.getParam(0), 0) if err != nil { return n, err } var start COORD var cursor COORD var end COORD screenBufferInfo, err := GetConsoleScreenBufferInfo(uintptr(handle)) if err != nil { return n, err } switch value { case 0: // start is where cursor is start = screenBufferInfo.CursorPosition // end of line end.X = screenBufferInfo.Size.X - 1 end.Y = screenBufferInfo.CursorPosition.Y // cursor remains the same cursor = screenBufferInfo.CursorPosition case 1: // beginning of line start.X = 0 start.Y = screenBufferInfo.CursorPosition.Y // until cursor end = screenBufferInfo.CursorPosition // cursor remains the same cursor = screenBufferInfo.CursorPosition case 2: // start of the line start.X = 0 start.Y = screenBufferInfo.CursorPosition.Y - 1 // end of the line end.X = screenBufferInfo.Size.X - 1 end.Y = screenBufferInfo.CursorPosition.Y - 1 // cursor cursor.X = 0 cursor.Y = screenBufferInfo.CursorPosition.Y - 1 } if _, err := clearDisplayRange(uintptr(handle), term.screenBufferInfo.Attributes, start, end); err != nil { return n, err } // note that these coordinates are already 0 based if err := setConsoleCursorPosition(uintptr(handle), false, int16(cursor.X), int16(cursor.Y)); err != nil { return n, err } case "l": for _, value := range parsedCommand.Parameters { switch value { case "?25", "25": SetCursorVisible(uintptr(handle), BOOL(0)) case "?1049", "1049": // TODO (azlinux): Restore terminal case "?1", "1": // If the DECCKM function is reset, then the arrow keys send ANSI cursor sequences to the host. term.inputEscapeSequence = []byte(KEY_ESC_CSI) } } case "h": for _, value := range parsedCommand.Parameters { switch value { case "?25", "25": SetCursorVisible(uintptr(handle), BOOL(1)) case "?1049", "1049": // TODO (azlinux): Save terminal case "?1", "1": // If the DECCKM function is set, then the arrow keys send application sequences to the host. // DECCKM (default off): When set, the cursor keys send an ESC O prefix, rather than ESC [. 
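// For example, once DECCKM is set the UP arrow key is reported to the
// application as "\x1BOA" rather than "\x1B[A"; charSequenceForKeys builds the
// sequence from whichever prefix is currently stored in inputEscapeSequence.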
term.inputEscapeSequence = []byte(KEY_ESC_O) } } case "]": /* TODO (azlinux): Linux Console Private CSI Sequences The following sequences are neither ECMA-48 nor native VT102. They are native to the Linux console driver. Colors are in SGR parameters: 0 = black, 1 = red, 2 = green, 3 = brown, 4 = blue, 5 = magenta, 6 = cyan, 7 = white. ESC [ 1 ; n ] Set color n as the underline color ESC [ 2 ; n ] Set color n as the dim color ESC [ 8 ] Make the current color pair the default attributes. ESC [ 9 ; n ] Set screen blank timeout to n minutes. ESC [ 10 ; n ] Set bell frequency in Hz. ESC [ 11 ; n ] Set bell duration in msec. ESC [ 12 ; n ] Bring specified console to the front. ESC [ 13 ] Unblank the screen. ESC [ 14 ; n ] Set the VESA powerdown interval in minutes. */ } return n, nil } // WriteChars writes the bytes to given writer. func (term *WindowsTerminal) WriteChars(fd uintptr, w io.Writer, p []byte) (n int, err error) { if len(p) == 0 { return 0, nil } return w.Write(p) } const ( CAPSLOCK_ON = 0x0080 //The CAPS LOCK light is on. ENHANCED_KEY = 0x0100 //The key is enhanced. LEFT_ALT_PRESSED = 0x0002 //The left ALT key is pressed. LEFT_CTRL_PRESSED = 0x0008 //The left CTRL key is pressed. NUMLOCK_ON = 0x0020 //The NUM LOCK light is on. RIGHT_ALT_PRESSED = 0x0001 //The right ALT key is pressed. RIGHT_CTRL_PRESSED = 0x0004 //The right CTRL key is pressed. SCROLLLOCK_ON = 0x0040 //The SCROLL LOCK light is on. SHIFT_PRESSED = 0x0010 // The SHIFT key is pressed. ) const ( KEY_CONTROL_PARAM_2 = ";2" KEY_CONTROL_PARAM_3 = ";3" KEY_CONTROL_PARAM_4 = ";4" KEY_CONTROL_PARAM_5 = ";5" KEY_CONTROL_PARAM_6 = ";6" KEY_CONTROL_PARAM_7 = ";7" KEY_CONTROL_PARAM_8 = ";8" KEY_ESC_CSI = "\x1B[" KEY_ESC_N = "\x1BN" KEY_ESC_O = "\x1BO" ) var keyMapPrefix = map[WORD]string{ VK_UP: "\x1B[%sA", VK_DOWN: "\x1B[%sB", VK_RIGHT: "\x1B[%sC", VK_LEFT: "\x1B[%sD", VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 VK_END: "\x1B[4%s~", // showkey shows ^[[4 VK_INSERT: "\x1B[2%s~", VK_DELETE: "\x1B[3%s~", VK_PRIOR: "\x1B[5%s~", VK_NEXT: "\x1B[6%s~", VK_F1: "", VK_F2: "", VK_F3: "\x1B[13%s~", VK_F4: "\x1B[14%s~", VK_F5: "\x1B[15%s~", VK_F6: "\x1B[17%s~", VK_F7: "\x1B[18%s~", VK_F8: "\x1B[19%s~", VK_F9: "\x1B[20%s~", VK_F10: "\x1B[21%s~", VK_F11: "\x1B[23%s~", VK_F12: "\x1B[24%s~", } var arrowKeyMapPrefix = map[WORD]string{ VK_UP: "%s%sA", VK_DOWN: "%s%sB", VK_RIGHT: "%s%sC", VK_LEFT: "%s%sD", } func getControlStateParameter(shift, alt, control, meta bool) string { if shift && alt && control { return KEY_CONTROL_PARAM_8 } if alt && control { return KEY_CONTROL_PARAM_7 } if shift && control { return KEY_CONTROL_PARAM_6 } if control { return KEY_CONTROL_PARAM_5 } if shift && alt { return KEY_CONTROL_PARAM_4 } if alt { return KEY_CONTROL_PARAM_3 } if shift { return KEY_CONTROL_PARAM_2 } return "" } func getControlKeys(controlState DWORD) (shift, alt, control bool) { shift = 0 != (controlState & SHIFT_PRESSED) alt = 0 != (controlState & (LEFT_ALT_PRESSED | RIGHT_ALT_PRESSED)) control = 0 != (controlState & (LEFT_CTRL_PRESSED | RIGHT_CTRL_PRESSED)) return shift, alt, control } func charSequenceForKeys(key WORD, controlState DWORD, escapeSequence []byte) string { i, ok := arrowKeyMapPrefix[key] if ok { shift, alt, control := getControlKeys(controlState) modifier := getControlStateParameter(shift, alt, control, false) return fmt.Sprintf(i, escapeSequence, modifier) } i, ok = keyMapPrefix[key] if ok { shift, alt, control := getControlKeys(controlState) modifier := getControlStateParameter(shift, alt, control, false) return fmt.Sprintf(i, 
modifier) } return "" } // mapKeystokeToTerminalString maps the given input event record to a string func mapKeystokeToTerminalString(keyEvent *KEY_EVENT_RECORD, escapeSequence []byte) string { _, alt, control := getControlKeys(keyEvent.ControlKeyState) if keyEvent.UnicodeChar == 0 { return charSequenceForKeys(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) } if control { // TODO(azlinux): Implement following control sequences // -D Signals the end of input from the keyboard; also exits current shell. // -H Deletes the first character to the left of the cursor. Also called the ERASE key. // -Q Restarts printing after it has been stopped with -s. // -S Suspends printing on the screen (does not stop the program). // -U Deletes all characters on the current line. Also called the KILL key. // -E Quits current command and creates a core } // +Key generates ESC N Key if !control && alt { return KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) } return string(keyEvent.UnicodeChar) } // getAvailableInputEvents polls the console for available events // The function does not return until at least one input record has been read. func getAvailableInputEvents(handle uintptr, inputEvents []INPUT_RECORD) (n int, err error) { // TODO(azlinux): Why is there a for loop? Seems to me, that `n` cannot be negative. - tibor for { // Read number of console events available n, err = readConsoleInputKey(handle, inputEvents) if err != nil || n >= 0 { return n, err } } } // getTranslatedKeyCodes converts the input events into the string of characters // The ANSI escape sequences are used to map key strokes to the strings func getTranslatedKeyCodes(inputEvents []INPUT_RECORD, escapeSequence []byte) string { var buf bytes.Buffer for i := 0; i < len(inputEvents); i++ { input := inputEvents[i] if input.EventType == KEY_EVENT && input.KeyEvent.KeyDown != 0 { keyString := mapKeystokeToTerminalString(&input.KeyEvent, escapeSequence) buf.WriteString(keyString) } } return buf.String() } // ReadChars reads the characters from the given reader func (term *WindowsTerminal) ReadChars(fd uintptr, r io.Reader, p []byte) (n int, err error) { for term.inputSize == 0 { nr, err := getAvailableInputEvents(fd, term.inputEvents) if nr == 0 && nil != err { return n, err } if nr > 0 { keyCodes := getTranslatedKeyCodes(term.inputEvents[:nr], term.inputEscapeSequence) term.inputSize = copy(term.inputBuffer, keyCodes) } } n = copy(p, term.inputBuffer[:term.inputSize]) term.inputSize -= n return n, nil } // HandleInputSequence interprets the input sequence command func (term *WindowsTerminal) HandleInputSequence(fd uintptr, command []byte) (n int, err error) { return 0, nil } func marshal(c COORD) uintptr { return uintptr(*((*DWORD)(unsafe.Pointer(&c)))) } // IsConsole returns true if the given file descriptor is a terminal. // -- The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. func IsConsole(fd uintptr) bool { _, e := GetConsoleMode(fd) return e == nil } docker-1.6.2/pkg/term/termios_linux.go0000644000175000017500000000225712524223634017322 0ustar tianontianon// +build !cgo package term import ( "syscall" "unsafe" ) const ( getTermios = syscall.TCGETS setTermios = syscall.TCSETS ) type Termios struct { Iflag uint32 Oflag uint32 Cflag uint32 Lflag uint32 Cc [20]byte Ispeed uint32 Ospeed uint32 } // MakeRaw puts the terminal connected to the given file descriptor into raw // mode and returns the previous state of the terminal so that it can be // restored. 
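// Concretely, raw mode here clears the input-translation flags (IGNBRK,
// BRKINT, PARMRK, ISTRIP, INLCR, IGNCR, ICRNL, IXON), output post-processing
// (OPOST), and the canonical/echo/signal bits (ECHO, ECHONL, ICANON, ISIG,
// IEXTEN), and forces 8-bit characters (CS8).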
func MakeRaw(fd uintptr) (*State, error) {
	var oldState State
	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
		return nil, err
	}

	newState := oldState.termios
	newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON)
	newState.Oflag &^= syscall.OPOST
	newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN)
	newState.Cflag &^= (syscall.CSIZE | syscall.PARENB)
	newState.Cflag |= syscall.CS8

	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 {
		return nil, err
	}
	return &oldState, nil
}
docker-1.6.2/pkg/term/term.go0000644000175000017500000000436012524223634015365 0ustar tianontianon// +build !windows

package term

import (
	"errors"
	"io"
	"os"
	"os/signal"
	"syscall"
	"unsafe"
)

var (
	ErrInvalidState = errors.New("Invalid terminal state")
)

type State struct {
	termios Termios
}

type Winsize struct {
	Height uint16
	Width  uint16
	x      uint16
	y      uint16
}

func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
	return os.Stdin, os.Stdout, os.Stderr
}

func GetFdInfo(in interface{}) (uintptr, bool) {
	var inFd uintptr
	var isTerminalIn bool
	if file, ok := in.(*os.File); ok {
		inFd = file.Fd()
		isTerminalIn = IsTerminal(inFd)
	}
	return inFd, isTerminalIn
}

func GetWinsize(fd uintptr) (*Winsize, error) {
	ws := &Winsize{}
	_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws)))
	// Skip errno = 0
	if err == 0 {
		return ws, nil
	}
	return ws, err
}

func SetWinsize(fd uintptr, ws *Winsize) error {
	_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws)))
	// Skip errno = 0
	if err == 0 {
		return nil
	}
	return err
}

// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
	var termios Termios
	return tcget(fd, &termios) == 0
}

// Restore restores the terminal connected to the given file descriptor to a
// previous state.
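// For example (illustrative), a nil state is rejected rather than applied:
//
//	err := RestoreTerminal(fd, nil) // err == ErrInvalidState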
func RestoreTerminal(fd uintptr, state *State) error { if state == nil { return ErrInvalidState } if err := tcset(fd, &state.termios); err != 0 { return err } return nil } func SaveState(fd uintptr) (*State, error) { var oldState State if err := tcget(fd, &oldState.termios); err != 0 { return nil, err } return &oldState, nil } func DisableEcho(fd uintptr, state *State) error { newState := state.termios newState.Lflag &^= syscall.ECHO if err := tcset(fd, &newState); err != 0 { return err } handleInterrupt(fd, state) return nil } func SetRawTerminal(fd uintptr) (*State, error) { oldState, err := MakeRaw(fd) if err != nil { return nil, err } handleInterrupt(fd, oldState) return oldState, err } func handleInterrupt(fd uintptr, state *State) { sigchan := make(chan os.Signal, 1) signal.Notify(sigchan, os.Interrupt) go func() { _ = <-sigchan RestoreTerminal(fd, state) os.Exit(0) }() } docker-1.6.2/pkg/term/tc_other.go0000644000175000017500000000063712524223634016230 0ustar tianontianon// +build !windows // +build !linux !cgo package term import ( "syscall" "unsafe" ) func tcget(fd uintptr, p *Termios) syscall.Errno { _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) return err } func tcset(fd uintptr, p *Termios) syscall.Errno { _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) return err } docker-1.6.2/pkg/term/termios_freebsd.go0000644000175000017500000000304012524223634017564 0ustar tianontianonpackage term import ( "syscall" "unsafe" ) const ( getTermios = syscall.TIOCGETA setTermios = syscall.TIOCSETA IGNBRK = syscall.IGNBRK PARMRK = syscall.PARMRK INLCR = syscall.INLCR IGNCR = syscall.IGNCR ECHONL = syscall.ECHONL CSIZE = syscall.CSIZE ICRNL = syscall.ICRNL ISTRIP = syscall.ISTRIP PARENB = syscall.PARENB ECHO = syscall.ECHO ICANON = syscall.ICANON ISIG = syscall.ISIG IXON = syscall.IXON BRKINT = syscall.BRKINT INPCK = syscall.INPCK OPOST = syscall.OPOST CS8 = syscall.CS8 IEXTEN = syscall.IEXTEN ) type Termios struct { Iflag uint32 Oflag uint32 Cflag uint32 Lflag uint32 Cc [20]byte Ispeed uint32 Ospeed uint32 } // MakeRaw put the terminal connected to the given file descriptor into raw // mode and returns the previous state of the terminal so that it can be // restored. 
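// On FreeBSD the ioctl numbers differ (TIOCGETA/TIOCSETA, see the consts
// above), but the flag recipe matches the Linux implementation, with the
// addition of Cc[VMIN]=1 / Cc[VTIME]=0 to request blocking reads that return
// as soon as a single byte is available.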
func MakeRaw(fd uintptr) (*State, error) { var oldState State if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { return nil, err } newState := oldState.termios newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) newState.Oflag &^= OPOST newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) newState.Cflag &^= (CSIZE | PARENB) newState.Cflag |= CS8 newState.Cc[syscall.VMIN] = 1 newState.Cc[syscall.VTIME] = 0 if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { return nil, err } return &oldState, nil } docker-1.6.2/pkg/term/termios_darwin.go0000644000175000017500000000304012524223634017436 0ustar tianontianonpackage term import ( "syscall" "unsafe" ) const ( getTermios = syscall.TIOCGETA setTermios = syscall.TIOCSETA IGNBRK = syscall.IGNBRK PARMRK = syscall.PARMRK INLCR = syscall.INLCR IGNCR = syscall.IGNCR ECHONL = syscall.ECHONL CSIZE = syscall.CSIZE ICRNL = syscall.ICRNL ISTRIP = syscall.ISTRIP PARENB = syscall.PARENB ECHO = syscall.ECHO ICANON = syscall.ICANON ISIG = syscall.ISIG IXON = syscall.IXON BRKINT = syscall.BRKINT INPCK = syscall.INPCK OPOST = syscall.OPOST CS8 = syscall.CS8 IEXTEN = syscall.IEXTEN ) type Termios struct { Iflag uint64 Oflag uint64 Cflag uint64 Lflag uint64 Cc [20]byte Ispeed uint64 Ospeed uint64 } // MakeRaw put the terminal connected to the given file descriptor into raw // mode and returns the previous state of the terminal so that it can be // restored. func MakeRaw(fd uintptr) (*State, error) { var oldState State if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { return nil, err } newState := oldState.termios newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) newState.Oflag &^= OPOST newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) newState.Cflag &^= (CSIZE | PARENB) newState.Cflag |= CS8 newState.Cc[syscall.VMIN] = 1 newState.Cc[syscall.VTIME] = 0 if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { return nil, err } return &oldState, nil } docker-1.6.2/pkg/term/tc_linux_cgo.go0000644000175000017500000000175012524223634017073 0ustar tianontianon// +build linux,cgo package term import ( "syscall" "unsafe" ) // #include import "C" type Termios syscall.Termios // MakeRaw put the terminal connected to the given file descriptor into raw // mode and returns the previous state of the terminal so that it can be // restored. 
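// Here the C library's cfmakeraw(3) applies the same flag surgery as the
// hand-rolled MakeRaw variants above, including VMIN=1/VTIME=0. A
// hypothetical variant with a ~100ms read timeout instead of blocking reads
// would set (sketch, not upstream code):
//
//	newState.Cc[syscall.VMIN] = 0
//	newState.Cc[syscall.VTIME] = 1 // in tenths of a second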
func MakeRaw(fd uintptr) (*State, error) { var oldState State if err := tcget(fd, &oldState.termios); err != 0 { return nil, err } newState := oldState.termios C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState))) if err := tcset(fd, &newState); err != 0 { return nil, err } return &oldState, nil } func tcget(fd uintptr, p *Termios) syscall.Errno { ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) if ret != 0 { return err.(syscall.Errno) } return 0 } func tcset(fd uintptr, p *Termios) syscall.Errno { ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) if ret != 0 { return err.(syscall.Errno) } return 0 } docker-1.6.2/pkg/promise/0000755000175000017500000000000012524223634014573 5ustar tianontianondocker-1.6.2/pkg/promise/promise.go0000644000175000017500000000043512524223634016602 0ustar tianontianonpackage promise // Go is a basic promise implementation: it wraps calls a function in a goroutine, // and returns a channel which will later return the function's return value. func Go(f func() error) chan error { ch := make(chan error, 1) go func() { ch <- f() }() return ch } docker-1.6.2/pkg/pubsub/0000755000175000017500000000000012524223634014415 5ustar tianontianondocker-1.6.2/pkg/pubsub/publisher_test.go0000644000175000017500000000236012524223634020001 0ustar tianontianonpackage pubsub import ( "testing" "time" ) func TestSendToOneSub(t *testing.T) { p := NewPublisher(100*time.Millisecond, 10) c := p.Subscribe() p.Publish("hi") msg := <-c if msg.(string) != "hi" { t.Fatalf("expected message hi but received %v", msg) } } func TestSendToMultipleSubs(t *testing.T) { p := NewPublisher(100*time.Millisecond, 10) subs := []chan interface{}{} subs = append(subs, p.Subscribe(), p.Subscribe(), p.Subscribe()) p.Publish("hi") for _, c := range subs { msg := <-c if msg.(string) != "hi" { t.Fatalf("expected message hi but received %v", msg) } } } func TestEvictOneSub(t *testing.T) { p := NewPublisher(100*time.Millisecond, 10) s1 := p.Subscribe() s2 := p.Subscribe() p.Evict(s1) p.Publish("hi") if _, ok := <-s1; ok { t.Fatal("expected s1 to not receive the published message") } msg := <-s2 if msg.(string) != "hi" { t.Fatalf("expected message hi but received %v", msg) } } func TestClosePublisher(t *testing.T) { p := NewPublisher(100*time.Millisecond, 10) subs := []chan interface{}{} subs = append(subs, p.Subscribe(), p.Subscribe(), p.Subscribe()) p.Close() for _, c := range subs { if _, ok := <-c; ok { t.Fatal("expected all subscriber channels to be closed") } } } docker-1.6.2/pkg/pubsub/publisher.go0000644000175000017500000000343212524223634016743 0ustar tianontianonpackage pubsub import ( "sync" "time" ) // NewPublisher creates a new pub/sub publisher to broadcast messages. // The duration is used as the send timeout as to not block the publisher publishing // messages to other clients if one client is slow or unresponsive. // The buffer is used when creating new channels for subscribers. func NewPublisher(publishTimeout time.Duration, buffer int) *Publisher { return &Publisher{ buffer: buffer, timeout: publishTimeout, subscribers: make(map[subscriber]struct{}), } } type subscriber chan interface{} type Publisher struct { m sync.RWMutex buffer int timeout time.Duration subscribers map[subscriber]struct{} } // Len returns the number of subscribers for the publisher func (p *Publisher) Len() int { p.m.RLock() i := len(p.subscribers) p.m.RUnlock() return i } // Subscribe adds a new subscriber to the publisher returning the channel. 
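// A usage sketch (illustrative, mirrors the tests above):
//
//	p := NewPublisher(100*time.Millisecond, 10)
//	c := p.Subscribe()
//	p.Publish("hi")       // does not block: subscriber channels are buffered
//	msg := (<-c).(string) // "hi"
//	p.Close()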
func (p *Publisher) Subscribe() chan interface{} { ch := make(chan interface{}, p.buffer) p.m.Lock() p.subscribers[ch] = struct{}{} p.m.Unlock() return ch } // Evict removes the specified subscriber from receiving any more messages. func (p *Publisher) Evict(sub chan interface{}) { p.m.Lock() delete(p.subscribers, sub) close(sub) p.m.Unlock() } // Publish sends the data in v to all subscribers currently registered with the publisher. func (p *Publisher) Publish(v interface{}) { p.m.RLock() for sub := range p.subscribers { // send under a select as to not block if the receiver is unavailable select { case sub <- v: case <-time.After(p.timeout): } } p.m.RUnlock() } // Close closes the channels to all subscribers registered with the publisher. func (p *Publisher) Close() { p.m.Lock() for sub := range p.subscribers { close(sub) } p.m.Unlock() } docker-1.6.2/runconfig/0000755000175000017500000000000012524223634014326 5ustar tianontianondocker-1.6.2/runconfig/merge.go0000644000175000017500000000544112524223634015760 0ustar tianontianonpackage runconfig import ( "strings" log "github.com/Sirupsen/logrus" "github.com/docker/docker/nat" ) func Merge(userConf, imageConf *Config) error { if userConf.User == "" { userConf.User = imageConf.User } if userConf.Memory == 0 { userConf.Memory = imageConf.Memory } if userConf.MemorySwap == 0 { userConf.MemorySwap = imageConf.MemorySwap } if userConf.CpuShares == 0 { userConf.CpuShares = imageConf.CpuShares } if len(userConf.ExposedPorts) == 0 { userConf.ExposedPorts = imageConf.ExposedPorts } else if imageConf.ExposedPorts != nil { if userConf.ExposedPorts == nil { userConf.ExposedPorts = make(nat.PortSet) } for port := range imageConf.ExposedPorts { if _, exists := userConf.ExposedPorts[port]; !exists { userConf.ExposedPorts[port] = struct{}{} } } } if len(userConf.PortSpecs) > 0 { if userConf.ExposedPorts == nil { userConf.ExposedPorts = make(nat.PortSet) } ports, _, err := nat.ParsePortSpecs(userConf.PortSpecs) if err != nil { return err } for port := range ports { if _, exists := userConf.ExposedPorts[port]; !exists { userConf.ExposedPorts[port] = struct{}{} } } userConf.PortSpecs = nil } if len(imageConf.PortSpecs) > 0 { // FIXME: I think we can safely remove this. Leaving it for now for the sake of reverse-compat paranoia. 
log.Debugf("Migrating image port specs to containter: %s", strings.Join(imageConf.PortSpecs, ", ")) if userConf.ExposedPorts == nil { userConf.ExposedPorts = make(nat.PortSet) } ports, _, err := nat.ParsePortSpecs(imageConf.PortSpecs) if err != nil { return err } for port := range ports { if _, exists := userConf.ExposedPorts[port]; !exists { userConf.ExposedPorts[port] = struct{}{} } } } if len(userConf.Env) == 0 { userConf.Env = imageConf.Env } else { for _, imageEnv := range imageConf.Env { found := false imageEnvKey := strings.Split(imageEnv, "=")[0] for _, userEnv := range userConf.Env { userEnvKey := strings.Split(userEnv, "=")[0] if imageEnvKey == userEnvKey { found = true } } if !found { userConf.Env = append(userConf.Env, imageEnv) } } } if userConf.Labels == nil { userConf.Labels = map[string]string{} } if imageConf.Labels != nil { for l := range userConf.Labels { imageConf.Labels[l] = userConf.Labels[l] } userConf.Labels = imageConf.Labels } if len(userConf.Entrypoint) == 0 { if len(userConf.Cmd) == 0 { userConf.Cmd = imageConf.Cmd } if userConf.Entrypoint == nil { userConf.Entrypoint = imageConf.Entrypoint } } if userConf.WorkingDir == "" { userConf.WorkingDir = imageConf.WorkingDir } if len(userConf.Volumes) == 0 { userConf.Volumes = imageConf.Volumes } else { for k, v := range imageConf.Volumes { userConf.Volumes[k] = v } } return nil } docker-1.6.2/runconfig/parse.go0000644000175000017500000004022312524223634015770 0ustar tianontianonpackage runconfig import ( "fmt" "path" "strconv" "strings" "github.com/docker/docker/nat" "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/ulimit" "github.com/docker/docker/pkg/units" "github.com/docker/docker/utils" ) var ( ErrInvalidWorkingDirectory = fmt.Errorf("The working directory is invalid. It needs to be an absolute path.") ErrConflictContainerNetworkAndLinks = fmt.Errorf("Conflicting options: --net=container can't be used with links. This would result in undefined behavior.") ErrConflictContainerNetworkAndDns = fmt.Errorf("Conflicting options: --net=container can't be used with --dns. This configuration is invalid.") ErrConflictNetworkHostname = fmt.Errorf("Conflicting options: -h and the network mode (--net)") ErrConflictHostNetworkAndDns = fmt.Errorf("Conflicting options: --net=host can't be used with --dns. This configuration is invalid.") ErrConflictHostNetworkAndLinks = fmt.Errorf("Conflicting options: --net=host can't be used with links. This would result in undefined behavior.") ) func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSet, error) { var ( // FIXME: use utils.ListOpts for attach and volumes? 
flAttach = opts.NewListOpts(opts.ValidateAttach) flVolumes = opts.NewListOpts(opts.ValidatePath) flLinks = opts.NewListOpts(opts.ValidateLink) flEnv = opts.NewListOpts(opts.ValidateEnv) flLabels = opts.NewListOpts(opts.ValidateEnv) flDevices = opts.NewListOpts(opts.ValidatePath) ulimits = make(map[string]*ulimit.Ulimit) flUlimits = opts.NewUlimitOpt(ulimits) flPublish = opts.NewListOpts(nil) flExpose = opts.NewListOpts(nil) flDns = opts.NewListOpts(opts.ValidateIPAddress) flDnsSearch = opts.NewListOpts(opts.ValidateDnsSearch) flExtraHosts = opts.NewListOpts(opts.ValidateExtraHost) flVolumesFrom = opts.NewListOpts(nil) flLxcOpts = opts.NewListOpts(nil) flEnvFile = opts.NewListOpts(nil) flCapAdd = opts.NewListOpts(nil) flCapDrop = opts.NewListOpts(nil) flSecurityOpt = opts.NewListOpts(nil) flLabelsFile = opts.NewListOpts(nil) flNetwork = cmd.Bool([]string{"#n", "#-networking"}, true, "Enable networking for this container") flPrivileged = cmd.Bool([]string{"#privileged", "-privileged"}, false, "Give extended privileges to this container") flPidMode = cmd.String([]string{"-pid"}, "", "PID namespace to use") flPublishAll = cmd.Bool([]string{"P", "-publish-all"}, false, "Publish all exposed ports to random ports") flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") flContainerIDFile = cmd.String([]string{"#cidfile", "-cidfile"}, "", "Write the container ID to the file") flEntrypoint = cmd.String([]string{"#entrypoint", "-entrypoint"}, "", "Overwrite the default ENTRYPOINT of the image") flHostname = cmd.String([]string{"h", "-hostname"}, "", "Container host name") flMemoryString = cmd.String([]string{"m", "-memory"}, "", "Memory limit") flMemorySwap = cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap") flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: [:])") flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") flCpusetCpus = cmd.String([]string{"#-cpuset", "-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") flNetMode = cmd.String([]string{"-net"}, "bridge", "Set the Network mode for the container") flMacAddress = cmd.String([]string{"-mac-address"}, "", "Container MAC address (e.g. 
92:d0:c6:0a:29:33)") flIpcMode = cmd.String([]string{"-ipc"}, "", "IPC namespace to use") flRestartPolicy = cmd.String([]string{"-restart"}, "no", "Restart policy to apply when a container exits") flReadonlyRootfs = cmd.Bool([]string{"-read-only"}, false, "Mount the container's root filesystem as read only") flLoggingDriver = cmd.String([]string{"-log-driver"}, "", "Logging driver for container") flCgroupParent = cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container") ) cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to STDIN, STDOUT or STDERR") cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume") cmd.Var(&flLinks, []string{"#link", "-link"}, "Add link to another container") cmd.Var(&flDevices, []string{"-device"}, "Add a host device to the container") cmd.Var(&flLabels, []string{"l", "-label"}, "Set meta data on a container") cmd.Var(&flLabelsFile, []string{"-label-file"}, "Read in a line delimited file of labels") cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables") cmd.Var(&flEnvFile, []string{"-env-file"}, "Read in a file of environment variables") cmd.Var(&flPublish, []string{"p", "-publish"}, "Publish a container's port(s) to the host") cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port or a range of ports") cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom DNS servers") cmd.Var(&flDnsSearch, []string{"-dns-search"}, "Set custom DNS search domains") cmd.Var(&flExtraHosts, []string{"-add-host"}, "Add a custom host-to-IP mapping (host:ip)") cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)") cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "Add custom lxc options") cmd.Var(&flCapAdd, []string{"-cap-add"}, "Add Linux capabilities") cmd.Var(&flCapDrop, []string{"-cap-drop"}, "Drop Linux capabilities") cmd.Var(&flSecurityOpt, []string{"-security-opt"}, "Security Options") cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options") cmd.Require(flag.Min, 1) if err := utils.ParseFlags(cmd, args, true); err != nil { return nil, nil, cmd, err } // Validate input params if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) { return nil, nil, cmd, ErrInvalidWorkingDirectory } // Validate the input mac address if *flMacAddress != "" { if _, err := opts.ValidateMACAddress(*flMacAddress); err != nil { return nil, nil, cmd, fmt.Errorf("%s is not a valid mac address", *flMacAddress) } } var ( attachStdin = flAttach.Get("stdin") attachStdout = flAttach.Get("stdout") attachStderr = flAttach.Get("stderr") ) if *flNetMode != "bridge" && *flNetMode != "none" && *flHostname != "" { return nil, nil, cmd, ErrConflictNetworkHostname } if *flNetMode == "host" && flLinks.Len() > 0 { return nil, nil, cmd, ErrConflictHostNetworkAndLinks } if *flNetMode == "container" && flLinks.Len() > 0 { return nil, nil, cmd, ErrConflictContainerNetworkAndLinks } if *flNetMode == "host" && flDns.Len() > 0 { return nil, nil, cmd, ErrConflictHostNetworkAndDns } if *flNetMode == "container" && flDns.Len() > 0 { return nil, nil, cmd, ErrConflictContainerNetworkAndDns } // If neither -d or -a are set, attach to everything by default if flAttach.Len() == 0 { attachStdout = true attachStderr = true if *flStdin { attachStdin = true } } var flMemory int64 if *flMemoryString != "" { parsedMemory, err := units.RAMInBytes(*flMemoryString) if err != nil { return nil, nil, cmd, err } flMemory = parsedMemory } var MemorySwap int64 if *flMemorySwap != "" { if *flMemorySwap == "-1" { 
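		// "-1" disables the swap limit entirely; any other value is parsed by
		// units.RAMInBytes below, which accepts binary suffixes (illustrative:
		// "1g" parses to 1073741824).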
MemorySwap = -1 } else { parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap) if err != nil { return nil, nil, cmd, err } MemorySwap = parsedMemorySwap } } var binds []string // add any bind targets to the list of container volumes for bind := range flVolumes.GetMap() { if arr := strings.Split(bind, ":"); len(arr) > 1 { if arr[1] == "/" { return nil, nil, cmd, fmt.Errorf("Invalid bind mount: destination can't be '/'") } // after creating the bind mount we want to delete it from the flVolumes values because // we do not want bind mounts being committed to image configs binds = append(binds, bind) flVolumes.Delete(bind) } else if bind == "/" { return nil, nil, cmd, fmt.Errorf("Invalid volume: path can't be '/'") } } var ( parsedArgs = cmd.Args() runCmd []string entrypoint []string image = cmd.Arg(0) ) if len(parsedArgs) > 1 { runCmd = parsedArgs[1:] } if *flEntrypoint != "" { entrypoint = []string{*flEntrypoint} } lxcConf, err := parseKeyValueOpts(flLxcOpts) if err != nil { return nil, nil, cmd, err } var ( domainname string hostname = *flHostname parts = strings.SplitN(hostname, ".", 2) ) if len(parts) > 1 { hostname = parts[0] domainname = parts[1] } ports, portBindings, err := nat.ParsePortSpecs(flPublish.GetAll()) if err != nil { return nil, nil, cmd, err } // Merge in exposed ports to the map of published ports for _, e := range flExpose.GetAll() { if strings.Contains(e, ":") { return nil, nil, cmd, fmt.Errorf("Invalid port format for --expose: %s", e) } //support two formats for expose, original format /[] or /[] proto, port := nat.SplitProtoPort(e) //parse the start and end port and create a sequence of ports to expose //if expose a port, the start and end port are the same start, end, err := parsers.ParsePortRange(port) if err != nil { return nil, nil, cmd, fmt.Errorf("Invalid range format for --expose: %s, error: %s", e, err) } for i := start; i <= end; i++ { p := nat.NewPort(proto, strconv.FormatUint(i, 10)) if _, exists := ports[p]; !exists { ports[p] = struct{}{} } } } // parse device mappings deviceMappings := []DeviceMapping{} for _, device := range flDevices.GetAll() { deviceMapping, err := ParseDevice(device) if err != nil { return nil, nil, cmd, err } deviceMappings = append(deviceMappings, deviceMapping) } // collect all the environment variables for the container envVariables, err := readKVStrings(flEnvFile.GetAll(), flEnv.GetAll()) if err != nil { return nil, nil, cmd, err } // collect all the labels for the container labels, err := readKVStrings(flLabelsFile.GetAll(), flLabels.GetAll()) if err != nil { return nil, nil, cmd, err } ipcMode := IpcMode(*flIpcMode) if !ipcMode.Valid() { return nil, nil, cmd, fmt.Errorf("--ipc: invalid IPC mode") } pidMode := PidMode(*flPidMode) if !pidMode.Valid() { return nil, nil, cmd, fmt.Errorf("--pid: invalid PID mode") } netMode, err := parseNetMode(*flNetMode) if err != nil { return nil, nil, cmd, fmt.Errorf("--net: invalid net mode: %v", err) } restartPolicy, err := parseRestartPolicy(*flRestartPolicy) if err != nil { return nil, nil, cmd, err } config := &Config{ Hostname: hostname, Domainname: domainname, PortSpecs: nil, // Deprecated ExposedPorts: ports, User: *flUser, Tty: *flTty, NetworkDisabled: !*flNetwork, OpenStdin: *flStdin, Memory: flMemory, // FIXME: for backward compatibility MemorySwap: MemorySwap, // FIXME: for backward compatibility CpuShares: *flCpuShares, // FIXME: for backward compatibility Cpuset: *flCpusetCpus, // FIXME: for backward compatibility AttachStdin: attachStdin, AttachStdout: attachStdout, 
AttachStderr: attachStderr, Env: envVariables, Cmd: runCmd, Image: image, Volumes: flVolumes.GetMap(), MacAddress: *flMacAddress, Entrypoint: entrypoint, WorkingDir: *flWorkingDir, Labels: convertKVStringsToMap(labels), } hostConfig := &HostConfig{ Binds: binds, ContainerIDFile: *flContainerIDFile, LxcConf: lxcConf, Memory: flMemory, MemorySwap: MemorySwap, CpuShares: *flCpuShares, CpusetCpus: *flCpusetCpus, Privileged: *flPrivileged, PortBindings: portBindings, Links: flLinks.GetAll(), PublishAllPorts: *flPublishAll, Dns: flDns.GetAll(), DnsSearch: flDnsSearch.GetAll(), ExtraHosts: flExtraHosts.GetAll(), VolumesFrom: flVolumesFrom.GetAll(), NetworkMode: netMode, IpcMode: ipcMode, PidMode: pidMode, Devices: deviceMappings, CapAdd: flCapAdd.GetAll(), CapDrop: flCapDrop.GetAll(), RestartPolicy: restartPolicy, SecurityOpt: flSecurityOpt.GetAll(), ReadonlyRootfs: *flReadonlyRootfs, Ulimits: flUlimits.GetList(), LogConfig: LogConfig{Type: *flLoggingDriver}, CgroupParent: *flCgroupParent, } // When allocating stdin in attached mode, close stdin at client disconnect if config.OpenStdin && config.AttachStdin { config.StdinOnce = true } return config, hostConfig, cmd, nil } // reads a file of line terminated key=value pairs and override that with override parameter func readKVStrings(files []string, override []string) ([]string, error) { envVariables := []string{} for _, ef := range files { parsedVars, err := opts.ParseEnvFile(ef) if err != nil { return nil, err } envVariables = append(envVariables, parsedVars...) } // parse the '-e' and '--env' after, to allow override envVariables = append(envVariables, override...) return envVariables, nil } // converts ["key=value"] to {"key":"value"} func convertKVStringsToMap(values []string) map[string]string { result := make(map[string]string, len(values)) for _, value := range values { kv := strings.SplitN(value, "=", 2) if len(kv) == 1 { result[kv[0]] = "" } else { result[kv[0]] = kv[1] } } return result } // parseRestartPolicy returns the parsed policy or an error indicating what is incorrect func parseRestartPolicy(policy string) (RestartPolicy, error) { p := RestartPolicy{} if policy == "" { return p, nil } var ( parts = strings.Split(policy, ":") name = parts[0] ) p.Name = name switch name { case "always": if len(parts) == 2 { return p, fmt.Errorf("maximum restart count not valid with restart policy of \"always\"") } case "no": // do nothing case "on-failure": if len(parts) == 2 { count, err := strconv.Atoi(parts[1]) if err != nil { return p, err } p.MaximumRetryCount = count } default: return p, fmt.Errorf("invalid restart policy %s", name) } return p, nil } // options will come in the format of name.key=value or name.option func parseDriverOpts(opts opts.ListOpts) (map[string][]string, error) { out := make(map[string][]string, len(opts.GetAll())) for _, o := range opts.GetAll() { parts := strings.SplitN(o, ".", 2) if len(parts) < 2 { return nil, fmt.Errorf("invalid opt format %s", o) } else if strings.TrimSpace(parts[0]) == "" { return nil, fmt.Errorf("key cannot be empty %s", o) } values, exists := out[parts[0]] if !exists { values = []string{} } out[parts[0]] = append(values, parts[1]) } return out, nil } func parseKeyValueOpts(opts opts.ListOpts) ([]utils.KeyValuePair, error) { out := make([]utils.KeyValuePair, opts.Len()) for i, o := range opts.GetAll() { k, v, err := parsers.ParseKeyValueOpt(o) if err != nil { return nil, err } out[i] = utils.KeyValuePair{Key: k, Value: v} } return out, nil } func parseNetMode(netMode string) (NetworkMode, 
error) { parts := strings.Split(netMode, ":") switch mode := parts[0]; mode { case "bridge", "none", "host": case "container": if len(parts) < 2 || parts[1] == "" { return "", fmt.Errorf("invalid container format container:") } default: return "", fmt.Errorf("invalid --net: %s", netMode) } return NetworkMode(netMode), nil } func ParseDevice(device string) (DeviceMapping, error) { src := "" dst := "" permissions := "rwm" arr := strings.Split(device, ":") switch len(arr) { case 3: permissions = arr[2] fallthrough case 2: dst = arr[1] fallthrough case 1: src = arr[0] default: return DeviceMapping{}, fmt.Errorf("Invalid device specification: %s", device) } if dst == "" { dst = src } deviceMapping := DeviceMapping{ PathOnHost: src, PathInContainer: dst, CgroupPermissions: permissions, } return deviceMapping, nil } docker-1.6.2/runconfig/compare.go0000644000175000017500000000270112524223634016303 0ustar tianontianonpackage runconfig // Compare two Config struct. Do not compare the "Image" nor "Hostname" fields // If OpenStdin is set, then it differs func Compare(a, b *Config) bool { if a == nil || b == nil || a.OpenStdin || b.OpenStdin { return false } if a.AttachStdout != b.AttachStdout || a.AttachStderr != b.AttachStderr || a.User != b.User || a.Memory != b.Memory || a.MemorySwap != b.MemorySwap || a.CpuShares != b.CpuShares || a.OpenStdin != b.OpenStdin || a.Tty != b.Tty { return false } if len(a.Cmd) != len(b.Cmd) || len(a.Env) != len(b.Env) || len(a.Labels) != len(b.Labels) || len(a.PortSpecs) != len(b.PortSpecs) || len(a.ExposedPorts) != len(b.ExposedPorts) || len(a.Entrypoint) != len(b.Entrypoint) || len(a.Volumes) != len(b.Volumes) { return false } for i := 0; i < len(a.Cmd); i++ { if a.Cmd[i] != b.Cmd[i] { return false } } for i := 0; i < len(a.Env); i++ { if a.Env[i] != b.Env[i] { return false } } for k, v := range a.Labels { if v != b.Labels[k] { return false } } for i := 0; i < len(a.PortSpecs); i++ { if a.PortSpecs[i] != b.PortSpecs[i] { return false } } for k := range a.ExposedPorts { if _, exists := b.ExposedPorts[k]; !exists { return false } } for i := 0; i < len(a.Entrypoint); i++ { if a.Entrypoint[i] != b.Entrypoint[i] { return false } } for key := range a.Volumes { if _, exists := b.Volumes[key]; !exists { return false } } return true } docker-1.6.2/runconfig/config.go0000644000175000017500000000545412524223634016132 0ustar tianontianonpackage runconfig import ( "github.com/docker/docker/engine" "github.com/docker/docker/nat" ) // Note: the Config structure should hold only portable information about the container. // Here, "portable" means "independent from the host we are running on". // Non-portable information *should* appear in HostConfig. type Config struct { Hostname string Domainname string User string Memory int64 // FIXME: we keep it for backward compatibility, it has been moved to hostConfig. MemorySwap int64 // FIXME: it has been moved to hostConfig. CpuShares int64 // FIXME: it has been moved to hostConfig. Cpuset string // FIXME: it has been moved to hostConfig and renamed to CpusetCpus. AttachStdin bool AttachStdout bool AttachStderr bool PortSpecs []string // Deprecated - Can be in the format of 8080/tcp ExposedPorts map[nat.Port]struct{} Tty bool // Attach standard streams to a tty, including stdin if it is not closed. OpenStdin bool // Open stdin StdinOnce bool // If true, close stdin after the 1 attached client disconnects. Env []string Cmd []string Image string // Name of the image as it was passed by the operator (eg. 
could be symbolic) Volumes map[string]struct{} WorkingDir string Entrypoint []string NetworkDisabled bool MacAddress string OnBuild []string Labels map[string]string } func ContainerConfigFromJob(job *engine.Job) *Config { config := &Config{ Hostname: job.Getenv("Hostname"), Domainname: job.Getenv("Domainname"), User: job.Getenv("User"), Memory: job.GetenvInt64("Memory"), MemorySwap: job.GetenvInt64("MemorySwap"), CpuShares: job.GetenvInt64("CpuShares"), Cpuset: job.Getenv("Cpuset"), AttachStdin: job.GetenvBool("AttachStdin"), AttachStdout: job.GetenvBool("AttachStdout"), AttachStderr: job.GetenvBool("AttachStderr"), Tty: job.GetenvBool("Tty"), OpenStdin: job.GetenvBool("OpenStdin"), StdinOnce: job.GetenvBool("StdinOnce"), Image: job.Getenv("Image"), WorkingDir: job.Getenv("WorkingDir"), NetworkDisabled: job.GetenvBool("NetworkDisabled"), MacAddress: job.Getenv("MacAddress"), } job.GetenvJson("ExposedPorts", &config.ExposedPorts) job.GetenvJson("Volumes", &config.Volumes) if PortSpecs := job.GetenvList("PortSpecs"); PortSpecs != nil { config.PortSpecs = PortSpecs } if Env := job.GetenvList("Env"); Env != nil { config.Env = Env } if Cmd := job.GetenvList("Cmd"); Cmd != nil { config.Cmd = Cmd } job.GetenvJson("Labels", &config.Labels) if Entrypoint := job.GetenvList("Entrypoint"); Entrypoint != nil { config.Entrypoint = Entrypoint } return config } docker-1.6.2/runconfig/config_test.go0000644000175000017500000002577612524223634017202 0ustar tianontianonpackage runconfig import ( "fmt" "strings" "testing" "github.com/docker/docker/nat" ) func parse(t *testing.T, args string) (*Config, *HostConfig, error) { config, hostConfig, _, err := parseRun(strings.Split(args+" ubuntu bash", " ")) return config, hostConfig, err } func mustParse(t *testing.T, args string) (*Config, *HostConfig) { config, hostConfig, err := parse(t, args) if err != nil { t.Fatal(err) } return config, hostConfig } // check if (a == c && b == d) || (a == d && b == c) // because maps are randomized func compareRandomizedStrings(a, b, c, d string) error { if a == c && b == d { return nil } if a == d && b == c { return nil } return fmt.Errorf("strings don't match") } func TestParseRunLinks(t *testing.T) { if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links) } if _, hostConfig := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" { t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links) } if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 { t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links) } if _, _, err := parse(t, "--link a"); err == nil { t.Fatalf("Error parsing links. `--link a` should be an error but is not") } if _, _, err := parse(t, "--link"); err == nil { t.Fatalf("Error parsing links. `--link` should be an error but is not") } } func TestParseRunAttach(t *testing.T) { if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr { t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) } if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr { t.Fatalf("Error parsing attach flags. 
Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) } if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) } if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr { t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) } if _, _, err := parse(t, "-a"); err == nil { t.Fatalf("Error parsing attach flags, `-a` should be an error but is not") } if _, _, err := parse(t, "-a invalid"); err == nil { t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not") } if _, _, err := parse(t, "-a invalid -a stdout"); err == nil { t.Fatalf("Error parsing attach flags, `-a stdout -a invalid` should be an error but is not") } if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil { t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not") } if _, _, err := parse(t, "-a stdin -d"); err == nil { t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not") } if _, _, err := parse(t, "-a stdout -d"); err == nil { t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not") } if _, _, err := parse(t, "-a stderr -d"); err == nil { t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not") } if _, _, err := parse(t, "-d --rm"); err == nil { t.Fatalf("Error parsing attach flags, `-d --rm` should be an error but is not") } } func TestParseRunVolumes(t *testing.T) { if config, hostConfig := mustParse(t, "-v /tmp"); hostConfig.Binds != nil { t.Fatalf("Error parsing volume flags, `-v /tmp` should not mount-bind anything. Received %v", hostConfig.Binds) } else if _, exists := config.Volumes["/tmp"]; !exists { t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes) } if config, hostConfig := mustParse(t, "-v /tmp -v /var"); hostConfig.Binds != nil { t.Fatalf("Error parsing volume flags, `-v /tmp -v /var` should not mount-bind anything. Received %v", hostConfig.Binds) } else if _, exists := config.Volumes["/tmp"]; !exists { t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Recevied %v", config.Volumes) } else if _, exists := config.Volumes["/var"]; !exists { t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes. Received %v", config.Volumes) } if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp` should mount-bind /hostTmp into /containeTmp. Received %v", hostConfig.Binds) } if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp", "/hostVar:/containerVar") != nil { t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /hostVar:/containerVar` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. 
Received %v", hostConfig.Binds) } if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp:ro", "/hostVar:/containerVar:rw") != nil { t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) } if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /containerVar"); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /containerVar` should mount-bind only /hostTmp into /containeTmp. Received %v", hostConfig.Binds) } else if _, exists := config.Volumes["/containerVar"]; !exists { t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes) } if config, hostConfig := mustParse(t, ""); hostConfig.Binds != nil { t.Fatalf("Error parsing volume flags, without volume, nothing should be mount-binded. Received %v", hostConfig.Binds) } else if len(config.Volumes) != 0 { t.Fatalf("Error parsing volume flags, without volume, no volume should be present. Received %v", config.Volumes) } if _, _, err := parse(t, "-v /"); err == nil { t.Fatalf("Expected error, but got none") } if _, _, err := parse(t, "-v /:/"); err == nil { t.Fatalf("Error parsing volume flags, `-v /:/` should fail but didn't") } if _, _, err := parse(t, "-v"); err == nil { t.Fatalf("Error parsing volume flags, `-v` should fail but didn't") } if _, _, err := parse(t, "-v /tmp:"); err == nil { t.Fatalf("Error parsing volume flags, `-v /tmp:` should fail but didn't") } if _, _, err := parse(t, "-v /tmp:ro"); err == nil { t.Fatalf("Error parsing volume flags, `-v /tmp:ro` should fail but didn't") } if _, _, err := parse(t, "-v /tmp::"); err == nil { t.Fatalf("Error parsing volume flags, `-v /tmp::` should fail but didn't") } if _, _, err := parse(t, "-v :"); err == nil { t.Fatalf("Error parsing volume flags, `-v :` should fail but didn't") } if _, _, err := parse(t, "-v ::"); err == nil { t.Fatalf("Error parsing volume flags, `-v ::` should fail but didn't") } if _, _, err := parse(t, "-v /tmp:/tmp:/tmp:/tmp"); err == nil { t.Fatalf("Error parsing volume flags, `-v /tmp:/tmp:/tmp:/tmp` should fail but didn't") } } func TestCompare(t *testing.T) { volumes1 := make(map[string]struct{}) volumes1["/test1"] = struct{}{} config1 := Config{ PortSpecs: []string{"1111:1111", "2222:2222"}, Env: []string{"VAR1=1", "VAR2=2"}, Volumes: volumes1, } config3 := Config{ PortSpecs: []string{"0000:0000", "2222:2222"}, Env: []string{"VAR1=1", "VAR2=2"}, Volumes: volumes1, } volumes2 := make(map[string]struct{}) volumes2["/test2"] = struct{}{} config5 := Config{ PortSpecs: []string{"0000:0000", "2222:2222"}, Env: []string{"VAR1=1", "VAR2=2"}, Volumes: volumes2, } if Compare(&config1, &config3) { t.Fatalf("Compare should return false, PortSpecs are different") } if Compare(&config1, &config5) { t.Fatalf("Compare should return false, Volumes are different") } if !Compare(&config1, &config1) { t.Fatalf("Compare should return true") } } func TestMerge(t *testing.T) { volumesImage := make(map[string]struct{}) volumesImage["/test1"] = struct{}{} volumesImage["/test2"] = struct{}{} configImage := &Config{ PortSpecs: []string{"1111:1111", "2222:2222"}, Env: []string{"VAR1=1", "VAR2=2"}, 
Volumes: volumesImage, } volumesUser := make(map[string]struct{}) volumesUser["/test3"] = struct{}{} configUser := &Config{ PortSpecs: []string{"3333:2222", "3333:3333"}, Env: []string{"VAR2=3", "VAR3=3"}, Volumes: volumesUser, } if err := Merge(configUser, configImage); err != nil { t.Error(err) } if len(configUser.ExposedPorts) != 3 { t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) } for portSpecs := range configUser.ExposedPorts { if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs) } } if len(configUser.Env) != 3 { t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env)) } for _, env := range configUser.Env { if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" { t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env) } } if len(configUser.Volumes) != 3 { t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes)) } for v := range configUser.Volumes { if v != "/test1" && v != "/test2" && v != "/test3" { t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v) } } ports, _, err := nat.ParsePortSpecs([]string{"0000"}) if err != nil { t.Error(err) } configImage2 := &Config{ ExposedPorts: ports, } if err := Merge(configUser, configImage2); err != nil { t.Error(err) } if len(configUser.ExposedPorts) != 4 { t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) } for portSpecs := range configUser.ExposedPorts { if portSpecs.Port() != "0" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { t.Fatalf("Expected %q or %q or %q or %q, found %s", 0, 1111, 2222, 3333, portSpecs) } } } docker-1.6.2/runconfig/hostconfig.go0000644000175000017500000001374412524223634017031 0ustar tianontianonpackage runconfig import ( "strings" "github.com/docker/docker/engine" "github.com/docker/docker/nat" "github.com/docker/docker/pkg/ulimit" "github.com/docker/docker/utils" ) type NetworkMode string // IsPrivate indicates whether container use it's private network stack func (n NetworkMode) IsPrivate() bool { return !(n.IsHost() || n.IsContainer() || n.IsNone()) } func (n NetworkMode) IsHost() bool { return n == "host" } func (n NetworkMode) IsContainer() bool { parts := strings.SplitN(string(n), ":", 2) return len(parts) > 1 && parts[0] == "container" } func (n NetworkMode) IsNone() bool { return n == "none" } type IpcMode string // IsPrivate indicates whether container use it's private ipc stack func (n IpcMode) IsPrivate() bool { return !(n.IsHost() || n.IsContainer()) } func (n IpcMode) IsHost() bool { return n == "host" } func (n IpcMode) IsContainer() bool { parts := strings.SplitN(string(n), ":", 2) return len(parts) > 1 && parts[0] == "container" } func (n IpcMode) Valid() bool { parts := strings.Split(string(n), ":") switch mode := parts[0]; mode { case "", "host": case "container": if len(parts) != 2 || parts[1] == "" { return false } default: return false } return true } func (n IpcMode) Container() string { parts := strings.SplitN(string(n), ":", 2) if len(parts) > 1 { return parts[1] } return "" } type PidMode string // IsPrivate indicates whether container use it's private pid stack func (n PidMode) IsPrivate() bool { return !(n.IsHost()) } func (n PidMode) IsHost() bool { return n == "host" } func (n PidMode) Valid() bool { parts := strings.Split(string(n), ":") switch mode := parts[0]; 
mode { case "", "host": default: return false } return true } type DeviceMapping struct { PathOnHost string PathInContainer string CgroupPermissions string } type RestartPolicy struct { Name string MaximumRetryCount int } type LogConfig struct { Type string Config map[string]string } type HostConfig struct { Binds []string ContainerIDFile string LxcConf []utils.KeyValuePair Memory int64 // Memory limit (in bytes) MemorySwap int64 // Total memory usage (memory + swap); set `-1` to disable swap CpuShares int64 // CPU shares (relative weight vs. other containers) CpusetCpus string // CpusetCpus 0-2, 0,1 Privileged bool PortBindings nat.PortMap Links []string PublishAllPorts bool Dns []string DnsSearch []string ExtraHosts []string VolumesFrom []string Devices []DeviceMapping NetworkMode NetworkMode IpcMode IpcMode PidMode PidMode CapAdd []string CapDrop []string RestartPolicy RestartPolicy SecurityOpt []string ReadonlyRootfs bool Ulimits []*ulimit.Ulimit LogConfig LogConfig CgroupParent string // Parent cgroup. } // This is used by the create command when you want to set both the // Config and the HostConfig in the same call type ConfigAndHostConfig struct { Config HostConfig HostConfig } func MergeConfigs(config *Config, hostConfig *HostConfig) *ConfigAndHostConfig { return &ConfigAndHostConfig{ *config, *hostConfig, } } func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { if job.EnvExists("HostConfig") { hostConfig := HostConfig{} job.GetenvJson("HostConfig", &hostConfig) // FIXME: These are for backward compatibility, if people use these // options with `HostConfig`, we should still make them workable. if job.EnvExists("Memory") && hostConfig.Memory == 0 { hostConfig.Memory = job.GetenvInt64("Memory") } if job.EnvExists("MemorySwap") && hostConfig.MemorySwap == 0 { hostConfig.MemorySwap = job.GetenvInt64("MemorySwap") } if job.EnvExists("CpuShares") && hostConfig.CpuShares == 0 { hostConfig.CpuShares = job.GetenvInt64("CpuShares") } if job.EnvExists("Cpuset") && hostConfig.CpusetCpus == "" { hostConfig.CpusetCpus = job.Getenv("Cpuset") } return &hostConfig } hostConfig := &HostConfig{ ContainerIDFile: job.Getenv("ContainerIDFile"), Memory: job.GetenvInt64("Memory"), MemorySwap: job.GetenvInt64("MemorySwap"), CpuShares: job.GetenvInt64("CpuShares"), CpusetCpus: job.Getenv("CpusetCpus"), Privileged: job.GetenvBool("Privileged"), PublishAllPorts: job.GetenvBool("PublishAllPorts"), NetworkMode: NetworkMode(job.Getenv("NetworkMode")), IpcMode: IpcMode(job.Getenv("IpcMode")), PidMode: PidMode(job.Getenv("PidMode")), ReadonlyRootfs: job.GetenvBool("ReadonlyRootfs"), CgroupParent: job.Getenv("CgroupParent"), } // FIXME: This is for backward compatibility, if people use `Cpuset` // in json, make it workable, we will only pass hostConfig.CpusetCpus // to execDriver. 
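	// e.g. (illustrative) a legacy client posting {"Cpuset": "0,1"} with no
	// "CpusetCpus" key still ends up with hostConfig.CpusetCpus == "0,1".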
if job.EnvExists("Cpuset") && hostConfig.CpusetCpus == "" { hostConfig.CpusetCpus = job.Getenv("Cpuset") } job.GetenvJson("LxcConf", &hostConfig.LxcConf) job.GetenvJson("PortBindings", &hostConfig.PortBindings) job.GetenvJson("Devices", &hostConfig.Devices) job.GetenvJson("RestartPolicy", &hostConfig.RestartPolicy) job.GetenvJson("Ulimits", &hostConfig.Ulimits) job.GetenvJson("LogConfig", &hostConfig.LogConfig) hostConfig.SecurityOpt = job.GetenvList("SecurityOpt") if Binds := job.GetenvList("Binds"); Binds != nil { hostConfig.Binds = Binds } if Links := job.GetenvList("Links"); Links != nil { hostConfig.Links = Links } if Dns := job.GetenvList("Dns"); Dns != nil { hostConfig.Dns = Dns } if DnsSearch := job.GetenvList("DnsSearch"); DnsSearch != nil { hostConfig.DnsSearch = DnsSearch } if ExtraHosts := job.GetenvList("ExtraHosts"); ExtraHosts != nil { hostConfig.ExtraHosts = ExtraHosts } if VolumesFrom := job.GetenvList("VolumesFrom"); VolumesFrom != nil { hostConfig.VolumesFrom = VolumesFrom } if CapAdd := job.GetenvList("CapAdd"); CapAdd != nil { hostConfig.CapAdd = CapAdd } if CapDrop := job.GetenvList("CapDrop"); CapDrop != nil { hostConfig.CapDrop = CapDrop } return hostConfig } docker-1.6.2/runconfig/exec.go0000644000175000017500000000403212524223634015600 0ustar tianontianonpackage runconfig import ( "fmt" "github.com/docker/docker/engine" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/utils" ) type ExecConfig struct { User string Privileged bool Tty bool Container string AttachStdin bool AttachStderr bool AttachStdout bool Detach bool Cmd []string } func ExecConfigFromJob(job *engine.Job) (*ExecConfig, error) { execConfig := &ExecConfig{ // TODO(vishh): Expose 'User' once it is supported. //User: job.Getenv("User"), // TODO(vishh): Expose 'Privileged' once it is supported. //Privileged: job.GetenvBool("Privileged"), Tty: job.GetenvBool("Tty"), AttachStdin: job.GetenvBool("AttachStdin"), AttachStderr: job.GetenvBool("AttachStderr"), AttachStdout: job.GetenvBool("AttachStdout"), } cmd := job.GetenvList("Cmd") if len(cmd) == 0 { return nil, fmt.Errorf("No exec command specified") } execConfig.Cmd = cmd return execConfig, nil } func ParseExec(cmd *flag.FlagSet, args []string) (*ExecConfig, error) { var ( flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run command in the background") execCmd []string container string ) cmd.Require(flag.Min, 2) if err := utils.ParseFlags(cmd, args, true); err != nil { return nil, err } container = cmd.Arg(0) parsedArgs := cmd.Args() execCmd = parsedArgs[1:] execConfig := &ExecConfig{ // TODO(vishh): Expose '-u' flag once it is supported. User: "", // TODO(vishh): Expose '-p' flag once it is supported. 
Privileged: false, Tty: *flTty, Cmd: execCmd, Container: container, Detach: *flDetach, } // If -d is not set, attach to everything by default if !*flDetach { execConfig.AttachStdout = true execConfig.AttachStderr = true if *flStdin { execConfig.AttachStdin = true } } return execConfig, nil } docker-1.6.2/runconfig/parse_test.go0000644000175000017500000000306612524223634017033 0ustar tianontianonpackage runconfig import ( "io/ioutil" "testing" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/parsers" ) func parseRun(args []string) (*Config, *HostConfig, *flag.FlagSet, error) { cmd := flag.NewFlagSet("run", flag.ContinueOnError) cmd.SetOutput(ioutil.Discard) cmd.Usage = nil return Parse(cmd, args) } func TestParseLxcConfOpt(t *testing.T) { opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "} for _, o := range opts { k, v, err := parsers.ParseKeyValueOpt(o) if err != nil { t.FailNow() } if k != "lxc.utsname" { t.Fail() } if v != "docker" { t.Fail() } } } func TestNetHostname(t *testing.T) { if _, _, _, err := parseRun([]string{"-h=name", "img", "cmd"}); err != nil { t.Fatalf("Unexpected error: %s", err) } if _, _, _, err := parseRun([]string{"--net=host", "img", "cmd"}); err != nil { t.Fatalf("Unexpected error: %s", err) } if _, _, _, err := parseRun([]string{"-h=name", "--net=bridge", "img", "cmd"}); err != nil { t.Fatalf("Unexpected error: %s", err) } if _, _, _, err := parseRun([]string{"-h=name", "--net=none", "img", "cmd"}); err != nil { t.Fatalf("Unexpected error: %s", err) } if _, _, _, err := parseRun([]string{"-h=name", "--net=host", "img", "cmd"}); err != ErrConflictNetworkHostname { t.Fatalf("Expected error ErrConflictNetworkHostname, got: %s", err) } if _, _, _, err := parseRun([]string{"-h=name", "--net=container:other", "img", "cmd"}); err != ErrConflictNetworkHostname { t.Fatalf("Expected error ErrConflictNetworkHostname, got: %s", err) } } docker-1.6.2/Dockerfile.simple0000644000175000017500000000170012524223634015614 0ustar tianontianon# docker build -t docker:simple -f Dockerfile.simple . # docker run --rm docker:simple hack/make.sh dynbinary # docker run --rm --privileged docker:simple hack/dind hack/make.sh test-unit # docker run --rm --privileged -v /var/lib/docker docker:simple hack/dind hack/make.sh dynbinary test-integration-cli # This represents the bare minimum required to build and test Docker. FROM debian:jessie # compile and runtime deps # https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies # https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies RUN apt-get update && apt-get install -y --no-install-recommends \ btrfs-tools \ curl \ gcc \ git \ golang \ libdevmapper-dev \ libsqlite3-dev \ \ ca-certificates \ e2fsprogs \ iptables \ procps \ xz-utils \ \ aufs-tools \ lxc \ && rm -rf /var/lib/apt/lists/* ENV AUTO_GOPATH 1 WORKDIR /usr/src/docker COPY . 
/usr/src/docker docker-1.6.2/builtins/0000755000175000017500000000000012524223634014165 5ustar tianontianondocker-1.6.2/builtins/builtins.go0000644000175000017500000000406712524223634016354 0ustar tianontianonpackage builtins import ( "runtime" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" "github.com/docker/docker/autogen/dockerversion" "github.com/docker/docker/daemon/networkdriver/bridge" "github.com/docker/docker/engine" "github.com/docker/docker/events" "github.com/docker/docker/pkg/parsers/kernel" ) func Register(eng *engine.Engine) error { if err := daemon(eng); err != nil { return err } if err := remote(eng); err != nil { return err } if err := events.New().Install(eng); err != nil { return err } if err := eng.Register("version", dockerVersion); err != nil { return err } return nil } // remote: a RESTful api for cross-docker communication func remote(eng *engine.Engine) error { if err := eng.Register("serveapi", apiserver.ServeApi); err != nil { return err } return eng.Register("acceptconnections", apiserver.AcceptConnections) } // daemon: a default execution and storage backend for Docker on Linux, // with the following underlying components: // // * Pluggable storage drivers including aufs, vfs, lvm and btrfs. // * Pluggable execution drivers including lxc and chroot. // // In practice `daemon` still includes most core Docker components, including: // // * The reference registry client implementation // * Image management // * The build facility // * Logging // // These components should be broken off into plugins of their own. // func daemon(eng *engine.Engine) error { return eng.Register("init_networkdriver", bridge.InitDriver) } // builtins jobs independent of any subsystem func dockerVersion(job *engine.Job) engine.Status { v := &engine.Env{} v.SetJson("Version", dockerversion.VERSION) v.SetJson("ApiVersion", api.APIVERSION) v.SetJson("GitCommit", dockerversion.GITCOMMIT) v.Set("GoVersion", runtime.Version()) v.Set("Os", runtime.GOOS) v.Set("Arch", runtime.GOARCH) if kernelVersion, err := kernel.GetKernelVersion(); err == nil { v.Set("KernelVersion", kernelVersion.String()) } if _, err := v.WriteTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK } docker-1.6.2/nat/0000755000175000017500000000000012524223634013116 5ustar tianontianondocker-1.6.2/nat/sort_test.go0000644000175000017500000000131412524223634015472 0ustar tianontianonpackage nat import ( "fmt" "testing" ) func TestSortUniquePorts(t *testing.T) { ports := []Port{ Port("6379/tcp"), Port("22/tcp"), } Sort(ports, func(ip, jp Port) bool { return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") }) first := ports[0] if fmt.Sprint(first) != "22/tcp" { t.Log(fmt.Sprint(first)) t.Fail() } } func TestSortSamePortWithDifferentProto(t *testing.T) { ports := []Port{ Port("8888/tcp"), Port("8888/udp"), Port("6379/tcp"), Port("6379/udp"), } Sort(ports, func(ip, jp Port) bool { return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") }) first := ports[0] if fmt.Sprint(first) != "6379/tcp" { t.Fail() } } docker-1.6.2/nat/nat.go0000644000175000017500000000766012524223634014240 0ustar tianontianonpackage nat // nat is a convenience package for docker's manipulation of strings describing // network ports. 
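// For example:
//
//	NewPort("tcp", "80")   // Port("80/tcp")
//	Port("80/tcp").Proto() // "tcp"
//	Port("80/tcp").Port()  // "80"
//	Port("80/tcp").Int()   // 80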
import ( "fmt" "net" "strconv" "strings" "github.com/docker/docker/pkg/parsers" ) const ( PortSpecTemplate = "ip:hostPort:containerPort" PortSpecTemplateFormat = "ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort" ) type PortBinding struct { HostIp string HostPort string } type PortMap map[Port][]PortBinding type PortSet map[Port]struct{} // 80/tcp type Port string func NewPort(proto, port string) Port { return Port(fmt.Sprintf("%s/%s", port, proto)) } func ParsePort(rawPort string) (int, error) { port, err := strconv.ParseUint(rawPort, 10, 16) if err != nil { return 0, err } return int(port), nil } func (p Port) Proto() string { proto, _ := SplitProtoPort(string(p)) return proto } func (p Port) Port() string { _, port := SplitProtoPort(string(p)) return port } func (p Port) Int() int { port, err := ParsePort(p.Port()) if err != nil { panic(err) } return port } // Splits a port in the format of proto/port func SplitProtoPort(rawPort string) (string, string) { parts := strings.Split(rawPort, "/") l := len(parts) if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { return "", "" } if l == 1 { return "tcp", rawPort } if len(parts[1]) == 0 { return "tcp", parts[0] } return parts[1], parts[0] } func validateProto(proto string) bool { for _, availableProto := range []string{"tcp", "udp"} { if availableProto == proto { return true } } return false } // We will receive port specs in the format of ip:public:private/proto and these need to be // parsed in the internal types func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { var ( exposedPorts = make(map[Port]struct{}, len(ports)) bindings = make(map[Port][]PortBinding) ) for _, rawPort := range ports { proto := "tcp" if i := strings.LastIndex(rawPort, "/"); i != -1 { proto = rawPort[i+1:] rawPort = rawPort[:i] } if !strings.Contains(rawPort, ":") { rawPort = fmt.Sprintf("::%s", rawPort) } else if len(strings.Split(rawPort, ":")) == 2 { rawPort = fmt.Sprintf(":%s", rawPort) } parts, err := parsers.PartParser(PortSpecTemplate, rawPort) if err != nil { return nil, nil, err } var ( containerPort = parts["containerPort"] rawIp = parts["ip"] hostPort = parts["hostPort"] ) if rawIp != "" && net.ParseIP(rawIp) == nil { return nil, nil, fmt.Errorf("Invalid ip address: %s", rawIp) } if containerPort == "" { return nil, nil, fmt.Errorf("No port specified: %s", rawPort) } startPort, endPort, err := parsers.ParsePortRange(containerPort) if err != nil { return nil, nil, fmt.Errorf("Invalid containerPort: %s", containerPort) } var startHostPort, endHostPort uint64 = 0, 0 if len(hostPort) > 0 { startHostPort, endHostPort, err = parsers.ParsePortRange(hostPort) if err != nil { return nil, nil, fmt.Errorf("Invalid hostPort: %s", hostPort) } } if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { return nil, nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) } if !validateProto(strings.ToLower(proto)) { return nil, nil, fmt.Errorf("Invalid proto: %s", proto) } for i := uint64(0); i <= (endPort - startPort); i++ { containerPort = strconv.FormatUint(startPort+i, 10) if len(hostPort) > 0 { hostPort = strconv.FormatUint(startHostPort+i, 10) } port := NewPort(strings.ToLower(proto), containerPort) if _, exists := exposedPorts[port]; !exists { exposedPorts[port] = struct{}{} } binding := PortBinding{ HostIp: rawIp, HostPort: hostPort, } bslice, exists := bindings[port] if !exists { bslice = []PortBinding{} } 
bindings[port] = append(bslice, binding) } } return exposedPorts, bindings, nil } docker-1.6.2/nat/nat_test.go0000644000175000017500000001520712524223634015273 0ustar tianontianonpackage nat import ( "testing" ) func TestParsePort(t *testing.T) { var ( p int err error ) p, err = ParsePort("1234") if err != nil || p != 1234 { t.Fatal("Parsing '1234' did not succeed") } // FIXME currently this is a valid port. I don't think it should be. // I'm leaving this test commented out until we make a decision. // - erikh /* p, err = ParsePort("0123") if err != nil { t.Fatal("Successfully parsed port '0123' to '123'") } */ p, err = ParsePort("asdf") if err == nil || p != 0 { t.Fatal("Parsing port 'asdf' succeeded") } p, err = ParsePort("1asdf") if err == nil || p != 0 { t.Fatal("Parsing port '1asdf' succeeded") } } func TestPort(t *testing.T) { p := NewPort("tcp", "1234") if string(p) != "1234/tcp" { t.Fatal("tcp, 1234 did not result in the string 1234/tcp") } if p.Proto() != "tcp" { t.Fatal("protocol was not tcp") } if p.Port() != "1234" { t.Fatal("port string value was not 1234") } if p.Int() != 1234 { t.Fatal("port int value was not 1234") } } func TestSplitProtoPort(t *testing.T) { var ( proto string port string ) proto, port = SplitProtoPort("1234/tcp") if proto != "tcp" || port != "1234" { t.Fatal("Could not split 1234/tcp properly") } proto, port = SplitProtoPort("") if proto != "" || port != "" { t.Fatal("parsing an empty string yielded surprising results", proto, port) } proto, port = SplitProtoPort("1234") if proto != "tcp" || port != "1234" { t.Fatal("tcp is not the default protocol for portspec '1234'", proto, port) } proto, port = SplitProtoPort("1234/") if proto != "tcp" || port != "1234" { t.Fatal("parsing '1234/' yielded:" + port + "/" + proto) } proto, port = SplitProtoPort("/tcp") if proto != "" || port != "" { t.Fatal("parsing '/tcp' yielded:" + port + "/" + proto) } } func TestParsePortSpecs(t *testing.T) { var ( portMap map[Port]struct{} bindingMap map[Port][]PortBinding err error ) portMap, bindingMap, err = ParsePortSpecs([]string{"1234/tcp", "2345/udp"}) if err != nil { t.Fatalf("Error while processing ParsePortSpecs: %s", err) } if _, ok := portMap[Port("1234/tcp")]; !ok { t.Fatal("1234/tcp was not parsed properly") } if _, ok := portMap[Port("2345/udp")]; !ok { t.Fatal("2345/udp was not parsed properly") } for portspec, bindings := range bindingMap { if len(bindings) != 1 { t.Fatalf("%s should have exactly one binding", portspec) } if bindings[0].HostIp != "" { t.Fatalf("HostIp should not be set for %s", portspec) } if bindings[0].HostPort != "" { t.Fatalf("HostPort should not be set for %s", portspec) } } portMap, bindingMap, err = ParsePortSpecs([]string{"1234:1234/tcp", "2345:2345/udp"}) if err != nil { t.Fatalf("Error while processing ParsePortSpecs: %s", err) } if _, ok := portMap[Port("1234/tcp")]; !ok { t.Fatal("1234/tcp was not parsed properly") } if _, ok := portMap[Port("2345/udp")]; !ok { t.Fatal("2345/udp was not parsed properly") } for portspec, bindings := range bindingMap { _, port := SplitProtoPort(string(portspec)) if len(bindings) != 1 { t.Fatalf("%s should have exactly one binding", portspec) } if bindings[0].HostIp != "" { t.Fatalf("HostIp should not be set for %s", portspec) } if bindings[0].HostPort != port { t.Fatalf("HostPort should be %s for %s", port, portspec) } } portMap, bindingMap, err = ParsePortSpecs([]string{"0.0.0.0:1234:1234/tcp", "0.0.0.0:2345:2345/udp"}) if err != nil { t.Fatalf("Error while processing ParsePortSpecs: %s", err) } if _, ok 
:= portMap[Port("1234/tcp")]; !ok { t.Fatal("1234/tcp was not parsed properly") } if _, ok := portMap[Port("2345/udp")]; !ok { t.Fatal("2345/udp was not parsed properly") } for portspec, bindings := range bindingMap { _, port := SplitProtoPort(string(portspec)) if len(bindings) != 1 { t.Fatalf("%s should have exactly one binding", portspec) } if bindings[0].HostIp != "0.0.0.0" { t.Fatalf("HostIp is not 0.0.0.0 for %s", portspec) } if bindings[0].HostPort != port { t.Fatalf("HostPort should be %s for %s", port, portspec) } } _, _, err = ParsePortSpecs([]string{"localhost:1234:1234/tcp"}) if err == nil { t.Fatal("Received no error while trying to parse a hostname instead of ip") } } func TestParsePortSpecsWithRange(t *testing.T) { var ( portMap map[Port]struct{} bindingMap map[Port][]PortBinding err error ) portMap, bindingMap, err = ParsePortSpecs([]string{"1234-1236/tcp", "2345-2347/udp"}) if err != nil { t.Fatalf("Error while processing ParsePortSpecs: %s", err) } if _, ok := portMap[Port("1235/tcp")]; !ok { t.Fatal("1234/tcp was not parsed properly") } if _, ok := portMap[Port("2346/udp")]; !ok { t.Fatal("2345/udp was not parsed properly") } for portspec, bindings := range bindingMap { if len(bindings) != 1 { t.Fatalf("%s should have exactly one binding", portspec) } if bindings[0].HostIp != "" { t.Fatalf("HostIp should not be set for %s", portspec) } if bindings[0].HostPort != "" { t.Fatalf("HostPort should not be set for %s", portspec) } } portMap, bindingMap, err = ParsePortSpecs([]string{"1234-1236:1234-1236/tcp", "2345-2347:2345-2347/udp"}) if err != nil { t.Fatalf("Error while processing ParsePortSpecs: %s", err) } if _, ok := portMap[Port("1235/tcp")]; !ok { t.Fatal("1234/tcp was not parsed properly") } if _, ok := portMap[Port("2346/udp")]; !ok { t.Fatal("2345/udp was not parsed properly") } for portspec, bindings := range bindingMap { _, port := SplitProtoPort(string(portspec)) if len(bindings) != 1 { t.Fatalf("%s should have exactly one binding", portspec) } if bindings[0].HostIp != "" { t.Fatalf("HostIp should not be set for %s", portspec) } if bindings[0].HostPort != port { t.Fatalf("HostPort should be %s for %s", port, portspec) } } portMap, bindingMap, err = ParsePortSpecs([]string{"0.0.0.0:1234-1236:1234-1236/tcp", "0.0.0.0:2345-2347:2345-2347/udp"}) if err != nil { t.Fatalf("Error while processing ParsePortSpecs: %s", err) } if _, ok := portMap[Port("1235/tcp")]; !ok { t.Fatal("1234/tcp was not parsed properly") } if _, ok := portMap[Port("2346/udp")]; !ok { t.Fatal("2345/udp was not parsed properly") } for portspec, bindings := range bindingMap { _, port := SplitProtoPort(string(portspec)) if len(bindings) != 1 || bindings[0].HostIp != "0.0.0.0" || bindings[0].HostPort != port { t.Fatalf("Expect single binding to port %s but found %s", port, bindings) } } _, _, err = ParsePortSpecs([]string{"localhost:1234-1236:1234-1236/tcp"}) if err == nil { t.Fatal("Received no error while trying to parse a hostname instead of ip") } } docker-1.6.2/nat/sort.go0000644000175000017500000000071312524223634014435 0ustar tianontianonpackage nat import "sort" type portSorter struct { ports []Port by func(i, j Port) bool } func (s *portSorter) Len() int { return len(s.ports) } func (s *portSorter) Swap(i, j int) { s.ports[i], s.ports[j] = s.ports[j], s.ports[i] } func (s *portSorter) Less(i, j int) bool { ip := s.ports[i] jp := s.ports[j] return s.by(ip, jp) } func Sort(ports []Port, predicate func(i, j Port) bool) { s := &portSorter{ports, predicate} sort.Sort(s) } 
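sort.go above adapts Go's sort.Interface around a caller-supplied predicate, so callers choose the ordering policy without defining a new sorter type. A minimal, self-contained sketch of the same call (the main package wrapper is illustrative; Sort, Port, Int, and Proto are the identifiers from this package):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/nat"
)

func main() {
	ports := []nat.Port{"8888/udp", "8888/tcp", "22/tcp"}
	// Ascending by numeric port; on ties, tcp sorts before udp —
	// the same predicate exercised in sort_test.go.
	nat.Sort(ports, func(ip, jp nat.Port) bool {
		return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp")
	})
	fmt.Println(ports) // [22/tcp 8888/tcp 8888/udp]
}
```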
docker-1.6.2/NOTICE0000644000175000017500000000117612524223634013245 0ustar tianontianonDocker Copyright 2012-2015 Docker, Inc. This product includes software developed at Docker, Inc. (http://www.docker.com). This product contains software (https://github.com/kr/pty) developed by Keith Rarick, licensed under the MIT License. The following is courtesy of our legal counsel: Use and transfer of Docker may be subject to certain restrictions by the United States and other governments. It is your responsibility to ensure that your use and/or transfer does not violate applicable laws. For more information, please see http://www.bis.doc.gov See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. docker-1.6.2/contrib/0000755000175000017500000000000012524223634013774 5ustar tianontianondocker-1.6.2/contrib/REVIEWERS0000644000175000017500000000005512524223634015272 0ustar tianontianonTianon Gravi (@tianon) docker-1.6.2/contrib/mkseccomp.pl0000755000175000017500000000422712524223634016322 0ustar tianontianon#!/usr/bin/perl # # A simple helper script to help people build seccomp profiles for # Docker/LXC. The goal is mostly to reduce the attack surface to the # kernel, by restricting access to rarely used, recently added or not used # syscalls. # # This script processes one or more files which contain the list of system # calls to be allowed. See mkseccomp.sample for more information how you # can configure the list of syscalls. When run, this script produces output # which, when stored in a file, can be passed to docker as follows: # # docker run --lxc-conf="lxc.seccomp=$file" # # The included sample file shows how to cut about a quarter of all syscalls, # which affecting most applications. # # For specific situations it is possible to reduce the list further. By # reducing the list to just those syscalls required by a certain application # you can make it difficult for unknown/unexpected code to run. # # Run this script as follows: # # ./mkseccomp.pl < mkseccomp.sample >syscalls.list # or # ./mkseccomp.pl mkseccomp.sample >syscalls.list # # Multiple files can be specified, in which case the lists of syscalls are # combined. # # By Martijn van Oosterhout Nov 2013 # How it works: # # This program basically spawns two processes to form a chain like: # # | cpp | use strict; use warnings; if( -t ) { print STDERR "Helper script to make seccomp filters for Docker/LXC.\n"; print STDERR "Usage: mkseccomp.pl < [files...]\n"; exit 1; } my $pid = open(my $in, "-|") // die "Couldn't fork1 ($!)\n"; if($pid == 0) { # Child $pid = open(my $out, "|-") // die "Couldn't fork2 ($!)\n"; if($pid == 0) { # Child, which execs cpp exec "cpp" or die "Couldn't exec cpp ($!)\n"; exit 1; } # Process the DATA section and output to cpp print $out "#include \n"; while(<>) { if(/^\w/) { print $out "__NR_$_"; } } close $out; exit 0; } # Print header and then process output from cpp. print "1\n"; print "whitelist\n"; while(<$in>) { print if( /^[0-9]/ ); } docker-1.6.2/contrib/mkseccomp.sample0000644000175000017500000001631112524223634017162 0ustar tianontianon/* This sample file is an example for mkseccomp.pl to produce a seccomp file * which restricts syscalls that are only useful for an admin but allows the * vast majority of normal userspace programs to run normally. * * The format of this file is one line per syscall. This is then processed * and passed to 'cpp' to convert the names to numbers using whatever is * correct for your platform. As such C-style comments are permitted. 
Note * this also means that C preprocessor macros are also allowed. So it is * possible to create groups surrounded by #ifdef/#endif and control their * inclusion via #define (not #include). * * Syscalls that don't exist on your architecture are silently filtered out. * Syscalls marked with (*) are required for a container to spawn a bash * shell successfully (not necessarily full featured). Listing the same * syscall multiple times is no problem. * * If you want to make a list specifically for one application the easiest * way is to run the application under strace, like so: * * $ strace -f -q -c -o strace.out application args... * * Once you have a reasonable sample of the execution of the program, exit * it. The file strace.out will have a summary of the syscalls used. Copy * that list into this file, comment out everything else except the starred * syscalls (which you need for the container to start) and you're done. * * To get the list of syscalls from the strace output this works well for * me * * $ cut -c52 < strace.out * * This sample list was compiled as a combination of all the syscalls * available on i386 and amd64 on Ubuntu Precise, as such it may not contain * everything and not everything may be relevent for your system. This * shouldn't be a problem. */ // Filesystem/File descriptor related access // (*) chdir // (*) chmod chown chown32 close // (*) creat dup // (*) dup2 // (*) dup3 epoll_create epoll_create1 epoll_ctl epoll_ctl_old epoll_pwait epoll_wait epoll_wait_old eventfd eventfd2 faccessat // (*) fadvise64 fadvise64_64 fallocate fanotify_init fanotify_mark ioctl // (*) fchdir fchmod fchmodat fchown fchown32 fchownat fcntl // (*) fcntl64 fdatasync fgetxattr flistxattr flock fremovexattr fsetxattr fstat // (*) fstat64 fstatat64 fstatfs fstatfs64 fsync ftruncate ftruncate64 getcwd // (*) getdents // (*) getdents64 getxattr inotify_add_watch inotify_init inotify_init1 inotify_rm_watch io_cancel io_destroy io_getevents io_setup io_submit lchown lchown32 lgetxattr link linkat listxattr llistxattr llseek _llseek lremovexattr lseek // (*) lsetxattr lstat lstat64 mkdir mkdirat mknod mknodat newfstatat _newselect oldfstat oldlstat oldolduname oldstat olduname oldwait4 open // (*) openat // (*) pipe // (*) pipe2 poll ppoll pread64 preadv futimesat pselect6 pwrite64 pwritev read // (*) readahead readdir readlink readlinkat readv removexattr rename renameat rmdir select sendfile sendfile64 setxattr splice stat // (*) stat64 statfs // (*) statfs64 symlink symlinkat sync sync_file_range sync_file_range2 syncfs tee truncate truncate64 umask unlink unlinkat ustat utime utimensat utimes write // (*) writev // Network related accept accept4 bind // (*) connect // (*) getpeername getsockname // (*) getsockopt listen recv recvfrom // (*) recvmmsg recvmsg send sendmmsg sendmsg sendto // (*) setsockopt shutdown socket // (*) socketcall socketpair sethostname // (*) // Signal related pause rt_sigaction // (*) rt_sigpending rt_sigprocmask // (*) rt_sigqueueinfo rt_sigreturn // (*) rt_sigsuspend rt_sigtimedwait rt_tgsigqueueinfo sigaction sigaltstack // (*) signal signalfd signalfd4 sigpending sigprocmask sigreturn sigsuspend // Other needed POSIX alarm brk // (*) clock_adjtime clock_getres clock_gettime clock_nanosleep //clock_settime gettimeofday nanosleep nice sysinfo syslog time timer_create timer_delete timerfd_create timerfd_gettime timerfd_settime timer_getoverrun timer_gettime timer_settime times uname // (*) // Memory control madvise mbind mincore mlock mlockall mmap // (*) mmap2 
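/* mmap above and mprotect below carry the (*) mark because the dynamic
 * loader needs them to map and protect program segments and shared
 * libraries, so even a minimal shell cannot start without them. */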
mprotect // (*) mremap msync munlock munlockall munmap // (*) remap_file_pages set_mempolicy vmsplice // Process control capget capset // (*) clone // (*) execve // (*) exit // (*) exit_group // (*) fork getcpu getpgid getpgrp // (*) getpid // (*) getppid // (*) getpriority getresgid getresgid32 getresuid getresuid32 getrlimit // (*) getrusage getsid getuid // (*) getuid32 getegid // (*) getegid32 geteuid // (*) geteuid32 getgid // (*) getgid32 getgroups getgroups32 getitimer get_mempolicy kill //personality prctl prlimit64 sched_getaffinity sched_getparam sched_get_priority_max sched_get_priority_min sched_getscheduler sched_rr_get_interval //sched_setaffinity //sched_setparam //sched_setscheduler sched_yield setfsgid setfsgid32 setfsuid setfsuid32 setgid setgid32 setgroups setgroups32 setitimer setpgid // (*) setpriority setregid setregid32 setresgid setresgid32 setresuid setresuid32 setreuid setreuid32 setrlimit setsid setuid setuid32 ugetrlimit vfork wait4 // (*) waitid waitpid // IPC ipc mq_getsetattr mq_notify mq_open mq_timedreceive mq_timedsend mq_unlink msgctl msgget msgrcv msgsnd semctl semget semop semtimedop shmat shmctl shmdt shmget // Linux specific, mostly needed for thread-related stuff arch_prctl // (*) get_robust_list get_thread_area gettid futex // (*) restart_syscall // (*) set_robust_list // (*) set_thread_area set_tid_address // (*) tgkill tkill // Admin syscalls, these are blocked //acct //adjtimex //bdflush //chroot //create_module //delete_module //get_kernel_syms // Obsolete //idle // Obsolete //init_module //ioperm //iopl //ioprio_get //ioprio_set //kexec_load //lookup_dcookie // oprofile only? //migrate_pages // NUMA //modify_ldt //mount //move_pages // NUMA //name_to_handle_at // NFS server //nfsservctl // NFS server //open_by_handle_at // NFS server //perf_event_open //pivot_root //process_vm_readv // For debugger //process_vm_writev // For debugger //ptrace // For debugger //query_module //quotactl //reboot //setdomainname //setns //settimeofday //sgetmask // Obsolete //ssetmask // Obsolete //stime //swapoff //swapon //_sysctl //sysfs //sys_setaltroot //umount //umount2 //unshare //uselib //vhangup //vm86 //vm86old // Kernel key management //add_key //keyctl //request_key // Unimplemented //afs_syscall //break //ftime //getpmsg //gtty //lock //madvise1 //mpx //prof //profil //putpmsg //security //stty //tuxcall //ulimit //vserver docker-1.6.2/contrib/desktop-integration/0000755000175000017500000000000012524223634017766 5ustar tianontianondocker-1.6.2/contrib/desktop-integration/gparted/0000755000175000017500000000000012524223634021414 5ustar tianontianondocker-1.6.2/contrib/desktop-integration/gparted/Dockerfile0000644000175000017500000000164212524223634023411 0ustar tianontianon# VERSION: 0.1 # DESCRIPTION: Create gparted container with its dependencies # AUTHOR: Jessica Frazelle # COMMENTS: # This file describes how to build a gparted container with all # dependencies installed. It uses native X11 unix socket. # Tested on Debian Jessie # USAGE: # # Download gparted Dockerfile # wget http://raw.githubusercontent.com/docker/docker/master/contrib/desktop-integration/gparted/Dockerfile # # # Build gparted image # docker build -t gparted . 
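# (Bind-mounting /tmp/.X11-unix and passing DISPLAY lets the containerized
# GUI talk to the host X server over its unix socket; --device exposes the
# disk node so gparted can actually edit partitions.)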
# # docker run -v /tmp/.X11-unix:/tmp/.X11-unix \ # --device=/dev/sda:/dev/sda \ # -e DISPLAY=unix$DISPLAY gparted # # Base docker image FROM debian:jessie MAINTAINER Jessica Frazelle # Install Gparted and its dependencies RUN apt-get update && apt-get install -y \ gparted \ libcanberra-gtk-module \ --no-install-recommends # Autorun gparted CMD ["/usr/sbin/gparted"] docker-1.6.2/contrib/desktop-integration/chromium/0000755000175000017500000000000012524223634021611 5ustar tianontianondocker-1.6.2/contrib/desktop-integration/chromium/Dockerfile0000644000175000017500000000232412524223634023604 0ustar tianontianon# VERSION: 0.1 # DESCRIPTION: Create chromium container with its dependencies # AUTHOR: Jessica Frazelle # COMMENTS: # This file describes how to build a Chromium container with all # dependencies installed. It uses native X11 unix socket. # Tested on Debian Jessie # USAGE: # # Download Chromium Dockerfile # wget http://raw.githubusercontent.com/docker/docker/master/contrib/desktop-integration/chromium/Dockerfile # # # Build chromium image # docker build -t chromium . # # # Run stateful data-on-host chromium. For ephemeral, remove -v /data/chromium:/data # docker run -v /data/chromium:/data -v /tmp/.X11-unix:/tmp/.X11-unix \ # -e DISPLAY=unix$DISPLAY chromium # # To run stateful dockerized data containers # docker run --volumes-from chromium-data -v /tmp/.X11-unix:/tmp/.X11-unix \ # -e DISPLAY=unix$DISPLAY chromium # Base docker image FROM debian:jessie MAINTAINER Jessica Frazelle # Install Chromium RUN apt-get update && apt-get install -y \ chromium \ chromium-l10n \ libcanberra-gtk-module \ libexif-dev \ --no-install-recommends # Autorun chromium CMD ["/usr/bin/chromium", "--no-sandbox", "--user-data-dir=/data"] docker-1.6.2/contrib/desktop-integration/README.md0000644000175000017500000000050512524223634021245 0ustar tianontianonDesktop Integration =================== The ./contrib/desktop-integration contains examples of typical dockerized desktop applications. Examples ======== * Chromium: ./chromium/Dockerfile shows a way to dockerize a common application * Gparted: ./gparted/Dockerfile shows a way to dockerize a common application w devices docker-1.6.2/contrib/mkimage-arch-pacman.conf0000644000175000017500000000521612524223634020431 0ustar tianontianon# # /etc/pacman.conf # # See the pacman.conf(5) manpage for option and repository directives # # GENERAL OPTIONS # [options] # The following paths are commented out with their default values listed. # If you wish to use different paths, uncomment and update the paths. #RootDir = / #DBPath = /var/lib/pacman/ #CacheDir = /var/cache/pacman/pkg/ #LogFile = /var/log/pacman.log #GPGDir = /etc/pacman.d/gnupg/ HoldPkg = pacman glibc #XferCommand = /usr/bin/curl -C - -f %u > %o #XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u #CleanMethod = KeepInstalled #UseDelta = 0.7 Architecture = auto # Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup #IgnorePkg = #IgnoreGroup = #NoUpgrade = #NoExtract = # Misc options #UseSyslog #Color #TotalDownload # We cannot check disk space from within a chroot environment #CheckSpace #VerbosePkgLists # By default, pacman accepts packages signed by keys that its local keyring # trusts (see pacman-key and its man page), as well as unsigned packages. 
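# The settings below tighten this default: Required makes valid package
# signatures mandatory, while DatabaseOptional still tolerates unsigned
# repository databases.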
SigLevel = Required DatabaseOptional LocalFileSigLevel = Optional #RemoteFileSigLevel = Required # NOTE: You must run `pacman-key --init` before first using pacman; the local # keyring can then be populated with the keys of all official Arch Linux # packagers with `pacman-key --populate archlinux`. # # REPOSITORIES # - can be defined here or included from another file # - pacman will search repositories in the order defined here # - local/custom mirrors can be added here or in separate files # - repositories listed first will take precedence when packages # have identical names, regardless of version number # - URLs will have $repo replaced by the name of the current repo # - URLs will have $arch replaced by the name of the architecture # # Repository entries are of the format: # [repo-name] # Server = ServerName # Include = IncludePath # # The header [repo-name] is crucial - it must be present and # uncommented to enable the repo. # # The testing repositories are disabled by default. To enable, uncomment the # repo name header and Include lines. You can add preferred servers immediately # after the header, and they will be used before the default mirrors. #[testing] #Include = /etc/pacman.d/mirrorlist [core] Include = /etc/pacman.d/mirrorlist [extra] Include = /etc/pacman.d/mirrorlist #[community-testing] #Include = /etc/pacman.d/mirrorlist [community] Include = /etc/pacman.d/mirrorlist # An example of a custom package repository. See the pacman manpage for # tips on creating your own repositories. #[custom] #SigLevel = Optional TrustAll #Server = file:///home/custompkgs docker-1.6.2/contrib/vagrant-docker/0000755000175000017500000000000012524223634016703 5ustar tianontianondocker-1.6.2/contrib/vagrant-docker/README.md0000644000175000017500000000402012524223634020156 0ustar tianontianon# Vagrant integration Currently there are at least 4 different projects that we are aware of that deals with integration with [Vagrant](http://vagrantup.com/) at different levels. One approach is to use Docker as a [provisioner](http://docs.vagrantup.com/v2/provisioning/index.html) which means you can create containers and pull base images on VMs using Docker's CLI and the other is to use Docker as a [provider](http://docs.vagrantup.com/v2/providers/index.html), meaning you can use Vagrant to control Docker containers. ### Provisioners * [Vocker](https://github.com/fgrehm/vocker) * [Ventriloquist](https://github.com/fgrehm/ventriloquist) ### Providers * [docker-provider](https://github.com/fgrehm/docker-provider) * [vagrant-shell](https://github.com/destructuring/vagrant-shell) ## Setting up Vagrant-docker with the Remote API The initial Docker upstart script will not work because it runs on `127.0.0.1`, which is not accessible to the host machine. Instead, we need to change the script to connect to `0.0.0.0`. To do this, modify `/etc/init/docker.conf` to look like this: ``` description "Docker daemon" start on filesystem and started lxc-net stop on runlevel [!2345] respawn script /usr/bin/docker -d -H=tcp://0.0.0.0:2375 end script ``` Once that's done, you need to set up a SSH tunnel between your host machine and the vagrant machine that's running Docker. This can be done by running the following command in a host terminal: ``` ssh -L 2375:localhost:2375 -p 2222 vagrant@localhost ``` (The first 2375 is what your host can connect to, the second 2375 is what port Docker is running on in the vagrant machine, and the 2222 is the port Vagrant is providing for SSH. 
If VirtualBox is the VM you're using, you can see what value "2222" should be by going to: Network > Adapter 1 > Advanced > Port Forwarding in the VirtualBox GUI.) Note that because the port has been changed, to run docker commands from within the command line you must run them like this: ``` sudo docker -H 0.0.0.0:2375 < commands for docker > ``` docker-1.6.2/contrib/project-stats.sh0000755000175000017500000000073412524223634017141 0ustar tianontianon#!/usr/bin/env bash ## Run this script from the root of the docker repository ## to query project stats useful to the maintainers. ## You will need to install `pulls` and `issues` from ## http://github.com/crosbymichael/pulls set -e echo -n "Open pulls: " PULLS=$(pulls | wc -l); let PULLS=$PULLS-1 echo $PULLS echo -n "Pulls alru: " pulls alru echo -n "Open issues: " ISSUES=$(issues list | wc -l); let ISSUES=$ISSUES-1 echo $ISSUES echo -n "Issues alru: " issues alru docker-1.6.2/contrib/mkimage-crux.sh0000755000175000017500000000356712524223634016737 0ustar tianontianon#!/usr/bin/env bash # Generate a minimal filesystem for CRUX/Linux and load it into the local # docker as "cruxlinux" # requires root and the crux iso (http://crux.nu) set -e die () { echo >&2 "$@" exit 1 } [ "$#" -eq 1 ] || die "1 argument(s) required, $# provided. Usage: ./mkimage-crux.sh /path/to/iso" ISO=${1} ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-crux-XXXXXXXXXX) CRUX=$(mktemp -d ${TMPDIR:-/var/tmp}/crux-XXXXXXXXXX) TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/XXXXXXXXXX) VERSION=$(basename --suffix=.iso $ISO | sed 's/[^0-9.]*\([0-9.]*\).*/\1/') # Mount the ISO mount -o ro,loop $ISO $CRUX # Extract pkgutils tar -C $TMP -xf $CRUX/tools/pkgutils#*.pkg.tar.gz # Put pkgadd in the $PATH export PATH="$TMP/usr/bin:$PATH" # Install core packages mkdir -p $ROOTFS/var/lib/pkg touch $ROOTFS/var/lib/pkg/db for pkg in $CRUX/crux/core/*; do pkgadd -r $ROOTFS $pkg done # Remove agetty and inittab config if (grep agetty ${ROOTFS}/etc/inittab 2>&1 > /dev/null); then echo "Removing agetty from /etc/inittab ..." chroot ${ROOTFS} sed -i -e "/agetty/d" /etc/inittab chroot ${ROOTFS} sed -i -e "/shutdown/d" /etc/inittab chroot ${ROOTFS} sed -i -e "/^$/N;/^\n$/d" /etc/inittab fi # Remove kernel source rm -rf $ROOTFS/usr/src/* # udev doesn't work in containers, rebuild /dev DEV=$ROOTFS/dev rm -rf $DEV mkdir -p $DEV mknod -m 666 $DEV/null c 1 3 mknod -m 666 $DEV/zero c 1 5 mknod -m 666 $DEV/random c 1 8 mknod -m 666 $DEV/urandom c 1 9 mkdir -m 755 $DEV/pts mkdir -m 1777 $DEV/shm mknod -m 666 $DEV/tty c 5 0 mknod -m 600 $DEV/console c 5 1 mknod -m 666 $DEV/tty0 c 4 0 mknod -m 666 $DEV/full c 1 7 mknod -m 600 $DEV/initctl p mknod -m 666 $DEV/ptmx c 5 2 IMAGE_ID=$(tar --numeric-owner -C $ROOTFS -c . | docker import - crux:$VERSION) docker tag $IMAGE_ID crux:latest docker run -i -t crux echo Success. # Cleanup umount $CRUX rm -rf $ROOTFS rm -rf $CRUX rm -rf $TMP docker-1.6.2/contrib/nuke-graph-directory.sh0000755000175000017500000000274012524223634020401 0ustar tianontianon#!/bin/sh set -e dir="$1" if [ -z "$dir" ]; then { echo 'This script is for destroying old /var/lib/docker directories more safely than' echo ' "rm -rf", which can cause data loss or other serious issues.' echo echo "usage: $0 directory" echo " ie: $0 /var/lib/docker" } >&2 exit 1 fi if [ "$(id -u)" != 0 ]; then echo >&2 "error: $0 must be run as root" exit 1 fi if [ ! -d "$dir" ]; then echo >&2 "error: $dir is not a directory" exit 1 fi dir="$(readlink -f "$dir")" echo echo "Nuking $dir ..." 
echo ' (if this is wrong, press Ctrl+C NOW!)' echo ( set -x; sleep 10 ) echo dir_in_dir() { inner="$1" outer="$2" [ "${inner#$outer}" != "$inner" ] } # let's start by unmounting any submounts in $dir # (like -v /home:... for example - DON'T DELETE MY HOME DIRECTORY BRU!) for mount in $(awk '{ print $5 }' /proc/self/mountinfo); do mount="$(readlink -f "$mount" || true)" if dir_in_dir "$mount" "$dir"; then ( set -x; umount -f "$mount" ) fi done # now, let's go destroy individual btrfs subvolumes, if any exist if command -v btrfs > /dev/null 2>&1; then root="$(df "$dir" | awk 'NR>1 { print $NF }')" root="${root#/}" # if root is "/", we want it to become "" for subvol in $(btrfs subvolume list -o "$root/" 2>/dev/null | awk -F' path ' '{ print $2 }' | sort -r); do subvolDir="$root/$subvol" if dir_in_dir "$subvolDir" "$dir"; then ( set -x; btrfs subvolume delete "$subvolDir" ) fi done fi # finally, DESTROY ALL THINGS ( set -x; rm -rf "$dir" ) docker-1.6.2/contrib/completion/0000755000175000017500000000000012524223634016145 5ustar tianontianondocker-1.6.2/contrib/completion/bash/0000755000175000017500000000000012524223634017062 5ustar tianontianondocker-1.6.2/contrib/completion/bash/docker0000755000175000017500000005200512524223634020261 0ustar tianontianon#!/bin/bash # # bash completion file for core docker commands # # This script provides completion of: # - commands and their options # - container ids and names # - image repos and tags # - filepaths # # To enable the completions either: # - place this file in /etc/bash_completion.d # or # - copy this file to e.g. ~/.docker-completion.sh and add the line # below to your .bashrc after bash completion features are loaded # . ~/.docker-completion.sh # # Note: # Currently, the completions will not work if the docker daemon is not # bound to the default communication port/socket # If the docker daemon is using a unix socket for communication your user # must have access to the socket for the completions to function correctly # # Note for developers: # Please arrange options sorted alphabetically by long name with the short # options immediately following their corresponding long form. # This order should be applied to lists, alternatives and code blocks. 
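
# __docker_q runs docker with stderr silenced, so daemon connection
# errors never leak into a user's completion results.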
__docker_q() { docker 2>/dev/null "$@" } __docker_containers_all() { local IFS=$'\n' local containers=( $(__docker_q ps -aq --no-trunc) ) if [ "$1" ]; then containers=( $(__docker_q inspect --format "{{if $1}}{{.Id}}{{end}}" "${containers[@]}") ) fi local names=( $(__docker_q inspect --format '{{.Name}}' "${containers[@]}") ) names=( "${names[@]#/}" ) # trim off the leading "/" from the container names unset IFS COMPREPLY=( $(compgen -W "${names[*]} ${containers[*]}" -- "$cur") ) } __docker_containers_running() { __docker_containers_all '.State.Running' } __docker_containers_stopped() { __docker_containers_all 'not .State.Running' } __docker_containers_pauseable() { __docker_containers_all 'and .State.Running (not .State.Paused)' } __docker_containers_unpauseable() { __docker_containers_all '.State.Paused' } __docker_container_names() { local containers=( $(__docker_q ps -aq --no-trunc) ) local names=( $(__docker_q inspect --format '{{.Name}}' "${containers[@]}") ) names=( "${names[@]#/}" ) # trim off the leading "/" from the container names COMPREPLY=( $(compgen -W "${names[*]}" -- "$cur") ) } __docker_container_ids() { local containers=( $(__docker_q ps -aq) ) COMPREPLY=( $(compgen -W "${containers[*]}" -- "$cur") ) } __docker_image_repos() { local repos="$(__docker_q images | awk 'NR>1 && $1 != "" { print $1 }')" COMPREPLY=( $(compgen -W "$repos" -- "$cur") ) } __docker_image_repos_and_tags() { local reposAndTags="$(__docker_q images | awk 'NR>1 && $1 != "" { print $1; print $1":"$2 }')" COMPREPLY=( $(compgen -W "$reposAndTags" -- "$cur") ) __ltrim_colon_completions "$cur" } __docker_image_repos_and_tags_and_ids() { local images="$(__docker_q images -a --no-trunc | awk 'NR>1 { print $3; if ($1 != "") { print $1; print $1":"$2 } }')" COMPREPLY=( $(compgen -W "$images" -- "$cur") ) __ltrim_colon_completions "$cur" } __docker_containers_and_images() { __docker_containers_all local containers=( "${COMPREPLY[@]}" ) __docker_image_repos_and_tags_and_ids COMPREPLY+=( "${containers[@]}" ) } __docker_pos_first_nonflag() { local argument_flags=$1 local counter=$cpos while [ $counter -le $cword ]; do if [ -n "$argument_flags" ] && eval "case '${words[$counter]}' in $argument_flags) true ;; *) false ;; esac"; then (( counter++ )) else case "${words[$counter]}" in -*) ;; *) break ;; esac fi (( counter++ )) done echo $counter } # Transforms a multiline list of strings into a single line string # with the words separated by "|". # This is used to prepare arguments to __docker_pos_first_nonflag(). __docker_to_alternatives() { local parts=( $1 ) local IFS='|' echo "${parts[*]}" } # Transforms a multiline list of options into an extglob pattern # suitable for use in case statements. __docker_to_extglob() { local extglob=$( __docker_to_alternatives "$1" ) echo "@($extglob)" } __docker_resolve_hostname() { command -v host >/dev/null 2>&1 || return COMPREPLY=( $(host 2>/dev/null "${cur%:}" | awk '/has address/ {print $4}') ) } __docker_capabilities() { # The list of capabilities is defined in types.go, ALL was added manually. 
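	# Names are completed without the CAP_ prefix, the form the docker
	# documentation uses for --cap-add/--cap-drop.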
COMPREPLY=( $( compgen -W " ALL AUDIT_CONTROL AUDIT_WRITE AUDIT_READ BLOCK_SUSPEND CHOWN DAC_OVERRIDE DAC_READ_SEARCH FOWNER FSETID IPC_LOCK IPC_OWNER KILL LEASE LINUX_IMMUTABLE MAC_ADMIN MAC_OVERRIDE MKNOD NET_ADMIN NET_BIND_SERVICE NET_BROADCAST NET_RAW SETFCAP SETGID SETPCAP SETUID SYS_ADMIN SYS_BOOT SYS_CHROOT SYSLOG SYS_MODULE SYS_NICE SYS_PACCT SYS_PTRACE SYS_RAWIO SYS_RESOURCE SYS_TIME SYS_TTY_CONFIG WAKE_ALARM " -- "$cur" ) ) } # a selection of the available signals that is most likely of interest in the # context of docker containers. __docker_signals() { local signals=( SIGCONT SIGHUP SIGINT SIGKILL SIGQUIT SIGSTOP SIGTERM SIGUSR1 SIGUSR2 ) COMPREPLY=( $( compgen -W "${signals[*]} ${signals[*]#SIG}" -- "$( echo $cur | tr '[:lower:]' '[:upper:]')" ) ) } _docker_docker() { local boolean_options=" --daemon -d --debug -D --help -h --icc --ip-forward --ip-masq --iptables --ipv6 --selinux-enabled --tls --tlsverify --version -v " case "$prev" in --graph|-g) _filedir -d return ;; --log-level|-l) COMPREPLY=( $( compgen -W "debug info warn error fatal" -- "$cur" ) ) return ;; --pidfile|-p|--tlscacert|--tlscert|--tlskey) _filedir return ;; --storage-driver|-s) COMPREPLY=( $( compgen -W "aufs devicemapper btrfs overlay" -- "$(echo $cur | tr '[:upper:]' '[:lower:]')" ) ) return ;; $main_options_with_args_glob ) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "$boolean_options $main_options_with_args" -- "$cur" ) ) ;; *) COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) ) ;; esac } _docker_attach() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help --no-stdin --sig-proxy" -- "$cur" ) ) ;; *) local counter="$(__docker_pos_first_nonflag)" if [ $cword -eq $counter ]; then __docker_containers_running fi ;; esac } _docker_build() { case "$prev" in --tag|-t) __docker_image_repos_and_tags return ;; --file|-f) _filedir return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--file -f --force-rm --help --no-cache --pull --quiet -q --rm --tag -t" -- "$cur" ) ) ;; *) local counter="$(__docker_pos_first_nonflag '--tag|-t')" if [ $cword -eq $counter ]; then _filedir -d fi ;; esac } _docker_commit() { case "$prev" in --author|-a|--change|-c|--message|-m) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--author -a --change -c --help --message -m --pause -p" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag '--author|-a|--change|-c|--message|-m') if [ $cword -eq $counter ]; then __docker_containers_all return fi (( counter++ )) if [ $cword -eq $counter ]; then __docker_image_repos_and_tags return fi ;; esac } _docker_cp() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then case "$cur" in *:) return ;; *) __docker_containers_all COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) compopt -o nospace return ;; esac fi (( counter++ )) if [ $cword -eq $counter ]; then _filedir -d return fi ;; esac } _docker_create() { _docker_run } _docker_diff() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_containers_all fi ;; esac } _docker_events() { case "$prev" in --filter|-f) COMPREPLY=( $( compgen -S = -W "container event image" -- "$cur" ) ) compopt -o nospace return ;; --since|--until) return ;; esac # "=" gets parsed to a word and assigned to either $cur or $prev depending on whether # it is the last character or not. 
So we search for "xxx=" in the the last two words. case "${words[$cword-2]}$prev=" in *container=*) cur="${cur#=}" __docker_containers_all return ;; *event=*) COMPREPLY=( $( compgen -W "create destroy die export kill pause restart start stop unpause" -- "${cur#=}" ) ) return ;; *image=*) cur="${cur#=}" __docker_image_repos_and_tags_and_ids return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--filter -f --help --since --until" -- "$cur" ) ) ;; esac } _docker_exec() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--detach -d --help --interactive -i -t --tty" -- "$cur" ) ) ;; *) __docker_containers_running ;; esac } _docker_export() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_containers_all fi ;; esac } _docker_help() { local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) ) fi } _docker_history() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help --no-trunc --quiet -q" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_image_repos_and_tags_and_ids fi ;; esac } _docker_images() { case "$prev" in --filter|-f) COMPREPLY=( $( compgen -W "dangling=true label=" -- "$cur" ) ) if [ "$COMPREPLY" = "label=" ]; then compopt -o nospace fi return ;; esac case "${words[$cword-2]}$prev=" in *dangling=*) COMPREPLY=( $( compgen -W "true false" -- "${cur#=}" ) ) return ;; *label=*) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--all -a --filter -f --help --no-trunc --quiet -q" -- "$cur" ) ) ;; =) return ;; *) __docker_image_repos ;; esac } _docker_import() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then return fi (( counter++ )) if [ $cword -eq $counter ]; then __docker_image_repos_and_tags return fi ;; esac } _docker_info() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; esac } _docker_inspect() { case "$prev" in --format|-f) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) ;; *) __docker_containers_and_images ;; esac } _docker_kill() { case "$prev" in --signal|-s) __docker_signals return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--help --signal -s" -- "$cur" ) ) ;; *) __docker_containers_running ;; esac } _docker_load() { case "$prev" in --input|-i) _filedir return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--help --input -i" -- "$cur" ) ) ;; esac } _docker_login() { case "$prev" in --email|-e|--password|-p|--username|-u) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--email -e --help --password -p --username -u" -- "$cur" ) ) ;; esac } _docker_logout() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; esac } _docker_logs() { case "$prev" in --tail) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--follow -f --help --tail --timestamps -t" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag '--tail') if [ $cword -eq $counter ]; then __docker_containers_all fi ;; esac } _docker_pause() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_containers_pauseable fi ;; esac } _docker_port() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) local 
counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_containers_all fi ;; esac } _docker_ps() { case "$prev" in --before|--since) __docker_containers_all ;; --filter|-f) COMPREPLY=( $( compgen -S = -W "exited id label name status" -- "$cur" ) ) compopt -o nospace return ;; -n) return ;; esac case "${words[$cword-2]}$prev=" in *id=*) cur="${cur#=}" __docker_container_ids return ;; *name=*) cur="${cur#=}" __docker_container_names return ;; *status=*) COMPREPLY=( $( compgen -W "exited paused restarting running" -- "${cur#=}" ) ) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--all -a --before --filter -f --help --latest -l -n --no-trunc --quiet -q --size -s --since" -- "$cur" ) ) ;; esac } _docker_pull() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--all-tags -a --help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_image_repos_and_tags fi ;; esac } _docker_push() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_image_repos_and_tags fi ;; esac } _docker_rename() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_containers_all fi ;; esac } _docker_restart() { case "$prev" in --time|-t) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--help --time -t" -- "$cur" ) ) ;; *) __docker_containers_all ;; esac } _docker_rm() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--force -f --help --link -l --volumes -v" -- "$cur" ) ) ;; *) for arg in "${COMP_WORDS[@]}"; do case "$arg" in --force|-f) __docker_containers_all return ;; esac done __docker_containers_stopped ;; esac } _docker_rmi() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--force -f --help --no-prune" -- "$cur" ) ) ;; *) __docker_image_repos_and_tags_and_ids ;; esac } _docker_run() { local options_with_args=" --add-host --attach -a --cap-add --cap-drop --cgroup-parent --cidfile --cpuset --cpu-shares -c --device --dns --dns-search --entrypoint --env -e --env-file --expose --hostname -h --ipc --label -l --label-file --link --log-driver --lxc-conf --mac-address --memory -m --memory-swap --name --net --pid --publish -p --restart --security-opt --user -u --ulimit --volumes-from --volume -v --workdir -w " local all_options="$options_with_args --help --interactive -i --privileged --publish-all -P --read-only --tty -t " [ "$command" = "run" ] && all_options="$all_options --detach -d --rm --sig-proxy " local options_with_args_glob=$(__docker_to_extglob "$options_with_args") case "$prev" in --add-host) case "$cur" in *:) __docker_resolve_hostname return ;; esac ;; --attach|-a) COMPREPLY=( $( compgen -W 'stdin stdout stderr' -- "$cur" ) ) return ;; --cap-add|--cap-drop) __docker_capabilities return ;; --cidfile|--env-file|--label-file) _filedir return ;; --device|--volume|-v) case "$cur" in *:*) # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine) ;; '') COMPREPLY=( $( compgen -W '/' -- "$cur" ) ) compopt -o nospace ;; /*) _filedir compopt -o nospace ;; esac return ;; --env|-e) COMPREPLY=( $( compgen -e -- "$cur" ) ) compopt -o nospace return ;; --ipc) case "$cur" in *:*) cur="${cur#*:}" __docker_containers_running ;; *) COMPREPLY=( $( compgen -W 'host container:' -- "$cur" ) ) if [ "$COMPREPLY" = "container:" ]; then compopt -o nospace fi ;; 
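				# nospace above keeps the cursor glued to "container:" so a
				# container name can be completed right after the colon.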
esac return ;; --link) case "$cur" in *:*) ;; *) __docker_containers_running COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) compopt -o nospace ;; esac return ;; --log-driver) COMPREPLY=( $( compgen -W "json-file syslog none" -- "$cur") ) return ;; --net) case "$cur" in container:*) local cur=${cur#*:} __docker_containers_all ;; *) COMPREPLY=( $( compgen -W "bridge none container: host" -- "$cur") ) if [ "${COMPREPLY[*]}" = "container:" ] ; then compopt -o nospace fi ;; esac return ;; --restart) case "$cur" in on-failure:*) ;; *) COMPREPLY=( $( compgen -W "no on-failure on-failure: always" -- "$cur") ) ;; esac return ;; --security-opt) case "$cur" in label:*:*) ;; label:*) local cur=${cur##*:} COMPREPLY=( $( compgen -W "user: role: type: level: disable" -- "$cur") ) if [ "${COMPREPLY[*]}" != "disable" ] ; then compopt -o nospace fi ;; *) COMPREPLY=( $( compgen -W "label apparmor" -S ":" -- "$cur") ) compopt -o nospace ;; esac return ;; --volumes-from) __docker_containers_all return ;; $options_with_args_glob ) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) ;; *) local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) if [ $cword -eq $counter ]; then __docker_image_repos_and_tags_and_ids fi ;; esac } _docker_save() { case "$prev" in --output|-o) _filedir return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--help --output -o" -- "$cur" ) ) ;; *) __docker_image_repos_and_tags_and_ids ;; esac } _docker_search() { case "$prev" in --stars|-s) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--automated --help --no-trunc --stars -s" -- "$cur" ) ) ;; esac } _docker_start() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--attach -a --help --interactive -i" -- "$cur" ) ) ;; *) __docker_containers_stopped ;; esac } _docker_stats() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) __docker_containers_running ;; esac } _docker_stop() { case "$prev" in --time|-t) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--help --time -t" -- "$cur" ) ) ;; *) __docker_containers_running ;; esac } _docker_tag() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_image_repos_and_tags return fi (( counter++ )) if [ $cword -eq $counter ]; then __docker_image_repos_and_tags return fi ;; esac } _docker_unpause() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_containers_unpauseable fi ;; esac } _docker_top() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) local counter=$(__docker_pos_first_nonflag) if [ $cword -eq $counter ]; then __docker_containers_running fi ;; esac } _docker_version() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; esac } _docker_wait() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) __docker_containers_all ;; esac } _docker() { local previous_extglob_setting=$(shopt -p extglob) shopt -s extglob local commands=( attach build commit cp create diff events exec export history images import info inspect kill load login logout logs pause port ps pull push rename restart rm rmi run save search start stats stop tag top unpause version wait ) local main_options_with_args=" --api-cors-header --bip --bridge -b --default-ulimit --dns --dns-search 
--exec-driver -e --fixed-cidr --fixed-cidr-v6 --graph -g --group -G --host -H --insecure-registry --ip --label --log-level -l --mtu --pidfile -p --registry-mirror --storage-driver -s --storage-opt --tlscacert --tlscert --tlskey " local main_options_with_args_glob=$(__docker_to_extglob "$main_options_with_args") COMPREPLY=() local cur prev words cword _get_comp_words_by_ref -n : cur prev words cword local command='docker' cpos=0 local counter=1 while [ $counter -lt $cword ]; do case "${words[$counter]}" in $main_options_with_args_glob ) (( counter++ )) ;; -*) ;; *) command="${words[$counter]}" cpos=$counter (( cpos++ )) break ;; esac (( counter++ )) done local completions_func=_docker_${command} declare -F $completions_func >/dev/null && $completions_func eval "$previous_extglob_setting" return 0 } complete -F _docker docker docker-1.6.2/contrib/completion/REVIEWERS0000644000175000017500000000013412524223634017441 0ustar tianontianonTianon Gravi (@tianon) Jessie Frazelle (@jfrazelle) docker-1.6.2/contrib/completion/fish/0000755000175000017500000000000012524223634017076 5ustar tianontianondocker-1.6.2/contrib/completion/fish/docker.fish0000644000175000017500000010342712524223634021227 0ustar tianontianon# docker.fish - docker completions for fish shell # # This file is generated by gen_docker_fish_completions.py from: # https://github.com/barnybug/docker-fish-completion # # To install the completions: # mkdir -p ~/.config/fish/completions # cp docker.fish ~/.config/fish/completions # # Completion supported: # - parameters # - commands # - containers # - images # - repositories function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand' for i in (commandline -opc) if contains -- $i attach build commit cp create diff events exec export history images import info inspect kill load login logout logs pause port ps pull push rename restart rm rmi run save search start stop tag top unpause version wait return 1 end end return 0 end function __fish_print_docker_containers --description 'Print a list of docker containers' -a select switch $select case running docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Up" {print $1 "\n" $(NF-1)}' | tr ',' '\n' case stopped docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Exit" {print $1 "\n" $(NF-1)}' | tr ',' '\n' case all docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; {print $1 "\n" $(NF-1)}' | tr ',' '\n' end end function __fish_print_docker_images --description 'Print a list of docker images' docker images | command awk 'NR>1' | command grep -v '' | command awk '{print $1":"$2}' end function __fish_print_docker_repositories --description 'Print a list of docker repositories' docker images | command awk 'NR>1' | command grep -v '' | command awk '{print $1}' | command sort | command uniq end # common options complete -c docker -f -n '__fish_docker_no_subcommand' -l api-cors-header -d "Set CORS headers in the remote API. 
Default is cors disabled" complete -c docker -f -n '__fish_docker_no_subcommand' -s b -l bridge -d 'Attach containers to a pre-existing network bridge' complete -c docker -f -n '__fish_docker_no_subcommand' -l bip -d "Use this CIDR notation address for the network bridge's IP, not compatible with -b" complete -c docker -f -n '__fish_docker_no_subcommand' -s D -l debug -d 'Enable debug mode' complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable daemon mode' complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force Docker to use specific DNS servers' complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-search -d 'Force Docker to use specific DNS search domains' complete -c docker -f -n '__fish_docker_no_subcommand' -s e -l exec-driver -d 'Force the Docker runtime to use a specific exec driver' complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr -d 'IPv4 subnet for fixed IPs (e.g. 10.20.0.0/16)' complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr-v6 -d 'IPv6 subnet for fixed IPs (e.g.: 2001:a02b/48)' complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d 'Group to assign the unix socket specified by -H when running in daemon mode' complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the Docker runtime' complete -c docker -f -n '__fish_docker_no_subcommand' -s H -l host -d 'The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.' complete -c docker -f -n '__fish_docker_no_subcommand' -s h -l help -d 'Print usage' complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Allow unrestricted inter-container and Docker daemon host communication' complete -c docker -f -n '__fish_docker_no_subcommand' -l insecure-registry -d 'Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16)' complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports' complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Enable net.ipv4.ip_forward and IPv6 forwarding if --fixed-cidr-v6 is defined. IPv6 forwarding may interfere with your existing IPv6 configuration when using Router Advertisement.' 
complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-masq -d "Enable IP masquerading for bridge's IP range" complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Enable Docker's addition of iptables rules" complete -c docker -f -n '__fish_docker_no_subcommand' -l ipv6 -d 'Enable IPv6 networking' complete -c docker -f -n '__fish_docker_no_subcommand' -s l -l log-level -d 'Set the logging level (debug, info, warn, error, fatal)' complete -c docker -f -n '__fish_docker_no_subcommand' -l label -d 'Set key=value labels to the daemon (displayed in `docker info`)' complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU' complete -c docker -f -n '__fish_docker_no_subcommand' -s p -l pidfile -d 'Path to use for daemon PID file' complete -c docker -f -n '__fish_docker_no_subcommand' -l registry-mirror -d 'Specify a preferred Docker registry mirror' complete -c docker -f -n '__fish_docker_no_subcommand' -s s -l storage-driver -d 'Force the Docker runtime to use a specific storage driver' complete -c docker -f -n '__fish_docker_no_subcommand' -l selinux-enabled -d 'Enable selinux support. SELinux does not presently support the BTRFS storage driver' complete -c docker -f -n '__fish_docker_no_subcommand' -l storage-opt -d 'Set storage driver options' complete -c docker -f -n '__fish_docker_no_subcommand' -l tls -d 'Use TLS; implied by --tlsverify' complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscacert -d 'Trust only remotes providing a certificate signed by the CA given here' complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscert -d 'Path to TLS certificate file' complete -c docker -f -n '__fish_docker_no_subcommand' -l tlskey -d 'Path to TLS key file' complete -c docker -f -n '__fish_docker_no_subcommand' -l tlsverify -d 'Use TLS and verify the remote (daemon: verify client, client: verify daemon)' complete -c docker -f -n '__fish_docker_no_subcommand' -s v -l version -d 'Print version information and quit' # subcommands # attach complete -c docker -f -n '__fish_docker_no_subcommand' -a attach -d 'Attach to a running container' complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l no-stdin -d 'Do not attach STDIN' complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 'Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.' 
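# Only running containers are valid attach targets, hence the "running" filter below.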
complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_print_docker_containers running)' -d "Container" # build complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build an image from a Dockerfile' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s f -l file -d "Name of the Dockerfile(Default is 'Dockerfile' at context root)" complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l force-rm -d 'Always remove intermediate containers, even after unsuccessful builds' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l no-cache -d 'Do not use cache when building the image' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l pull -d 'Always attempt to pull a newer version of the image' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress the verbose output generated by the containers' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l rm -d 'Remove intermediate containers after a successful build' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d 'Repository name (and optionally a tag) to be applied to the resulting image in case of success' # commit complete -c docker -f -n '__fish_docker_no_subcommand' -a commit -d "Create a new image from a container's changes" complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (e.g., "John Hannibal Smith ")' complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s m -l message -d 'Commit message' complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s p -l pause -d 'Pause container during commit' complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers all)' -d "Container" # cp complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d "Copy files/folders from a container's filesystem to the host path" complete -c docker -A -f -n '__fish_seen_subcommand_from cp' -l help -d 'Print usage' # create complete -c docker -f -n '__fish_docker_no_subcommand' -a create -d 'Create a new container' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s a -l attach -d 'Attach to STDIN, STDOUT or STDERR.' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l add-host -d 'Add a custom host-to-IP mapping (host:ip)' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s c -l cpu-shares -d 'CPU shares (relative weight)' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-add -d 'Add Linux capabilities' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-drop -d 'Drop Linux capabilities' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cidfile -d 'Write the container ID to the file' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns -d 'Set custom DNS servers' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. 
if you don't wish to set the search domain)" complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s e -l env -d 'Set environment variables' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l env-file -d 'Read in a line delimited file of environment variables' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l expose -d 'Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s h -l hostname -d 'Container host name' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s i -l interactive -d 'Keep STDIN open even if not attached' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l link -d 'Add link to another container in the form of :alias' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l lxc-conf -d '(lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s m -l memory -d 'Memory limit (format: , where unit = b, k, m or g)' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l mac-address -d 'Container MAC address (e.g. 92:d0:c6:0a:29:33)' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: , where unit = b, k, m or g)" complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l name -d 'Assign a name to the container' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l net -d 'Set the Network mode for the container' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s p -l publish -d "Publish a container's port to the host" complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l pid -d 'Default is to create a private PID namespace for the container' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l privileged -d 'Give extended privileges to this container' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l read-only -d "Mount the container's root filesystem as read only" complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l security-opt -d 'Security Options' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s t -l tty -d 'Allocate a pseudo-TTY' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s u -l user -d 'Username or UID' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l volumes-from -d 'Mount volumes from the specified container(s)' complete -c docker -A 
-f -n '__fish_seen_subcommand_from create' -s w -l workdir -d 'Working directory inside the container' complete -c docker -A -f -n '__fish_seen_subcommand_from create' -a '(__fish_print_docker_images)' -d "Image" # diff complete -c docker -f -n '__fish_docker_no_subcommand' -a diff -d "Inspect changes on a container's filesystem" complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -a '(__fish_print_docker_containers all)' -d "Container" # events complete -c docker -f -n '__fish_docker_no_subcommand' -a events -d 'Get real time events from the server' complete -c docker -A -f -n '__fish_seen_subcommand_from events' -s f -l filter -d "Provide filter values (i.e., 'event=stop')" complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l since -d 'Show all events created since timestamp' complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l until -d 'Stream events until this timestamp' # exec complete -c docker -f -n '__fish_docker_no_subcommand' -a exec -d 'Run a command in a running container' complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s d -l detach -d 'Detached mode: run command in the background' complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s i -l interactive -d 'Keep STDIN open even if not attached' complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s t -l tty -d 'Allocate a pseudo-TTY' complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -a '(__fish_print_docker_containers running)' -d "Container" # export complete -c docker -f -n '__fish_docker_no_subcommand' -a export -d 'Stream the contents of a container as a tar archive' complete -c docker -A -f -n '__fish_seen_subcommand_from export' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from export' -a '(__fish_print_docker_containers all)' -d "Container" # history complete -c docker -f -n '__fish_docker_no_subcommand' -a history -d 'Show the history of an image' complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l no-trunc -d "Don't truncate output" complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'Only show numeric IDs' complete -c docker -A -f -n '__fish_seen_subcommand_from history' -a '(__fish_print_docker_images)' -d "Image" # images complete -c docker -f -n '__fish_docker_no_subcommand' -a images -d 'List images' complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'Show all images (by default filter out the intermediate image layers)' complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s f -l filter -d "Provide filter values (i.e., 'dangling=true')" complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l no-trunc -d "Don't truncate output" complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'Only show numeric IDs' complete -c docker -A -f -n '__fish_seen_subcommand_from images' -a '(__fish_print_docker_repositories)' -d "Repository" # import complete -c docker -f -n '__fish_docker_no_subcommand' -a import -d 'Create a new filesystem 
image from the contents of a tarball' complete -c docker -A -f -n '__fish_seen_subcommand_from import' -l help -d 'Print usage' # info complete -c docker -f -n '__fish_docker_no_subcommand' -a info -d 'Display system-wide information' # inspect complete -c docker -f -n '__fish_docker_no_subcommand' -a inspect -d 'Return low-level information on a container or image' complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s f -l format -d 'Format the output using the given go template.' complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_images)' -d "Image" complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers all)' -d "Container" # kill complete -c docker -f -n '__fish_docker_no_subcommand' -a kill -d 'Kill a running container' complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -s s -l signal -d 'Signal to send to the container' complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -a '(__fish_print_docker_containers running)' -d "Container" # load complete -c docker -f -n '__fish_docker_no_subcommand' -a load -d 'Load an image from a tar archive' complete -c docker -A -f -n '__fish_seen_subcommand_from load' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from load' -s i -l input -d 'Read from a tar archive file, instead of STDIN' # login complete -c docker -f -n '__fish_docker_no_subcommand' -a login -d 'Register or log in to a Docker registry server' complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s e -l email -d 'Email' complete -c docker -A -f -n '__fish_seen_subcommand_from login' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'Password' complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u -l username -d 'Username' # logout complete -c docker -f -n '__fish_docker_no_subcommand' -a logout -d 'Log out from a Docker registry server' # logs complete -c docker -f -n '__fish_docker_no_subcommand' -a logs -d 'Fetch the logs of a container' complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s f -l follow -d 'Follow log output' complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s t -l timestamps -d 'Show timestamps' complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l tail -d 'Output the specified number of lines at the end of logs (defaults to all logs)' complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -a '(__fish_print_docker_containers running)' -d "Container" # port complete -c docker -f -n '__fish_docker_no_subcommand' -a port -d 'Lookup the public-facing port that is NAT-ed to PRIVATE_PORT' complete -c docker -A -f -n '__fish_seen_subcommand_from port' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from port' -a '(__fish_print_docker_containers running)' -d "Container" # pause complete -c docker -f -n '__fish_docker_no_subcommand' -a pause -d 'Pause all processes within a container' complete -c docker -A -f -n '__fish_seen_subcommand_from pause' -a '(__fish_print_docker_containers running)' -d "Container" # ps complete -c docker -f -n '__fish_docker_no_subcommand' -a ps -d 'List containers' complete -c 
docker -A -f -n '__fish_seen_subcommand_from ps' -s a -l all -d 'Show all containers. Only running containers are shown by default.' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before -d 'Show only containers created before Id or Name, including non-running ones.' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s f -l filter -d 'Provide filter values. Valid filters:' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s l -l latest -d 'Show only the latest created container, including non-running ones.' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s n -d 'Show n last created containers, including non-running ones.' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l no-trunc -d "Don't truncate output" complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s q -l quiet -d 'Only display numeric IDs' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display total file sizes' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since -d 'Show only containers created since Id or Name, including non-running ones.' # pull complete -c docker -f -n '__fish_docker_no_subcommand' -a pull -d 'Pull an image or a repository from a Docker registry server' complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -s a -l all-tags -d 'Download all tagged images in the repository' complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_images)' -d "Image" complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_repositories)' -d "Repository" # push complete -c docker -f -n '__fish_docker_no_subcommand' -a push -d 'Push an image or a repository to a Docker registry server' complete -c docker -A -f -n '__fish_seen_subcommand_from push' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_images)' -d "Image" complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_repositories)' -d "Repository" # rename complete -c docker -f -n '__fish_docker_no_subcommand' -a rename -d 'Rename an existing container' # restart complete -c docker -f -n '__fish_docker_no_subcommand' -a restart -d 'Restart a running container' complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -s t -l time -d 'Number of seconds to wait for the container to stop before killing it. Once killed it will then be restarted. Default is 10 seconds.'
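# A typical combination of the ps flags completed above (illustrative, not
# taken from this script): list the IDs of all stopped containers and remove
# them:
#   docker ps -a -q --filter status=exited | xargs docker rm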
complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -a '(__fish_print_docker_containers running)' -d "Container" # rm complete -c docker -f -n '__fish_docker_no_subcommand' -a rm -d 'Remove one or more containers' complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -d 'Force the removal of a running container (uses SIGKILL)' complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s l -l link -d 'Remove the specified link and not the underlying container' complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s v -l volumes -d 'Remove the volumes associated with the container' complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -a '(__fish_print_docker_containers stopped)' -d "Container" # rmi complete -c docker -f -n '__fish_docker_no_subcommand' -a rmi -d 'Remove one or more images' complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -s f -l force -d 'Force removal of the image' complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l no-prune -d 'Do not delete untagged parents' complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -a '(__fish_print_docker_images)' -d "Image" # run complete -c docker -f -n '__fish_docker_no_subcommand' -a run -d 'Run a command in a new container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s a -l attach -d 'Attach to STDIN, STDOUT or STDERR.' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l add-host -d 'Add a custom host-to-IP mapping (host:ip)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s c -l cpu-shares -d 'CPU shares (relative weight)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-add -d 'Add Linux capabilities' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-drop -d 'Drop Linux capabilities' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cidfile -d 'Write the container ID to the file' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s d -l detach -d 'Detached mode: run the container in the background and print the new container ID' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns -d 'Set custom DNS servers' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain)" complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s e -l env -d 'Set environment variables' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l env-file -d 'Read in a line delimited file of environment variables' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l expose -d 'Expose a port or a range of ports (e.g. 
--expose=3300-3310) from the container without publishing it to your host' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s h -l hostname -d 'Container host name' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s i -l interactive -d 'Keep STDIN open even if not attached' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l link -d 'Add link to another container in the form of :alias' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l lxc-conf -d '(lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s m -l memory -d 'Memory limit (format: , where unit = b, k, m or g)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l mac-address -d 'Container MAC address (e.g. 92:d0:c6:0a:29:33)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: , where unit = b, k, m or g)" complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign a name to the container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l net -d 'Set the Network mode for the container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s p -l publish -d "Publish a container's port to the host" complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l pid -d 'Default is to create a private PID namespace for the container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l privileged -d 'Give extended privileges to this container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l read-only -d "Mount the container's root filesystem as read only" complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l rm -d 'Automatically remove the container when it exits (incompatible with -d)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l security-opt -d 'Security Options' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.' 
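# An illustrative run invocation combining several of the flags completed
# above (the image and container names are placeholders):
#   docker run -d --name web --restart=on-failure:5 -m 512m -p 8080:80 example/image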
complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s t -l tty -d 'Allocate a pseudo-TTY' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s u -l user -d 'Username or UID' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l volumes-from -d 'Mount volumes from the specified container(s)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s w -l workdir -d 'Working directory inside the container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -a '(__fish_print_docker_images)' -d "Image" # save complete -c docker -f -n '__fish_docker_no_subcommand' -a save -d 'Save an image to a tar archive' complete -c docker -A -f -n '__fish_seen_subcommand_from save' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from save' -s o -l output -d 'Write to a file, instead of STDOUT' complete -c docker -A -f -n '__fish_seen_subcommand_from save' -a '(__fish_print_docker_images)' -d "Image" # search complete -c docker -f -n '__fish_docker_no_subcommand' -a search -d 'Search for an image on the registry (defaults to the Docker Hub)' complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l automated -d 'Only show automated builds' complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l no-trunc -d "Don't truncate output" complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s s -l stars -d 'Only display images with at least x stars' # start complete -c docker -f -n '__fish_docker_no_subcommand' -a start -d 'Start a stopped container' complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s a -l attach -d "Attach container's STDOUT and STDERR and forward all signals to the process" complete -c docker -A -f -n '__fish_seen_subcommand_from start' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s i -l interactive -d "Attach container's STDIN" complete -c docker -A -f -n '__fish_seen_subcommand_from start' -a '(__fish_print_docker_containers stopped)' -d "Container" # stats complete -c docker -f -n '__fish_docker_no_subcommand' -a stats -d "Display a live stream of one or more containers' resource usage statistics" complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -a '(__fish_print_docker_containers running)' -d "Container" # stop complete -c docker -f -n '__fish_docker_no_subcommand' -a stop -d 'Stop a running container' complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -s t -l time -d 'Number of seconds to wait for the container to stop before killing it. Default is 10 seconds.'
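# An illustrative save/load round trip using the flags completed above (the
# file and image names are placeholders):
#   docker save -o myimage.tar example/image:latest
#   docker load -i myimage.tar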
complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -a '(__fish_print_docker_containers running)' -d "Container" # tag complete -c docker -f -n '__fish_docker_no_subcommand' -a tag -d 'Tag an image into a repository' complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -s f -l force -d 'Force' complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -l help -d 'Print usage' # top complete -c docker -f -n '__fish_docker_no_subcommand' -a top -d 'Lookup the running processes of a container' complete -c docker -A -f -n '__fish_seen_subcommand_from top' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from top' -a '(__fish_print_docker_containers running)' -d "Container" # unpause complete -c docker -f -n '__fish_docker_no_subcommand' -a unpause -d 'Unpause a paused container' complete -c docker -A -f -n '__fish_seen_subcommand_from unpause' -a '(__fish_print_docker_containers running)' -d "Container" # version complete -c docker -f -n '__fish_docker_no_subcommand' -a version -d 'Show the Docker version information' # wait complete -c docker -f -n '__fish_docker_no_subcommand' -a wait -d 'Block until a container stops, then print its exit code' complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -l help -d 'Print usage' complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -a '(__fish_print_docker_containers running)' -d "Container" docker-1.6.2/contrib/completion/zsh/0000755000175000017500000000000012524223634016751 5ustar tianontianondocker-1.6.2/contrib/completion/zsh/REVIEWERS0000644000175000017500000000022012524223634020241 0ustar tianontianonTianon Gravi (@tianon) Jessie Frazelle (@jfrazelle) Vincent Bernat (@vincentbernat) docker-1.6.2/contrib/completion/zsh/_docker0000644000175000017500000004511512524223634020310 0ustar tianontianon#compdef docker # # zsh completion for docker (http://docker.com) # # version: 0.3.0 # github: https://github.com/felixr/docker-zsh-completion # # contributors: # - Felix Riedel # - Vincent Bernat # # license: # # Copyright (c) 2013, Felix Riedel # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
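#
# usage (a sketch; the paths are placeholders): copy this file as `_docker`
# into a directory on $fpath and initialize the completion system, e.g.:
#   fpath=(~/.zsh/completion $fpath)
#   autoload -Uz compinit && compinit
#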
# __docker_get_containers() { local kind expl declare -a running stopped lines args kind=$1 shift [[ $kind = (stopped|all) ]] && args=($args -a) lines=(${(f)"$(_call_program commands docker ps ${args})"}) # Parse header line to find columns local i=1 j=1 k header=${lines[1]} declare -A begin end while (( $j < ${#header} - 1 )) { i=$(( $j + ${${header[$j,-1]}[(i)[^ ]]} - 1)) j=$(( $i + ${${header[$i,-1]}[(i) ]} - 1)) k=$(( $j + ${${header[$j,-1]}[(i)[^ ]]} - 2)) begin[${header[$i,$(($j-1))]}]=$i end[${header[$i,$(($j-1))]}]=$k } lines=(${lines[2,-1]}) # Container ID local line local s for line in $lines; do s="${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}" s="$s:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" s="$s, ${${${line[$begin[IMAGE],$end[IMAGE]]}/:/\\:}%% ##}" if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then stopped=($stopped $s) else running=($running $s) fi done # Names local name local -a names for line in $lines; do names=(${(ps:,:)${${line[${begin[NAMES]},-1]}%% *}}) for name in $names; do s="${name}:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" s="$s, ${${${line[$begin[IMAGE],$end[IMAGE]]}/:/\\:}%% ##}" if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then stopped=($stopped $s) else running=($running $s) fi done done [[ $kind = (running|all) ]] && _describe -t containers-running "running containers" running [[ $kind = (stopped|all) ]] && _describe -t containers-stopped "stopped containers" stopped } __docker_stoppedcontainers() { __docker_get_containers stopped "$@" } __docker_runningcontainers() { __docker_get_containers running "$@" } __docker_containers () { __docker_get_containers all "$@" } __docker_images () { local expl declare -a images images=(${${${${(f)"$(_call_program commands docker images)"}[2,-1]}/ ##/\\:}%% *}) images=(${${images%\\:}#} ${${${(f)"$(_call_program commands docker images)"}[2,-1]}/(#b)([^ ]##) ##([^ ]##) ##([^ ]##)*/${match[3]}:${(r:15:: :::)match[2]} in ${match[1]}}) _describe -t docker-images "images" images } __docker_tags() { local expl declare -a tags tags=(${${${${${(f)"$(_call_program commands docker images)"}#* }## #}%% *}[2,-1]}) _describe -t docker-tags "tags" tags } __docker_repositories_with_tags() { if compset -P '*:'; then __docker_tags else __docker_repositories -qS ":" fi } __docker_search() { # declare -a dockersearch local cache_policy zstyle -s ":completion:${curcontext}:" cache-policy cache_policy if [[ -z "$cache_policy" ]]; then zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy fi local searchterm cachename searchterm="${words[$CURRENT]%/}" cachename=_docker-search-$searchterm local expl local -a result if ( [[ ${(P)+cachename} -eq 0 ]] || _cache_invalid ${cachename#_} ) \ && ! _retrieve_cache ${cachename#_}; then _message "Searching for ${searchterm}..." 
result=(${${${(f)"$(_call_program commands docker search ${searchterm})"}%% *}[2,-1]}) _store_cache ${cachename#_} result fi _wanted dockersearch expl 'available images' compadd -a result } __docker_caching_policy() { oldp=( "$1"(Nmh+1) ) # 1 hour (( $#oldp )) } __docker_repositories () { local expl declare -a repos repos=(${${${(f)"$(_call_program commands docker images)"}%% *}[2,-1]}) _describe -t docker-repos "repositories" repos "$@" } __docker_commands () { # local -a _docker_subcommands local cache_policy zstyle -s ":completion:${curcontext}:" cache-policy cache_policy if [[ -z "$cache_policy" ]]; then zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy fi if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands) \ && ! _retrieve_cache docker_subcommands; then local -a lines lines=(${(f)"$(_call_program commands docker 2>&1)"}) _docker_subcommands=(${${${lines[$((${lines[(i)Commands:]} + 1)),${lines[(I) *]}]}## #}/ ##/:}) _docker_subcommands=($_docker_subcommands 'help:Show help for a command') _store_cache docker_subcommands _docker_subcommands fi _describe -t docker-commands "docker command" _docker_subcommands } __docker_subcommand () { local -a _command_args case "$words[1]" in (attach) _arguments \ '--no-stdin[Do not attach stdin]' \ '--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]' \ ':containers:__docker_runningcontainers' ;; (build) _arguments \ {-f,--file=-}'[Dockerfile to use]:Dockerfile:_files' \ '--force-rm[Always remove intermediate containers]' \ '--no-cache[Do not use cache when building the image]' \ '--pull[Attempt to pull a newer version of the image]' \ {-q,--quiet}'[Suppress verbose build output]' \ '--rm[Remove intermediate containers after a successful build]' \ {-t,--tag=-}'[Repository, name and tag to be applied]:repository:__docker_repositories_with_tags' \ ':path or URL:_directories' ;; (commit) _arguments \ {-a,--author=-}'[Author]:author: ' \ {-m,--message=-}'[Commit message]:message: ' \ {-p,--pause}'[Pause container during commit]' \ ':container:__docker_containers' \ ':repository:__docker_repositories_with_tags' ;; (cp) _arguments \ ':container:->container' \ ':hostpath:_files' case $state in (container) if compset -P '*:'; then _files else __docker_containers -qS ":" fi ;; esac ;; (diff|export) _arguments '*:containers:__docker_containers' ;; (events) _arguments \ '*'{-f,--filter=-}'[Filter values]:filter: ' \ '--since=-[Events created since this timestamp]:timestamp: ' \ '--until=-[Events created until this timestamp]:timestamp: ' ;; (exec) local state ret _arguments \ {-d,--detach}'[Detached mode: leave the container running in the background]' \ {-i,--interactive}'[Keep stdin open even if not attached]' \ {-t,--tty}'[Allocate a pseudo-tty]' \ ':containers:__docker_runningcontainers' \ '*::command:->anycommand' && ret=0 case $state in (anycommand) shift 1 words (( CURRENT-- )) _normal ;; esac return ret ;; (history) _arguments \ '--no-trunc[Do not truncate output]' \ {-q,--quiet}'[Only show numeric IDs]' \ '*:images:__docker_images' ;; (images) _arguments \ {-a,--all}'[Show all images]' \ '*'{-f,--filter=-}'[Filter values]:filter: ' \ '--no-trunc[Do not truncate output]' \ {-q,--quiet}'[Only show numeric IDs]' \ ':repository:__docker_repositories' ;; (import) _arguments \ ':URL:(- http:// file://)' \ ':repository:__docker_repositories_with_tags' ;; (info) ;; (inspect) _arguments \ {-f,--format=-}'[Format the output using the given go template]:template: ' \ 
'*:containers:__docker_containers' ;; (kill) _arguments \ {-s,--signal=-}'[Signal to send]:signal:_signals' \ '*:containers:__docker_runningcontainers' ;; (load) _arguments \ {-i,--input=-}'[Read from tar archive file]:archive file:_files -g "*.((tar|TAR)(.gz|.GZ|.Z|.bz2|.lzma|.xz|)|(tbz|tgz|txz))(-.)"' ;; (login) _arguments \ {-e,--email=-}'[Email]:email: ' \ {-p,--password=-}'[Password]:password: ' \ {-u,--username=-}'[Username]:username: ' \ ':server: ' ;; (logout) _arguments \ ':server: ' ;; (logs) _arguments \ {-f,--follow}'[Follow log output]' \ {-t,--timestamps}'[Show timestamps]' \ '--tail=-[Output the last K lines]:lines:(1 10 20 50 all)' \ '*:containers:__docker_containers' ;; (port) _arguments \ '1:containers:__docker_runningcontainers' \ '2:port:_ports' ;; (pause|unpause) _arguments \ '1:containers:__docker_runningcontainers' ;; (start) _arguments \ {-a,--attach}'[Attach container'"'"'s stdout/stderr and forward all signals]' \ {-i,--interactive}'[Attach container'"'"'s stdin]' \ '*:containers:__docker_stoppedcontainers' ;; (stats) _arguments \ '*:containers:__docker_runningcontainers' ;; (rm) _arguments \ {-f,--force}'[Force removal]' \ {-l,--link}'[Remove the specified link and not the underlying container]' \ {-v,--volumes}'[Remove the volumes associated with the container]' \ '*:containers:__docker_stoppedcontainers' ;; (rmi) _arguments \ {-f,--force}'[Force removal]' \ '--no-prune[Do not delete untagged parents]' \ '*:images:__docker_images' ;; (restart|stop) _arguments \ {-t,--time=-}'[Number of seconds to wait for the container to stop before killing it]:seconds to wait before killing:(1 5 10 30 60)' \ '*:containers:__docker_runningcontainers' ;; (top) _arguments \ '1:containers:__docker_runningcontainers' \ '(-)*:: :->ps-arguments' case $state in (ps-arguments) _ps ;; esac ;; (ps) _arguments \ {-a,--all}'[Show all containers]' \ '--before=-[Show only containers created before...]:containers:__docker_containers' \ '*'{-f,--filter=-}'[Filter values]:filter: ' \ {-l,--latest}'[Show only the latest created container]' \ '-n[Show n last created containers, include non-running ones]:n:(1 5 10 25 50)' \ '--no-trunc[Do not truncate output]' \ {-q,--quiet}'[Only show numeric IDs]' \ {-s,--size}'[Display total file sizes]' \ '--since=-[Show only containers created since...]:containers:__docker_containers' ;; (tag) _arguments \ {-f,--force}'[force]'\ ':image:__docker_images'\ ':repository:__docker_repositories_with_tags' ;; (create|run) _arguments \ {-a,--attach}'[Attach to stdin, stdout or stderr]' \ '*--add-host=-[Add a custom host-to-IP mapping]:host\:ip mapping: ' \ {-c,--cpu-shares=-}'[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)' \ '*--cap-add=-[Add Linux capabilities]:capability: ' \ '*--cap-drop=-[Drop Linux capabilities]:capability: ' \ '--cidfile=-[Write the container ID to the file]:CID file:_files' \ '--cpuset=-[CPUs in which to allow execution]:CPU set: ' \ {-d,--detach}'[Detached mode: leave the container running in the background]' \ '*--device=-[Add a host device to the container]:device:_files' \ '*--dns=-[Set custom dns servers]:dns server: ' \ '*--dns-search=-[Set custom DNS search domains]:dns domains: ' \ '*'{-e,--env=-}'[Set environment variables]:environment variable: ' \ '--entrypoint=-[Overwrite the default entrypoint of the image]:entry point: ' \ '*--env-file=-[Read environment variables from a file]:environment file:_files' \ '*--expose=-[Expose a port from the container without publishing it]: ' \ {-h,--hostname=-}'[Container host 
name]:hostname:_hosts' \ {-i,--interactive}'[Keep stdin open even if not attached]' \ '*--link=-[Add link to another container]:link:->link' \ '*--lxc-conf=-[Add custom lxc options]:lxc options: ' \ '-m[Memory limit (in bytes)]:limit: ' \ '--name=-[Container name]:name: ' \ '--net=-[Network mode]:network mode:(bridge none container host)' \ {-P,--publish-all}'[Publish all exposed ports]' \ '*'{-p,--publish=-}'[Expose a container'"'"'s port to the host]:port:_ports' \ '--privileged[Give extended privileges to this container]' \ '--restart=-[Restart policy]:restart policy:(no on-failure always)' \ '--rm[Remove intermediate containers when it exits]' \ '*--security-opt=-[Security options]:security option: ' \ '--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]' \ {-t,--tty}'[Allocate a pseudo-tty]' \ {-u,--user=-}'[Username or UID]:user:_users' \ '*-v[Bind mount a volume]:volume: '\ '*--volumes-from=-[Mount volumes from the specified container]:volume: ' \ {-w,--workdir=-}'[Working directory inside the container]:directory:_directories' \ '(-):images:__docker_images' \ '(-):command: _command_names -e' \ '*::arguments: _normal' case $state in (link) if compset -P '*:'; then _wanted alias expl 'Alias' compadd -E "" else __docker_runningcontainers -qS ":" fi ;; esac ;; (pull) _arguments \ {-a,--all-tags}'[Download all tagged images]' \ ':name:__docker_search' ;; (push) _arguments ':images:__docker_images' ;; (rename) _arguments \ ':old name:__docker_containers' \ ':new name: ' ;; (save) _arguments \ {-o,--output=-}'[Write to file]:file:_files' \ '*:images:__docker_images' ;; (search) _arguments \ '--automated[Only show automated builds]' \ '--no-trunc[Do not truncate output]' \ {-s,--stars=-}'[Only display with at least X stars]:stars:(0 10 100 1000)' \ ':term: ' ;; (wait) _arguments '*:containers:__docker_runningcontainers' ;; (help) _arguments ':subcommand:__docker_commands' ;; (*) _message 'Unknown sub command' esac } _docker () { # Support for subservices, which allows for `compdef _docker docker-shell=_docker_containers`. # Based on /usr/share/zsh/functions/Completion/Unix/_git without support for `ret`. if [[ $service != docker ]]; then _call_function - _$service return fi local curcontext="$curcontext" state line typeset -A opt_args _arguments -C \ '-H[tcp://host:port to bind/connect to]:socket: ' \ '(-): :->command' \ '(-)*:: :->option-or-argument' if (( CURRENT == 1 )); then fi case $state in (command) __docker_commands ;; (option-or-argument) curcontext=${curcontext%:*:*}:docker-$words[1]: __docker_subcommand ;; esac } _docker "$@" # Local Variables: # mode: Shell-Script # sh-indentation: 4 # indent-tabs-mode: nil # sh-basic-offset: 4 # End: # vim: ft=zsh sw=4 ts=4 et docker-1.6.2/contrib/README0000644000175000017500000000036712524223634014662 0ustar tianontianonThe `contrib` directory contains scripts, images, and other helpful things which are not part of the core docker distribution. Please note that they could be out of date, since they do not receive the same attention as the rest of the repository. 
docker-1.6.2/contrib/mkimage/0000755000175000017500000000000012524223634015406 5ustar tianontianondocker-1.6.2/contrib/mkimage/rinse0000755000175000017500000000104212524223634016451 0ustar tianontianon#!/usr/bin/env bash set -e rootfsDir="$1" shift # specifying --arch below is safe because "$@" can override it and the "latest" one wins :) ( set -x rinse --directory "$rootfsDir" --arch amd64 "$@" ) "$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir" if [ -d "$rootfsDir/etc/sysconfig" ]; then # allow networking init scripts inside the container to work without extra steps echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network" fi # make sure we're fully up-to-date, too ( set -x chroot "$rootfsDir" yum update -y ) docker-1.6.2/contrib/mkimage/busybox-static0000755000175000017500000000141612524223634020316 0ustar tianontianon#!/usr/bin/env bash set -e rootfsDir="$1" shift busybox="$(which busybox 2>/dev/null || true)" if [ -z "$busybox" ]; then echo >&2 'error: busybox: not found' echo >&2 ' install it with your distribution "busybox-static" package' exit 1 fi if ! ldd "$busybox" 2>&1 | grep -q 'not a dynamic executable'; then echo >&2 "error: '$busybox' appears to be a dynamic executable" echo >&2 ' you should install your distribution "busybox-static" package instead' exit 1 fi mkdir -p "$rootfsDir/bin" rm -f "$rootfsDir/bin/busybox" # just in case cp "$busybox" "$rootfsDir/bin/busybox" ( cd "$rootfsDir" IFS=$'\n' modules=( $(bin/busybox --list-modules) ) unset IFS for module in "${modules[@]}"; do mkdir -p "$(dirname "$module")" ln -sf /bin/busybox "$module" done ) docker-1.6.2/contrib/mkimage/.febootstrap-minimize0000755000175000017500000000114112524223634021556 0ustar tianontianon#!/usr/bin/env bash set -e rootfsDir="$1" shift ( cd "$rootfsDir" # effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target" # locales rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} # docs rm -rf usr/share/{man,doc,info,gnome/help} # cracklib rm -rf usr/share/cracklib # i18n rm -rf usr/share/i18n # yum cache rm -rf var/cache/yum mkdir -p --mode=0755 var/cache/yum # sln rm -rf sbin/sln # ldconfig #rm -rf sbin/ldconfig rm -rf etc/ld.so.cache var/cache/ldconfig mkdir -p --mode=0755 var/cache/ldconfig ) docker-1.6.2/contrib/mkimage/mageia-urpmi0000755000175000017500000000276112524223634017717 0ustar tianontianon#!/usr/bin/env bash # # Needs to be run from Mageia 4 or greater for kernel support for docker. # # Mageia 4 does not have docker available in official repos, so please # install and run the docker binary manually. # # Tested working versions are for Mageia 2 onwards (inc. cauldron). 
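#
# Illustrative direct invocation (the values are placeholders; like the other
# mkimage backends, this script is normally run via mkimage.sh, which supplies
# the rootfs directory as the first argument):
#   ./mageia-urpmi /tmp/rootfs -v 4 -m http://mirrors.example.org/mageia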
# set -e rootfsDir="$1" shift optTemp=$(getopt --options '+v:,m:' --longoptions 'version:,mirror:' --name mageia-urpmi -- "$@") eval set -- "$optTemp" unset optTemp installversion= mirror= while true; do case "$1" in -v|--version) installversion="$2" ; shift 2 ;; -m|--mirror) mirror="$2" ; shift 2 ;; --) shift ; break ;; esac done if [ -z $installversion ]; then # Attempt to match host version if [ -r /etc/mageia-release ]; then installversion="$(sed 's/^[^0-9\]*\([0-9.]\+\).*$/\1/' /etc/mageia-release)" else echo "Error: no version supplied and unable to detect host mageia version" exit 1 fi fi if [ -z $mirror ]; then # No mirror provided, default to mirrorlist mirror="--mirrorlist https://mirrors.mageia.org/api/mageia.$installversion.x86_64.list" fi ( set -x urpmi.addmedia --distrib \ $mirror \ --urpmi-root "$rootfsDir" urpmi basesystem-minimal urpmi \ --auto \ --no-suggests \ --urpmi-root "$rootfsDir" \ --root "$rootfsDir" ) "$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir" if [ -d "$rootfsDir/etc/sysconfig" ]; then # allow networking init scripts inside the container to work without extra steps echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network" fi docker-1.6.2/contrib/mkimage/debootstrap0000755000175000017500000002036112524223634017664 0ustar tianontianon#!/usr/bin/env bash set -e rootfsDir="$1" shift # we have to do a little fancy footwork to make sure "rootfsDir" becomes the second non-option argument to debootstrap before=() while [ $# -gt 0 ] && [[ "$1" == -* ]]; do before+=( "$1" ) shift done suite="$1" shift # get path to "chroot" in our current PATH chrootPath="$(type -P chroot)" rootfs_chroot() { # "chroot" doesn't set PATH, so we need to set it explicitly to something our new debootstrap chroot can use appropriately! # set PATH and chroot away! PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' \ "$chrootPath" "$rootfsDir" "$@" } # allow for DEBOOTSTRAP=qemu-debootstrap ./mkimage.sh ... : ${DEBOOTSTRAP:=debootstrap} ( set -x $DEBOOTSTRAP "${before[@]}" "$suite" "$rootfsDir" "$@" ) # now for some Docker-specific tweaks # prevent init scripts from running during install/update echo >&2 "+ echo exit 101 > '$rootfsDir/usr/sbin/policy-rc.d'" cat > "$rootfsDir/usr/sbin/policy-rc.d" <<'EOF' #!/bin/sh # For most Docker users, "apt-get install" only happens during "docker build", # where starting services doesn't work and often fails in humorous ways. This # prevents those failures by stopping the services from attempting to start. exit 101 EOF chmod +x "$rootfsDir/usr/sbin/policy-rc.d" # prevent upstart scripts from running during install/update ( set -x rootfs_chroot dpkg-divert --local --rename --add /sbin/initctl cp -a "$rootfsDir/usr/sbin/policy-rc.d" "$rootfsDir/sbin/initctl" sed -i 's/^exit.*/exit 0/' "$rootfsDir/sbin/initctl" ) # shrink a little, since apt makes us cache-fat (wheezy: ~157.5MB vs ~120MB) ( set -x; rootfs_chroot apt-get clean ) # this file is one APT creates to make sure we don't "autoremove" our currently # in-use kernel, which doesn't really apply to debootstraps/Docker images that # don't even have kernels installed rm -f "$rootfsDir/etc/apt/apt.conf.d/01autoremove-kernels" # Ubuntu 10.04 sucks... 
:) if strings "$rootfsDir/usr/bin/dpkg" | grep -q unsafe-io; then # force dpkg not to call sync() after package extraction (speeding up installs) echo >&2 "+ echo force-unsafe-io > '$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup'" cat > "$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup" <<-'EOF' # For most Docker users, package installs happen during "docker build", which # doesn't survive power loss and gets restarted clean afterwards anyhow, so # this minor tweak gives us a nice speedup (much nicer on spinning disks, # obviously). force-unsafe-io EOF fi if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then # _keep_ us lean by effectively running "apt-get clean" after every install aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' echo >&2 "+ cat > '$rootfsDir/etc/apt/apt.conf.d/docker-clean'" cat > "$rootfsDir/etc/apt/apt.conf.d/docker-clean" <<-EOF # Since for most Docker users, package installs happen in "docker build" steps, # they essentially become individual layers due to the way Docker handles # layering, especially using CoW filesystems. What this means for us is that # the caches that APT keeps end up just wasting space in those layers, making # our layers unnecessarily large (especially since we'll normally never use # these caches again and will instead just "docker build" again and make a brand # new image). # Ideally, these would just be invoking "apt-get clean", but in our testing, # that ended up being cyclic and we got stuck on APT's lock, so we get this fun # creation that's essentially just "apt-get clean". DPkg::Post-Invoke { ${aptGetClean} }; APT::Update::Post-Invoke { ${aptGetClean} }; Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache ""; # Note that we do realize this isn't the ideal way to do this, and are always # open to better suggestions (https://github.com/docker/docker/issues). EOF # remove apt-cache translations for fast "apt-get update" echo >&2 "+ echo Acquire::Languages 'none' > '$rootfsDir/etc/apt/apt.conf.d/docker-no-languages'" cat > "$rootfsDir/etc/apt/apt.conf.d/docker-no-languages" <<-'EOF' # In Docker, we don't often need the "Translations" files, so we're just wasting # time and space by downloading them, and this inhibits that. For users that do # need them, it's a simple matter to delete this file and "apt-get update". :) Acquire::Languages "none"; EOF echo >&2 "+ echo Acquire::GzipIndexes 'true' > '$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes'" cat > "$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes" <<-'EOF' # Since Docker users using "RUN apt-get update && apt-get install -y ..." in # their Dockerfiles don't go delete the lists files afterwards, we want them to # be as small as possible on-disk, so we explicitly request "gz" versions and # tell Apt to keep them gzipped on-disk. # For comparison, an "apt-get update" layer without this on a pristine # "debian:wheezy" base image was "29.88 MB", where with this it was only # "8.273 MB". 
Acquire::GzipIndexes "true"; Acquire::CompressionTypes::Order:: "gz"; EOF # update "autoremove" configuration to be aggressive about removing suggests deps that weren't manually installed echo >&2 "+ echo Apt::AutoRemove::SuggestsImportant 'false' > '$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests'" cat > "$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests" <<-'EOF' # Since Docker users are looking for the smallest possible final images, the # following emerges as a very common pattern: # RUN apt-get update \ # && apt-get install -y \ # && \ # && apt-get purge -y --auto-remove # By default, APT will actually _keep_ packages installed via Recommends or # Depends if another package Suggests them, even and including if the package # that originally caused them to be installed is removed. Setting this to # "false" ensures that APT is appropriately aggressive about removing the # packages it added. # https://aptitude.alioth.debian.org/doc/en/ch02s05s05.html#configApt-AutoRemove-SuggestsImportant Apt::AutoRemove::SuggestsImportant "false"; EOF fi if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then # tweak sources.list, where appropriate lsbDist= if [ -z "$lsbDist" -a -r "$rootfsDir/etc/os-release" ]; then lsbDist="$(. "$rootfsDir/etc/os-release" && echo "$ID")" fi if [ -z "$lsbDist" -a -r "$rootfsDir/etc/lsb-release" ]; then lsbDist="$(. "$rootfsDir/etc/lsb-release" && echo "$DISTRIB_ID")" fi if [ -z "$lsbDist" -a -r "$rootfsDir/etc/debian_version" ]; then lsbDist='Debian' fi # normalize to lowercase for easier matching lsbDist="$(echo "$lsbDist" | tr '[:upper:]' '[:lower:]')" case "$lsbDist" in debian) # updates and security! if [ "$suite" != 'sid' -a "$suite" != 'unstable' ]; then ( set -x sed -i " p; s/ $suite / ${suite}-updates / " "$rootfsDir/etc/apt/sources.list" echo "deb http://security.debian.org $suite/updates main" >> "$rootfsDir/etc/apt/sources.list" # LTS if [ "$suite" = 'squeeze' -o "$suite" = 'oldstable' ]; then head -1 "$rootfsDir/etc/apt/sources.list" \ | sed "s/ $suite / squeeze-lts /" \ >> "$rootfsDir/etc/apt/sources.list" fi ) fi ;; ubuntu) # add the updates and security repositories ( set -x sed -i " p; s/ $suite / ${suite}-updates /; p; s/ $suite-updates / ${suite}-security / " "$rootfsDir/etc/apt/sources.list" ) ;; tanglu) # add the updates repository if [ "$suite" != 'devel' ]; then ( set -x sed -i " p; s/ $suite / ${suite}-updates / " "$rootfsDir/etc/apt/sources.list" ) fi ;; steamos) # add contrib and non-free if "main" is the only component ( set -x sed -i "s/ $suite main$/ $suite main contrib non-free/" "$rootfsDir/etc/apt/sources.list" ) ;; esac fi ( set -x # make sure we're fully up-to-date rootfs_chroot sh -xc 'apt-get update && apt-get dist-upgrade -y' # delete all the apt list files since they're big and get stale quickly rm -rf "$rootfsDir/var/lib/apt/lists"/* # this forces "apt-get update" in dependent images, which is also good mkdir "$rootfsDir/var/lib/apt/lists/partial" # Lucid... "E: Lists directory /var/lib/apt/lists/partial is missing." 
) docker-1.6.2/contrib/mkimage-debootstrap.sh0000755000175000017500000002200012524223634020263 0ustar tianontianon#!/usr/bin/env bash set -e echo >&2 echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/debootstrap' echo >&2 variant='minbase' include='iproute,iputils-ping' arch='amd64' # intentionally undocumented for now skipDetection= strictDebootstrap= justTar= usage() { echo >&2 echo >&2 "usage: $0 [options] repo suite [mirror]" echo >&2 echo >&2 'options: (not recommended)' echo >&2 " -p set an http_proxy for debootstrap" echo >&2 " -v $variant # change default debootstrap variant" echo >&2 " -i $include # change default package includes" echo >&2 " -d # strict debootstrap (do not apply any docker-specific tweaks)" echo >&2 " -s # skip version detection and tagging (ie, precise also tagged as 12.04)" echo >&2 " # note that this will also skip adding universe and/or security/updates to sources.list" echo >&2 " -t # just create a tarball, especially for dockerbrew (uses repo as tarball name)" echo >&2 echo >&2 " ie: $0 username/debian squeeze" echo >&2 " $0 username/debian squeeze http://ftp.uk.debian.org/debian/" echo >&2 echo >&2 " ie: $0 username/ubuntu precise" echo >&2 " $0 username/ubuntu precise http://mirrors.melbourne.co.uk/ubuntu/" echo >&2 echo >&2 " ie: $0 -t precise.tar.bz2 precise" echo >&2 " $0 -t wheezy.tgz wheezy" echo >&2 " $0 -t wheezy-uk.tar.xz wheezy http://ftp.uk.debian.org/debian/" echo >&2 } # these should match the names found at http://www.debian.org/releases/ debianStable=wheezy debianUnstable=sid # this should match the name found at http://releases.ubuntu.com/ ubuntuLatestLTS=trusty # this should match the name found at http://releases.tanglu.org/ tangluLatest=aequorea while getopts v:i:a:p:dst name; do case "$name" in p) http_proxy="$OPTARG" ;; v) variant="$OPTARG" ;; i) include="$OPTARG" ;; a) arch="$OPTARG" ;; d) strictDebootstrap=1 ;; s) skipDetection=1 ;; t) justTar=1 ;; ?) usage exit 0 ;; esac done shift $(($OPTIND - 1)) repo="$1" suite="$2" mirror="${3:-}" # stick to the default debootstrap mirror if one is not provided if [ ! "$repo" ] || [ ! "$suite" ]; then usage exit 1 fi # some rudimentary detection for whether we need to "sudo" our docker calls docker='' if docker version > /dev/null 2>&1; then docker='docker' elif sudo docker version > /dev/null 2>&1; then docker='sudo docker' elif command -v docker > /dev/null 2>&1; then docker='docker' else echo >&2 "warning: either docker isn't installed, or your current user cannot run it;" echo >&2 " this script is not likely to work as expected" sleep 3 docker='docker' # give us a command-not-found later fi # make sure we have an absolute path to our final tarball so we can still reference it properly after we change directory if [ "$justTar" ]; then if [ ! 
-d "$(dirname "$repo")" ]; then echo >&2 "error: $(dirname "$repo") does not exist" exit 1 fi repo="$(cd "$(dirname "$repo")" && pwd -P)/$(basename "$repo")" fi # will be filled in later, if [ -z "$skipDetection" ] lsbDist='' target="${TMPDIR:-/var/tmp}/docker-rootfs-debootstrap-$suite-$$-$RANDOM" cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" returnTo="$(pwd -P)" if [ "$suite" = 'lucid' ]; then # lucid fails and doesn't include gpgv in minbase; "apt-get update" fails include+=',gpgv' fi set -x # bootstrap mkdir -p "$target" sudo http_proxy=$http_proxy debootstrap --verbose --variant="$variant" --include="$include" --arch="$arch" "$suite" "$target" "$mirror" cd "$target" if [ -z "$strictDebootstrap" ]; then # prevent init scripts from running during install/update # policy-rc.d (for most scripts) echo $'#!/bin/sh\nexit 101' | sudo tee usr/sbin/policy-rc.d > /dev/null sudo chmod +x usr/sbin/policy-rc.d # initctl (for some pesky upstart scripts) sudo chroot . dpkg-divert --local --rename --add /sbin/initctl sudo ln -sf /bin/true sbin/initctl # see https://github.com/docker/docker/issues/446#issuecomment-16953173 # shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB) sudo chroot . apt-get clean if strings usr/bin/dpkg | grep -q unsafe-io; then # while we're at it, apt is unnecessarily slow inside containers # this forces dpkg not to call sync() after package extraction and speeds up install # the benefit is huge on spinning disks, and the penalty is nonexistent on SSD or decent server virtualization echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null # we have this wrapped up in an "if" because the "force-unsafe-io" # option was added in dpkg 1.15.8.6 # (see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=584254#82), # and ubuntu lucid/10.04 only has 1.15.5.6 fi # we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context) { aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' echo "DPkg::Post-Invoke { ${aptGetClean} };" echo "APT::Update::Post-Invoke { ${aptGetClean} };" echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";' } | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null # and remove the translations, too echo 'Acquire::Languages "none";' | sudo tee etc/apt/apt.conf.d/no-languages > /dev/null # helpful undo lines for each the above tweaks (for lack of a better home to keep track of them): # rm /usr/sbin/policy-rc.d # rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl # rm /etc/dpkg/dpkg.cfg.d/02apt-speedup # rm /etc/apt/apt.conf.d/no-cache # rm /etc/apt/apt.conf.d/no-languages if [ -z "$skipDetection" ]; then # see also rudimentary platform detection in hack/install.sh lsbDist='' if [ -r etc/lsb-release ]; then lsbDist="$(. 
etc/lsb-release && echo "$DISTRIB_ID")" fi if [ -z "$lsbDist" ] && [ -r etc/debian_version ]; then lsbDist='Debian' fi case "$lsbDist" in Debian) # add the updates and security repositories if [ "$suite" != "$debianUnstable" -a "$suite" != 'unstable' ]; then # ${suite}-updates only applies to non-unstable sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list # same for security updates echo "deb http://security.debian.org/ $suite/updates main" | sudo tee -a etc/apt/sources.list > /dev/null fi ;; Ubuntu) # add the universe, updates, and security repositories sudo sed -i " s/ $suite main$/ $suite main universe/; p; s/ $suite main/ ${suite}-updates main/; p; s/ $suite-updates main/ ${suite}-security main/ " etc/apt/sources.list ;; Tanglu) # add the updates repository if [ "$suite" = "$tangluLatest" ]; then # ${suite}-updates only applies to stable Tanglu versions sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list fi ;; SteamOS) # add contrib and non-free sudo sed -i "s/ $suite main$/ $suite main contrib non-free/" etc/apt/sources.list ;; esac fi # make sure our packages lists are as up to date as we can get them sudo chroot . apt-get update sudo chroot . apt-get dist-upgrade -y fi if [ "$justTar" ]; then # create the tarball file so it has the right permissions (ie, not root) touch "$repo" # fill the tarball sudo tar --numeric-owner -caf "$repo" . else # create the image (and tag $repo:$suite) sudo tar --numeric-owner -c . | $docker import - $repo:$suite # test the image $docker run -i -t $repo:$suite echo success if [ -z "$skipDetection" ]; then case "$lsbDist" in Debian) if [ "$suite" = "$debianStable" -o "$suite" = 'stable' ] && [ -r etc/debian_version ]; then # tag latest $docker tag $repo:$suite $repo:latest if [ -r etc/debian_version ]; then # tag the specific debian release version (which is only reasonable to tag on debian stable) ver=$(cat etc/debian_version) $docker tag $repo:$suite $repo:$ver fi fi ;; Ubuntu) if [ "$suite" = "$ubuntuLatestLTS" ]; then # tag latest $docker tag $repo:$suite $repo:latest fi if [ -r etc/lsb-release ]; then lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" if [ "$lsbRelease" ]; then # tag specific Ubuntu version number, if available (12.04, etc.) $docker tag $repo:$suite $repo:$lsbRelease fi fi ;; Tanglu) if [ "$suite" = "$tangluLatest" ]; then # tag latest $docker tag $repo:$suite $repo:latest fi if [ -r etc/lsb-release ]; then lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" if [ "$lsbRelease" ]; then # tag specific Tanglu version number, if available (1.0, 2.0, etc.) $docker tag $repo:$suite $repo:$lsbRelease fi fi ;; SteamOS) if [ -r etc/lsb-release ]; then lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" if [ "$lsbRelease" ]; then # tag specific SteamOS version number, if available (1.0, 2.0, etc.) $docker tag $repo:$suite $repo:$lsbRelease fi fi ;; esac fi fi # cleanup cd "$returnTo" sudo rm -rf "$target" docker-1.6.2/contrib/syntax/0000755000175000017500000000000012524223634015322 5ustar tianontianondocker-1.6.2/contrib/syntax/textmate/0000755000175000017500000000000012524223634017155 5ustar tianontianondocker-1.6.2/contrib/syntax/textmate/REVIEWERS0000644000175000017500000000006512524223634020454 0ustar tianontianonAsbjorn Enge (@asbjornenge) docker-1.6.2/contrib/syntax/textmate/README.md0000644000175000017500000000065712524223634020444 0ustar tianontianon# Docker.tmbundle Dockerfile syntaxt highlighting for TextMate and Sublime Text. 
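For example, a Dockerfile like the following (an illustrative sample, not part of the bundle) gets its directives, strings, and comments highlighted:

    # pull a base image
    FROM ubuntu:14.04
    MAINTAINER Jane Doe "jane@example.com"
    RUN apt-get update
    CMD ["/bin/bash"]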
## Install ### Sublime Text Available for Sublime Text under [package control](https://sublime.wbond.net/packages/Dockerfile%20Syntax%20Highlighting). Search for *Dockerfile Syntax Highlighting* ### TextMate 2 Copy the directory `Docker.tmbundle` (showed as a Package in OSX) to `~/Library/Application Support/TextMate/Managed/Bundles` enjoy. docker-1.6.2/contrib/syntax/textmate/Docker.tmbundle/0000755000175000017500000000000012524223634022175 5ustar tianontianondocker-1.6.2/contrib/syntax/textmate/Docker.tmbundle/info.plist0000644000175000017500000000075112524223634024210 0ustar tianontianon contactEmailRot13 germ@andz.com.ar contactName GermanDZ description Helpers for Docker. name Docker uuid 8B9DDBAF-E65C-4E12-FFA7-467D4AA535B1 docker-1.6.2/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/0000755000175000017500000000000012524223634024013 5ustar tianontianondocker-1.6.2/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage0000644000175000017500000000417412524223634030076 0ustar tianontianon name Dockerfile fileTypes Dockerfile patterns match ^\s*(ONBUILD\s+)?(FROM|MAINTAINER|RUN|EXPOSE|ENV|ADD|VOLUME|USER|LABEL|WORKDIR|COPY)\s captures 0 name keyword.control.dockerfile 1 name keyword.other.special-method.dockerfile match ^\s*(ONBUILD\s+)?(CMD|ENTRYPOINT)\s captures 0 name keyword.operator.dockerfile 1 name keyword.other.special-method.dockerfile begin " end " name string.quoted.double.dockerfile patterns match \\. name constant.character.escaped.dockerfile begin ' end ' name string.quoted.single.dockerfile patterns match \\. name constant.character.escaped.dockerfile match ^\s*#.*$ name comment.block.dockerfile scopeName source.dockerfile uuid a39d8795-59d2-49af-aa00-fe74ee29576e docker-1.6.2/contrib/syntax/textmate/Docker.tmbundle/Preferences/0000755000175000017500000000000012524223634024436 5ustar tianontianondocker-1.6.2/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences0000644000175000017500000000110212524223634031223 0ustar tianontianon name Comments scope source.dockerfile settings shellVariables name TM_COMMENT_START value # uuid 2B215AC0-A7F3-4090-9FF6-F4842BD56CA7 docker-1.6.2/contrib/syntax/kate/0000755000175000017500000000000012524223634016246 5ustar tianontianondocker-1.6.2/contrib/syntax/kate/Dockerfile.xml0000644000175000017500000000477012524223634021047 0ustar tianontianon FROM MAINTAINER ENV RUN ONBUILD COPY ADD VOLUME EXPOSE ENTRYPOINT CMD WORKDIR USER LABEL docker-1.6.2/contrib/syntax/vim/0000755000175000017500000000000012524223634016115 5ustar tianontianondocker-1.6.2/contrib/syntax/vim/README.md0000644000175000017500000000065612524223634017403 0ustar tianontianondockerfile.vim ============== Syntax highlighting for Dockerfiles Installation ------------ With [pathogen](https://github.com/tpope/vim-pathogen), the usual way... With [Vundle](https://github.com/gmarik/Vundle.vim) Plugin 'docker/docker' , {'rtp': '/contrib/syntax/vim/'} Features -------- The syntax highlighting includes: * The directives (e.g. `FROM`) * Strings * Comments License ------- BSD, short and sweet docker-1.6.2/contrib/syntax/vim/LICENSE0000644000175000017500000000242212524223634017122 0ustar tianontianonCopyright (c) 2013 Honza Pokorny All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. docker-1.6.2/contrib/syntax/vim/syntax/0000755000175000017500000000000012524223634017443 5ustar tianontianondocker-1.6.2/contrib/syntax/vim/syntax/dockerfile.vim0000644000175000017500000000116112524223634022266 0ustar tianontianon" dockerfile.vim - Syntax highlighting for Dockerfiles " Maintainer: Honza Pokorny " Version: 0.5 if exists("b:current_syntax") finish endif let b:current_syntax = "dockerfile" syntax case ignore syntax match dockerfileKeyword /\v^\s*(ONBUILD\s+)?(ADD|CMD|ENTRYPOINT|ENV|EXPOSE|FROM|MAINTAINER|RUN|USER|LABEL|VOLUME|WORKDIR|COPY)\s/ highlight link dockerfileKeyword Keyword syntax region dockerfileString start=/\v"/ skip=/\v\\./ end=/\v"/ highlight link dockerfileString String syntax match dockerfileComment "\v^\s*#.*$" highlight link dockerfileComment Comment set commentstring=#\ %s docker-1.6.2/contrib/syntax/vim/ftdetect/0000755000175000017500000000000012524223634017717 5ustar tianontianondocker-1.6.2/contrib/syntax/vim/ftdetect/dockerfile.vim0000644000175000017500000000007112524223634022541 0ustar tianontianonau BufNewFile,BufRead Dockerfile set filetype=dockerfile docker-1.6.2/contrib/syntax/vim/doc/0000755000175000017500000000000012524223634016662 5ustar tianontianondocker-1.6.2/contrib/syntax/vim/doc/dockerfile.txt0000644000175000017500000000066412524223634021540 0ustar tianontianon*dockerfile.txt* Syntax highlighting for Dockerfiles Author: Honza Pokorny License: BSD INSTALLATION *installation* Drop it on your Pathogen path and you're all set. FEATURES *features* The syntax highlighting includes: * The directives (e.g. 
FROM) * Strings * Comments vim:tw=78:et:ft=help:norl: docker-1.6.2/contrib/host-integration/0000755000175000017500000000000012524223634017272 5ustar tianontianondocker-1.6.2/contrib/host-integration/manager.go0000644000175000017500000000644312524223634021242 0ustar tianontianonpackage main import ( "bytes" "encoding/json" "flag" "fmt" "github.com/docker/docker" "os" "strings" "text/template" ) var templates = map[string]string{ "upstart": `description "{{.description}}" author "{{.author}}" start on filesystem and started lxc-net and started docker stop on runlevel [!2345] respawn exec /home/vagrant/goroot/bin/docker start -a {{.container_id}} `, "systemd": `[Unit] Description={{.description}} Author={{.author}} After=docker.service [Service] Restart=always ExecStart=/usr/bin/docker start -a {{.container_id}} ExecStop=/usr/bin/docker stop -t 2 {{.container_id}} [Install] WantedBy=local.target `, } func main() { // Parse command line for custom options kind := flag.String("t", "upstart", "Type of manager requested") author := flag.String("a", "", "Author of the image") description := flag.String("d", "", "Description of the image") flag.Usage = func() { fmt.Fprintf(os.Stderr, "\nUsage: manager \n\n") flag.PrintDefaults() } flag.Parse() // We require at least the container ID if flag.NArg() != 1 { println(flag.NArg()) flag.Usage() return } // Check that the requested process manager is supported if _, exists := templates[*kind]; !exists { panic("Unknown script template") } // Load the requested template tpl, err := template.New("processManager").Parse(templates[*kind]) if err != nil { panic(err) } // Create stdout/stderr buffers bufOut := bytes.NewBuffer(nil) bufErr := bytes.NewBuffer(nil) // Instantiate the Docker CLI cli := docker.NewDockerCli(nil, bufOut, bufErr, "unix", "/var/run/docker.sock", false, nil) // Retrieve the container info if err := cli.CmdInspect(flag.Arg(0)); err != nil { // As of docker v0.6.3, CmdInspect always returns nil panic(err) } // If there is nothing in the error buffer, then the Docker daemon is there and the container has been found if bufErr.Len() == 0 { // Unmarshal the resulting container data c := []*docker.Container{{}} if err := json.Unmarshal(bufOut.Bytes(), &c); err != nil { panic(err) } // Reset the buffers bufOut.Reset() bufErr.Reset() // Retrieve the info of the linked image if err := cli.CmdInspect(c[0].Image); err != nil { panic(err) } // If there is nothing in the error buffer, then the image has been found.
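// (An aside, stated as an assumption about the CLI behavior rather than a
// documented guarantee: CmdInspect writes a JSON array of matching objects to
// bufOut, which is why the payloads here are decoded into slices rather than
// single objects.)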
if bufErr.Len() == 0 { // Unmarshal the resulting image data img := []*docker.Image{{}} if err := json.Unmarshal(bufOut.Bytes(), &img); err != nil { panic(err) } // If no author has been set, use the one from the image if *author == "" && img[0].Author != "" { *author = strings.Replace(img[0].Author, "\"", "", -1) } // If no description has been set, use the comment from the image if *description == "" && img[0].Comment != "" { *description = strings.Replace(img[0].Comment, "\"", "", -1) } } } // Old version: Write the resulting script to file // f, err := os.OpenFile(kind, os.O_CREATE|os.O_WRONLY, 0755) // if err != nil { // panic(err) // } // defer f.Close() // Create a map with needed data data := map[string]string{ "author": *author, "description": *description, "container_id": flag.Arg(0), } // Process the template and output it on Stdout if err := tpl.Execute(os.Stdout, data); err != nil { panic(err) } } docker-1.6.2/contrib/host-integration/manager/0000755000175000017500000000000012524223634020704 5ustar tianontianondocker-1.6.2/contrib/host-integration/manager/systemd0000755000175000017500000000040112524223634022315 0ustar tianontianon#!/bin/sh set -e cid="$1" auth="$2" desc="$3" cat <<-EOF [Unit] Description=$desc Author=$auth After=docker.service [Service] ExecStart=/usr/bin/docker start -a $cid ExecStop=/usr/bin/docker stop -t 2 $cid [Install] WantedBy=local.target EOF docker-1.6.2/contrib/host-integration/manager/upstart0000755000175000017500000000044112524223634022333 0ustar tianontianon#!/bin/sh set -e cid="$1" auth="$2" desc="$3" cat <<-EOF description "$(echo "$desc" | sed 's/"/\\"/g')" author "$(echo "$auth" | sed 's/"/\\"/g')" start on filesystem and started lxc-net and started docker stop on runlevel [!2345] respawn exec /usr/bin/docker start -a "$cid" EOF docker-1.6.2/contrib/host-integration/manager.sh0000755000175000017500000000223712524223634021247 0ustar tianontianon#!/bin/sh set -e usage() { echo >&2 "usage: $0 [-a author] [-d description] container [manager]" echo >&2 " ie: $0 -a 'John Smith' 4ec9612a37cd systemd" echo >&2 " ie: $0 -d 'Super Cool System' 4ec9612a37cd # defaults to upstart" exit 1 } auth='' desc='' have_auth= have_desc= while getopts a:d: opt; do case "$opt" in a) auth="$OPTARG" have_auth=1 ;; d) desc="$OPTARG" have_desc=1 ;; esac done shift $(($OPTIND - 1)) [ $# -ge 1 -a $# -le 2 ] || usage cid="$1" script="${2:-upstart}" if [ ! -e "manager/$script" ]; then echo >&2 "Error: manager type '$script' is unknown (PRs always welcome!)." echo >&2 'The currently supported types are:' echo >&2 " $(cd manager && echo *)" exit 1 fi # TODO https://github.com/docker/docker/issues/734 (docker inspect formatting) #if command -v docker > /dev/null 2>&1; then # image="$(docker inspect -f '{{.Image}}' "$cid")" # if [ "$image" ]; then # if [ -z "$have_auth" ]; then # auth="$(docker inspect -f '{{.Author}}' "$image")" # fi # if [ -z "$have_desc" ]; then # desc="$(docker inspect -f '{{.Comment}}' "$image")" # fi # fi #fi exec "manager/$script" "$cid" "$auth" "$desc" docker-1.6.2/contrib/host-integration/Dockerfile.dev0000644000175000017500000000131612524223634022042 0ustar tianontianon# # This Dockerfile will create an image that allows you to generate upstart and # systemd scripts (more to come) # FROM ubuntu:12.10 MAINTAINER Guillaume J.
Charmes RUN apt-get update && apt-get install -y wget git mercurial # Install Go RUN wget --no-check-certificate https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz -O go-1.1.2.tar.gz RUN tar -xzvf go-1.1.2.tar.gz && mv /go /goroot RUN mkdir /go ENV GOROOT /goroot ENV GOPATH /go ENV PATH $GOROOT/bin:$PATH RUN go get github.com/docker/docker && cd /go/src/github.com/docker/docker && git checkout v0.6.3 ADD manager.go /manager/ RUN cd /manager && go build -o /usr/bin/manager ENTRYPOINT ["/usr/bin/manager"] docker-1.6.2/contrib/host-integration/Dockerfile.min0000644000175000017500000000017612524223634022052 0ustar tianontianonFROM busybox MAINTAINER Guillaume J. Charmes ADD manager /usr/bin/ ENTRYPOINT ["/usr/bin/manager"] docker-1.6.2/contrib/docker-device-tool/0000755000175000017500000000000012524223634017453 5ustar tianontianondocker-1.6.2/contrib/docker-device-tool/device_tool.go0000644000175000017500000000737112524223634022306 0ustar tianontianonpackage main import ( "flag" "fmt" "os" "path" "sort" "strconv" "strings" log "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver/devmapper" "github.com/docker/docker/pkg/devicemapper" ) func usage() { fmt.Fprintf(os.Stderr, "Usage: %s [status] | [list] | [device id] | [resize new-pool-size] | [snap new-id base-id] | [remove id] | [mount id mountpoint]\n", os.Args[0]) flag.PrintDefaults() os.Exit(1) } func byteSizeFromString(arg string) (int64, error) { digits := "" rest := "" last := strings.LastIndexAny(arg, "0123456789") if last >= 0 { digits = arg[:last+1] rest = arg[last+1:] } val, err := strconv.ParseInt(digits, 10, 64) if err != nil { return val, err } rest = strings.ToLower(strings.TrimSpace(rest)) var multiplier int64 = 1 switch rest { case "": multiplier = 1 case "k", "kb": multiplier = 1024 case "m", "mb": multiplier = 1024 * 1024 case "g", "gb": multiplier = 1024 * 1024 * 1024 case "t", "tb": multiplier = 1024 * 1024 * 1024 * 1024 default: return 0, fmt.Errorf("Unknown size unit: %s", rest) } return val * multiplier, nil } func main() { root := flag.String("r", "/var/lib/docker", "Docker root dir") flDebug := flag.Bool("D", false, "Debug mode") flag.Parse() if *flDebug { os.Setenv("DEBUG", "1") log.SetLevel(log.DebugLevel) } if flag.NArg() < 1 { usage() } args := flag.Args() home := path.Join(*root, "devicemapper") devices, err := devmapper.NewDeviceSet(home, false, nil) if err != nil { fmt.Println("Can't initialize device mapper: ", err) os.Exit(1) } switch args[0] { case "status": status := devices.Status() fmt.Printf("Pool name: %s\n", status.PoolName) fmt.Printf("Data Loopback file: %s\n", status.DataLoopback) fmt.Printf("Metadata Loopback file: %s\n", status.MetadataLoopback) fmt.Printf("Sector size: %d\n", status.SectorSize) fmt.Printf("Data use: %d of %d (%.1f %%)\n", status.Data.Used, status.Data.Total, 100.0*float64(status.Data.Used)/float64(status.Data.Total)) fmt.Printf("Metadata use: %d of %d (%.1f %%)\n", status.Metadata.Used, status.Metadata.Total, 100.0*float64(status.Metadata.Used)/float64(status.Metadata.Total)) break case "list": ids := devices.List() sort.Strings(ids) for _, id := range ids { fmt.Println(id) } break case "device": if flag.NArg() < 2 { usage() } status, err := devices.GetDeviceStatus(args[1]) if err != nil { fmt.Println("Can't get device info: ", err) os.Exit(1) } fmt.Printf("Id: %d\n", status.DeviceId) fmt.Printf("Size: %d\n", status.Size) fmt.Printf("Transaction Id: %d\n", status.TransactionId) fmt.Printf("Size in Sectors: %d\n", status.SizeInSectors) fmt.Printf("Mapped 
Sectors: %d\n", status.MappedSectors) fmt.Printf("Highest Mapped Sector: %d\n", status.HighestMappedSector) break case "resize": if flag.NArg() < 2 { usage() } size, err := byteSizeFromString(args[1]) if err != nil { fmt.Println("Invalid size: ", err) os.Exit(1) } err = devices.ResizePool(size) if err != nil { fmt.Println("Error resizeing pool: ", err) os.Exit(1) } break case "snap": if flag.NArg() < 3 { usage() } err := devices.AddDevice(args[1], args[2]) if err != nil { fmt.Println("Can't create snap device: ", err) os.Exit(1) } break case "remove": if flag.NArg() < 2 { usage() } err := devicemapper.RemoveDevice(args[1]) if err != nil { fmt.Println("Can't remove device: ", err) os.Exit(1) } break case "mount": if flag.NArg() < 3 { usage() } err := devices.MountDevice(args[1], args[2], "") if err != nil { fmt.Println("Can't create snap device: ", err) os.Exit(1) } break default: fmt.Printf("Unknown command %s\n", args[0]) usage() os.Exit(1) } return } docker-1.6.2/contrib/mkimage-alpine.sh0000755000175000017500000000256512524223634017223 0ustar tianontianon#!/bin/sh set -e [ $(id -u) -eq 0 ] || { printf >&2 '%s requires root\n' "$0" exit 1 } usage() { printf >&2 '%s: [-r release] [-m mirror] [-s]\n' "$0" exit 1 } tmp() { TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-XXXXXXXXXX) ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-rootfs-XXXXXXXXXX) trap "rm -rf $TMP $ROOTFS" EXIT TERM INT } apkv() { curl -sSL $REPO/$ARCH/APKINDEX.tar.gz | tar -Oxz | grep '^P:apk-tools-static$' -A1 | tail -n1 | cut -d: -f2 } getapk() { curl -sSL $REPO/$ARCH/apk-tools-static-$(apkv).apk | tar -xz -C $TMP sbin/apk.static } mkbase() { $TMP/sbin/apk.static --repository $REPO --update-cache --allow-untrusted \ --root $ROOTFS --initdb add alpine-base } conf() { printf '%s\n' $REPO > $ROOTFS/etc/apk/repositories } pack() { local id id=$(tar --numeric-owner -C $ROOTFS -c . | docker import - alpine:$REL) docker tag $id alpine:latest docker run -i -t alpine printf 'alpine:%s with id=%s created!\n' $REL $id } save() { [ $SAVE -eq 1 ] || return tar --numeric-owner -C $ROOTFS -c . | xz > rootfs.tar.xz } while getopts "hr:m:s" opt; do case $opt in r) REL=$OPTARG ;; m) MIRROR=$OPTARG ;; s) SAVE=1 ;; *) usage ;; esac done REL=${REL:-edge} MIRROR=${MIRROR:-http://nl.alpinelinux.org/alpine} SAVE=${SAVE:-0} REPO=$MIRROR/$REL/main ARCH=${ARCH:-$(uname -m)} tmp getapk mkbase conf pack save docker-1.6.2/contrib/check-config.sh0000755000175000017500000001076212524223634016661 0ustar tianontianon#!/usr/bin/env bash set -e # bits of this were adapted from lxc-checkconfig # see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in possibleConfigs=( '/proc/config.gz' "/boot/config-$(uname -r)" "/usr/src/linux-$(uname -r)/.config" '/usr/src/linux/.config' ) if [ $# -gt 0 ]; then CONFIG="$1" else : ${CONFIG:="${possibleConfigs[0]}"} fi if ! 
command -v zgrep &> /dev/null; then zgrep() { zcat "$2" | grep "$1" } fi is_set() { zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null } # see http://en.wikipedia.org/wiki/ANSI_escape_code#Colors declare -A colors=( [black]=30 [red]=31 [green]=32 [yellow]=33 [blue]=34 [magenta]=35 [cyan]=36 [white]=37 ) color() { color=() if [ "$1" = 'bold' ]; then color+=( '1' ) shift fi if [ $# -gt 0 ] && [ "${colors[$1]}" ]; then color+=( "${colors[$1]}" ) fi local IFS=';' echo -en '\033['"${color[*]}"m } wrap_color() { text="$1" shift color "$@" echo -n "$text" color reset echo } wrap_good() { echo "$(wrap_color "$1" white): $(wrap_color "$2" green)" } wrap_bad() { echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)" } wrap_warning() { wrap_color >&2 "$*" red } check_flag() { if is_set "$1"; then wrap_good "CONFIG_$1" 'enabled' else wrap_bad "CONFIG_$1" 'missing' fi } check_flags() { for flag in "$@"; do echo "- $(check_flag "$flag")" done } if [ ! -e "$CONFIG" ]; then wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config..." for tryConfig in "${possibleConfigs[@]}"; do if [ -e "$tryConfig" ]; then CONFIG="$tryConfig" break fi done if [ ! -e "$CONFIG" ]; then wrap_warning "error: cannot find kernel config" wrap_warning " try running this script again, specifying the kernel config:" wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config" exit 1 fi fi wrap_color "info: reading kernel config from $CONFIG ..." white echo echo 'Generally Necessary:' echo -n '- ' cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)" cgroupDir="$(dirname "$cgroupSubsystemDir")" if [ -d "$cgroupDir/cpu" -o -d "$cgroupDir/cpuacct" -o -d "$cgroupDir/cpuset" -o -d "$cgroupDir/devices" -o -d "$cgroupDir/freezer" -o -d "$cgroupDir/memory" ]; then echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]" else if [ "$cgroupSubsystemDir" ]; then echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]" else echo "$(wrap_bad 'cgroup hierarchy' 'nonexistent??')" fi echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)" fi if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then echo -n '- ' if command -v apparmor_parser &> /dev/null; then echo "$(wrap_good 'apparmor' 'enabled and tools installed')" else echo "$(wrap_bad 'apparmor' 'enabled, but apparmor_parser missing')" echo -n ' ' if command -v apt-get &> /dev/null; then echo "$(wrap_color '(use "apt-get install apparmor" to fix this)')" elif command -v yum &> /dev/null; then echo "$(wrap_color '(your best bet is "yum install apparmor-parser")')" else echo "$(wrap_color '(look for an "apparmor" package for your distribution)')" fi fi fi flags=( NAMESPACES {NET,PID,IPC,UTS}_NS DEVPTS_MULTIPLE_INSTANCES CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MACVLAN VETH BRIDGE NF_NAT_IPV4 IP_NF_FILTER IP_NF_TARGET_MASQUERADE NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK} NF_NAT NF_NAT_NEEDED # required for bind-mounting /dev/mqueue into containers POSIX_MQUEUE ) check_flags "${flags[@]}" echo echo 'Optional Features:' flags=( MEMCG_SWAP RESOURCE_COUNTERS CGROUP_PERF ) check_flags "${flags[@]}" echo '- Storage Drivers:' { echo '- "'$(wrap_color 'aufs' blue)'":' check_flags AUFS_FS | sed 's/^/ /' if ! 
is_set AUFS_FS && grep -q aufs /proc/filesystems; then echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)" fi check_flags EXT4_FS_POSIX_ACL EXT4_FS_SECURITY | sed 's/^/ /' echo '- "'$(wrap_color 'btrfs' blue)'":' check_flags BTRFS_FS | sed 's/^/ /' echo '- "'$(wrap_color 'devicemapper' blue)'":' check_flags BLK_DEV_DM DM_THIN_PROVISIONING EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY | sed 's/^/ /' echo '- "'$(wrap_color 'overlay' blue)'":' check_flags OVERLAY_FS EXT4_FS_SECURITY EXT4_FS_POSIX_ACL | sed 's/^/ /' } | sed 's/^/ /' echo #echo 'Potential Future Features:' #check_flags USER_NS #echo docker-1.6.2/contrib/mkimage-arch.sh0000755000175000017500000000442312524223634016663 0ustar tianontianon#!/usr/bin/env bash # Generate a minimal filesystem for archlinux and load it into the local # docker as "archlinux" # requires root set -e hash pacstrap &>/dev/null || { echo "Could not find pacstrap. Run pacman -S arch-install-scripts" exit 1 } hash expect &>/dev/null || { echo "Could not find expect. Run pacman -S expect" exit 1 } ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-archlinux-XXXXXXXXXX) chmod 755 $ROOTFS # packages to ignore for space savings PKGIGNORE=( cryptsetup device-mapper dhcpcd iproute2 jfsutils linux lvm2 man-db man-pages mdadm nano netctl openresolv pciutils pcmciautils reiserfsprogs s-nail systemd-sysvcompat usbutils vi xfsprogs ) IFS=',' PKGIGNORE="${PKGIGNORE[*]}" unset IFS expect < $ROOTFS/etc/locale.gen arch-chroot $ROOTFS locale-gen arch-chroot $ROOTFS /bin/sh -c 'echo "Server = https://mirrors.kernel.org/archlinux/\$repo/os/\$arch" > /etc/pacman.d/mirrorlist' # udev doesn't work in containers, rebuild /dev DEV=$ROOTFS/dev rm -rf $DEV mkdir -p $DEV mknod -m 666 $DEV/null c 1 3 mknod -m 666 $DEV/zero c 1 5 mknod -m 666 $DEV/random c 1 8 mknod -m 666 $DEV/urandom c 1 9 mkdir -m 755 $DEV/pts mkdir -m 1777 $DEV/shm mknod -m 666 $DEV/tty c 5 0 mknod -m 600 $DEV/console c 5 1 mknod -m 666 $DEV/tty0 c 4 0 mknod -m 666 $DEV/full c 1 7 mknod -m 600 $DEV/initctl p mknod -m 666 $DEV/ptmx c 5 2 ln -sf /proc/self/fd $DEV/fd tar --numeric-owner --xattrs --acls -C $ROOTFS -c . | docker import - archlinux docker run -t archlinux echo Success. rm -rf $ROOTFS docker-1.6.2/contrib/mkimage.sh0000755000175000017500000000544512524223634015755 0ustar tianontianon#!/usr/bin/env bash set -e mkimg="$(basename "$0")" usage() { echo >&2 "usage: $mkimg [-d dir] [-t tag] script [script-args]" echo >&2 " ie: $mkimg -t someuser/debian debootstrap --variant=minbase jessie" echo >&2 " $mkimg -t someuser/ubuntu debootstrap --include=ubuntu-minimal --components=main,universe trusty" echo >&2 " $mkimg -t someuser/busybox busybox-static" echo >&2 " $mkimg -t someuser/centos:5 rinse --distribution centos-5" echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4" echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4 --mirror=http://somemirror/" exit 1 } scriptDir="$(dirname "$(readlink -f "$BASH_SOURCE")")/mkimage" optTemp=$(getopt --options '+d:t:h' --longoptions 'dir:,tag:,help' --name "$mkimg" -- "$@") eval set -- "$optTemp" unset optTemp dir= tag= while true; do case "$1" in -d|--dir) dir="$2" ; shift 2 ;; -t|--tag) tag="$2" ; shift 2 ;; -h|--help) usage ;; --) shift ; break ;; esac done script="$1" [ "$script" ] || usage shift if [ ! 
-x "$scriptDir/$script" ]; then echo >&2 "error: $script does not exist or is not executable" echo >&2 " see $scriptDir for possible scripts" exit 1 fi # don't mistake common scripts like .febootstrap-minimize as image-creators if [[ "$script" == .* ]]; then echo >&2 "error: $script is a script helper, not a script" echo >&2 " see $scriptDir for possible scripts" exit 1 fi delDir= if [ -z "$dir" ]; then dir="$(mktemp -d ${TMPDIR:-/var/tmp}/docker-mkimage.XXXXXXXXXX)" delDir=1 fi rootfsDir="$dir/rootfs" ( set -x; mkdir -p "$rootfsDir" ) # pass all remaining arguments to $script "$scriptDir/$script" "$rootfsDir" "$@" # Docker mounts tmpfs at /dev and procfs at /proc so we can remove them rm -rf "$rootfsDir/dev" "$rootfsDir/proc" mkdir -p "$rootfsDir/dev" "$rootfsDir/proc" # make sure /etc/resolv.conf has something useful in it mkdir -p "$rootfsDir/etc" cat > "$rootfsDir/etc/resolv.conf" <<'EOF' nameserver 8.8.8.8 nameserver 8.8.4.4 EOF tarFile="$dir/rootfs.tar.xz" touch "$tarFile" ( set -x tar --numeric-owner -caf "$tarFile" -C "$rootfsDir" --transform='s,^./,,' . ) echo >&2 "+ cat > '$dir/Dockerfile'" cat > "$dir/Dockerfile" <<'EOF' FROM scratch ADD rootfs.tar.xz / EOF # if our generated image has a decent shell, let's set a default command for shell in /bin/bash /usr/bin/fish /usr/bin/zsh /bin/sh; do if [ -x "$rootfsDir/$shell" ]; then ( set -x; echo 'CMD ["'"$shell"'"]' >> "$dir/Dockerfile" ) break fi done ( set -x; rm -rf "$rootfsDir" ) if [ "$tag" ]; then ( set -x; docker build -t "$tag" "$dir" ) elif [ "$delDir" ]; then # if we didn't specify a tag and we're going to delete our dir, let's just build an untagged image so that we did _something_ ( set -x; docker build "$dir" ) fi if [ "$delDir" ]; then ( set -x; rm -rf "$dir" ) fi docker-1.6.2/contrib/report-issue.sh0000644000175000017500000000377412524223634017004 0ustar tianontianon#!/bin/sh # This is a convenience script for reporting issues that include a base # template of information. See https://github.com/docker/docker/pull/8845 set -e DOCKER_ISSUE_URL=${DOCKER_ISSUE_URL:-"https://github.com/docker/docker/issues/new"} DOCKER_ISSUE_NAME_PREFIX=${DOCKER_ISSUE_NAME_PREFIX:-"Report: "} DOCKER=${DOCKER:-"docker"} DOCKER_COMMAND="${DOCKER}" export DOCKER_COMMAND # pulled from https://gist.github.com/cdown/1163649 function urlencode() { # urlencode local length="${#1}" for (( i = 0; i < length; i++ )); do local c="${1:i:1}" case $c in [a-zA-Z0-9.~_-]) printf "$c" ;; *) printf '%%%02X' "'$c" esac done } function template() { # this should always match the template from CONTRIBUTING.md cat <<- EOM Description of problem: \`docker version\`: `${DOCKER_COMMAND} -D version` \`docker info\`: `${DOCKER_COMMAND} -D info` \`uname -a\`: `uname -a` Environment details (AWS, VirtualBox, physical, etc.): How reproducible: Steps to Reproduce: 1. 2. 3. Actual Results: Expected Results: Additional info: EOM } function format_issue_url() { if [ ${#@} -ne 2 ] ; then return 1 fi local issue_name=$(urlencode "${DOCKER_ISSUE_NAME_PREFIX}${1}") local issue_body=$(urlencode "${2}") echo "${DOCKER_ISSUE_URL}?title=${issue_name}&body=${issue_body}" } echo -ne "Do you use \`sudo\` to call docker? [y|N]: " read -r -n 1 use_sudo echo "" if [ "x${use_sudo}" = "xy" -o "x${use_sudo}" = "xY" ]; then export DOCKER_COMMAND="sudo ${DOCKER}" fi echo -ne "Title of new issue?: " read -r issue_title echo "" issue_url=$(format_issue_url "${issue_title}" "$(template)") if which xdg-open 2>/dev/null >/dev/null ; then echo -ne "Would like to launch this report in your browser? 
[Y|n]: " read -r -n 1 launch_now echo "" if [ "${launch_now}" != "n" -a "${launch_now}" != "N" ]; then xdg-open "${issue_url}" fi fi echo "If you would like to manually open the url, you can open this link if your browser: ${issue_url}" docker-1.6.2/contrib/init/0000755000175000017500000000000012524223634014737 5ustar tianontianondocker-1.6.2/contrib/init/sysvinit-debian/0000755000175000017500000000000012524223634020047 5ustar tianontianondocker-1.6.2/contrib/init/sysvinit-debian/docker.default0000644000175000017500000000075512524223634022673 0ustar tianontianon# Docker Upstart and SysVinit configuration file # Customize location of Docker binary (especially for development testing). #DOCKER="/usr/local/bin/docker" # Use DOCKER_OPTS to modify the daemon startup options. #DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4" # If you need Docker to use an HTTP proxy, it can also be specified here. #export http_proxy="http://127.0.0.1:3128/" # This is also a handy place to tweak where Docker's temporary files go. #export TMPDIR="/mnt/bigdrive/docker-tmp" docker-1.6.2/contrib/init/sysvinit-debian/docker0000755000175000017500000000653512524223634021255 0ustar tianontianon#!/bin/sh set -e ### BEGIN INIT INFO # Provides: docker # Required-Start: $syslog $remote_fs # Required-Stop: $syslog $remote_fs # Should-Start: cgroupfs-mount cgroup-lite # Should-Stop: cgroupfs-mount cgroup-lite # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Create lightweight, portable, self-sufficient containers. # Description: # Docker is an open-source project to easily create lightweight, portable, # self-sufficient containers from any application. The same container that a # developer builds and tests on a laptop can run at scale, in production, on # VMs, bare metal, OpenStack clusters, public clouds and more. ### END INIT INFO export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin BASE=$(basename $0) # modify these in /etc/default/$BASE (/etc/default/docker) DOCKER=/usr/bin/$BASE # This is the pid file managed by docker itself DOCKER_PIDFILE=/var/run/$BASE.pid # This is the pid file created/managed by start-stop-daemon DOCKER_SSD_PIDFILE=/var/run/$BASE-ssd.pid DOCKER_LOGFILE=/var/log/$BASE.log DOCKER_OPTS= DOCKER_DESC="Docker" # Get lsb functions . /lib/lsb/init-functions if [ -f /etc/default/$BASE ]; then . /etc/default/$BASE fi # see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it) if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then log_failure_msg "$DOCKER_DESC is managed via upstart, try using service $BASE $1" exit 1 fi # Check docker is present if [ ! -x $DOCKER ]; then log_failure_msg "$DOCKER not present or not executable" exit 1 fi fail_unless_root() { if [ "$(id -u)" != '0' ]; then log_failure_msg "$DOCKER_DESC must be run as root" exit 1 fi } cgroupfs_mount() { # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount if grep -v '^#' /etc/fstab | grep -q cgroup \ || [ ! -e /proc/cgroups ] \ || [ ! -d /sys/fs/cgroup ]; then return fi if ! mountpoint -q /sys/fs/cgroup; then mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup fi ( cd /sys/fs/cgroup for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do mkdir -p $sys if ! mountpoint -q $sys; then if ! 
mount -n -t cgroup -o $sys cgroup $sys; then rmdir $sys || true fi fi done ) } case "$1" in start) fail_unless_root cgroupfs_mount touch "$DOCKER_LOGFILE" chgrp docker "$DOCKER_LOGFILE" ulimit -n 1048576 if [ "$BASH" ]; then ulimit -u 1048576 else ulimit -p 1048576 fi log_begin_msg "Starting $DOCKER_DESC: $BASE" start-stop-daemon --start --background \ --no-close \ --exec "$DOCKER" \ --pidfile "$DOCKER_SSD_PIDFILE" \ --make-pidfile \ -- \ -d -p "$DOCKER_PIDFILE" \ $DOCKER_OPTS \ >> "$DOCKER_LOGFILE" 2>&1 log_end_msg $? ;; stop) fail_unless_root log_begin_msg "Stopping $DOCKER_DESC: $BASE" start-stop-daemon --stop --pidfile "$DOCKER_SSD_PIDFILE" log_end_msg $? ;; restart) fail_unless_root docker_pid=`cat "$DOCKER_SSD_PIDFILE" 2>/dev/null` [ -n "$docker_pid" ] \ && ps -p $docker_pid > /dev/null 2>&1 \ && $0 stop $0 start ;; force-reload) fail_unless_root $0 restart ;; status) status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKER" "$DOCKER_DESC" ;; *) echo "Usage: $0 {start|stop|restart|status}" exit 1 ;; esac docker-1.6.2/contrib/init/openrc/0000755000175000017500000000000012524223634016225 5ustar tianontianondocker-1.6.2/contrib/init/openrc/docker.confd0000644000175000017500000000054412524223634020512 0ustar tianontianon# /etc/conf.d/docker: config file for /etc/init.d/docker # where the docker daemon output gets piped #DOCKER_LOGFILE="/var/log/docker.log" # where docker's pid get stored #DOCKER_PIDFILE="/run/docker.pid" # where the docker daemon itself is run from #DOCKER_BINARY="/usr/bin/docker" # any other random options you want to pass to docker DOCKER_OPTS="" docker-1.6.2/contrib/init/openrc/docker.initd0000755000175000017500000000147412524223634020536 0ustar tianontianon#!/sbin/runscript # Copyright 1999-2013 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 # $Header: $ DOCKER_LOGFILE=${DOCKER_LOGFILE:-/var/log/${SVCNAME}.log} DOCKER_PIDFILE=${DOCKER_PIDFILE:-/run/${SVCNAME}.pid} DOCKER_BINARY=${DOCKER_BINARY:-/usr/bin/docker} DOCKER_OPTS=${DOCKER_OPTS:-} start() { checkpath -f -m 0644 -o root:docker "$DOCKER_LOGFILE" ulimit -n 1048576 ulimit -u 1048576 ebegin "Starting docker daemon" start-stop-daemon --start --background \ --exec "$DOCKER_BINARY" \ --pidfile "$DOCKER_PIDFILE" \ --stdout "$DOCKER_LOGFILE" \ --stderr "$DOCKER_LOGFILE" \ -- -d -p "$DOCKER_PIDFILE" \ $DOCKER_OPTS eend $? } stop() { ebegin "Stopping docker daemon" start-stop-daemon --stop \ --exec "$DOCKER_BINARY" \ --pidfile "$DOCKER_PIDFILE" eend $? 
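	# Passing both --exec and --pidfile above means start-stop-daemon only
	# signals a process whose binary matches $DOCKER_BINARY, so a stale pidfile
	# pointing at a recycled PID should not kill an unrelated process.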
} docker-1.6.2/contrib/init/systemd/0000755000175000017500000000000012524223634016427 5ustar tianontianondocker-1.6.2/contrib/init/systemd/REVIEWERS0000644000175000017500000000023112524223634017721 0ustar tianontianonLokesh Mandvekar (@lsm5) Brandon Philips (@philips) Jessie Frazelle (@jfrazelle) docker-1.6.2/contrib/init/systemd/docker.service0000644000175000017500000000047012524223634021261 0ustar tianontianon[Unit] Description=Docker Application Container Engine Documentation=http://docs.docker.com After=network.target docker.socket Requires=docker.socket [Service] ExecStart=/usr/bin/docker -d -H fd:// MountFlags=slave LimitNOFILE=1048576 LimitNPROC=1048576 LimitCORE=infinity [Install] WantedBy=multi-user.target docker-1.6.2/contrib/init/systemd/docker.socket0000644000175000017500000000030512524223634021106 0ustar tianontianon[Unit] Description=Docker Socket for the API PartOf=docker.service [Socket] ListenStream=/var/run/docker.sock SocketMode=0660 SocketUser=root SocketGroup=docker [Install] WantedBy=sockets.target docker-1.6.2/contrib/init/sysvinit-redhat/0000755000175000017500000000000012524223634020074 5ustar tianontianondocker-1.6.2/contrib/init/sysvinit-redhat/docker.sysconfig0000644000175000017500000000031012524223634023263 0ustar tianontianon# /etc/sysconfig/docker # # Other arguments to pass to the docker daemon process # These will be parsed by the sysv initscript and appended # to the arguments list passed to docker -d other_args="" docker-1.6.2/contrib/init/sysvinit-redhat/docker0000755000175000017500000000471212524223634021275 0ustar tianontianon#!/bin/sh # # /etc/rc.d/init.d/docker # # Daemon for docker.com # # chkconfig: 2345 95 95 # description: Daemon for docker.com ### BEGIN INIT INFO # Provides: docker # Required-Start: $network cgconfig # Required-Stop: # Should-Start: # Should-Stop: # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: start and stop docker # Description: Daemon for docker.com ### END INIT INFO # Source function library. . /etc/rc.d/init.d/functions prog="docker" unshare=/usr/bin/unshare exec="/usr/bin/$prog" pidfile="/var/run/$prog.pid" lockfile="/var/lock/subsys/$prog" logfile="/var/log/$prog" [ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog prestart() { service cgconfig status > /dev/null if [[ $? != 0 ]]; then service cgconfig start fi } start() { [ -x $exec ] || exit 5 check_for_cleanup if ! [ -f $pidfile ]; then prestart printf "Starting $prog:\t" echo "\n$(date)\n" >> $logfile "$unshare" -m -- $exec -d $other_args &>> $logfile & pid=$! touch $lockfile # wait up to 10 seconds for the pidfile to exist. see # https://github.com/docker/docker/issues/5359 tries=0 while [ ! -f $pidfile -a $tries -lt 10 ]; do sleep 1 tries=$((tries + 1)) done success echo else failure echo printf "$pidfile still exists...\n" exit 7 fi } stop() { echo -n $"Stopping $prog: " killproc -p $pidfile -d 300 $prog retval=$? 
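	# (Assuming the standard Red Hat killproc semantics: "-d 300" above waits
	# up to 300 seconds after SIGTERM before escalating to SIGKILL, giving
	# running containers time to shut down cleanly.)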
echo [ $retval -eq 0 ] && rm -f $lockfile return $retval } restart() { stop start } reload() { restart } force_reload() { restart } rh_status() { status -p $pidfile $prog } rh_status_q() { rh_status >/dev/null 2>&1 } check_for_cleanup() { if [ -f ${pidfile} ]; then /bin/ps -fp $(cat ${pidfile}) > /dev/null || rm ${pidfile} fi } case "$1" in start) rh_status_q && exit 0 $1 ;; stop) rh_status_q || exit 0 $1 ;; restart) $1 ;; reload) rh_status_q || exit 7 $1 ;; force-reload) force_reload ;; status) rh_status ;; condrestart|try-restart) rh_status_q || exit 0 restart ;; *) echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" exit 2 esac exit $? docker-1.6.2/contrib/init/upstart/0000755000175000017500000000000012524223634016441 5ustar tianontianondocker-1.6.2/contrib/init/upstart/REVIEWERS0000644000175000017500000000013412524223634017735 0ustar tianontianonTianon Gravi (@tianon) Jessie Frazelle (@jfrazelle) docker-1.6.2/contrib/init/upstart/docker.conf0000644000175000017500000000275012524223634020563 0ustar tianontianondescription "Docker daemon" start on (local-filesystems and net-device-up IFACE!=lo) stop on runlevel [!2345] limit nofile 524288 1048576 limit nproc 524288 1048576 respawn pre-start script # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount if grep -v '^#' /etc/fstab | grep -q cgroup \ || [ ! -e /proc/cgroups ] \ || [ ! -d /sys/fs/cgroup ]; then exit 0 fi if ! mountpoint -q /sys/fs/cgroup; then mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup fi ( cd /sys/fs/cgroup for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do mkdir -p $sys if ! mountpoint -q $sys; then if ! mount -n -t cgroup -o $sys cgroup $sys; then rmdir $sys || true fi fi done ) end script script # modify these in /etc/default/$UPSTART_JOB (/etc/default/docker) DOCKER=/usr/bin/$UPSTART_JOB DOCKER_OPTS= if [ -f /etc/default/$UPSTART_JOB ]; then . /etc/default/$UPSTART_JOB fi exec "$DOCKER" -d $DOCKER_OPTS end script # Don't emit "started" event until docker.sock is ready. # See https://github.com/docker/docker/issues/6647 post-start script DOCKER_OPTS= if [ -f /etc/default/$UPSTART_JOB ]; then . /etc/default/$UPSTART_JOB fi if ! printf "%s" "$DOCKER_OPTS" | grep -qE -e '-H|--host'; then while ! [ -e /var/run/docker.sock ]; do initctl status $UPSTART_JOB | grep -qE "(stop|respawn)/" && exit 1 echo "Waiting for /var/run/docker.sock" sleep 0.1 done echo "/var/run/docker.sock is up" fi end script docker-1.6.2/contrib/mkimage-unittest.sh0000755000175000017500000000257112524223634017627 0ustar tianontianon#!/usr/bin/env bash # Generate a very minimal filesystem based on busybox-static, # and load it into the local docker under the name "docker-ut". missing_pkg() { echo "Sorry, I could not locate $1" echo "Try 'apt-get install ${2:-$1}'?" 
exit 1 } BUSYBOX=$(which busybox) [ "$BUSYBOX" ] || missing_pkg busybox busybox-static SOCAT=$(which socat) [ "$SOCAT" ] || missing_pkg socat shopt -s extglob set -ex ROOTFS=`mktemp -d ${TMPDIR:-/var/tmp}/rootfs-busybox.XXXXXXXXXX` trap "rm -rf $ROOTFS" INT QUIT TERM cd $ROOTFS mkdir bin etc dev dev/pts lib proc sys tmp touch etc/resolv.conf cp /etc/nsswitch.conf etc/nsswitch.conf echo root:x:0:0:root:/:/bin/sh > etc/passwd echo daemon:x:1:1:daemon:/usr/sbin:/bin/sh >> etc/passwd echo root:x:0: > etc/group echo daemon:x:1: >> etc/group ln -s lib lib64 ln -s bin sbin cp $BUSYBOX $SOCAT bin for X in $(busybox --list) do ln -s busybox bin/$X done rm bin/init ln bin/busybox bin/init cp -P /lib/x86_64-linux-gnu/lib{pthread*,c*(-*),dl*(-*),nsl*(-*),nss_*,util*(-*),wrap,z}.so* lib cp /lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 lib cp -P /usr/lib/x86_64-linux-gnu/lib{crypto,ssl}.so* lib for X in console null ptmx random stdin stdout stderr tty urandom zero do cp -a /dev/$X dev done chmod 0755 $ROOTFS # See #486 tar --numeric-owner -cf- . | docker import - docker-ut docker run -i -u root docker-ut /bin/echo Success. rm -rf $ROOTFS docker-1.6.2/contrib/udev/0000755000175000017500000000000012524223634014737 5ustar tianontianondocker-1.6.2/contrib/udev/80-docker.rules0000644000175000017500000000052712524223634017513 0ustar tianontianon# hide docker's loopback devices from udisks, and thus from user desktops SUBSYSTEM=="block", ENV{DM_NAME}=="docker-*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1" SUBSYSTEM=="block", DEVPATH=="/devices/virtual/block/loop*", ATTR{loop/backing_file}=="/var/lib/docker/*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1" docker-1.6.2/contrib/mkimage-rinse.sh0000755000175000017500000000662112524223634017070 0ustar tianontianon#!/usr/bin/env bash # # Create a base CentOS Docker image. # This script is useful on systems with rinse available (e.g., # building a CentOS image on Debian). See contrib/mkimage-yum.sh for # a way to build CentOS images on systems with yum installed. set -e echo >&2 echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/rinse' echo >&2 repo="$1" distro="$2" mirror="$3" if [ ! "$repo" ] || [ ! "$distro" ]; then self="$(basename $0)" echo >&2 "usage: $self repo distro [mirror]" echo >&2 echo >&2 " ie: $self username/centos centos-5" echo >&2 " $self username/centos centos-6" echo >&2 echo >&2 " ie: $self username/slc slc-5" echo >&2 " $self username/slc slc-6" echo >&2 echo >&2 " ie: $self username/centos centos-5 http://vault.centos.org/5.8/os/x86_64/CentOS/" echo >&2 " $self username/centos centos-6 http://vault.centos.org/6.3/os/x86_64/Packages/" echo >&2 echo >&2 'See /etc/rinse for supported values of "distro" and for examples of' echo >&2 ' expected values of "mirror".' echo >&2 echo >&2 'This script is tested to work with the original upstream version of rinse,' echo >&2 ' found at http://www.steve.org.uk/Software/rinse/ and also in Debian at' echo >&2 ' http://packages.debian.org/wheezy/rinse -- as always, YMMV.' 
echo >&2 exit 1 fi target="${TMPDIR:-/var/tmp}/docker-rootfs-rinse-$distro-$$-$RANDOM" cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" returnTo="$(pwd -P)" rinseArgs=( --arch amd64 --distribution "$distro" --directory "$target" ) if [ "$mirror" ]; then rinseArgs+=( --mirror "$mirror" ) fi set -x mkdir -p "$target" sudo rinse "${rinseArgs[@]}" cd "$target" # rinse fails a little at setting up /dev, so we'll just wipe it out and create our own sudo rm -rf dev sudo mkdir -m 755 dev ( cd dev sudo ln -sf /proc/self/fd ./ sudo mkdir -m 755 pts sudo mkdir -m 1777 shm sudo mknod -m 600 console c 5 1 sudo mknod -m 600 initctl p sudo mknod -m 666 full c 1 7 sudo mknod -m 666 null c 1 3 sudo mknod -m 666 ptmx c 5 2 sudo mknod -m 666 random c 1 8 sudo mknod -m 666 tty c 5 0 sudo mknod -m 666 tty0 c 4 0 sudo mknod -m 666 urandom c 1 9 sudo mknod -m 666 zero c 1 5 ) # effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target" # locales sudo rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} # docs sudo rm -rf usr/share/{man,doc,info,gnome/help} # cracklib sudo rm -rf usr/share/cracklib # i18n sudo rm -rf usr/share/i18n # yum cache sudo rm -rf var/cache/yum sudo mkdir -p --mode=0755 var/cache/yum # sln sudo rm -rf sbin/sln # ldconfig #sudo rm -rf sbin/ldconfig sudo rm -rf etc/ld.so.cache var/cache/ldconfig sudo mkdir -p --mode=0755 var/cache/ldconfig # allow networking init scripts inside the container to work without extra steps echo 'NETWORKING=yes' | sudo tee etc/sysconfig/network > /dev/null # to restore locales later: # yum reinstall glibc-common version= if [ -r etc/redhat-release ]; then version="$(sed -E 's/^[^0-9.]*([0-9.]+).*$/\1/' etc/redhat-release)" elif [ -r etc/SuSE-release ]; then version="$(awk '/^VERSION/ { print $3 }' etc/SuSE-release)" fi if [ -z "$version" ]; then echo >&2 "warning: cannot autodetect OS version, using $distro as tag" sleep 20 version="$distro" fi sudo tar --numeric-owner -c . | docker import - $repo:$version docker run -i -t $repo:$version echo success cd "$returnTo" sudo rm -rf "$target" docker-1.6.2/contrib/mkimage-yum.sh0000755000175000017500000000522412524223634016560 0ustar tianontianon#!/usr/bin/env bash # # Create a base CentOS Docker image. # # This script is useful on systems with yum installed (e.g., building # a CentOS image on CentOS). See contrib/mkimage-rinse.sh for a way # to build CentOS images on other systems. usage() { cat < OPTIONS: -y The path to the yum config to install packages from. The default is /etc/yum.conf. EOOPTS exit 1 } # option defaults yum_config=/etc/yum.conf while getopts ":y:h" opt; do case $opt in y) yum_config=$OPTARG ;; h) usage ;; \?) 
echo "Invalid option: -$OPTARG" usage ;; esac done shift $((OPTIND - 1)) name=$1 if [[ -z $name ]]; then usage fi #-------------------- target=$(mktemp -d --tmpdir $(basename $0).XXXXXX) set -x mkdir -m 755 "$target"/dev mknod -m 600 "$target"/dev/console c 5 1 mknod -m 600 "$target"/dev/initctl p mknod -m 666 "$target"/dev/full c 1 7 mknod -m 666 "$target"/dev/null c 1 3 mknod -m 666 "$target"/dev/ptmx c 5 2 mknod -m 666 "$target"/dev/random c 1 8 mknod -m 666 "$target"/dev/tty c 5 0 mknod -m 666 "$target"/dev/tty0 c 4 0 mknod -m 666 "$target"/dev/urandom c 1 9 mknod -m 666 "$target"/dev/zero c 1 5 # amazon linux yum will fail without vars set if [ -d /etc/yum/vars ]; then mkdir -p -m 755 "$target"/etc/yum cp -a /etc/yum/vars "$target"/etc/yum/ fi yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \ --setopt=group_package_types=mandatory -y groupinstall Core yum -c "$yum_config" --installroot="$target" -y clean all cat > "$target"/etc/sysconfig/network <&2 "warning: cannot autodetect OS version, using '$name' as tag" version=$name fi tar --numeric-owner -c -C "$target" . | docker import - $name:$version docker run -i -t $name:$version echo success rm -rf "$target" docker-1.6.2/contrib/mkimage-busybox.sh0000755000175000017500000000215112524223634017435 0ustar tianontianon#!/usr/bin/env bash # Generate a very minimal filesystem based on busybox-static, # and load it into the local docker under the name "busybox". echo >&2 echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/busybox-static' echo >&2 BUSYBOX=$(which busybox) [ "$BUSYBOX" ] || { echo "Sorry, I could not locate busybox." echo "Try 'apt-get install busybox-static'?" exit 1 } set -e ROOTFS=${TMPDIR:-/var/tmp}/rootfs-busybox-$$-$RANDOM mkdir $ROOTFS cd $ROOTFS mkdir bin etc dev dev/pts lib proc sys tmp touch etc/resolv.conf cp /etc/nsswitch.conf etc/nsswitch.conf echo root:x:0:0:root:/:/bin/sh > etc/passwd echo root:x:0: > etc/group ln -s lib lib64 ln -s bin sbin cp $BUSYBOX bin for X in $(busybox --list) do ln -s busybox bin/$X done rm bin/init ln bin/busybox bin/init cp /lib/x86_64-linux-gnu/lib{pthread,c,dl,nsl,nss_*}.so.* lib cp /lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 lib for X in console null ptmx random stdin stdout stderr tty urandom zero do cp -a /dev/$X dev done tar --numeric-owner -cf- . | docker import - busybox docker run -i -u root busybox /bin/echo Success. docker-1.6.2/contrib/httpserver/0000755000175000017500000000000012524223634016202 5ustar tianontianondocker-1.6.2/contrib/httpserver/Dockerfile0000644000175000017500000000010212524223634020165 0ustar tianontianonFROM busybox EXPOSE 80/tcp COPY httpserver . CMD ["./httpserver"] docker-1.6.2/contrib/httpserver/server.go0000644000175000017500000000025312524223634020037 0ustar tianontianonpackage main import ( "log" "net/http" ) func main() { fs := http.FileServer(http.Dir("/static")) http.Handle("/", fs) log.Panic(http.ListenAndServe(":80", nil)) } docker-1.6.2/contrib/download-frozen-image.sh0000755000175000017500000000723412524223634020531 0ustar tianontianon#!/bin/bash set -e # hello-world latest ef872312fe1b 3 months ago 910 B # hello-world latest ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9 3 months ago 910 B # debian latest f6fab3b798be 10 weeks ago 85.1 MB # debian latest f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd 10 weeks ago 85.1 MB if ! command -v curl &> /dev/null; then echo >&2 'error: "curl" not found!' 
exit 1 fi usage() { echo "usage: $0 dir image[:tag][@image-id] ..." echo " ie: $0 /tmp/hello-world hello-world" echo " $0 /tmp/debian-jessie debian:jessie" echo " $0 /tmp/old-hello-world hello-world@ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9" echo " $0 /tmp/old-debian debian:latest@f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd" [ -z "$1" ] || exit "$1" } dir="$1" # dir for building tar in shift || usage 1 >&2 [ $# -gt 0 -a "$dir" ] || usage 2 >&2 mkdir -p "$dir" # hacky workarounds for Bash 3 support (no associative arrays) images=() rm -f "$dir"/tags-*.tmp # repositories[busybox]='"latest": "...", "ubuntu-14.04": "..."' while [ $# -gt 0 ]; do imageTag="$1" shift image="${imageTag%%[:@]*}" tag="${imageTag#*:}" imageId="${tag##*@}" [ "$imageId" != "$tag" ] || imageId= [ "$tag" != "$imageTag" ] || tag='latest' tag="${tag%@*}" token="$(curl -sSL -o /dev/null -D- -H 'X-Docker-Token: true' "https://index.docker.io/v1/repositories/$image/images" | tr -d '\r' | awk -F ': *' '$1 == "X-Docker-Token" { print $2 }')" if [ -z "$imageId" ]; then imageId="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/repositories/$image/tags/$tag")" imageId="${imageId//\"/}" fi ancestryJson="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/ancestry")" if [ "${ancestryJson:0:1}" != '[' ]; then echo >&2 "error: /v1/images/$imageId/ancestry returned something unexpected:" echo >&2 " $ancestryJson" exit 1 fi IFS=',' ancestry=( ${ancestryJson//[\[\] \"]/} ) unset IFS if [ -s "$dir/tags-$image.tmp" ]; then echo -n ', ' >> "$dir/tags-$image.tmp" else images=( "${images[@]}" "$image" ) fi echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$image.tmp" echo "Downloading '$imageTag' (${#ancestry[@]} layers)..." for imageId in "${ancestry[@]}"; do mkdir -p "$dir/$imageId" echo '1.0' > "$dir/$imageId/VERSION" curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/json" -o "$dir/$imageId/json" # TODO figure out why "-C -" doesn't work here # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume." # "HTTP/1.1 416 Requested Range Not Satisfiable" if [ -f "$dir/$imageId/layer.tar" ]; then # TODO hackpatch for no -C support :'( echo "skipping existing ${imageId:0:12}" continue fi curl -SL --progress -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/layer" -o "$dir/$imageId/layer.tar" # -C - done echo done echo -n '{' > "$dir/repositories" firstImage=1 for image in "${images[@]}"; do [ "$firstImage" ] || echo -n ',' >> "$dir/repositories" firstImage= echo -n $'\n\t' >> "$dir/repositories" echo -n '"'"$image"'": { '"$(cat "$dir/tags-$image.tmp")"' }' >> "$dir/repositories" done echo -n $'\n}\n' >> "$dir/repositories" rm -f "$dir"/tags-*.tmp echo "Download of images into '$dir' complete." echo "Use something like the following to load the result into a Docker daemon:" echo " tar -cC '$dir' . 
| docker load" docker-1.6.2/.gitignore0000644000175000017500000000076112524223634014330 0ustar tianontianon# Docker project generated files to ignore # if you want to ignore files created by your editor/tools, # please consider a global .gitignore https://help.github.com/articles/ignoring-files .vagrant* bin docker/docker *.exe .*.swp a.out *.orig build_src .flymake* .idea .DS_Store docs/_build docs/_static docs/_templates .gopath/ .dotcloud *.test bundles/ .hg/ .git/ vendor/pkg/ pyenv Vagrantfile docs/AWS_S3_BUCKET docs/GIT_BRANCH docs/VERSION docs/GITCOMMIT docs/changed-files autogen/ .bashrc docker-1.6.2/AUTHORS0000644000175000017500000007033712524223634013416 0ustar tianontianon# This file lists all individuals having contributed content to the repository. # For how it is generated, see `hack/generate-authors.sh`. Aanand Prasad Aaron Feng Aaron Huslage Abel Muiño Abhinav Ajgaonkar Abin Shahab Adam Miller Adam Singer Aditya Adrian Mouat Adrien Folie Ahmed Kamal Ahmet Alp Balkan Aidan Hobson Sayers AJ Bowen Al Tobey alambike Alan Thompson Albert Callarisa Albert Zhang Aleksa Sarai Aleksandrs Fadins Alex Gaynor Alex Warhawk Alexander Boyd Alexander Larsson Alexander Morozov Alexander Shopov Alexandr Morozov Alexey Kotlyarov Alexey Shamrin Alexis THOMAS almoehi amangoel Amit Bakshi Anand Patil AnandkumarPatel Andre Dublin <81dublin@gmail.com> Andrea Luzzardi Andrea Turli Andreas Köhler Andreas Savvides Andreas Tiefenthaler Andrew C. Bodine Andrew Duckworth Andrew France Andrew Macgregor Andrew Munsell Andrew Weiss Andrew Williams Andrews Medina Andrey Petrov Andrey Stolbovsky Andy Chambers andy diller Andy Goldstein Andy Kipp Andy Rothfusz Andy Smith Andy Wilson Ankush Agarwal Anthony Baire Anthony Bishopric Anton Löfgren Anton Nikitin Antony Messerli apocas ArikaChen Arnaud Porterie Arthur Gautier Asbjørn Enge averagehuman Avi Miller Barnaby Gray Barry Allard BartÅ‚omiej Piotrowski bdevloed Ben Firshman Ben Sargent Ben Toews Ben Wiklund Benjamin Atkin Benoit Chesneau Bernerd Schaefer Bert Goethals Bhiraj Butala bin liu Blake Geno Bouke Haarsma Boyd Hemphill Brandon Liu Brandon Philips Brandon Rhodes Brett Kochendorfer Brian (bex) Exelbierd Brian Dorsey Brian Flad Brian Goff Brian McCallister Brian Olsen Brian Shumate Brice Jaglin Briehan Lombaard Bruno Bigras Bruno Binet Bruno Renié Bryan Bess Bryan Matsuo Bryan Murphy Burke Libbey Byung Kang Caleb Spare Calen Pennington Cameron Boehmer Carl X. Su Charles Hooper Charles Lindsay Charles Merriam Charlie Lewis Chen Chao Chewey Chia-liang Kao Chris Alfonso Chris Armstrong Chris Snow Chris St. Pierre chrismckinnel Christian Berendt Christian Stefanescu ChristoperBiscardi Christophe Troestler Christopher Currie Christopher Latham Christopher Rigor Chun Chen Ciro S. 
Costa Clayton Coleman Colin Dunklau Colin Rice Colin Walters Cory Forsyth cressie176 Cristian Staretu Cruceru Calin-Cristian Daan van Berkel Daehyeok Mun Dafydd Crosby Dan Buch Dan Cotora Dan Griffin Dan Hirsch Dan Keder Dan McPherson Dan Stine Dan Walsh Dan Williams Daniel Exner Daniel Farrell Daniel Garcia Daniel Gasienica Daniel Menet Daniel Mizyrycki Daniel Norberg Daniel Nordberg Daniel Robinson Daniel Von Fange Daniel YC Lin Daniel, Dao Quang Minh Danny Berger Danny Yates Darren Coxall Darren Shepherd David Anderson David Calavera David Corking David Gageot David Gebler David Mat David Mcanulty David Pelaez David Röthlisberger David Sissitka Davide Ceretti Dawn Chen decadent Deni Bertovic Derek Derek Derek McGowan Deric Crago Deshi Xiao Dinesh Subhraveti Djibril Koné dkumor Dmitry Demeshchuk Dmitry V. Krivenok Dolph Mathews Dominik Honnef Don Kjer Don Spaulding Doug Davis doug tangren Dr Nic Williams dragon788 Dražen LuÄanin Dustin Sallings Edmund Wagner Eiichi Tsukata Eike Herzbach Eivind Uggedal Elias Probst Emil Hernvall Emily Maier Emily Rose Eric Hanchrow Eric Lee Eric Myhre Eric Paris Eric Windisch Erik Dubbelboer Erik Hollensbe Erik Inge Bolsø Erik Kristensen Erno Hopearuoho Erwin van der Koogh Eugene Yakubovich eugenkrizo Evan Carmi Evan Hazlett Evan Krall Evan Phoenix Evan Wies Eystein MÃ¥løy Stenberg ezbercih Fabio Falci Fabio Rehm Fabrizio Regini Faiz Khan Fareed Dudhia Felix Rabe Fernando Filipe Brandenburger Flavio Castelli FLGMwt Francisco Carriedo Francisco Souza Frank Macreery Frank Rosquin Fred Lifton Frederick F. Kautz IV Frederik Loeffert Freek Kalter Gabe Rosenhouse Gabor Nagy Gabriel Monroy Galen Sampson Gareth Rushgrove gautam, prasanna Geoffrey Bachelet George Xie Gereon Frey German DZ Gert van Valkenhoef Giuseppe Mazzotta Gleb Fotengauer-Malinovskiy Gleb M Borisov Glyn Normington Goffert van Gool golubbe Graydon Hoare Greg Thornton grunny Guilherme Salgado Guillaume Dufour Guillaume J. Charmes Gurjeet Singh Guruprasad Hans Rødtang Harald Albers Harley Laue Hector Castro Henning Sprang Hobofan Hollie Teal Hu Keping Hu Tao Huayi Zhang Hugo Duncan Hunter Blanks Huu Nguyen hyeongkyu.lee Ian Babrou Ian Bishop Ian Bull Ian Main Ian Truslove Igor Dolzhikov ILYA Khlopotov inglesp Isaac Dupree Isabel Jimenez Isao Jonas Ivan Fraixedes Jack Danger Canty Jacob Atzen Jacob Edelman Jake Moshenko jakedt James Allen James Carr James DeFelice James Harrison Fisher James Kyle James Mills James Turnbull Jan Keromnes Jan Pazdziora Jan Toebes Jaroslaw Zabiello jaseg Jason Giedymin Jason Hall Jason Livesay Jason McVetta Jason Plum Jean-Baptiste Barth Jean-Baptiste Dalido Jean-Paul Calderone Jean-Tiare Le Bigot Jeff Anderson Jeff Lindsay Jeff Welch Jeffrey Bolle Jeremy Grosser Jesse Dubay Jessica Frazelle Jezeniel Zapanta Jilles Oldenbeuving Jim Alateras Jim Perrin Jimmy Cuadra Jiří Župka Joe Beda Joe Ferguson Joe Shaw Joe Van Dyk Joel Friedly Joel Handwell Joffrey F Johan Euphrosine Johan Rydberg Johannes 'fish' Ziemke John Costa John Feminella John Gardiner Myers John Gossman John OBrien III John Warwick Jon Wedaman Jonas Pfenniger Jonathan A. 
Sternberg Jonathan Boulle Jonathan Camp Jonathan McCrohan Jonathan Mueller Jonathan Pares Jonathan Rudenberg Joost Cassee Jordan Arentsen Jordan Sissel Joseph Anthony Pasquale Holsten Joseph Hager Josh Josh Hawn Josh Poimboeuf Josiah Kiehl JP Julian Taylor Julien Barbier Julien Bordellier Julien Dubois Justin Force Justin Plock Justin Simonelis Jyrki Puttonen Jérôme Petazzoni Jörg Thalheim Kamil Domanski Karan Lyons Karl Grzeszczak Kato Kazuyoshi Kawsar Saiyeed Keli Hu Ken Cochrane Ken ICHIKAWA Kevin "qwazerty" Houdebert Kevin Clark Kevin J. Lynagh Kevin Menard Kevin Wallace Keyvan Fatehi kies Kim BKC Carlbacker Kimbro Staken Kiran Gangadharan knappe Kohei Tsuruta Konrad Kleine Konstantin Pelykh Krasimir Georgiev krrg Kyle Conroy kyu Lachlan Coote Lajos Papp Lakshan Perera lalyos Lance Chen Lars R. Damerow Laurie Voss leeplay Lei Jitang Len Weincier Leszek Kowalski Levi Gross Lewis Marshall Lewis Peckover Liang-Chi Hsieh limsy Lokesh Mandvekar Lorenz Leutgeb Louis Opter lukaspustina lukemarsden Lénaïc Huard Madhu Venugopal Mahesh Tiyyagura Malte Janduda Manfred Zabarauskas Manuel Meurer Manuel Woelker Marc Abramowitz Marc Kuo Marc Tamsky Marco Hennings Marcus Farkas Marcus Linke Marcus Ramberg Marek Goldmann Marianna Marius Voila Mark Allen Mark McGranaghan Marko Mikulicic Marko Tibold Markus Fix Martijn van Oosterhout Martin Honermeyer Martin Redmond Mary Anthony Mason Malone Mateusz Sulima Mathias Monnerville Mathieu Le Marec - Pasquet Matt Apperson Matt Bachmann Matt Haggard Matthew Heon Matthew Mueller Matthew Riley Matthias Klumpp Matthias Kühnle mattymo mattyw Max Shytikov Maxim Treskin Maxime Petazzoni meejah Mengdi Gao Mert YazıcıoÄŸlu Michael Brown Michael Crosby Michael Gorsuch Michael Hudson-Doyle Michael Neale Michael Prokop Michael Scharf Michael Stapelberg Michael Steinert Michael Thies Michal Jemala Michal Minar Michaël Pailloncy Michiel@unhosted Miguel Angel Fernández Mike Chelen Mike Gaffney Mike MacCana Mike Naberezny Mike Snitzer Mikhail Sobolev Mohit Soni Morgante Pell Morten Siebuhr Mrunal Patel mschurenko Mustafa Akın Médi-Rémi Hashim Nan Monnand Deng Naoki Orii Nate Eagleson Nate Jones Nathan Hsieh Nathan Kleyn Nathan LeClaire Neal McBurnett Nelson Chen Niall O'Higgins Nicholas E. Rabenau Nick Payne Nick Stenning Nick Stinemates Nicolas De loof Nicolas Dudebout Nicolas Goy Nicolas Kaiser NikolaMandic noducks Nolan Darilek nzwsch O.S. Tezer OddBloke odk- Oguz Bilgic Oh Jinkyun Ole Reifschneider Olivier Gambier pandrew panticz Pascal Borreli Pascal Hartig Patrick Hemmer Patrick Stapleton pattichen Paul Paul Annesley Paul Bowsher Paul Hammond Paul Jimenez Paul Lietar Paul Morie Paul Nasrat Paul Weaver Pavlos Ratis Peter Bourgon Peter Braden Peter Ericson Peter Salvatore Peter Waller Phil Phil Estes Phil Spitler Philipp Weissensteiner Phillip Alexander Piergiuliano Bossi Pierre Pierre Wacrenier Pierre-Alain RIVIERE Piotr Bogdan pixelistik Porjo Prasanna Gautam Przemek Hejman pysqz Qiang Huang Quentin Brossard r0n22 Rafal Jeczalik Rafe Colton Rajat Pandit Rajdeep Dua Ralph Bean Ramkumar Ramachandra Ramon van Alteren Recursive Madman Remi Rampin Renato Riccieri Santos Zannon rgstephens Rhys Hiltner Richard Harvey Richard Metzler Richo Healey Rick Bradley Rick van de Loo Robert Bachmann Robert Bittle Robert Obryk Roberto G. 
Hashioka Robin Speekenbrink robpc Rodrigo Vaz Roel Van Nyen Roger Peppe Rohit Jnagal Roland Huß Roland Moriz Ron Smits Rovanion Luckey Rudolph Gottesheim Ryan Anderson Ryan Aslett Ryan Detzel Ryan Fowler Ryan O'Donnell Ryan Seto Ryan Thomas Rémy Greinhofer Sam Alba Sam Bailey Sam J Sharpe Sam Reis Sam Rijs Sami Wagiaalla Samuel Andaya Samuel PHAN Satnam Singh satoru Satoshi Amemiya Scott Bessler Scott Collier Scott Johnston Scott Stamp Scott Walls Sean Cronin Sean P. Kane Sebastiaan van Stijn Senthil Kumar Selvaraj SeongJae Park Shane Canon shaunol Shawn Landden Shawn Siefkas Shih-Yuan Lee Shishir Mahajan shuai-z Silas Sewell Simon Taranto Sindhu S Sjoerd Langkemper Solomon Hykes Song Gao Soulou soulshake Sridatta Thatipamala Sridhar Ratnakumar Srini Brahmaroutu Srini Brahmaroutu Steeve Morin Stefan Praszalowicz Stephen Crosby Steven Burgess Steven Merrill Sven Dowideit Sylvain Bellemare Sébastien Sébastien Luttringer Sébastien Stormacq tang0th Tatsuki Sugiura Tatsushi Inagaki Ted M. Young Tehmasp Chaudhri Tejesh Mehta Thatcher Peskens Thermionix Thijs Terlouw Thomas Bikeev Thomas Frössman Thomas Hansen Thomas LEVEIL Thomas Orozco Thomas Schroeter Tianon Gravi Tibor Vass Tim Bosse Tim Hockin Tim Ruffles Tim Smith Tim Terhorst Timothy Hobbs tjwebb123 tobe Tobias Bieniek Tobias Gesellchen Tobias Schmidt Tobias Schwab Todd Lunter Tom Fotherby Tom Hulihan Tom Maaswinkel Tomas Tomecek Tomasz Lipinski Tomasz Nurkiewicz Tommaso Visconti Tonis Tiigi Tony Daws Torstein Husebø tpng Travis Cline Trent Ogren Tyler Brock Tzu-Jung Lee Ulysse Carion unknown Vaidas Jablonskis vgeta Victor Coisne Victor Lyuboslavsky Victor Marmol Victor Vieux Viktor Vojnovski Vincent Batts Vincent Bernat Vincent Bernat Vincent Giersch Vincent Mayers Vincent Woo Vinod Kulkarni Vishal Doshi Vishnu Kannan Vitor Monteiro Vivek Agarwal Vivek Dasgupta Vivek Goyal Vladimir Bulyga Vladimir Kirillov Vladimir Rutsky Vojtech Vitek (V-Teq) waitingkuo Walter Leibbrandt Walter Stanish Ward Vandewege WarheadsSE Wayne Chang Wes Morgan Will Dietz Will Rouesnel Will Weaver William Delanoue William Henry William Riancho William Thurston Xiuming Chen xuzhaokui Yang Bai Yasunori Mahata Yohei Ueda Yurii Rashkovskii Zac Dover Zach Borboa Zain Memon Zaiste! 
Zane DeGraffenried Zilin Du zimbatm Zoltan Tombol zqh Ãlex González Ãlvaro Lázaro å°¹å‰å³° docker-1.6.2/engine/0000755000175000017500000000000012524223634013601 5ustar tianontianondocker-1.6.2/engine/env_test.go0000644000175000017500000001566012524223634015767 0ustar tianontianonpackage engine import ( "bytes" "encoding/json" "testing" "time" "github.com/docker/docker/pkg/testutils" ) func TestEnvLenZero(t *testing.T) { env := &Env{} if env.Len() != 0 { t.Fatalf("%d", env.Len()) } } func TestEnvLenNotZero(t *testing.T) { env := &Env{} env.Set("foo", "bar") env.Set("ga", "bu") if env.Len() != 2 { t.Fatalf("%d", env.Len()) } } func TestEnvLenDup(t *testing.T) { env := &Env{ "foo=bar", "foo=baz", "a=b", } // len(env) != env.Len() if env.Len() != 2 { t.Fatalf("%d", env.Len()) } } func TestEnvGetDup(t *testing.T) { env := &Env{ "foo=bar", "foo=baz", "foo=bif", } expected := "bif" if v := env.Get("foo"); v != expected { t.Fatalf("expect %q, got %q", expected, v) } } func TestNewJob(t *testing.T) { job := mkJob(t, "dummy", "--level=awesome") if job.Name != "dummy" { t.Fatalf("Wrong job name: %s", job.Name) } if len(job.Args) != 1 { t.Fatalf("Wrong number of job arguments: %d", len(job.Args)) } if job.Args[0] != "--level=awesome" { t.Fatalf("Wrong job arguments: %s", job.Args[0]) } } func TestSetenv(t *testing.T) { job := mkJob(t, "dummy") job.Setenv("foo", "bar") if val := job.Getenv("foo"); val != "bar" { t.Fatalf("Getenv returns incorrect value: %s", val) } job.Setenv("bar", "") if val := job.Getenv("bar"); val != "" { t.Fatalf("Getenv returns incorrect value: %s", val) } if val := job.Getenv("nonexistent"); val != "" { t.Fatalf("Getenv returns incorrect value: %s", val) } } func TestSetenvBool(t *testing.T) { job := mkJob(t, "dummy") job.SetenvBool("foo", true) if val := job.GetenvBool("foo"); !val { t.Fatalf("GetenvBool returns incorrect value: %t", val) } job.SetenvBool("bar", false) if val := job.GetenvBool("bar"); val { t.Fatalf("GetenvBool returns incorrect value: %t", val) } if val := job.GetenvBool("nonexistent"); val { t.Fatalf("GetenvBool returns incorrect value: %t", val) } } func TestSetenvTime(t *testing.T) { job := mkJob(t, "dummy") now := time.Now() job.SetenvTime("foo", now) if val, err := job.GetenvTime("foo"); err != nil { t.Fatalf("GetenvTime failed to parse: %v", err) } else { nowStr := now.Format(time.RFC3339) valStr := val.Format(time.RFC3339) if nowStr != valStr { t.Fatalf("GetenvTime returns incorrect value: %s, Expected: %s", valStr, nowStr) } } job.Setenv("bar", "Obviously I'm not a date") if val, err := job.GetenvTime("bar"); err == nil { t.Fatalf("GetenvTime was supposed to fail, instead returned: %s", val) } } func TestSetenvInt(t *testing.T) { job := mkJob(t, "dummy") job.SetenvInt("foo", -42) if val := job.GetenvInt("foo"); val != -42 { t.Fatalf("GetenvInt returns incorrect value: %d", val) } job.SetenvInt("bar", 42) if val := job.GetenvInt("bar"); val != 42 { t.Fatalf("GetenvInt returns incorrect value: %d", val) } if val := job.GetenvInt("nonexistent"); val != 0 { t.Fatalf("GetenvInt returns incorrect value: %d", val) } } func TestSetenvList(t *testing.T) { job := mkJob(t, "dummy") job.SetenvList("foo", []string{"bar"}) if val := job.GetenvList("foo"); len(val) != 1 || val[0] != "bar" { t.Fatalf("GetenvList returns incorrect value: %v", val) } job.SetenvList("bar", nil) if val := job.GetenvList("bar"); val != nil { t.Fatalf("GetenvList returns incorrect value: %v", val) } if val := job.GetenvList("nonexistent"); val != nil { t.Fatalf("GetenvList returns 
incorrect value: %v", val) } } func TestEnviron(t *testing.T) { job := mkJob(t, "dummy") job.Setenv("foo", "bar") val, exists := job.Environ()["foo"] if !exists { t.Fatalf("foo not found in the environ") } if val != "bar" { t.Fatalf("bar not found in the environ") } } func TestMultiMap(t *testing.T) { e := &Env{} e.Set("foo", "bar") e.Set("bar", "baz") e.Set("hello", "world") m := e.MultiMap() e2 := &Env{} e2.Set("old_key", "something something something") e2.InitMultiMap(m) if v := e2.Get("old_key"); v != "" { t.Fatalf("%#v", v) } if v := e2.Get("bar"); v != "baz" { t.Fatalf("%#v", v) } if v := e2.Get("hello"); v != "world" { t.Fatalf("%#v", v) } } func testMap(l int) [][2]string { res := make([][2]string, l) for i := 0; i < l; i++ { t := [2]string{testutils.RandomString(5), testutils.RandomString(20)} res[i] = t } return res } func BenchmarkSet(b *testing.B) { fix := testMap(100) b.ResetTimer() for i := 0; i < b.N; i++ { env := &Env{} for _, kv := range fix { env.Set(kv[0], kv[1]) } } } func BenchmarkSetJson(b *testing.B) { fix := testMap(100) type X struct { f string } b.ResetTimer() for i := 0; i < b.N; i++ { env := &Env{} for _, kv := range fix { if err := env.SetJson(kv[0], X{kv[1]}); err != nil { b.Fatal(err) } } } } func BenchmarkGet(b *testing.B) { fix := testMap(100) env := &Env{} for _, kv := range fix { env.Set(kv[0], kv[1]) } b.ResetTimer() for i := 0; i < b.N; i++ { for _, kv := range fix { env.Get(kv[0]) } } } func BenchmarkGetJson(b *testing.B) { fix := testMap(100) env := &Env{} type X struct { f string } for _, kv := range fix { env.SetJson(kv[0], X{kv[1]}) } b.ResetTimer() for i := 0; i < b.N; i++ { for _, kv := range fix { if err := env.GetJson(kv[0], &X{}); err != nil { b.Fatal(err) } } } } func BenchmarkEncode(b *testing.B) { fix := testMap(100) env := &Env{} type X struct { f string } // half a json for i, kv := range fix { if i%2 != 0 { if err := env.SetJson(kv[0], X{kv[1]}); err != nil { b.Fatal(err) } continue } env.Set(kv[0], kv[1]) } var writer bytes.Buffer b.ResetTimer() for i := 0; i < b.N; i++ { env.Encode(&writer) writer.Reset() } } func BenchmarkDecode(b *testing.B) { fix := testMap(100) env := &Env{} type X struct { f string } // half a json for i, kv := range fix { if i%2 != 0 { if err := env.SetJson(kv[0], X{kv[1]}); err != nil { b.Fatal(err) } continue } env.Set(kv[0], kv[1]) } var writer bytes.Buffer env.Encode(&writer) denv := &Env{} reader := bytes.NewReader(writer.Bytes()) b.ResetTimer() for i := 0; i < b.N; i++ { err := denv.Decode(reader) if err != nil { b.Fatal(err) } reader.Seek(0, 0) } } func TestLongNumbers(t *testing.T) { type T struct { TestNum int64 } v := T{67108864} var buf bytes.Buffer e := &Env{} e.SetJson("Test", v) if err := e.Encode(&buf); err != nil { t.Fatal(err) } res := make(map[string]T) if err := json.Unmarshal(buf.Bytes(), &res); err != nil { t.Fatal(err) } if res["Test"].TestNum != v.TestNum { t.Fatalf("TestNum %d, expected %d", res["Test"].TestNum, v.TestNum) } } func TestLongNumbersArray(t *testing.T) { type T struct { TestNum []int64 } v := T{[]int64{67108864}} var buf bytes.Buffer e := &Env{} e.SetJson("Test", v) if err := e.Encode(&buf); err != nil { t.Fatal(err) } res := make(map[string]T) if err := json.Unmarshal(buf.Bytes(), &res); err != nil { t.Fatal(err) } if res["Test"].TestNum[0] != v.TestNum[0] { t.Fatalf("TestNum %d, expected %d", res["Test"].TestNum, v.TestNum) } } docker-1.6.2/engine/streams.go0000644000175000017500000001161312524223634015610 0ustar tianontianonpackage engine import ( "bytes" "fmt" "io" 
"io/ioutil" "strings" "sync" "unicode" ) type Output struct { sync.Mutex dests []io.Writer tasks sync.WaitGroup used bool } // Tail returns the n last lines of a buffer // stripped out of trailing white spaces, if any. // // if n <= 0, returns an empty string func Tail(buffer *bytes.Buffer, n int) string { if n <= 0 { return "" } s := strings.TrimRightFunc(buffer.String(), unicode.IsSpace) i := len(s) - 1 for ; i >= 0 && n > 0; i-- { if s[i] == '\n' { n-- if n == 0 { break } } } // when i == -1, return the whole string which is s[0:] return s[i+1:] } // NewOutput returns a new Output object with no destinations attached. // Writing to an empty Output will cause the written data to be discarded. func NewOutput() *Output { return &Output{} } // Return true if something was written on this output func (o *Output) Used() bool { o.Lock() defer o.Unlock() return o.used } // Add attaches a new destination to the Output. Any data subsequently written // to the output will be written to the new destination in addition to all the others. // This method is thread-safe. func (o *Output) Add(dst io.Writer) { o.Lock() defer o.Unlock() o.dests = append(o.dests, dst) } // Set closes and remove existing destination and then attaches a new destination to // the Output. Any data subsequently written to the output will be written to the new // destination in addition to all the others. This method is thread-safe. func (o *Output) Set(dst io.Writer) { o.Close() o.Lock() defer o.Unlock() o.dests = []io.Writer{dst} } // AddPipe creates an in-memory pipe with io.Pipe(), adds its writing end as a destination, // and returns its reading end for consumption by the caller. // This is a rough equivalent similar to Cmd.StdoutPipe() in the standard os/exec package. // This method is thread-safe. func (o *Output) AddPipe() (io.Reader, error) { r, w := io.Pipe() o.Add(w) return r, nil } // Write writes the same data to all registered destinations. // This method is thread-safe. func (o *Output) Write(p []byte) (n int, err error) { o.Lock() defer o.Unlock() o.used = true var firstErr error for _, dst := range o.dests { _, err := dst.Write(p) if err != nil && firstErr == nil { firstErr = err } } return len(p), firstErr } // Close unregisters all destinations and waits for all background // AddTail and AddString tasks to complete. // The Close method of each destination is called if it exists. func (o *Output) Close() error { o.Lock() defer o.Unlock() var firstErr error for _, dst := range o.dests { if closer, ok := dst.(io.Closer); ok { err := closer.Close() if err != nil && firstErr == nil { firstErr = err } } } o.tasks.Wait() o.dests = nil return firstErr } type Input struct { src io.Reader sync.Mutex } // NewInput returns a new Input object with no source attached. // Reading to an empty Input will return io.EOF. func NewInput() *Input { return &Input{} } // Read reads from the input in a thread-safe way. func (i *Input) Read(p []byte) (n int, err error) { i.Mutex.Lock() defer i.Mutex.Unlock() if i.src == nil { return 0, io.EOF } return i.src.Read(p) } // Closes the src // Not thread safe on purpose func (i *Input) Close() error { if i.src != nil { if closer, ok := i.src.(io.Closer); ok { return closer.Close() } } return nil } // Add attaches a new source to the input. // Add can only be called once per input. Subsequent calls will // return an error. 
func (i *Input) Add(src io.Reader) error { i.Mutex.Lock() defer i.Mutex.Unlock() if i.src != nil { return fmt.Errorf("Maximum number of sources reached: 1") } i.src = src return nil } // AddEnv starts a new goroutine which will decode all subsequent data // as a stream of json-encoded objects, and point `dst` to the last // decoded object. // The result `dst` can be queried using the type-neutral Env interface. // It is not safe to query `dst` until the Output is closed. func (o *Output) AddEnv() (dst *Env, err error) { src, err := o.AddPipe() if err != nil { return nil, err } dst = &Env{} o.tasks.Add(1) go func() { defer o.tasks.Done() decoder := NewDecoder(src) for { env, err := decoder.Decode() if err != nil { return } *dst = *env } }() return dst, nil } func (o *Output) AddListTable() (dst *Table, err error) { src, err := o.AddPipe() if err != nil { return nil, err } dst = NewTable("", 0) o.tasks.Add(1) go func() { defer o.tasks.Done() content, err := ioutil.ReadAll(src) if err != nil { return } if _, err := dst.ReadListFrom(content); err != nil { return } }() return dst, nil } func (o *Output) AddTable() (dst *Table, err error) { src, err := o.AddPipe() if err != nil { return nil, err } dst = NewTable("", 0) o.tasks.Add(1) go func() { defer o.tasks.Done() if _, err := dst.ReadFrom(src); err != nil { return } }() return dst, nil } docker-1.6.2/engine/http.go0000644000175000017500000000271212524223634015111 0ustar tianontianonpackage engine import ( "net/http" "path" ) // ServeHTTP executes a job as specified by the http request `r`, and sends the // result as an http response. // This method allows an Engine instance to be used as a standard http.Handler. // // Note that the protocol used in this method is a convenience wrapper and is not the canonical // implementation of remote job execution. This is because HTTP/1 does not handle stream multiplexing, // and so cannot differentiate stdout from stderr. Additionally, headers cannot be added to a response // once data has been written to the body, which makes it inconvenient to return metadata such // as the exit status. // func (eng *Engine) ServeHTTP(w http.ResponseWriter, r *http.Request) { var ( jobName = path.Base(r.URL.Path) jobArgs, exists = r.URL.Query()["a"] ) if !exists { jobArgs = []string{} } w.Header().Set("Job-Name", jobName) for _, arg := range jobArgs { w.Header().Add("Job-Args", arg) } job := eng.Job(jobName, jobArgs...) job.Stdout.Add(w) job.Stderr.Add(w) // FIXME: distinguish job status from engine error in Run() // The former should be passed as a special header, the latter // should cause a 500 status w.WriteHeader(http.StatusOK) // The exit status cannot be sent reliably with HTTP1, because headers // can only be sent before the body. // (we could possibly use http footers via chunked encoding, but I couldn't find // how to use them in net/http) job.Run() } docker-1.6.2/engine/job.go0000644000175000017500000001473212524223634014711 0ustar tianontianonpackage engine import ( "bytes" "fmt" "io" "strings" "sync" "time" log "github.com/Sirupsen/logrus" ) // A job is the fundamental unit of work in the docker engine. // Everything docker can do should eventually be exposed as a job. // For example: execute a process in a container, create a new container, // download an archive from the internet, serve the http api, etc.
// // The job API is designed after unix processes: a job has a name, arguments, // environment variables, standard streams for input, output and error, and // an exit status which can indicate success (0) or error (anything else). // // For status, 0 indicates success, and any other integer indicates an error. // This allows for richer error reporting. // type Job struct { Eng *Engine Name string Args []string env *Env Stdout *Output Stderr *Output Stdin *Input handler Handler status Status end time.Time closeIO bool // When closed, the job has been cancelled. // Note: not all jobs implement cancellation. // See Job.Cancel() and Job.WaitCancelled() cancelled chan struct{} cancelOnce sync.Once } type Status int const ( StatusOK Status = 0 StatusErr Status = 1 StatusNotFound Status = 127 ) // Run executes the job and blocks until the job completes. // If the job returns a failure status, an error is returned // which includes the status. func (job *Job) Run() error { if job.Eng.IsShutdown() && !job.GetenvBool("overrideShutdown") { return fmt.Errorf("engine is shutdown") } // FIXME: this is a temporary workaround to avoid Engine.Shutdown // waiting 5 seconds for server/api.ServeApi to complete (which it never will) // every time the daemon is cleanly restarted. // The permanent fix is to implement Job.Stop and Job.OnStop so that // ServeApi can cooperate and terminate cleanly. if job.Name != "serveapi" { job.Eng.l.Lock() job.Eng.tasks.Add(1) job.Eng.l.Unlock() defer job.Eng.tasks.Done() } // FIXME: make this thread-safe // FIXME: implement wait if !job.end.IsZero() { return fmt.Errorf("%s: job has already completed", job.Name) } // Log beginning and end of the job if job.Eng.Logging { log.Infof("+job %s", job.CallString()) defer func() { log.Infof("-job %s%s", job.CallString(), job.StatusString()) }() } var errorMessage = bytes.NewBuffer(nil) job.Stderr.Add(errorMessage) if job.handler == nil { job.Errorf("%s: command not found", job.Name) job.status = 127 } else { job.status = job.handler(job) job.end = time.Now() } if job.closeIO { // Wait for all background tasks to complete if err := job.Stdout.Close(); err != nil { return err } if err := job.Stderr.Close(); err != nil { return err } if err := job.Stdin.Close(); err != nil { return err } } if job.status != 0 { return fmt.Errorf("%s", Tail(errorMessage, 1)) } return nil } func (job *Job) CallString() string { return fmt.Sprintf("%s(%s)", job.Name, strings.Join(job.Args, ", ")) } func (job *Job) StatusString() string { // If the job hasn't completed, status string is empty if job.end.IsZero() { return "" } var okerr string if job.status == StatusOK { okerr = "OK" } else { okerr = "ERR" } return fmt.Sprintf(" = %s (%d)", okerr, job.status) } // String returns a human-readable description of `job` func (job *Job) String() string { return fmt.Sprintf("%s.%s%s", job.Eng, job.CallString(), job.StatusString()) } func (job *Job) Env() *Env { return job.env } func (job *Job) EnvExists(key string) (value bool) { return job.env.Exists(key) } func (job *Job) Getenv(key string) (value string) { return job.env.Get(key) } func (job *Job) GetenvBool(key string) (value bool) { return job.env.GetBool(key) } func (job *Job) SetenvBool(key string, value bool) { job.env.SetBool(key, value) } func (job *Job) GetenvTime(key string) (value time.Time, err error) { return job.env.GetTime(key) } func (job *Job) SetenvTime(key string, value time.Time) { job.env.SetTime(key, value) } func (job *Job) GetenvSubEnv(key string) *Env { return job.env.GetSubEnv(key) } func
(job *Job) SetenvSubEnv(key string, value *Env) error { return job.env.SetSubEnv(key, value) } func (job *Job) GetenvInt64(key string) int64 { return job.env.GetInt64(key) } func (job *Job) GetenvInt(key string) int { return job.env.GetInt(key) } func (job *Job) SetenvInt64(key string, value int64) { job.env.SetInt64(key, value) } func (job *Job) SetenvInt(key string, value int) { job.env.SetInt(key, value) } // Returns nil if key not found func (job *Job) GetenvList(key string) []string { return job.env.GetList(key) } func (job *Job) GetenvJson(key string, iface interface{}) error { return job.env.GetJson(key, iface) } func (job *Job) SetenvJson(key string, value interface{}) error { return job.env.SetJson(key, value) } func (job *Job) SetenvList(key string, value []string) error { return job.env.SetJson(key, value) } func (job *Job) Setenv(key, value string) { job.env.Set(key, value) } // DecodeEnv decodes `src` as a json dictionary, and adds // each decoded key-value pair to the environment. // // If `src` cannot be decoded as a json dictionary, an error // is returned. func (job *Job) DecodeEnv(src io.Reader) error { return job.env.Decode(src) } func (job *Job) EncodeEnv(dst io.Writer) error { return job.env.Encode(dst) } func (job *Job) ImportEnv(src interface{}) (err error) { return job.env.Import(src) } func (job *Job) Environ() map[string]string { return job.env.Map() } func (job *Job) Logf(format string, args ...interface{}) (n int, err error) { prefixedFormat := fmt.Sprintf("[%s] %s\n", job, strings.TrimRight(format, "\n")) return fmt.Fprintf(job.Stderr, prefixedFormat, args...) } func (job *Job) Printf(format string, args ...interface{}) (n int, err error) { return fmt.Fprintf(job.Stdout, format, args...) } func (job *Job) Errorf(format string, args ...interface{}) Status { if format[len(format)-1] != '\n' { format = format + "\n" } fmt.Fprintf(job.Stderr, format, args...) return StatusErr } func (job *Job) Error(err error) Status { fmt.Fprintf(job.Stderr, "%s\n", err) return StatusErr } func (job *Job) StatusCode() int { return int(job.status) } func (job *Job) SetCloseIO(val bool) { job.closeIO = val } // When called, causes the Job.WaitCancelled channel to unblock. func (job *Job) Cancel() { job.cancelOnce.Do(func() { close(job.cancelled) }) } // Returns a channel which is closed ("never blocks") when the job is cancelled. func (job *Job) WaitCancelled() <-chan struct{} { return job.cancelled } docker-1.6.2/engine/engine.go0000644000175000017500000001410012524223634015371 0ustar tianontianonpackage engine import ( "bufio" "fmt" "io" "os" "sort" "strings" "sync" "time" "github.com/docker/docker/pkg/common" "github.com/docker/docker/pkg/ioutils" ) // Installer is a standard interface for objects which can "install" themselves // on an engine by registering handlers. // This can be used as an entrypoint for external plugins etc. type Installer interface { Install(*Engine) error } type Handler func(*Job) Status var globalHandlers map[string]Handler func init() { globalHandlers = make(map[string]Handler) } func Register(name string, handler Handler) error { _, exists := globalHandlers[name] if exists { return fmt.Errorf("Can't overwrite global handler for command %s", name) } globalHandlers[name] = handler return nil } func unregister(name string) { delete(globalHandlers, name) } // The Engine is the core of Docker. // It acts as a store for *containers*, and allows manipulation of these // containers by executing *jobs*. 
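//
// A typical interaction, shown as a sketch (the "hello" handler is
// illustrative, not a built-in):
//
//	eng := New()
//	eng.Register("hello", func(job *Job) Status {
//		job.Printf("hello %s\n", strings.Join(job.Args, " "))
//		return StatusOK
//	})
//	if err := eng.Job("hello", "world").Run(); err != nil {
//		// handle the error
//	}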
type Engine struct { handlers map[string]Handler catchall Handler hack Hack // data for temporary hackery (see hack.go) id string Stdout io.Writer Stderr io.Writer Stdin io.Reader Logging bool tasks sync.WaitGroup l sync.RWMutex // lock for shutdown shutdownWait sync.WaitGroup shutdown bool onShutdown []func() // shutdown handlers } func (eng *Engine) Register(name string, handler Handler) error { _, exists := eng.handlers[name] if exists { return fmt.Errorf("Can't overwrite handler for command %s", name) } eng.handlers[name] = handler return nil } func (eng *Engine) RegisterCatchall(catchall Handler) { eng.catchall = catchall } // New initializes a new engine. func New() *Engine { eng := &Engine{ handlers: make(map[string]Handler), id: common.RandomString(), Stdout: os.Stdout, Stderr: os.Stderr, Stdin: os.Stdin, Logging: true, } eng.Register("commands", func(job *Job) Status { for _, name := range eng.commands() { job.Printf("%s\n", name) } return StatusOK }) // Copy existing global handlers for k, v := range globalHandlers { eng.handlers[k] = v } return eng } func (eng *Engine) String() string { return fmt.Sprintf("%s", eng.id[:8]) } // Commands returns a list of all currently registered commands, // sorted alphabetically. func (eng *Engine) commands() []string { names := make([]string, 0, len(eng.handlers)) for name := range eng.handlers { names = append(names, name) } sort.Strings(names) return names } // Job creates a new job which can later be executed. // This function mimics `Command` from the standard os/exec package. func (eng *Engine) Job(name string, args ...string) *Job { job := &Job{ Eng: eng, Name: name, Args: args, Stdin: NewInput(), Stdout: NewOutput(), Stderr: NewOutput(), env: &Env{}, closeIO: true, cancelled: make(chan struct{}), } if eng.Logging { job.Stderr.Add(ioutils.NopWriteCloser(eng.Stderr)) } // Catchall is shadowed by specific Register. if handler, exists := eng.handlers[name]; exists { job.handler = handler } else if eng.catchall != nil && name != "" { // empty job names are illegal, catchall or not. job.handler = eng.catchall } return job } // OnShutdown registers a new callback to be called by Shutdown. // This is typically used by services to perform cleanup. func (eng *Engine) OnShutdown(h func()) { eng.l.Lock() eng.onShutdown = append(eng.onShutdown, h) eng.shutdownWait.Add(1) eng.l.Unlock() } // Shutdown permanently shuts down eng as follows: // - It refuses all new jobs, permanently. // - It waits for all active jobs to complete (with no timeout) // - It calls all shutdown handlers concurrently (if any) // - It returns when all handlers complete, or after 15 seconds, // whichever happens first. func (eng *Engine) Shutdown() { eng.l.Lock() if eng.shutdown { eng.l.Unlock() eng.shutdownWait.Wait() return } eng.shutdown = true eng.l.Unlock() // We don't need to protect the rest with a lock, to allow // for other calls to immediately fail with "shutdown" instead // of hanging for 15 seconds. // This requires all concurrent calls to check for shutdown, otherwise // it might cause a race. // Wait for all jobs to complete. // Timeout after 5 seconds. tasksDone := make(chan struct{}) go func() { eng.tasks.Wait() close(tasksDone) }() select { case <-time.After(time.Second * 5): case <-tasksDone: } // Call shutdown handlers, if any. // Timeout after 10 seconds. 
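// Each shutdown handler runs in its own goroutine; OnShutdown added one
// shutdownWait count per registered handler, so the Wait() below only
// returns once every handler has called Done().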
for _, h := range eng.onShutdown { go func(h func()) { h() eng.shutdownWait.Done() }(h) } done := make(chan struct{}) go func() { eng.shutdownWait.Wait() close(done) }() select { case <-time.After(time.Second * 10): case <-done: } return } // IsShutdown returns true if the engine is in the process // of shutting down, or already shut down. // Otherwise it returns false. func (eng *Engine) IsShutdown() bool { eng.l.RLock() defer eng.l.RUnlock() return eng.shutdown } // ParseJob creates a new job from a text description using a shell-like syntax. // // The following syntax is used to parse `input`: // // * Words are separated using standard whitespaces as separators. // * Quotes and backslashes are not interpreted. // * Words of the form 'KEY=[VALUE]' are added to the job environment. // * All other words are added to the job arguments. // // For example: // // job, _ := eng.ParseJob("VERBOSE=1 echo hello TEST=true world") // // The resulting job will have: // job.Args={"echo", "hello", "world"} // job.Env={"VERBOSE":"1", "TEST":"true"} // func (eng *Engine) ParseJob(input string) (*Job, error) { // FIXME: use a full-featured command parser scanner := bufio.NewScanner(strings.NewReader(input)) scanner.Split(bufio.ScanWords) var ( cmd []string env Env ) for scanner.Scan() { word := scanner.Text() kv := strings.SplitN(word, "=", 2) if len(kv) == 2 { env.Set(kv[0], kv[1]) } else { cmd = append(cmd, word) } } if len(cmd) == 0 { return nil, fmt.Errorf("empty command: '%s'", input) } job := eng.Job(cmd[0], cmd[1:]...) job.Env().Init(&env) return job, nil } docker-1.6.2/engine/engine_test.go0000644000175000017500000001241612524223634016440 0ustar tianontianonpackage engine import ( "bytes" "strings" "testing" "github.com/docker/docker/pkg/ioutils" ) func TestRegister(t *testing.T) { if err := Register("dummy1", nil); err != nil { t.Fatal(err) } if err := Register("dummy1", nil); err == nil { t.Fatalf("Expecting error, got none") } // Register is global so let's cleanup to avoid conflicts defer unregister("dummy1") eng := New() //Should fail because global handlers are copied //at the engine creation if err := eng.Register("dummy1", nil); err == nil { t.Fatalf("Expecting error, got none") } if err := eng.Register("dummy2", nil); err != nil { t.Fatal(err) } if err := eng.Register("dummy2", nil); err == nil { t.Fatalf("Expecting error, got none") } defer unregister("dummy2") } func TestJob(t *testing.T) { eng := New() job1 := eng.Job("dummy1", "--level=awesome") if job1.handler != nil { t.Fatalf("job1.handler should be empty") } h := func(j *Job) Status { j.Printf("%s\n", j.Name) return 42 } eng.Register("dummy2", h) defer unregister("dummy2") job2 := eng.Job("dummy2", "--level=awesome") if job2.handler == nil { t.Fatalf("job2.handler shouldn't be nil") } if job2.handler(job2) != 42 { t.Fatalf("handler dummy2 was not found in job2") } } func TestEngineShutdown(t *testing.T) { eng := New() if eng.IsShutdown() { t.Fatalf("Engine should not show as shutdown") } eng.Shutdown() if !eng.IsShutdown() { t.Fatalf("Engine should show as shutdown") } } func TestEngineCommands(t *testing.T) { eng := New() handler := func(job *Job) Status { return StatusOK } eng.Register("foo", handler) eng.Register("bar", handler) eng.Register("echo", handler) eng.Register("die", handler) var output bytes.Buffer commands := eng.Job("commands") commands.Stdout.Add(&output) commands.Run() expected := "bar\ncommands\ndie\necho\nfoo\n" if result := output.String(); result != expected { t.Fatalf("Unexpected output:\nExpected = 
%v\nResult = %v\n", expected, result) } } func TestEngineString(t *testing.T) { eng1 := New() eng2 := New() s1 := eng1.String() s2 := eng2.String() if eng1 == eng2 { t.Fatalf("Different engines should have different names (%v == %v)", s1, s2) } } func TestParseJob(t *testing.T) { eng := New() // Verify that the resulting job calls to the right place var called bool eng.Register("echo", func(job *Job) Status { called = true return StatusOK }) input := "echo DEBUG=1 hello world VERBOSITY=42" job, err := eng.ParseJob(input) if err != nil { t.Fatal(err) } if job.Name != "echo" { t.Fatalf("Invalid job name: %v", job.Name) } if strings.Join(job.Args, ":::") != "hello:::world" { t.Fatalf("Invalid job args: %v", job.Args) } if job.Env().Get("DEBUG") != "1" { t.Fatalf("Invalid job env: %v", job.Env) } if job.Env().Get("VERBOSITY") != "42" { t.Fatalf("Invalid job env: %v", job.Env) } if len(job.Env().Map()) != 2 { t.Fatalf("Invalid job env: %v", job.Env) } if err := job.Run(); err != nil { t.Fatal(err) } if !called { t.Fatalf("Job was not called") } } func TestCatchallEmptyName(t *testing.T) { eng := New() var called bool eng.RegisterCatchall(func(job *Job) Status { called = true return StatusOK }) err := eng.Job("").Run() if err == nil { t.Fatalf("Engine.Job(\"\").Run() should return an error") } if called { t.Fatalf("Engine.Job(\"\").Run() should return an error") } } // Ensure that a job within a job both using the same underlying standard // output writer does not close the output of the outer job when the inner // job's stdout is wrapped with a NopCloser. When not wrapped, it should // close the outer job's output. func TestNestedJobSharedOutput(t *testing.T) { var ( outerHandler Handler innerHandler Handler wrapOutput bool ) outerHandler = func(job *Job) Status { job.Stdout.Write([]byte("outer1")) innerJob := job.Eng.Job("innerJob") if wrapOutput { innerJob.Stdout.Add(ioutils.NopWriteCloser(job.Stdout)) } else { innerJob.Stdout.Add(job.Stdout) } if err := innerJob.Run(); err != nil { t.Fatal(err) } // If wrapOutput was *false* this write will do nothing. // FIXME (jlhawn): It should cause an error to write to // closed output. 
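// (Without the NopWriteCloser wrapper, the inner job's Close() reached the
// shared Output and unregistered all destinations, so the write below is
// silently dropped.)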
job.Stdout.Write([]byte(" outer2")) return StatusOK } innerHandler = func(job *Job) Status { job.Stdout.Write([]byte(" inner")) return StatusOK } eng := New() eng.Register("outerJob", outerHandler) eng.Register("innerJob", innerHandler) // wrapOutput starts *false* so the expected // output of running the outer job will be: // // "outer1 inner" // outBuf := new(bytes.Buffer) outerJob := eng.Job("outerJob") outerJob.Stdout.Add(outBuf) if err := outerJob.Run(); err != nil { t.Fatal(err) } expectedOutput := "outer1 inner" if outBuf.String() != expectedOutput { t.Fatalf("expected job output to be %q, got %q", expectedOutput, outBuf.String()) } // Set wrapOutput to true so that the expected // output of running the outer job will be: // // "outer1 inner outer2" // wrapOutput = true outBuf.Reset() outerJob = eng.Job("outerJob") outerJob.Stdout.Add(outBuf) if err := outerJob.Run(); err != nil { t.Fatal(err) } expectedOutput = "outer1 inner outer2" if outBuf.String() != expectedOutput { t.Fatalf("expected job output to be %q, got %q", expectedOutput, outBuf.String()) } } docker-1.6.2/engine/table.go0000644000175000017500000000466212524223634015227 0ustar tianontianonpackage engine import ( "bytes" "encoding/json" "io" "sort" "strconv" ) type Table struct { Data []*Env sortKey string Chan chan *Env } func NewTable(sortKey string, sizeHint int) *Table { return &Table{ make([]*Env, 0, sizeHint), sortKey, make(chan *Env), } } func (t *Table) SetKey(sortKey string) { t.sortKey = sortKey } func (t *Table) Add(env *Env) { t.Data = append(t.Data, env) } func (t *Table) Len() int { return len(t.Data) } func (t *Table) Less(a, b int) bool { return t.lessBy(a, b, t.sortKey) } func (t *Table) lessBy(a, b int, by string) bool { keyA := t.Data[a].Get(by) keyB := t.Data[b].Get(by) intA, errA := strconv.ParseInt(keyA, 10, 64) intB, errB := strconv.ParseInt(keyB, 10, 64) if errA == nil && errB == nil { return intA < intB } return keyA < keyB } func (t *Table) Swap(a, b int) { tmp := t.Data[a] t.Data[a] = t.Data[b] t.Data[b] = tmp } func (t *Table) Sort() { sort.Sort(t) } func (t *Table) ReverseSort() { sort.Sort(sort.Reverse(t)) } func (t *Table) WriteListTo(dst io.Writer) (n int64, err error) { if _, err := dst.Write([]byte{'['}); err != nil { return -1, err } n = 1 for i, env := range t.Data { bytes, err := env.WriteTo(dst) if err != nil { return -1, err } n += bytes if i != len(t.Data)-1 { if _, err := dst.Write([]byte{','}); err != nil { return -1, err } n++ } } if _, err := dst.Write([]byte{']'}); err != nil { return -1, err } return n + 1, nil } func (t *Table) ToListString() (string, error) { buffer := bytes.NewBuffer(nil) if _, err := t.WriteListTo(buffer); err != nil { return "", err } return buffer.String(), nil } func (t *Table) WriteTo(dst io.Writer) (n int64, err error) { for _, env := range t.Data { bytes, err := env.WriteTo(dst) if err != nil { return -1, err } n += bytes } return n, nil } func (t *Table) ReadListFrom(src []byte) (n int64, err error) { var array []interface{} if err := json.Unmarshal(src, &array); err != nil { return -1, err } for _, item := range array { if m, ok := item.(map[string]interface{}); ok { env := &Env{} for key, value := range m { env.SetAuto(key, value) } t.Add(env) } } return int64(len(src)), nil } func (t *Table) ReadFrom(src io.Reader) (n int64, err error) { decoder := NewDecoder(src) for { env, err := decoder.Decode() if err == io.EOF { return 0, nil } else if err != nil { return -1, err } t.Add(env) } } 
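The Table type above is normally filled from a job's JSON output via Output.AddTable (see streams.go), but it can also be driven directly. A minimal, self-contained sketch of direct use (keys and values are invented for illustration; the import path is assumed from this tree):

package main

import (
	"os"

	"github.com/docker/docker/engine"
)

func main() {
	// Sort by the "Id" key; numeric strings compare as integers (see lessBy).
	t := engine.NewTable("Id", 2)

	a := &engine.Env{}
	a.Set("Id", "10")
	a.Set("Name", "beta")
	t.Add(a)

	b := &engine.Env{}
	b.Set("Id", "2")
	b.Set("Name", "alpha")
	t.Add(b)

	t.Sort()

	// Writes a JSON list along the lines of:
	// [{"Id":2,"Name":"alpha"},{"Id":10,"Name":"beta"}]
	t.WriteListTo(os.Stdout)
}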
docker-1.6.2/engine/streams_test.go0000644000175000017500000001146412524223634016653 0ustar tianontianonpackage engine import ( "bufio" "bytes" "fmt" "io" "io/ioutil" "strings" "testing" ) type sentinelWriteCloser struct { calledWrite bool calledClose bool } func (w *sentinelWriteCloser) Write(p []byte) (int, error) { w.calledWrite = true return len(p), nil } func (w *sentinelWriteCloser) Close() error { w.calledClose = true return nil } func TestOutputAddEnv(t *testing.T) { input := "{\"foo\": \"bar\", \"answer_to_life_the_universe_and_everything\": 42}" o := NewOutput() result, err := o.AddEnv() if err != nil { t.Fatal(err) } o.Write([]byte(input)) o.Close() if v := result.Get("foo"); v != "bar" { t.Errorf("Expected %v, got %v", "bar", v) } if v := result.GetInt("answer_to_life_the_universe_and_everything"); v != 42 { t.Errorf("Expected %v, got %v", 42, v) } if v := result.Get("this-value-doesnt-exist"); v != "" { t.Errorf("Expected %v, got %v", "", v) } } func TestOutputAddClose(t *testing.T) { o := NewOutput() var s sentinelWriteCloser o.Add(&s) if err := o.Close(); err != nil { t.Fatal(err) } // Write data after the output is closed. // Write should succeed, but no destination should receive it. if _, err := o.Write([]byte("foo bar")); err != nil { t.Fatal(err) } if !s.calledClose { t.Fatal("Output.Close() didn't close the destination") } } func TestOutputAddPipe(t *testing.T) { var testInputs = []string{ "hello, world!", "One\nTwo\nThree", "", "A line\nThen another nl-terminated line\n", "A line followed by an empty line\n\n", } for _, input := range testInputs { expectedOutput := input o := NewOutput() r, err := o.AddPipe() if err != nil { t.Fatal(err) } go func(o *Output) { if n, err := o.Write([]byte(input)); err != nil { t.Error(err) } else if n != len(input) { t.Errorf("Expected %d, got %d", len(input), n) } if err := o.Close(); err != nil { t.Error(err) } }(o) output, err := ioutil.ReadAll(r) if err != nil { t.Fatal(err) } if string(output) != expectedOutput { t.Errorf("Last line is not stored as return string.\nExpected: '%s'\nGot: '%s'", expectedOutput, output) } } } func TestTail(t *testing.T) { var tests = make(map[string][]string) tests["hello, world!"] = []string{ "", "hello, world!", "hello, world!", "hello, world!", } tests["One\nTwo\nThree"] = []string{ "", "Three", "Two\nThree", "One\nTwo\nThree", } tests["One\nTwo\n\n\n"] = []string{ "", "Two", "One\nTwo", } for input, outputs := range tests { for n, expectedOutput := range outputs { output := Tail(bytes.NewBufferString(input), n) if output != expectedOutput { t.Errorf("Tail n=%d returned wrong result.\nExpected: '%s'\nGot : '%s'", n, expectedOutput, output) } } } } func lastLine(txt string) string { scanner := bufio.NewScanner(strings.NewReader(txt)) var lastLine string for scanner.Scan() { lastLine = scanner.Text() } return lastLine } func TestOutputAdd(t *testing.T) { o := NewOutput() b := &bytes.Buffer{} o.Add(b) input := "hello, world!" 
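// Write should report the full input length, and the buffer should
// receive exactly the bytes that were written.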
if n, err := o.Write([]byte(input)); err != nil { t.Fatal(err) } else if n != len(input) { t.Fatalf("Expected %d, got %d", len(input), n) } if output := b.String(); output != input { t.Fatalf("Received wrong data from Add.\nExpected: '%s'\nGot: '%s'", input, output) } } func TestOutputWriteError(t *testing.T) { o := NewOutput() buf := &bytes.Buffer{} o.Add(buf) r, w := io.Pipe() input := "Hello there" expectedErr := fmt.Errorf("This is an error") r.CloseWithError(expectedErr) o.Add(w) n, err := o.Write([]byte(input)) if err != expectedErr { t.Fatalf("Output.Write() should return the first error encountered, if any") } if buf.String() != input { t.Fatalf("Output.Write() should attempt write on all destinations, even after encountering an error") } if n != len(input) { t.Fatalf("Output.Write() should return the size of the input if it successfully writes to at least one destination") } } func TestInputAddEmpty(t *testing.T) { i := NewInput() var b bytes.Buffer if err := i.Add(&b); err != nil { t.Fatal(err) } data, err := ioutil.ReadAll(i) if err != nil { t.Fatal(err) } if len(data) > 0 { t.Fatalf("Read from empty input should yield no data") } } func TestInputAddTwo(t *testing.T) { i := NewInput() var b1 bytes.Buffer // First add should succeed if err := i.Add(&b1); err != nil { t.Fatal(err) } var b2 bytes.Buffer // Second add should fail if err := i.Add(&b2); err == nil { t.Fatalf("Adding a second source should return an error") } } func TestInputAddNotEmpty(t *testing.T) { i := NewInput() b := bytes.NewBufferString("hello world\nabc") expectedResult := b.String() i.Add(b) result, err := ioutil.ReadAll(i) if err != nil { t.Fatal(err) } if string(result) != expectedResult { t.Fatalf("Expected: %v\nReceived: %v", expectedResult, result) } } docker-1.6.2/engine/shutdown_test.go0000644000175000017500000000402412524223634017042 0ustar tianontianonpackage engine import ( "testing" "time" ) func TestShutdownEmpty(t *testing.T) { eng := New() if eng.IsShutdown() { t.Fatalf("IsShutdown should be false") } eng.Shutdown() if !eng.IsShutdown() { t.Fatalf("IsShutdown should be true") } } func TestShutdownAfterRun(t *testing.T) { eng := New() var called bool eng.Register("foo", func(job *Job) Status { called = true return StatusOK }) if err := eng.Job("foo").Run(); err != nil { t.Fatal(err) } eng.Shutdown() if err := eng.Job("foo").Run(); err == nil { t.Fatalf("%#v", *eng) } } // An approximate and racy, but better-than-nothing test that Shutdown() // blocks until a running job completes. func TestShutdownDuringRun(t *testing.T) { var ( jobDelay time.Duration = 500 * time.Millisecond jobDelayLow time.Duration = 100 * time.Millisecond jobDelayHigh time.Duration = 700 * time.Millisecond ) eng := New() var completed bool eng.Register("foo", func(job *Job) Status { time.Sleep(jobDelay) completed = true return StatusOK }) go eng.Job("foo").Run() time.Sleep(50 * time.Millisecond) done := make(chan struct{}) var startShutdown time.Time go func() { startShutdown = time.Now() eng.Shutdown() close(done) }() time.Sleep(50 * time.Millisecond) if err := eng.Job("foo").Run(); err == nil { t.Fatalf("run on shutdown should fail: %#v", *eng) } <-done // Verify that Shutdown() blocks for roughly 500ms, instead // of returning almost instantly. // // We use >100ms to leave ample margin for race conditions between // goroutines. It's possible (but unlikely in reasonable testing // conditions) that this test will cause a false positive or false // negative. But it's probably better than not having any test // for the 99.999% of time where testing conditions are reasonable.
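// The (jobDelayLow, jobDelayHigh) window below brackets the expected
// ~500ms block without demanding precise timing.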
if d := time.Since(startShutdown); d.Nanoseconds() < jobDelayLow.Nanoseconds() { t.Fatalf("shutdown did not block long enough: %v", d) } else if d.Nanoseconds() > jobDelayHigh.Nanoseconds() { t.Fatalf("shutdown blocked too long: %v", d) } if !completed { t.Fatalf("job did not complete") } } docker-1.6.2/engine/hack.go0000644000175000017500000000055612524223634015044 0ustar tianontianonpackage engine type Hack map[string]interface{} func (eng *Engine) Hack_GetGlobalVar(key string) interface{} { if eng.hack == nil { return nil } val, exists := eng.hack[key] if !exists { return nil } return val } func (eng *Engine) Hack_SetGlobalVar(key string, val interface{}) { if eng.hack == nil { eng.hack = make(Hack) } eng.hack[key] = val } docker-1.6.2/engine/helpers_test.go0000644000175000017500000000024012524223634016625 0ustar tianontianonpackage engine import ( "testing" ) var globalTestID string func mkJob(t *testing.T, name string, args ...string) *Job { return New().Job(name, args...) } docker-1.6.2/engine/job_test.go0000644000175000017500000000406412524223634015745 0ustar tianontianonpackage engine import ( "bytes" "fmt" "testing" ) func TestJobStatusOK(t *testing.T) { eng := New() eng.Register("return_ok", func(job *Job) Status { return StatusOK }) err := eng.Job("return_ok").Run() if err != nil { t.Fatalf("Expected: err=%v\nReceived: err=%v", nil, err) } } func TestJobStatusErr(t *testing.T) { eng := New() eng.Register("return_err", func(job *Job) Status { return StatusErr }) err := eng.Job("return_err").Run() if err == nil { t.Fatalf("When a job returns StatusErr, Run() should return an error") } } func TestJobStatusNotFound(t *testing.T) { eng := New() eng.Register("return_not_found", func(job *Job) Status { return StatusNotFound }) err := eng.Job("return_not_found").Run() if err == nil { t.Fatalf("When a job returns StatusNotFound, Run() should return an error") } } func TestJobStdoutString(t *testing.T) { eng := New() // FIXME: test multiple combinations of output and status eng.Register("say_something_in_stdout", func(job *Job) Status { job.Printf("Hello world\n") return StatusOK }) job := eng.Job("say_something_in_stdout") var outputBuffer = bytes.NewBuffer(nil) job.Stdout.Add(outputBuffer) if err := job.Run(); err != nil { t.Fatal(err) } fmt.Println(outputBuffer) var output = Tail(outputBuffer, 1) if expectedOutput := "Hello world"; output != expectedOutput { t.Fatalf("Stdout last line:\nExpected: %v\nReceived: %v", expectedOutput, output) } } func TestJobStderrString(t *testing.T) { eng := New() // FIXME: test multiple combinations of output and status eng.Register("say_something_in_stderr", func(job *Job) Status { job.Errorf("Something might happen\nHere it comes!\nOh no...\nSomething happened\n") return StatusOK }) job := eng.Job("say_something_in_stderr") var outputBuffer = bytes.NewBuffer(nil) job.Stderr.Add(outputBuffer) if err := job.Run(); err != nil { t.Fatal(err) } var output = Tail(outputBuffer, 1) if expectedOutput := "Something happened"; output != expectedOutput { t.Fatalf("Stderr last line:\nExpected: %v\nReceived: %v", expectedOutput, output) } } docker-1.6.2/engine/table_test.go0000644000175000017500000000402112524223634016253 0ustar tianontianonpackage engine import ( "bytes" "encoding/json" "testing" ) func TestTableWriteTo(t *testing.T) { table := NewTable("", 0) e := &Env{} e.Set("foo", "bar") table.Add(e) var buf bytes.Buffer if _, err := table.WriteTo(&buf); err != nil { t.Fatal(err) } output := make(map[string]string) if err := json.Unmarshal(buf.Bytes(), 
&output); err != nil { t.Fatal(err) } if len(output) != 1 { t.Fatalf("Incorrect output: %v", output) } if val, exists := output["foo"]; !exists || val != "bar" { t.Fatalf("Inccorect output: %v", output) } } func TestTableSortStringValue(t *testing.T) { table := NewTable("Key", 0) e := &Env{} e.Set("Key", "A") table.Add(e) e = &Env{} e.Set("Key", "D") table.Add(e) e = &Env{} e.Set("Key", "B") table.Add(e) e = &Env{} e.Set("Key", "C") table.Add(e) table.Sort() if len := table.Len(); len != 4 { t.Fatalf("Expected 4, got %d", len) } if value := table.Data[0].Get("Key"); value != "A" { t.Fatalf("Expected A, got %s", value) } if value := table.Data[1].Get("Key"); value != "B" { t.Fatalf("Expected B, got %s", value) } if value := table.Data[2].Get("Key"); value != "C" { t.Fatalf("Expected C, got %s", value) } if value := table.Data[3].Get("Key"); value != "D" { t.Fatalf("Expected D, got %s", value) } } func TestTableReverseSortStringValue(t *testing.T) { table := NewTable("Key", 0) e := &Env{} e.Set("Key", "A") table.Add(e) e = &Env{} e.Set("Key", "D") table.Add(e) e = &Env{} e.Set("Key", "B") table.Add(e) e = &Env{} e.Set("Key", "C") table.Add(e) table.ReverseSort() if len := table.Len(); len != 4 { t.Fatalf("Expected 4, got %d", len) } if value := table.Data[0].Get("Key"); value != "D" { t.Fatalf("Expected D, got %s", value) } if value := table.Data[1].Get("Key"); value != "C" { t.Fatalf("Expected B, got %s", value) } if value := table.Data[2].Get("Key"); value != "B" { t.Fatalf("Expected C, got %s", value) } if value := table.Data[3].Get("Key"); value != "A" { t.Fatalf("Expected A, got %s", value) } } docker-1.6.2/engine/env.go0000644000175000017500000001533712524223634014731 0ustar tianontianonpackage engine import ( "bytes" "encoding/json" "fmt" "io" "strconv" "strings" "time" "github.com/docker/docker/utils" ) type Env []string // Get returns the last value associated with the given key. If there are no // values associated with the key, Get returns the empty string. func (env *Env) Get(key string) (value string) { // not using Map() because of the extra allocations https://github.com/docker/docker/pull/7488#issuecomment-51638315 for _, kv := range *env { if strings.Index(kv, "=") == -1 { continue } parts := strings.SplitN(kv, "=", 2) if parts[0] != key { continue } if len(parts) < 2 { value = "" } else { value = parts[1] } } return } func (env *Env) Exists(key string) bool { _, exists := env.Map()[key] return exists } // Len returns the number of keys in the environment. // Note that len(env) might be different from env.Len(), // because the same key might be set multiple times. 
func (env *Env) Len() int { return len(env.Map()) } func (env *Env) Init(src *Env) { (*env) = make([]string, 0, len(*src)) for _, val := range *src { (*env) = append((*env), val) } } func (env *Env) GetBool(key string) (value bool) { s := strings.ToLower(strings.Trim(env.Get(key), " \t")) if s == "" || s == "0" || s == "no" || s == "false" || s == "none" { return false } return true } func (env *Env) SetBool(key string, value bool) { if value { env.Set(key, "1") } else { env.Set(key, "0") } } func (env *Env) GetTime(key string) (time.Time, error) { t, err := time.Parse(time.RFC3339Nano, env.Get(key)) return t, err } func (env *Env) SetTime(key string, t time.Time) { env.Set(key, t.Format(time.RFC3339Nano)) } func (env *Env) GetInt(key string) int { return int(env.GetInt64(key)) } func (env *Env) GetInt64(key string) int64 { s := strings.Trim(env.Get(key), " \t") val, err := strconv.ParseInt(s, 10, 64) if err != nil { return 0 } return val } func (env *Env) SetInt(key string, value int) { env.Set(key, fmt.Sprintf("%d", value)) } func (env *Env) SetInt64(key string, value int64) { env.Set(key, fmt.Sprintf("%d", value)) } // Returns nil if key not found func (env *Env) GetList(key string) []string { sval := env.Get(key) if sval == "" { return nil } l := make([]string, 0, 1) if err := json.Unmarshal([]byte(sval), &l); err != nil { l = append(l, sval) } return l } func (env *Env) GetSubEnv(key string) *Env { sval := env.Get(key) if sval == "" { return nil } buf := bytes.NewBufferString(sval) var sub Env if err := sub.Decode(buf); err != nil { return nil } return &sub } func (env *Env) SetSubEnv(key string, sub *Env) error { var buf bytes.Buffer if err := sub.Encode(&buf); err != nil { return err } env.Set(key, string(buf.Bytes())) return nil } func (env *Env) GetJson(key string, iface interface{}) error { sval := env.Get(key) if sval == "" { return nil } return json.Unmarshal([]byte(sval), iface) } func (env *Env) SetJson(key string, value interface{}) error { sval, err := json.Marshal(value) if err != nil { return err } env.Set(key, string(sval)) return nil } func (env *Env) SetList(key string, value []string) error { return env.SetJson(key, value) } func (env *Env) Set(key, value string) { *env = append(*env, key+"="+value) } func NewDecoder(src io.Reader) *Decoder { return &Decoder{ json.NewDecoder(src), } } type Decoder struct { *json.Decoder } func (decoder *Decoder) Decode() (*Env, error) { m := make(map[string]interface{}) if err := decoder.Decoder.Decode(&m); err != nil { return nil, err } env := &Env{} for key, value := range m { env.SetAuto(key, value) } return env, nil } // Decode decodes `src` as a json dictionary, and adds // each decoded key-value pair to the environment. // // If `src` cannot be decoded as a json dictionary, an error // is returned. func (env *Env) Decode(src io.Reader) error { m := make(map[string]interface{}) if err := json.NewDecoder(src).Decode(&m); err != nil { return err } for k, v := range m { env.SetAuto(k, v) } return nil } func (env *Env) SetAuto(k string, v interface{}) { // Issue 7941 - if the value in the incoming JSON is null then treat it // as if they never specified the property at all. if v == nil { return } // FIXME: we fix-convert float values to int, because // encoding/json decodes integers to float64, but cannot encode them back.
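// (Illustrative aside, not original source: json.Unmarshal of {"n": 1} into a
// map[string]interface{} yields float64(1) for "n"; that is why SetAuto routes
// numeric values through SetInt64 below instead of re-marshaling the float64.)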
// (See http://golang.org/src/pkg/encoding/json/decode.go#L46) if fval, ok := v.(float64); ok { env.SetInt64(k, int64(fval)) } else if sval, ok := v.(string); ok { env.Set(k, sval) } else if val, err := json.Marshal(v); err == nil { env.Set(k, string(val)) } else { env.Set(k, fmt.Sprintf("%v", v)) } } func changeFloats(v interface{}) interface{} { switch v := v.(type) { case float64: return int(v) case map[string]interface{}: for key, val := range v { v[key] = changeFloats(val) } case []interface{}: for idx, val := range v { v[idx] = changeFloats(val) } } return v } func (env *Env) Encode(dst io.Writer) error { m := make(map[string]interface{}) for k, v := range env.Map() { var val interface{} if err := json.Unmarshal([]byte(v), &val); err == nil { // FIXME: we fix-convert float values to int, because // encoding/json decodes integers to float64, but cannot encode them back. // (See http://golang.org/src/pkg/encoding/json/decode.go#L46) m[k] = changeFloats(val) } else { m[k] = v } } if err := json.NewEncoder(dst).Encode(&m); err != nil { return err } return nil } func (env *Env) WriteTo(dst io.Writer) (int64, error) { wc := utils.NewWriteCounter(dst) err := env.Encode(wc) return wc.Count, err } func (env *Env) Import(src interface{}) (err error) { defer func() { if err != nil { err = fmt.Errorf("ImportEnv: %s", err) } }() var buf bytes.Buffer if err := json.NewEncoder(&buf).Encode(src); err != nil { return err } if err := env.Decode(&buf); err != nil { return err } return nil } func (env *Env) Map() map[string]string { m := make(map[string]string) for _, kv := range *env { parts := strings.SplitN(kv, "=", 2) m[parts[0]] = parts[1] } return m } // MultiMap returns a representation of env as a // map of string arrays, keyed by string. // This is the same structure as http headers for example, // which allow each key to have multiple values. func (env *Env) MultiMap() map[string][]string { m := make(map[string][]string) for _, kv := range *env { parts := strings.SplitN(kv, "=", 2) m[parts[0]] = append(m[parts[0]], parts[1]) } return m } // InitMultiMap removes all values in env, then initializes // new values from the contents of m. func (env *Env) InitMultiMap(m map[string][]string) { (*env) = make([]string, 0, len(m)) for k, vals := range m { for _, v := range vals { env.Set(k, v) } } } docker-1.6.2/utils/0000755000175000017500000000000012524223634013474 5ustar tianontianondocker-1.6.2/utils/tmpdir.go0000644000175000017500000000052512524223634015324 0ustar tianontianonpackage utils import ( "os" "path/filepath" ) // TempDir returns the default directory to use for temporary files. 
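A hedged usage sketch of the fallback behavior (the rootDir value is an example, not a requirement):

	package main

	import (
		"fmt"

		"github.com/docker/docker/utils"
	)

	func main() {
		// With DOCKER_TMPDIR unset, the result is <rootDir>/tmp, created with mode 0700.
		dir, err := utils.TempDir("/var/lib/docker")
		if err != nil {
			panic(err)
		}
		fmt.Println(dir) // "/var/lib/docker/tmp", or $DOCKER_TMPDIR when that is set
	}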
func TempDir(rootDir string) (string, error) { var tmpDir string if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { tmpDir = filepath.Join(rootDir, "tmp") } err := os.MkdirAll(tmpDir, 0700) return tmpDir, err } docker-1.6.2/utils/streamformatter_test.go0000644000175000017500000000330112524223634020276 0ustar tianontianonpackage utils import ( "encoding/json" "errors" "reflect" "testing" ) func TestFormatStream(t *testing.T) { sf := NewStreamFormatter(true) res := sf.FormatStream("stream") if string(res) != `{"stream":"stream"}`+"\r\n" { t.Fatalf("%q", res) } } func TestFormatStatus(t *testing.T) { sf := NewStreamFormatter(true) res := sf.FormatStatus("ID", "%s%d", "a", 1) if string(res) != `{"status":"a1","id":"ID"}`+"\r\n" { t.Fatalf("%q", res) } } func TestFormatSimpleError(t *testing.T) { sf := NewStreamFormatter(true) res := sf.FormatError(errors.New("Error for formatter")) if string(res) != `{"errorDetail":{"message":"Error for formatter"},"error":"Error for formatter"}`+"\r\n" { t.Fatalf("%q", res) } } func TestFormatJSONError(t *testing.T) { sf := NewStreamFormatter(true) err := &JSONError{Code: 50, Message: "Json error"} res := sf.FormatError(err) if string(res) != `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}`+"\r\n" { t.Fatalf("%q", res) } } func TestFormatProgress(t *testing.T) { sf := NewStreamFormatter(true) progress := &JSONProgress{ Current: 15, Total: 30, Start: 1, } res := sf.FormatProgress("id", "action", progress) msg := &JSONMessage{} if err := json.Unmarshal(res, msg); err != nil { t.Fatal(err) } if msg.ID != "id" { t.Fatalf("ID must be 'id', got: %s", msg.ID) } if msg.Status != "action" { t.Fatalf("Status must be 'action', got: %s", msg.Status) } if msg.ProgressMessage != progress.String() { t.Fatalf("ProgressMessage must be %s, got: %s", progress.String(), msg.ProgressMessage) } if !reflect.DeepEqual(msg.Progress, progress) { t.Fatal("Original progress does not equal progress from FormatProgress") } } docker-1.6.2/utils/http.go0000644000175000017500000001021112524223634014775 0ustar tianontianonpackage utils import ( "io" "net/http" "strings" log "github.com/Sirupsen/logrus" ) // VersionInfo is used to model entities which have a version. // It is basically a tuple with name and version. type VersionInfo interface { Name() string Version() string } func validVersion(version VersionInfo) bool { const stopChars = " \t\r\n/" name := version.Name() vers := version.Version() if len(name) == 0 || strings.ContainsAny(name, stopChars) { return false } if len(vers) == 0 || strings.ContainsAny(vers, stopChars) { return false } return true } // Convert versions to a string and append the string to the string base. // // Each VersionInfo will be converted to a string in the format of // "product/version", where the "product" is taken from the Name() method and // the version from the Version() method. Several pieces of version information // will be concatenated and separated by spaces. func appendVersions(base string, versions ...VersionInfo) string { if len(versions) == 0 { return base } verstrs := make([]string, 0, 1+len(versions)) if len(base) > 0 { verstrs = append(verstrs, base) } for _, v := range versions { if !validVersion(v) { continue } verstrs = append(verstrs, v.Name()+"/"+v.Version()) } return strings.Join(verstrs, " ") } // HTTPRequestDecorator is used to change an instance of // http.Request. It could be used to add more header fields, // change body, etc.
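As a concrete sketch of an implementation, the hypothetical headerDecorator below (not part of this package) satisfies the interface that follows by stamping one fixed header onto every request:

	// headerDecorator adds one fixed header to each outgoing request.
	type headerDecorator struct {
		key, value string
	}

	func (d *headerDecorator) ChangeRequest(req *http.Request) (*http.Request, error) {
		if req == nil {
			return req, nil // stay nil-tolerant, like the decorators below
		}
		req.Header.Set(d.key, d.value)
		return req, nil
	}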
type HTTPRequestDecorator interface { // ChangeRequest() changes the request accordingly. // The changed request will be returned or err will be non-nil // if an error occurs. ChangeRequest(req *http.Request) (newReq *http.Request, err error) } // HTTPUserAgentDecorator appends the product/version to the user agent field // of a request. type HTTPUserAgentDecorator struct { versions []VersionInfo } func NewHTTPUserAgentDecorator(versions ...VersionInfo) HTTPRequestDecorator { return &HTTPUserAgentDecorator{ versions: versions, } } func (h *HTTPUserAgentDecorator) ChangeRequest(req *http.Request) (newReq *http.Request, err error) { if req == nil { return req, nil } userAgent := appendVersions(req.UserAgent(), h.versions...) if len(userAgent) > 0 { req.Header.Set("User-Agent", userAgent) } return req, nil } type HTTPMetaHeadersDecorator struct { Headers map[string][]string } func (h *HTTPMetaHeadersDecorator) ChangeRequest(req *http.Request) (newReq *http.Request, err error) { if h.Headers == nil { return req, nil } for k, v := range h.Headers { req.Header[k] = v } return req, nil } type HTTPAuthDecorator struct { login string password string } func NewHTTPAuthDecorator(login, password string) HTTPRequestDecorator { return &HTTPAuthDecorator{ login: login, password: password, } } func (self *HTTPAuthDecorator) ChangeRequest(req *http.Request) (*http.Request, error) { req.SetBasicAuth(self.login, self.password) return req, nil } // HTTPRequestFactory creates an HTTP request // and applies a list of decorators on the request. type HTTPRequestFactory struct { decorators []HTTPRequestDecorator } func NewHTTPRequestFactory(d ...HTTPRequestDecorator) *HTTPRequestFactory { return &HTTPRequestFactory{ decorators: d, } } func (self *HTTPRequestFactory) AddDecorator(d ...HTTPRequestDecorator) { self.decorators = append(self.decorators, d...) } func (self *HTTPRequestFactory) GetDecorators() []HTTPRequestDecorator { return self.decorators } // NewRequest() creates a new *http.Request, // applies all decorators in the HTTPRequestFactory on the request, // then applies decorators provided by d on the request. func (h *HTTPRequestFactory) NewRequest(method, urlStr string, body io.Reader, d ...HTTPRequestDecorator) (*http.Request, error) { req, err := http.NewRequest(method, urlStr, body) if err != nil { return nil, err } // By default, a nil factory should work.
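// (Illustrative usage, not original source — the factory value and URL are
// hypothetical:
//
//	factory := NewHTTPRequestFactory(NewHTTPUserAgentDecorator(versions...))
//	req, err := factory.NewRequest("GET", "https://registry.example.com/v1/_ping", nil)
//
// Factory-registered decorators run first, then any decorators passed per call.)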
if h == nil { return req, nil } for _, dec := range h.decorators { req, err = dec.ChangeRequest(req) if err != nil { return nil, err } } for _, dec := range d { req, err = dec.ChangeRequest(req) if err != nil { return nil, err } } log.Debugf("%v -- HEADERS: %v", req.URL, req.Header) return req, err } docker-1.6.2/utils/jsonmessage_test.go0000644000175000017500000000172012524223634017400 0ustar tianontianonpackage utils import ( "testing" ) func TestError(t *testing.T) { je := JSONError{404, "Not found"} if je.Error() != "Not found" { t.Fatalf("Expected 'Not found' got '%s'", je.Error()) } } func TestProgress(t *testing.T) { jp := JSONProgress{} if jp.String() != "" { t.Fatalf("Expected empty string, got '%s'", jp.String()) } expected := " 1 B" jp2 := JSONProgress{Current: 1} if jp2.String() != expected { t.Fatalf("Expected %q, got %q", expected, jp2.String()) } expected = "[=========================> ] 50 B/100 B" jp3 := JSONProgress{Current: 50, Total: 100} if jp3.String() != expected { t.Fatalf("Expected %q, got %q", expected, jp3.String()) } // this number can't be negative gh#7136 expected = "[==================================================>] 50 B/40 B" jp4 := JSONProgress{Current: 50, Total: 40} if jp4.String() != expected { t.Fatalf("Expected %q, got %q", expected, jp4.String()) } } docker-1.6.2/utils/utils_daemon_test.go0000644000175000017500000000101012524223634017545 0ustar tianontianonpackage utils import ( "os" "path" "testing" ) func TestIsFileOwner(t *testing.T) { var err error var file *os.File if file, err = os.Create(path.Join(os.TempDir(), "testIsFileOwner")); err != nil { t.Fatalf("failed to create file: %s", err) } file.Close() if ok := IsFileOwner(path.Join(os.TempDir(), "testIsFileOwner")); !ok { t.Fatalf("User should be owner of file") } if err = os.Remove(path.Join(os.TempDir(), "testIsFileOwner")); err != nil { t.Fatalf("failed to remove file: %s", err) } } docker-1.6.2/utils/streamformatter.go0000644000175000017500000000551712524223634017252 0ustar tianontianonpackage utils import ( "encoding/json" "fmt" "github.com/docker/docker/pkg/progressreader" "io" ) type StreamFormatter struct { json bool } func NewStreamFormatter(json bool) *StreamFormatter { return &StreamFormatter{json} } const streamNewline = "\r\n" var streamNewlineBytes = []byte(streamNewline) func (sf *StreamFormatter) FormatStream(str string) []byte { if sf.json { b, err := json.Marshal(&JSONMessage{Stream: str}) if err != nil { return sf.FormatError(err) } return append(b, streamNewlineBytes...) } return []byte(str + "\r") } func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []byte { str := fmt.Sprintf(format, a...) if sf.json { b, err := json.Marshal(&JSONMessage{ID: id, Status: str}) if err != nil { return sf.FormatError(err) } return append(b, streamNewlineBytes...) } return []byte(str + streamNewline) } func (sf *StreamFormatter) FormatError(err error) []byte { if sf.json { jsonError, ok := err.(*JSONError) if !ok { jsonError = &JSONError{Message: err.Error()} } if b, err := json.Marshal(&JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil { return append(b, streamNewlineBytes...)
} return []byte("{\"error\":\"format error\"}" + streamNewline) } return []byte("Error: " + err.Error() + streamNewline) } func (sf *StreamFormatter) FormatProg(id, action string, p interface{}) []byte { switch progress := p.(type) { case *JSONProgress: return sf.FormatProgress(id, action, progress) case progressreader.PR_JSONProgress: return sf.FormatProgress(id, action, &JSONProgress{Current: progress.GetCurrent(), Total: progress.GetTotal()}) } return nil } func (sf *StreamFormatter) FormatProgress(id, action string, progress *JSONProgress) []byte { if progress == nil { progress = &JSONProgress{} } if sf.json { b, err := json.Marshal(&JSONMessage{ Status: action, ProgressMessage: progress.String(), Progress: progress, ID: id, }) if err != nil { return nil } return b } endl := "\r" if progress.String() == "" { endl += "\n" } return []byte(action + " " + progress.String() + endl) } func (sf *StreamFormatter) Json() bool { return sf.json } type StdoutFormater struct { io.Writer *StreamFormatter } func (sf *StdoutFormater) Write(buf []byte) (int, error) { formattedBuf := sf.StreamFormatter.FormatStream(string(buf)) n, err := sf.Writer.Write(formattedBuf) if n != len(formattedBuf) { return n, io.ErrShortWrite } return len(buf), err } type StderrFormater struct { io.Writer *StreamFormatter } func (sf *StderrFormater) Write(buf []byte) (int, error) { formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m") n, err := sf.Writer.Write(formattedBuf) if n != len(formattedBuf) { return n, io.ErrShortWrite } return len(buf), err } docker-1.6.2/utils/utils_daemon.go0000644000175000017500000000054012524223634016505 0ustar tianontianon// +build daemon package utils import ( "github.com/docker/docker/pkg/system" "os" ) // IsFileOwner checks whether the current user is the owner of the given file. 
func IsFileOwner(f string) bool { if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil { if int(fileInfo.Uid()) == os.Getuid() { return true } } return false } docker-1.6.2/utils/jsonmessage.go0000644000175000017500000001044212524223634016342 0ustar tianontianonpackage utils import ( "encoding/json" "fmt" "io" "strings" "time" "github.com/docker/docker/pkg/term" "github.com/docker/docker/pkg/timeutils" "github.com/docker/docker/pkg/units" ) type JSONError struct { Code int `json:"code,omitempty"` Message string `json:"message,omitempty"` } func (e *JSONError) Error() string { return e.Message } type JSONProgress struct { terminalFd uintptr Current int `json:"current,omitempty"` Total int `json:"total,omitempty"` Start int64 `json:"start,omitempty"` } func (p *JSONProgress) String() string { var ( width = 200 pbBox string numbersBox string timeLeftBox string ) ws, err := term.GetWinsize(p.terminalFd) if err == nil { width = int(ws.Width) } if p.Current <= 0 && p.Total <= 0 { return "" } current := units.HumanSize(float64(p.Current)) if p.Total <= 0 { return fmt.Sprintf("%8v", current) } total := units.HumanSize(float64(p.Total)) percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 if percentage > 50 { percentage = 50 } if width > 110 { // this number can't be negative gh#7136 numSpaces := 0 if 50-percentage > 0 { numSpaces = 50 - percentage } pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) } numbersBox = fmt.Sprintf("%8v/%v", current, total) if p.Current > 0 && p.Start > 0 && percentage < 50 { fromStart := time.Now().UTC().Sub(time.Unix(int64(p.Start), 0)) perEntry := fromStart / time.Duration(p.Current) left := time.Duration(p.Total-p.Current) * perEntry left = (left / time.Second) * time.Second if width > 50 { timeLeftBox = " " + left.String() } } return pbBox + numbersBox + timeLeftBox } type JSONMessage struct { Stream string `json:"stream,omitempty"` Status string `json:"status,omitempty"` Progress *JSONProgress `json:"progressDetail,omitempty"` ProgressMessage string `json:"progress,omitempty"` //deprecated ID string `json:"id,omitempty"` From string `json:"from,omitempty"` Time int64 `json:"time,omitempty"` Error *JSONError `json:"errorDetail,omitempty"` ErrorMessage string `json:"error,omitempty"` //deprecated } func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { if jm.Error != nil { if jm.Error.Code == 401 { return fmt.Errorf("Authentication is required.") } return jm.Error } var endl string if isTerminal && jm.Stream == "" && jm.Progress != nil { // [2K = erase entire current line fmt.Fprintf(out, "%c[2K\r", 27) endl = "\r" } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal return nil } if jm.Time != 0 { fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(timeutils.RFC3339NanoFixed)) } if jm.ID != "" { fmt.Fprintf(out, "%s: ", jm.ID) } if jm.From != "" { fmt.Fprintf(out, "(from %s) ", jm.From) } if jm.Progress != nil && isTerminal { fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) } else if jm.ProgressMessage != "" { //deprecated fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) } else if jm.Stream != "" { fmt.Fprintf(out, "%s%s", jm.Stream, endl) } else { fmt.Fprintf(out, "%s%s\n", jm.Status, endl) } return nil } func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool) error { var ( dec = json.NewDecoder(in) ids = make(map[string]int) diff = 0 ) for { var jm JSONMessage
if err := dec.Decode(&jm); err != nil { if err == io.EOF { break } return err } if jm.Progress != nil { jm.Progress.terminalFd = terminalFd } if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { line, ok := ids[jm.ID] if !ok { line = len(ids) ids[jm.ID] = line if isTerminal { fmt.Fprintf(out, "\n") } diff = 0 } else { diff = len(ids) - line } if jm.ID != "" && isTerminal { // [{diff}A = move cursor up diff rows fmt.Fprintf(out, "%c[%dA", 27, diff) } } err := jm.Display(out, isTerminal) if jm.ID != "" && isTerminal { // [{diff}B = move cursor down diff rows fmt.Fprintf(out, "%c[%dB", 27, diff) } if err != nil { return err } } return nil } docker-1.6.2/utils/utils_test.go0000644000175000017500000000767612524223634016242 0ustar tianontianonpackage utils import ( "bytes" "os" "strings" "testing" ) func TestReplaceAndAppendEnvVars(t *testing.T) { var ( d = []string{"HOME=/"} o = []string{"HOME=/root", "TERM=xterm"} ) env := ReplaceOrAppendEnvValues(d, o) if len(env) != 2 { t.Fatalf("expected len of 2 got %d", len(env)) } if env[0] != "HOME=/root" { t.Fatalf("expected HOME=/root got '%s'", env[0]) } if env[1] != "TERM=xterm" { t.Fatalf("expected TERM=xterm got '%s'", env[1]) } } // Reading a symlink to a directory must return the directory func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { var err error if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { t.Errorf("failed to create directory: %s", err) } if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { t.Errorf("failed to create symlink: %s", err) } var path string if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { t.Fatalf("failed to read symlink to directory: %s", err) } if path != "/tmp/testReadSymlinkToExistingDirectory" { t.Fatalf("symlink returned unexpected directory: %s", path) } if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { t.Errorf("failed to remove temporary directory: %s", err) } if err = os.Remove("/tmp/dirLinkTest"); err != nil { t.Errorf("failed to remove symlink: %s", err) } } // Reading a non-existing symlink must fail func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { var path string var err error if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { t.Fatalf("error expected for non-existing symlink") } if path != "" { t.Fatalf("expected empty path, but '%s' was returned", path) } } // Reading a symlink to a file must fail func TestReadSymlinkedDirectoryToFile(t *testing.T) { var err error var file *os.File if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { t.Fatalf("failed to create file: %s", err) } file.Close() if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { t.Errorf("failed to create symlink: %s", err) } var path string if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") } if path != "" { t.Fatalf("path should've been empty: %s", path) } if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil { t.Errorf("failed to remove file: %s", err) } if err = os.Remove("/tmp/fileLinkTest"); err != nil { t.Errorf("failed to remove symlink: %s", err) } } func TestWriteCounter(t *testing.T) { dummy1 := "This is a dummy string." dummy2 := "This is another dummy string." 
totalLength := int64(len(dummy1) + len(dummy2)) reader1 := strings.NewReader(dummy1) reader2 := strings.NewReader(dummy2) var buffer bytes.Buffer wc := NewWriteCounter(&buffer) reader1.WriteTo(wc) reader2.WriteTo(wc) if wc.Count != totalLength { t.Errorf("Wrong count: %d vs. %d", wc.Count, totalLength) } if buffer.String() != dummy1+dummy2 { t.Error("Wrong message written") } } func TestImageReference(t *testing.T) { tests := []struct { repo string ref string expected string }{ {"repo", "tag", "repo:tag"}, {"repo", "sha256:c100b11b25d0cacd52c14e0e7bf525e1a4c0e6aec8827ae007055545909d1a64", "repo@sha256:c100b11b25d0cacd52c14e0e7bf525e1a4c0e6aec8827ae007055545909d1a64"}, } for i, test := range tests { actual := ImageReference(test.repo, test.ref) if test.expected != actual { t.Errorf("%d: expected %q, got %q", i, test.expected, actual) } } } func TestDigestReference(t *testing.T) { input := "sha256:c100b11b25d0cacd52c14e0e7bf525e1a4c0e6aec8827ae007055545909d1a64" if !DigestReference(input) { t.Errorf("Expected DigestReference=true for input %q", input) } input = "latest" if DigestReference(input) { t.Errorf("Unexpected DigestReference=true for input %q", input) } } docker-1.6.2/utils/utils.go0000644000175000017500000003265512524223634015176 0ustar tianontianonpackage utils import ( "bufio" "bytes" "crypto/sha1" "crypto/sha256" "encoding/hex" "fmt" "io" "io/ioutil" "net/http" "os" "os/exec" "path/filepath" "regexp" "runtime" "strings" "sync" log "github.com/Sirupsen/logrus" "github.com/docker/docker/autogen/dockerversion" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/common" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/ioutils" ) type KeyValuePair struct { Key string Value string } var ( validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) ) // Request a given URL and return the HTTP response func Download(url string) (resp *http.Response, err error) { if resp, err = http.Get(url); err != nil { return nil, err } if resp.StatusCode >= 400 { return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status) } return resp, nil } func Trunc(s string, maxlen int) string { if len(s) <= maxlen { return s } return s[:maxlen] } // Figure out the absolute path of our own binary (if it's still around).
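Roughly, the resolution below amounts to the following two steps (a simplified model with the panic paths elided; the binary name is an example):

	// path, _ := exec.LookPath(os.Args[0]) // e.g. "docker" -> "/usr/bin/docker"
	// abs, _ := filepath.Abs(path)         // absolutize; "" when the binary is gone
	// return abs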
func SelfPath() string { path, err := exec.LookPath(os.Args[0]) if err != nil { if os.IsNotExist(err) { return "" } if execErr, ok := err.(*exec.Error); ok && os.IsNotExist(execErr.Err) { return "" } panic(err) } path, err = filepath.Abs(path) if err != nil { if os.IsNotExist(err) { return "" } panic(err) } return path } func dockerInitSha1(target string) string { f, err := os.Open(target) if err != nil { return "" } defer f.Close() h := sha1.New() _, err = io.Copy(h, f) if err != nil { return "" } return hex.EncodeToString(h.Sum(nil)) } func isValidDockerInitPath(target string, selfPath string) bool { // target and selfPath should be absolute (InitPath and SelfPath already do this) if target == "" { return false } if dockerversion.IAMSTATIC == "true" { if selfPath == "" { return false } if target == selfPath { return true } targetFileInfo, err := os.Lstat(target) if err != nil { return false } selfPathFileInfo, err := os.Lstat(selfPath) if err != nil { return false } return os.SameFile(targetFileInfo, selfPathFileInfo) } return dockerversion.INITSHA1 != "" && dockerInitSha1(target) == dockerversion.INITSHA1 } // Figure out the path of our dockerinit (which may be SelfPath()) func DockerInitPath(localCopy string) string { selfPath := SelfPath() if isValidDockerInitPath(selfPath, selfPath) { // if we're valid, don't bother checking anything else return selfPath } var possibleInits = []string{ localCopy, dockerversion.INITPATH, filepath.Join(filepath.Dir(selfPath), "dockerinit"), // FHS 3.0 Draft: "/usr/libexec includes internal binaries that are not intended to be executed directly by users or shell scripts. Applications may use a single subdirectory under /usr/libexec." // http://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec "/usr/libexec/docker/dockerinit", "/usr/local/libexec/docker/dockerinit", // FHS 2.3: "/usr/lib includes object files, libraries, and internal binaries that are not intended to be executed directly by users or shell scripts." // http://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA "/usr/lib/docker/dockerinit", "/usr/local/lib/docker/dockerinit", } for _, dockerInit := range possibleInits { if dockerInit == "" { continue } path, err := exec.LookPath(dockerInit) if err == nil { path, err = filepath.Abs(path) if err != nil { // LookPath already validated that this file exists and is executable (following symlinks), so how could Abs fail? 
panic(err) } if isValidDockerInitPath(path, selfPath) { return path } } } return "" } func GetTotalUsedFds() int { if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { log.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) } else { return len(fds) } return -1 } func ValidateID(id string) error { if ok := validHex.MatchString(id); !ok { err := fmt.Errorf("image ID '%s' is invalid", id) return err } return nil } // Code copied from io.Copy(), modified to handle escape sequences func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) { buf := make([]byte, 32*1024) for { nr, er := src.Read(buf) if nr > 0 { // ---- Docker addition // char 16 is C-p if nr == 1 && buf[0] == 16 { nr, er = src.Read(buf) // char 17 is C-q if nr == 1 && buf[0] == 17 { if err := src.Close(); err != nil { return 0, err } return 0, nil } } // ---- End of docker nw, ew := dst.Write(buf[0:nr]) if nw > 0 { written += int64(nw) } if ew != nil { err = ew break } if nr != nw { err = io.ErrShortWrite break } } if er == io.EOF { break } if er != nil { err = er break } } return written, err } func HashData(src io.Reader) (string, error) { h := sha256.New() if _, err := io.Copy(h, src); err != nil { return "", err } return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil } type WriteFlusher struct { sync.Mutex w io.Writer flusher http.Flusher } func (wf *WriteFlusher) Write(b []byte) (n int, err error) { wf.Lock() defer wf.Unlock() n, err = wf.w.Write(b) wf.flusher.Flush() return n, err } // Flush the stream immediately. func (wf *WriteFlusher) Flush() { wf.Lock() defer wf.Unlock() wf.flusher.Flush() } func NewWriteFlusher(w io.Writer) *WriteFlusher { var flusher http.Flusher if f, ok := w.(http.Flusher); ok { flusher = f } else { flusher = &ioutils.NopFlusher{} } return &WriteFlusher{w: w, flusher: flusher} } func NewHTTPRequestError(msg string, res *http.Response) error { return &JSONError{ Message: msg, Code: res.StatusCode, } } // A StatusError reports an unsuccessful exit by a command. type StatusError struct { Status string StatusCode int } func (e *StatusError) Error() string { return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) } func quote(word string, buf *bytes.Buffer) { // Bail out early for "simple" strings if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") { buf.WriteString(word) return } buf.WriteString("'") for i := 0; i < len(word); i++ { b := word[i] if b == '\'' { // Replace literal ' with a close ', a \', and an open ' buf.WriteString("'\\''") } else { buf.WriteByte(b) } } buf.WriteString("'") } // Take a list of strings and escape them so they will be handled right // when passed as arguments to a program via a shell func ShellQuoteArguments(args []string) string { var buf bytes.Buffer for i, arg := range args { if i != 0 { buf.WriteByte(' ') } quote(arg, &buf) } return buf.String() } var globalTestID string // TestDirectory creates a new temporary directory and returns its path. // The contents of directory at path `templateDir` are copied into the // new directory.
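A hedged example of the intended call pattern (the test name and template path are hypothetical; note that with an empty templateDir the temp path is created and then removed, so callers receive a fresh name rather than an existing directory):

	func TestWithScratchDir(t *testing.T) {
		// Copies fixtures/template into a fresh docker-test* temp directory.
		dir, err := utils.TestDirectory("fixtures/template")
		if err != nil {
			t.Fatal(err)
		}
		defer os.RemoveAll(dir)
		// ... exercise code against dir ...
	}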
func TestDirectory(templateDir string) (dir string, err error) { if globalTestID == "" { globalTestID = common.RandomString()[:4] } prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2)) if prefix == "" { prefix = "docker-test-" } dir, err = ioutil.TempDir("", prefix) if err = os.Remove(dir); err != nil { return } if templateDir != "" { if err = archive.CopyWithTar(templateDir, dir); err != nil { return } } return } // GetCallerName introspects the call stack and returns the name of the // function `depth` levels down in the stack. func GetCallerName(depth int) string { // Use the caller function name as a prefix. // This helps trace temp directories back to their test. pc, _, _, _ := runtime.Caller(depth + 1) callerLongName := runtime.FuncForPC(pc).Name() parts := strings.Split(callerLongName, ".") callerShortName := parts[len(parts)-1] return callerShortName } func CopyFile(src, dst string) (int64, error) { if src == dst { return 0, nil } sf, err := os.Open(src) if err != nil { return 0, err } defer sf.Close() if err := os.Remove(dst); err != nil && !os.IsNotExist(err) { return 0, err } df, err := os.Create(dst) if err != nil { return 0, err } defer df.Close() return io.Copy(df, sf) } // ReplaceOrAppendEnvValues returns the defaults with the overrides either // replaced by env key or appended to the list func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { cache := make(map[string]int, len(defaults)) for i, e := range defaults { parts := strings.SplitN(e, "=", 2) cache[parts[0]] = i } for _, value := range overrides { // Values w/o = mean they want this env to be removed/unset. if !strings.Contains(value, "=") { if i, exists := cache[value]; exists { defaults[i] = "" // Used to indicate it should be removed } continue } // Just do a normal set/update parts := strings.SplitN(value, "=", 2) if i, exists := cache[parts[0]]; exists { defaults[i] = value } else { defaults = append(defaults, value) } } // Now remove all entries that we want to "unset" for i := 0; i < len(defaults); i++ { if defaults[i] == "" { defaults = append(defaults[:i], defaults[i+1:]...) i-- } } return defaults } func DoesEnvExist(name string) bool { for _, entry := range os.Environ() { parts := strings.SplitN(entry, "=", 2) if parts[0] == name { return true } } return false } // ReadSymlinkedDirectory returns the target directory of a symlink. // The target of the symbolic link must be a directory; if it resolves to // a file, an error is returned.
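A hedged sketch of both outcomes (the paths are hypothetical):

	// Suppose /tmp/dirLink -> /tmp/realDir and /tmp/fileLink -> /tmp/aFile:
	//
	//	target, err := utils.ReadSymlinkedDirectory("/tmp/dirLink")
	//	// target == "/tmp/realDir", err == nil
	//	_, err = utils.ReadSymlinkedDirectory("/tmp/fileLink")
	//	// err != nil: the canonical path points to a file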
func ReadSymlinkedDirectory(path string) (string, error) { var realPath string var err error if realPath, err = filepath.Abs(path); err != nil { return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) } if realPath, err = filepath.EvalSymlinks(realPath); err != nil { return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) } realPathInfo, err := os.Stat(realPath) if err != nil { return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) } if !realPathInfo.Mode().IsDir() { return "", fmt.Errorf("canonical path points to a file '%s'", realPath) } return realPath, nil } // ValidateContextDirectory checks if all the contents of the directory // can be read and returns an error if some files can't be read. // Symlinks which point to non-existing files don't trigger an error. func ValidateContextDirectory(srcPath string, excludes []string) error { return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error { // skip this directory/file if it's not in the path, it won't get added to the context if relFilePath, err := filepath.Rel(srcPath, filePath); err != nil { return err } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil { return err } else if skip { if f.IsDir() { return filepath.SkipDir } return nil } if err != nil { if os.IsPermission(err) { return fmt.Errorf("can't stat '%s'", filePath) } if os.IsNotExist(err) { return nil } return err } // skip checking if symlinks point to non-existing files, such symlinks can be useful // also skip named pipes, because they hang on open if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { return nil } if !f.IsDir() { currentFile, err := os.Open(filePath) if err != nil && os.IsPermission(err) { return fmt.Errorf("no permission to read from '%s'", filePath) } currentFile.Close() } return nil }) } func StringsContainsNoCase(slice []string, s string) bool { for _, ss := range slice { if strings.ToLower(s) == strings.ToLower(ss) { return true } } return false } // Reads a .dockerignore file and returns the list of file patterns // to ignore. Note this will trim whitespace from each line as well // as use Go's "clean" func to get the shortest/cleanest path for each. func ReadDockerIgnore(path string) ([]string, error) { // Note that a missing .dockerignore file isn't treated as an error reader, err := os.Open(path) if err != nil { if !os.IsNotExist(err) { return nil, fmt.Errorf("Error reading '%s': %v", path, err) } return nil, nil } defer reader.Close() scanner := bufio.NewScanner(reader) var excludes []string for scanner.Scan() { pattern := strings.TrimSpace(scanner.Text()) if pattern == "" { continue } pattern = filepath.Clean(pattern) excludes = append(excludes, pattern) } if err = scanner.Err(); err != nil { return nil, fmt.Errorf("Error reading '%s': %v", path, err) } return excludes, nil } // Wrap a concrete io.Writer and hold a count of the number // of bytes written to the writer during a "session". // This can be convenient when write return is masked // (e.g., json.Encoder.Encode()) type WriteCounter struct { Count int64 Writer io.Writer } func NewWriteCounter(w io.Writer) *WriteCounter { return &WriteCounter{ Writer: w, } } func (wc *WriteCounter) Write(p []byte) (count int, err error) { count, err = wc.Writer.Write(p) wc.Count += int64(count) return } // ImageReference combines `repo` and `ref` and returns a string representing // the combination.
If `ref` is a digest (meaning it's of the form // <algorithm>:<digest>), the returned string is <repo>@<ref>. Otherwise, // ref is assumed to be a tag, and the returned string is <repo>:<ref>. func ImageReference(repo, ref string) string { if DigestReference(ref) { return repo + "@" + ref } return repo + ":" + ref } // DigestReference returns true if ref is a digest reference; i.e. if it // is of the form <algorithm>:<digest>. func DigestReference(ref string) bool { return strings.Contains(ref, ":") } docker-1.6.2/utils/flags.go0000644000175000017500000000227312524223634015123 0ustar tianontianonpackage utils import ( "fmt" "os" flag "github.com/docker/docker/pkg/mflag" ) // ParseFlags is a utility function that adds a help flag if withHelp is true, // calls cmd.Parse(args) and prints a relevant error message if there is an // incorrect number of arguments. It returns an error only if error handling is // set to ContinueOnError and parsing fails. If error handling is set to // ExitOnError, it's safe to ignore the return value. // TODO: move this to a better package than utils func ParseFlags(cmd *flag.FlagSet, args []string, withHelp bool) error { var help *bool if withHelp { help = cmd.Bool([]string{"#help", "-help"}, false, "Print usage") } if err := cmd.Parse(args); err != nil { return err } if help != nil && *help { cmd.Usage() // just in case Usage does not exit os.Exit(0) } if str := cmd.CheckArgs(); str != "" { ReportError(cmd, str, withHelp) } return nil } func ReportError(cmd *flag.FlagSet, str string, withHelp bool) { if withHelp { if os.Args[0] == cmd.Name() { str += ". See '" + os.Args[0] + " --help'" } else { str += ". See '" + os.Args[0] + " " + cmd.Name() + " --help'" } } fmt.Fprintf(cmd.Out(), "docker: %s.\n", str) os.Exit(1) } docker-1.6.2/utils/daemon.go0000644000175000017500000000127512524223634015273 0ustar tianontianonpackage utils import ( "fmt" "io/ioutil" "log" "os" "strconv" ) func CreatePidFile(pidfile string) error { if pidString, err := ioutil.ReadFile(pidfile); err == nil { pid, err := strconv.Atoi(string(pidString)) if err == nil { if _, err := os.Stat(fmt.Sprintf("/proc/%d/", pid)); err == nil { return fmt.Errorf("pid file found, ensure docker is not running or delete %s", pidfile) } } } file, err := os.Create(pidfile) if err != nil { return err } defer file.Close() _, err = fmt.Fprintf(file, "%d", os.Getpid()) return err } func RemovePidFile(pidfile string) { if err := os.Remove(pidfile); err != nil { log.Printf("Error removing %s: %s", pidfile, err) } } docker-1.6.2/api/0000755000175000017500000000000012524223634013105 5ustar tianontianondocker-1.6.2/api/server/0000755000175000017500000000000012524223634014413 5ustar tianontianondocker-1.6.2/api/server/server_unit_test.go0000644000175000017500000003536712524223634020364 0ustar tianontianonpackage server import ( "bytes" "encoding/json" "fmt" "io" "net/http" "net/http/httptest" "reflect" "strings" "testing" "github.com/docker/docker/api" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/version" ) func TestGetBoolParam(t *testing.T) { if ret, err := getBoolParam("true"); err != nil || !ret { t.Fatalf("true -> true, nil | got %t %s", ret, err) } if ret, err := getBoolParam("True"); err != nil || !ret { t.Fatalf("True -> true, nil | got %t %s", ret, err) } if ret, err := getBoolParam("1"); err != nil || !ret { t.Fatalf("1 -> true, nil | got %t %s", ret, err) } if ret, err := getBoolParam(""); err != nil || ret { t.Fatalf("\"\" -> false, nil | got %t %s", ret, err) } if ret, err := getBoolParam("false"); err != nil || ret { t.Fatalf("false -> false, nil | got %t
%s", ret, err) } if ret, err := getBoolParam("0"); err != nil || ret { t.Fatalf("0 -> false, nil | got %t %s", ret, err) } if ret, err := getBoolParam("faux"); err == nil || ret { t.Fatalf("faux -> false, err | got %t %s", ret, err) } } func TesthttpError(t *testing.T) { r := httptest.NewRecorder() httpError(r, fmt.Errorf("No such method")) if r.Code != http.StatusNotFound { t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code) } httpError(r, fmt.Errorf("This accound hasn't been activated")) if r.Code != http.StatusForbidden { t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code) } httpError(r, fmt.Errorf("Some error")) if r.Code != http.StatusInternalServerError { t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code) } } func TestGetVersion(t *testing.T) { eng := engine.New() var called bool eng.Register("version", func(job *engine.Job) engine.Status { called = true v := &engine.Env{} v.SetJson("Version", "42.1") v.Set("ApiVersion", "1.1.1.1.1") v.Set("GoVersion", "2.42") v.Set("Os", "Linux") v.Set("Arch", "x86_64") if _, err := v.WriteTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK }) r := serveRequest("GET", "/version", nil, eng, t) if !called { t.Fatalf("handler was not called") } v := readEnv(r.Body, t) if v.Get("Version") != "42.1" { t.Fatalf("%#v\n", v) } if r.HeaderMap.Get("Content-Type") != "application/json" { t.Fatalf("%#v\n", r) } } func TestGetInfo(t *testing.T) { eng := engine.New() var called bool eng.Register("info", func(job *engine.Job) engine.Status { called = true v := &engine.Env{} v.SetInt("Containers", 1) v.SetInt("Images", 42000) if _, err := v.WriteTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK }) r := serveRequest("GET", "/info", nil, eng, t) if !called { t.Fatalf("handler was not called") } v := readEnv(r.Body, t) if v.GetInt("Images") != 42000 { t.Fatalf("%#v\n", v) } if v.GetInt("Containers") != 1 { t.Fatalf("%#v\n", v) } assertContentType(r, "application/json", t) } func TestGetImagesJSON(t *testing.T) { eng := engine.New() var called bool eng.Register("images", func(job *engine.Job) engine.Status { called = true v := createEnvFromGetImagesJSONStruct(sampleImage) if _, err := v.WriteTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK }) r := serveRequest("GET", "/images/json", nil, eng, t) if !called { t.Fatal("handler was not called") } assertHttpNotError(r, t) assertContentType(r, "application/json", t) var observed getImagesJSONStruct if err := json.Unmarshal(r.Body.Bytes(), &observed); err != nil { t.Fatal(err) } if !reflect.DeepEqual(observed, sampleImage) { t.Errorf("Expected %#v but got %#v", sampleImage, observed) } } func TestGetImagesJSONFilter(t *testing.T) { eng := engine.New() filter := "nothing" eng.Register("images", func(job *engine.Job) engine.Status { filter = job.Getenv("filter") return engine.StatusOK }) serveRequest("GET", "/images/json?filter=aaaa", nil, eng, t) if filter != "aaaa" { t.Errorf("%#v", filter) } } func TestGetImagesJSONFilters(t *testing.T) { eng := engine.New() filter := "nothing" eng.Register("images", func(job *engine.Job) engine.Status { filter = job.Getenv("filters") return engine.StatusOK }) serveRequest("GET", "/images/json?filters=nnnn", nil, eng, t) if filter != "nnnn" { t.Errorf("%#v", filter) } } func TestGetImagesJSONAll(t *testing.T) { eng := engine.New() allFilter := "-1" eng.Register("images", func(job *engine.Job) engine.Status { allFilter = job.Getenv("all") return engine.StatusOK }) 
serveRequest("GET", "/images/json?all=1", nil, eng, t) if allFilter != "1" { t.Errorf("%#v", allFilter) } } func TestGetImagesJSONLegacyFormat(t *testing.T) { eng := engine.New() var called bool eng.Register("images", func(job *engine.Job) engine.Status { called = true outsLegacy := engine.NewTable("Created", 0) outsLegacy.Add(createEnvFromGetImagesJSONStruct(sampleImage)) if _, err := outsLegacy.WriteListTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK }) r := serveRequestUsingVersion("GET", "/images/json", "1.6", nil, eng, t) if !called { t.Fatal("handler was not called") } assertHttpNotError(r, t) assertContentType(r, "application/json", t) images := engine.NewTable("Created", 0) if _, err := images.ReadListFrom(r.Body.Bytes()); err != nil { t.Fatal(err) } if images.Len() != 1 { t.Fatalf("Expected 1 image, %d found", images.Len()) } image := images.Data[0] if image.Get("Tag") != "test-tag" { t.Errorf("Expected tag 'test-tag', found '%s'", image.Get("Tag")) } if image.Get("Repository") != "test-name" { t.Errorf("Expected repository 'test-name', found '%s'", image.Get("Repository")) } } func TestGetContainersByName(t *testing.T) { eng := engine.New() name := "container_name" var called bool eng.Register("container_inspect", func(job *engine.Job) engine.Status { called = true if job.Args[0] != name { t.Errorf("name != '%s': %#v", name, job.Args[0]) } if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") { t.Errorf("dirty env variable not set") } else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") { t.Errorf("dirty env variable set when it shouldn't") } v := &engine.Env{} v.SetBool("dirty", true) if _, err := v.WriteTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK }) r := serveRequest("GET", "/containers/"+name+"/json", nil, eng, t) if !called { t.Fatal("handler was not called") } assertContentType(r, "application/json", t) var stdoutJson interface{} if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil { t.Fatalf("%#v", err) } if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 { t.Fatalf("%#v", stdoutJson) } } func TestGetEvents(t *testing.T) { eng := engine.New() var called bool eng.Register("events", func(job *engine.Job) engine.Status { called = true since := job.Getenv("since") if since != "1" { t.Fatalf("'since' should be 1, found %#v instead", since) } until := job.Getenv("until") if until != "0" { t.Fatalf("'until' should be 0, found %#v instead", until) } v := &engine.Env{} v.Set("since", since) v.Set("until", until) if _, err := v.WriteTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK }) r := serveRequest("GET", "/events?since=1&until=0", nil, eng, t) if !called { t.Fatal("handler was not called") } assertContentType(r, "application/json", t) var stdout_json struct { Since int Until int } if err := json.Unmarshal(r.Body.Bytes(), &stdout_json); err != nil { t.Fatal(err) } if stdout_json.Since != 1 { t.Errorf("since != 1: %#v", stdout_json.Since) } if stdout_json.Until != 0 { t.Errorf("until != 0: %#v", stdout_json.Until) } } func TestLogs(t *testing.T) { eng := engine.New() var inspect bool var logs bool eng.Register("container_inspect", func(job *engine.Job) engine.Status { inspect = true if len(job.Args) == 0 { t.Fatal("Job arguments is empty") } if job.Args[0] != "test" { t.Fatalf("Container name %s, must be test", job.Args[0]) } return engine.StatusOK }) expected := "logs" eng.Register("logs", func(job *engine.Job) engine.Status { logs = 
true if len(job.Args) == 0 { t.Fatal("Job arguments are empty") } if job.Args[0] != "test" { t.Fatalf("Container name %s, must be test", job.Args[0]) } follow := job.Getenv("follow") if follow != "1" { t.Fatalf("follow: %s, must be 1", follow) } stdout := job.Getenv("stdout") if stdout != "1" { t.Fatalf("stdout %s, must be 1", stdout) } stderr := job.Getenv("stderr") if stderr != "" { t.Fatalf("stderr %s, must be empty", stderr) } timestamps := job.Getenv("timestamps") if timestamps != "1" { t.Fatalf("timestamps %s, must be 1", timestamps) } job.Stdout.Write([]byte(expected)) return engine.StatusOK }) r := serveRequest("GET", "/containers/test/logs?follow=1&stdout=1&timestamps=1", nil, eng, t) if r.Code != http.StatusOK { t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK) } if !inspect { t.Fatal("container_inspect job was not called") } if !logs { t.Fatal("logs job was not called") } res := r.Body.String() if res != expected { t.Fatalf("Output %s, expected %s", res, expected) } } func TestLogsNoStreams(t *testing.T) { eng := engine.New() var inspect bool var logs bool eng.Register("container_inspect", func(job *engine.Job) engine.Status { inspect = true if len(job.Args) == 0 { t.Fatal("Job arguments are empty") } if job.Args[0] != "test" { t.Fatalf("Container name %s, must be test", job.Args[0]) } return engine.StatusOK }) eng.Register("logs", func(job *engine.Job) engine.Status { logs = true return engine.StatusOK }) r := serveRequest("GET", "/containers/test/logs", nil, eng, t) if r.Code != http.StatusBadRequest { t.Fatalf("Got status %d, expected %d", r.Code, http.StatusBadRequest) } if inspect { t.Fatal("container_inspect job was called, but it shouldn't have been") } if logs { t.Fatal("logs job was called, but it shouldn't have been") } res := strings.TrimSpace(r.Body.String()) expected := "Bad parameters: you must choose at least one stream" if !strings.Contains(res, expected) { t.Fatalf("Output %s, expected %s in it", res, expected) } } func TestGetImagesHistory(t *testing.T) { eng := engine.New() imageName := "docker-test-image" var called bool eng.Register("history", func(job *engine.Job) engine.Status { called = true if len(job.Args) == 0 { t.Fatal("Job arguments are empty") } if job.Args[0] != imageName { t.Fatalf("name != '%s': %#v", imageName, job.Args[0]) } v := &engine.Env{} if _, err := v.WriteTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK }) r := serveRequest("GET", "/images/"+imageName+"/history", nil, eng, t) if !called { t.Fatalf("handler was not called") } if r.Code != http.StatusOK { t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK) } if r.HeaderMap.Get("Content-Type") != "application/json" { t.Fatalf("%#v\n", r) } } func TestGetImagesByName(t *testing.T) { eng := engine.New() name := "image_name" var called bool eng.Register("image_inspect", func(job *engine.Job) engine.Status { called = true if job.Args[0] != name { t.Fatalf("name != '%s': %#v", name, job.Args[0]) } if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") { t.Fatal("dirty env variable not set") } else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") { t.Fatal("dirty env variable set when it shouldn't be") } v := &engine.Env{} v.SetBool("dirty", true) if _, err := v.WriteTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK }) r := serveRequest("GET", "/images/"+name+"/json", nil, eng, t) if !called { t.Fatal("handler was not called") } if r.HeaderMap.Get("Content-Type") != "application/json" { t.Fatalf("%#v\n", r) } var
stdoutJson interface{} if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil { t.Fatalf("%#v", err) } if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 { t.Fatalf("%#v", stdoutJson) } } func TestDeleteContainers(t *testing.T) { eng := engine.New() name := "foo" var called bool eng.Register("rm", func(job *engine.Job) engine.Status { called = true if len(job.Args) == 0 { t.Fatalf("Job arguments are empty") } if job.Args[0] != name { t.Fatalf("name != '%s': %#v", name, job.Args[0]) } return engine.StatusOK }) r := serveRequest("DELETE", "/containers/"+name, nil, eng, t) if !called { t.Fatalf("handler was not called") } if r.Code != http.StatusNoContent { t.Fatalf("Got status %d, expected %d", r.Code, http.StatusNoContent) } } func serveRequest(method, target string, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder { return serveRequestUsingVersion(method, target, api.APIVERSION, body, eng, t) } func serveRequestUsingVersion(method, target string, version version.Version, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder { r := httptest.NewRecorder() req, err := http.NewRequest(method, target, body) if err != nil { t.Fatal(err) } ServeRequest(eng, version, r, req) return r } func readEnv(src io.Reader, t *testing.T) *engine.Env { out := engine.NewOutput() v, err := out.AddEnv() if err != nil { t.Fatal(err) } if _, err := io.Copy(out, src); err != nil { t.Fatal(err) } out.Close() return v } func toJson(data interface{}, t *testing.T) io.Reader { var buf bytes.Buffer if err := json.NewEncoder(&buf).Encode(data); err != nil { t.Fatal(err) } return &buf } func assertContentType(recorder *httptest.ResponseRecorder, content_type string, t *testing.T) { if recorder.HeaderMap.Get("Content-Type") != content_type { t.Fatalf("%#v\n", recorder) } } // XXX: Duplicated from integration/utils_test.go, but maybe that's OK as that // should die as soon as we convert all integration tests? // assertHttpNotError expects the given response to not have an error. // Otherwise it causes the test to fail. func assertHttpNotError(r *httptest.ResponseRecorder, t *testing.T) { // Non-error http statuses are [200, 400) if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest { t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code)) } } func createEnvFromGetImagesJSONStruct(data getImagesJSONStruct) *engine.Env { v := &engine.Env{} v.SetList("RepoTags", data.RepoTags) v.Set("Id", data.Id) v.SetInt64("Created", data.Created) v.SetInt64("Size", data.Size) v.SetInt64("VirtualSize", data.VirtualSize) return v } type getImagesJSONStruct struct { RepoTags []string Id string Created int64 Size int64 VirtualSize int64 } var sampleImage getImagesJSONStruct = getImagesJSONStruct{ RepoTags: []string{"test-name:test-tag"}, Id: "ID", Created: 999, Size: 777, VirtualSize: 666, } docker-1.6.2/api/server/server_windows.go0000644000175000017500000000125212524223634020022 0ustar tianontianon// +build windows package server import ( "fmt" "github.com/docker/docker/engine" ) // NewServer sets up the required Server and does protocol specific checking. func NewServer(proto, addr string, job *engine.Job) (Server, error) { // Basic error and sanity checking switch proto { case "tcp": return setupTcpHttp(addr, job) default: return nil, fmt.Errorf("Invalid protocol format.
Windows only supports tcp.") } } // Called through eng.Job("acceptconnections") func AcceptConnections(job *engine.Job) engine.Status { // close the lock so the listeners start accepting connections if activationLock != nil { close(activationLock) } return engine.StatusOK } docker-1.6.2/api/server/server_linux.go0000644000175000017500000000474012524223634017474 0ustar tianontianon// +build linux package server import ( "fmt" "net/http" "os" "syscall" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/systemd" ) // NewServer sets up the required Server and does protocol specific checking. func NewServer(proto, addr string, job *engine.Job) (Server, error) { // Basic error and sanity checking switch proto { case "fd": return nil, serveFd(addr, job) case "tcp": return setupTcpHttp(addr, job) case "unix": return setupUnixHttp(addr, job) default: return nil, fmt.Errorf("Invalid protocol format.") } } func setupUnixHttp(addr string, job *engine.Job) (*HttpServer, error) { r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("CorsHeaders"), job.Getenv("Version")) if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) { return nil, err } mask := syscall.Umask(0777) defer syscall.Umask(mask) l, err := newListener("unix", addr, job.GetenvBool("BufferRequests")) if err != nil { return nil, err } if err := setSocketGroup(addr, job.Getenv("SocketGroup")); err != nil { return nil, err } if err := os.Chmod(addr, 0660); err != nil { return nil, err } return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil } // serveFd creates an http.Server and sets it up to serve given a socket activated // argument. func serveFd(addr string, job *engine.Job) error { r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("CorsHeaders"), job.Getenv("Version")) ls, e := systemd.ListenFD(addr) if e != nil { return e } chErrors := make(chan error, len(ls)) // We don't want to start serving on these sockets until the // daemon is initialized and installed. Otherwise required handlers // won't be ready. 
<-activationLock // Since ListenFD will return one or more sockets we have // to create a go func to spawn off multiple serves for i := range ls { listener := ls[i] go func() { httpSrv := http.Server{Handler: r} chErrors <- httpSrv.Serve(listener) }() } for i := 0; i < len(ls); i++ { err := <-chErrors if err != nil { return err } } return nil } // Called through eng.Job("acceptconnections") func AcceptConnections(job *engine.Job) engine.Status { // Tell the init daemon we are accepting requests go systemd.SdNotify("READY=1") // close the lock so the listeners start accepting connections select { case <-activationLock: default: close(activationLock) } return engine.StatusOK } docker-1.6.2/api/server/server.go0000644000175000017500000013633612524223634016264 0ustar tianontianonpackage server import ( "bufio" "bytes" "encoding/base64" "encoding/json" "expvar" "fmt" "io" "io/ioutil" "net" "net/http" "net/http/pprof" "os" "strconv" "strings" "crypto/tls" "crypto/x509" "code.google.com/p/go.net/websocket" "github.com/docker/libcontainer/user" "github.com/gorilla/mux" log "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/api/types" "github.com/docker/docker/daemon/networkdriver/bridge" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/listenbuffer" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/stdcopy" "github.com/docker/docker/pkg/version" "github.com/docker/docker/registry" "github.com/docker/docker/utils" ) var ( activationLock chan struct{} = make(chan struct{}) ) type HttpServer struct { srv *http.Server l net.Listener } func (s *HttpServer) Serve() error { return s.srv.Serve(s.l) } func (s *HttpServer) Close() error { return s.l.Close() } type HttpApiFunc func(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error func hijackServer(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { conn, _, err := w.(http.Hijacker).Hijack() if err != nil { return nil, nil, err } // Flush the options to make sure the client sets the raw mode conn.Write([]byte{}) return conn, conn, nil } func closeStreams(streams ...interface{}) { for _, stream := range streams { if tcpc, ok := stream.(interface { CloseWrite() error }); ok { tcpc.CloseWrite() } else if closer, ok := stream.(io.Closer); ok { closer.Close() } } } // Check to make sure request's Content-Type is application/json func checkForJson(r *http.Request) error { ct := r.Header.Get("Content-Type") // No Content-Type header is ok as long as there's no Body if ct == "" { if r.Body == nil || r.ContentLength == 0 { return nil } } // Otherwise it better be json if api.MatchesContentType(ct, "application/json") { return nil } return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct) } //If we don't do this, POST method without Content-type (even with empty body) will fail func parseForm(r *http.Request) error { if r == nil { return nil } if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { return err } return nil } func parseMultipartForm(r *http.Request) error { if err := r.ParseMultipartForm(4096); err != nil && !strings.HasPrefix(err.Error(), "mime:") { return err } return nil } func httpError(w http.ResponseWriter, err error) { statusCode := http.StatusInternalServerError // FIXME: this is brittle and should not be necessary. // If we need to differentiate between different possible error types, we should // create appropriate error types with clearly defined meaning. 
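// A commented sketch of what the FIXME above suggests (hypothetical types,
// not part of this codebase): each error would carry its own HTTP status,
// so the substring matching below could become a simple type assertion:
//
//	type apiError struct {
//		error
//		status int
//	}
//
//	func (e apiError) StatusCode() int { return e.status }
//
//	// in httpError:
//	if se, ok := err.(interface{ StatusCode() int }); ok {
//		statusCode = se.StatusCode()
//	}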
errStr := strings.ToLower(err.Error()) if strings.Contains(errStr, "no such") { statusCode = http.StatusNotFound } else if strings.Contains(errStr, "bad parameter") { statusCode = http.StatusBadRequest } else if strings.Contains(errStr, "conflict") { statusCode = http.StatusConflict } else if strings.Contains(errStr, "impossible") { statusCode = http.StatusNotAcceptable } else if strings.Contains(errStr, "wrong login/password") { statusCode = http.StatusUnauthorized } else if strings.Contains(errStr, "hasn't been activated") { statusCode = http.StatusForbidden } if err != nil { log.Errorf("HTTP Error: statusCode=%d %v", statusCode, err) http.Error(w, err.Error(), statusCode) } } // writeJSONEnv writes the engine.Env values to the http response stream as a // json encoded body. func writeJSONEnv(w http.ResponseWriter, code int, v engine.Env) error { w.Header().Set("Content-Type", "application/json") w.WriteHeader(code) return v.Encode(w) } // writeJSON writes the value v to the http response stream as json with standard // json encoding. func writeJSON(w http.ResponseWriter, code int, v interface{}) error { w.Header().Set("Content-Type", "application/json") w.WriteHeader(code) return json.NewEncoder(w).Encode(v) } func streamJSON(job *engine.Job, w http.ResponseWriter, flush bool) { w.Header().Set("Content-Type", "application/json") if flush { job.Stdout.Add(utils.NewWriteFlusher(w)) } else { job.Stdout.Add(w) } } func getBoolParam(value string) (bool, error) { if value == "" { return false, nil } ret, err := strconv.ParseBool(value) if err != nil { return false, fmt.Errorf("Bad parameter") } return ret, nil } func postAuth(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { var ( authConfig, err = ioutil.ReadAll(r.Body) job = eng.Job("auth") stdoutBuffer = bytes.NewBuffer(nil) ) if err != nil { return err } job.Setenv("authConfig", string(authConfig)) job.Stdout.Add(stdoutBuffer) if err = job.Run(); err != nil { return err } if status := engine.Tail(stdoutBuffer, 1); status != "" { var env engine.Env env.Set("Status", status) return writeJSONEnv(w, http.StatusOK, env) } w.WriteHeader(http.StatusNoContent) return nil } func getVersion(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.Header().Set("Content-Type", "application/json") eng.ServeHTTP(w, r) return nil } func postContainersKill(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } if err := parseForm(r); err != nil { return err } job := eng.Job("kill", vars["name"]) if sig := r.Form.Get("signal"); sig != "" { job.Args = append(job.Args, sig) } if err := job.Run(); err != nil { return err } w.WriteHeader(http.StatusNoContent) return nil } func postContainersPause(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } if err := parseForm(r); err != nil { return err } job := eng.Job("pause", vars["name"]) if err := job.Run(); err != nil { return err } w.WriteHeader(http.StatusNoContent) return nil } func postContainersUnpause(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } if err := parseForm(r); err != nil { return err } job := 
eng.Job("unpause", vars["name"]) if err := job.Run(); err != nil { return err } w.WriteHeader(http.StatusNoContent) return nil } func getContainersExport(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } job := eng.Job("export", vars["name"]) job.Stdout.Add(w) if err := job.Run(); err != nil { return err } return nil } func getImagesJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } var ( err error outs *engine.Table job = eng.Job("images") ) job.Setenv("filters", r.Form.Get("filters")) // FIXME this parameter could just be a match filter job.Setenv("filter", r.Form.Get("filter")) job.Setenv("all", r.Form.Get("all")) if version.GreaterThanOrEqualTo("1.7") { streamJSON(job, w, false) } else if outs, err = job.Stdout.AddListTable(); err != nil { return err } if err := job.Run(); err != nil { return err } if version.LessThan("1.7") && outs != nil { // Convert to legacy format outsLegacy := engine.NewTable("Created", 0) for _, out := range outs.Data { for _, repoTag := range out.GetList("RepoTags") { repo, tag := parsers.ParseRepositoryTag(repoTag) outLegacy := &engine.Env{} outLegacy.Set("Repository", repo) outLegacy.SetJson("Tag", tag) outLegacy.Set("Id", out.Get("Id")) outLegacy.SetInt64("Created", out.GetInt64("Created")) outLegacy.SetInt64("Size", out.GetInt64("Size")) outLegacy.SetInt64("VirtualSize", out.GetInt64("VirtualSize")) outsLegacy.Add(outLegacy) } } w.Header().Set("Content-Type", "application/json") if _, err := outsLegacy.WriteListTo(w); err != nil { return err } } return nil } func getImagesViz(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if version.GreaterThan("1.6") { w.WriteHeader(http.StatusNotFound) return fmt.Errorf("This is now implemented in the client.") } eng.ServeHTTP(w, r) return nil } func getInfo(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.Header().Set("Content-Type", "application/json") eng.ServeHTTP(w, r) return nil } func getEvents(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } var job = eng.Job("events") streamJSON(job, w, true) job.Setenv("since", r.Form.Get("since")) job.Setenv("until", r.Form.Get("until")) job.Setenv("filters", r.Form.Get("filters")) return job.Run() } func getImagesHistory(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } var job = eng.Job("history", vars["name"]) streamJSON(job, w, false) if err := job.Run(); err != nil { return err } return nil } func getContainersChanges(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } var job = eng.Job("container_changes", vars["name"]) streamJSON(job, w, false) return job.Run() } func getContainersTop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if version.LessThan("1.4") { return fmt.Errorf("top was improved a lot since 1.3, Please upgrade your docker client.") } if vars == nil 
{ return fmt.Errorf("Missing parameter") } if err := parseForm(r); err != nil { return err } job := eng.Job("top", vars["name"], r.Form.Get("ps_args")) streamJSON(job, w, false) return job.Run() } func getContainersJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } var ( err error outs *engine.Table job = eng.Job("containers") ) job.Setenv("all", r.Form.Get("all")) job.Setenv("size", r.Form.Get("size")) job.Setenv("since", r.Form.Get("since")) job.Setenv("before", r.Form.Get("before")) job.Setenv("limit", r.Form.Get("limit")) job.Setenv("filters", r.Form.Get("filters")) if version.GreaterThanOrEqualTo("1.5") { streamJSON(job, w, false) } else if outs, err = job.Stdout.AddTable(); err != nil { return err } if err = job.Run(); err != nil { return err } if version.LessThan("1.5") { // Convert to legacy format for _, out := range outs.Data { ports := engine.NewTable("", 0) ports.ReadListFrom([]byte(out.Get("Ports"))) out.Set("Ports", api.DisplayablePorts(ports)) } w.Header().Set("Content-Type", "application/json") if _, err = outs.WriteListTo(w); err != nil { return err } } return nil } func getContainersStats(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } name := vars["name"] job := eng.Job("container_stats", name) streamJSON(job, w, true) return job.Run() } func getContainersLogs(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } var ( inspectJob = eng.Job("container_inspect", vars["name"]) logsJob = eng.Job("logs", vars["name"]) c, err = inspectJob.Stdout.AddEnv() ) if err != nil { return err } logsJob.Setenv("follow", r.Form.Get("follow")) logsJob.Setenv("tail", r.Form.Get("tail")) logsJob.Setenv("stdout", r.Form.Get("stdout")) logsJob.Setenv("stderr", r.Form.Get("stderr")) logsJob.Setenv("timestamps", r.Form.Get("timestamps")) // Validate args here, because we can't return not StatusOK after job.Run() call stdout, stderr := logsJob.GetenvBool("stdout"), logsJob.GetenvBool("stderr") if !(stdout || stderr) { return fmt.Errorf("Bad parameters: you must choose at least one stream") } if err = inspectJob.Run(); err != nil { return err } var outStream, errStream io.Writer outStream = utils.NewWriteFlusher(w) if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") { errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) } else { errStream = outStream } logsJob.Stdout.Add(outStream) logsJob.Stderr.Set(errStream) if err := logsJob.Run(); err != nil { fmt.Fprintf(outStream, "Error running logs job: %s\n", err) } return nil } func postImagesTag(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } job := eng.Job("tag", vars["name"], r.Form.Get("repo"), r.Form.Get("tag")) job.Setenv("force", r.Form.Get("force")) if err := job.Run(); err != nil { return err } w.WriteHeader(http.StatusCreated) return nil } func postCommit(eng *engine.Engine, version 
version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } var ( config engine.Env env engine.Env job = eng.Job("commit", r.Form.Get("container")) stdoutBuffer = bytes.NewBuffer(nil) ) if err := checkForJson(r); err != nil { return err } if err := config.Decode(r.Body); err != nil { log.Errorf("%s", err) } if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") { job.Setenv("pause", "1") } else { job.Setenv("pause", r.FormValue("pause")) } job.Setenv("repo", r.Form.Get("repo")) job.Setenv("tag", r.Form.Get("tag")) job.Setenv("author", r.Form.Get("author")) job.Setenv("comment", r.Form.Get("comment")) job.SetenvList("changes", r.Form["changes"]) job.SetenvSubEnv("config", &config) job.Stdout.Add(stdoutBuffer) if err := job.Run(); err != nil { return err } env.Set("Id", engine.Tail(stdoutBuffer, 1)) return writeJSONEnv(w, http.StatusCreated, env) } // Creates an image from Pull or from Import func postImagesCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } var ( image = r.Form.Get("fromImage") repo = r.Form.Get("repo") tag = r.Form.Get("tag") job *engine.Job ) authEncoded := r.Header.Get("X-Registry-Auth") authConfig := ®istry.AuthConfig{} if authEncoded != "" { authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { // for a pull it is not an error if no auth was given // to increase compatibility with the existing api it is defaulting to be empty authConfig = ®istry.AuthConfig{} } } if image != "" { //pull if tag == "" { image, tag = parsers.ParseRepositoryTag(image) } metaHeaders := map[string][]string{} for k, v := range r.Header { if strings.HasPrefix(k, "X-Meta-") { metaHeaders[k] = v } } job = eng.Job("pull", image, tag) job.SetenvBool("parallel", version.GreaterThan("1.3")) job.SetenvJson("metaHeaders", metaHeaders) job.SetenvJson("authConfig", authConfig) } else { //import if tag == "" { repo, tag = parsers.ParseRepositoryTag(repo) } job = eng.Job("import", r.Form.Get("fromSrc"), repo, tag) job.Stdin.Add(r.Body) job.SetenvList("changes", r.Form["changes"]) } if version.GreaterThan("1.0") { job.SetenvBool("json", true) streamJSON(job, w, true) } else { job.Stdout.Add(utils.NewWriteFlusher(w)) } if err := job.Run(); err != nil { if !job.Stdout.Used() { return err } sf := utils.NewStreamFormatter(version.GreaterThan("1.0")) w.Write(sf.FormatError(err)) } return nil } func getImagesSearch(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } var ( authEncoded = r.Header.Get("X-Registry-Auth") authConfig = ®istry.AuthConfig{} metaHeaders = map[string][]string{} ) if authEncoded != "" { authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { // for a search it is not an error if no auth was given // to increase compatibility with the existing api it is defaulting to be empty authConfig = ®istry.AuthConfig{} } } for k, v := range r.Header { if strings.HasPrefix(k, "X-Meta-") { metaHeaders[k] = v } } var job = eng.Job("search", r.Form.Get("term")) job.SetenvJson("metaHeaders", metaHeaders) job.SetenvJson("authConfig", authConfig) streamJSON(job, w, false) return job.Run() } func 
postImagesPush(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } metaHeaders := map[string][]string{} for k, v := range r.Header { if strings.HasPrefix(k, "X-Meta-") { metaHeaders[k] = v } } if err := parseForm(r); err != nil { return err } authConfig := ®istry.AuthConfig{} authEncoded := r.Header.Get("X-Registry-Auth") if authEncoded != "" { // the new format is to handle the authConfig as a header authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { // to increase compatibility to existing api it is defaulting to be empty authConfig = ®istry.AuthConfig{} } } else { // the old format is supported for compatibility if there was no authConfig header if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { return err } } job := eng.Job("push", vars["name"]) job.SetenvJson("metaHeaders", metaHeaders) job.SetenvJson("authConfig", authConfig) job.Setenv("tag", r.Form.Get("tag")) if version.GreaterThan("1.0") { job.SetenvBool("json", true) streamJSON(job, w, true) } else { job.Stdout.Add(utils.NewWriteFlusher(w)) } if err := job.Run(); err != nil { if !job.Stdout.Used() { return err } sf := utils.NewStreamFormatter(version.GreaterThan("1.0")) w.Write(sf.FormatError(err)) } return nil } func getImagesGet(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } if err := parseForm(r); err != nil { return err } if version.GreaterThan("1.0") { w.Header().Set("Content-Type", "application/x-tar") } var job *engine.Job if name, ok := vars["name"]; ok { job = eng.Job("image_export", name) } else { job = eng.Job("image_export", r.Form["names"]...) 
} job.Stdout.Add(w) return job.Run() } func postImagesLoad(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { job := eng.Job("load") job.Stdin.Add(r.Body) return job.Run() } func postContainersCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if err := checkForJson(r); err != nil { return err } var ( job = eng.Job("create", r.Form.Get("name")) outWarnings []string stdoutBuffer = bytes.NewBuffer(nil) warnings = bytes.NewBuffer(nil) ) if err := job.DecodeEnv(r.Body); err != nil { return err } // Read container ID from the first line of stdout job.Stdout.Add(stdoutBuffer) // Read warnings from stderr job.Stderr.Add(warnings) if err := job.Run(); err != nil { return err } // Parse warnings from stderr scanner := bufio.NewScanner(warnings) for scanner.Scan() { outWarnings = append(outWarnings, scanner.Text()) } return writeJSON(w, http.StatusCreated, &types.ContainerCreateResponse{ ID: engine.Tail(stdoutBuffer, 1), Warnings: outWarnings, }) } func postContainersRestart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } job := eng.Job("restart", vars["name"]) job.Setenv("t", r.Form.Get("t")) if err := job.Run(); err != nil { return err } w.WriteHeader(http.StatusNoContent) return nil } func postContainerRename(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } newName := r.URL.Query().Get("name") job := eng.Job("container_rename", vars["name"], newName) job.Setenv("t", r.Form.Get("t")) if err := job.Run(); err != nil { return err } w.WriteHeader(http.StatusNoContent) return nil } func deleteContainers(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } job := eng.Job("rm", vars["name"]) job.Setenv("forceRemove", r.Form.Get("force")) job.Setenv("removeVolume", r.Form.Get("v")) job.Setenv("removeLink", r.Form.Get("link")) if err := job.Run(); err != nil { return err } w.WriteHeader(http.StatusNoContent) return nil } func deleteImages(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } var job = eng.Job("image_delete", vars["name"]) streamJSON(job, w, false) job.Setenv("force", r.Form.Get("force")) job.Setenv("noprune", r.Form.Get("noprune")) return job.Run() } func postContainersStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } var ( name = vars["name"] job = eng.Job("start", name) ) // If contentLength is -1, we can assume chunked encoding // or more technically that the length is unknown // http://golang.org/src/pkg/net/http/request.go#L139 // net/http otherwise seems to swallow any headers related to chunked encoding // including r.TransferEncoding // allow a nil body for backwards compatibility if 
r.Body != nil && (r.ContentLength > 0 || r.ContentLength == -1) { if err := checkForJson(r); err != nil { return err } if err := job.DecodeEnv(r.Body); err != nil { return err } } if err := job.Run(); err != nil { if err.Error() == "Container already started" { w.WriteHeader(http.StatusNotModified) return nil } return err } w.WriteHeader(http.StatusNoContent) return nil } func postContainersStop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } job := eng.Job("stop", vars["name"]) job.Setenv("t", r.Form.Get("t")) if err := job.Run(); err != nil { if err.Error() == "Container already stopped" { w.WriteHeader(http.StatusNotModified) return nil } return err } w.WriteHeader(http.StatusNoContent) return nil } func postContainersWait(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } var ( env engine.Env stdoutBuffer = bytes.NewBuffer(nil) job = eng.Job("wait", vars["name"]) ) job.Stdout.Add(stdoutBuffer) if err := job.Run(); err != nil { return err } env.Set("StatusCode", engine.Tail(stdoutBuffer, 1)) return writeJSONEnv(w, http.StatusOK, env) } func postContainersResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } if err := eng.Job("resize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil { return err } return nil } func postContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } var ( job = eng.Job("container_inspect", vars["name"]) c, err = job.Stdout.AddEnv() ) if err != nil { return err } if err = job.Run(); err != nil { return err } inStream, outStream, err := hijackServer(w) if err != nil { return err } defer closeStreams(inStream, outStream) var errStream io.Writer if _, ok := r.Header["Upgrade"]; ok { fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") } else { fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") } if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") { errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) } else { errStream = outStream } job = eng.Job("attach", vars["name"]) job.Setenv("logs", r.Form.Get("logs")) job.Setenv("stream", r.Form.Get("stream")) job.Setenv("stdin", r.Form.Get("stdin")) job.Setenv("stdout", r.Form.Get("stdout")) job.Setenv("stderr", r.Form.Get("stderr")) job.Stdin.Add(inStream) job.Stdout.Add(outStream) job.Stderr.Set(errStream) if err := job.Run(); err != nil { fmt.Fprintf(outStream, "Error attaching: %s\n", err) } return nil } func wsContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } if err := eng.Job("container_inspect", 
vars["name"]).Run(); err != nil { return err } h := websocket.Handler(func(ws *websocket.Conn) { defer ws.Close() job := eng.Job("attach", vars["name"]) job.Setenv("logs", r.Form.Get("logs")) job.Setenv("stream", r.Form.Get("stream")) job.Setenv("stdin", r.Form.Get("stdin")) job.Setenv("stdout", r.Form.Get("stdout")) job.Setenv("stderr", r.Form.Get("stderr")) job.Stdin.Add(ws) job.Stdout.Add(ws) job.Stderr.Set(ws) if err := job.Run(); err != nil { log.Errorf("Error attaching websocket: %s", err) } }) h.ServeHTTP(w, r) return nil } func getContainersByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } var job = eng.Job("container_inspect", vars["name"]) if version.LessThan("1.12") { job.SetenvBool("raw", true) } streamJSON(job, w, false) return job.Run() } func getExecByID(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter 'id'") } var job = eng.Job("execInspect", vars["id"]) streamJSON(job, w, false) return job.Run() } func getImagesByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } var job = eng.Job("image_inspect", vars["name"]) if version.LessThan("1.12") { job.SetenvBool("raw", true) } streamJSON(job, w, false) return job.Run() } func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if version.LessThan("1.3") { return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.") } var ( authEncoded = r.Header.Get("X-Registry-Auth") authConfig = ®istry.AuthConfig{} configFileEncoded = r.Header.Get("X-Registry-Config") configFile = ®istry.ConfigFile{} job = eng.Job("build") ) // This block can be removed when API versions prior to 1.9 are deprecated. // Both headers will be parsed and sent along to the daemon, but if a non-empty // ConfigFile is present, any value provided as an AuthConfig directly will // be overridden. See BuildFile::CmdFrom for details. 
if version.LessThan("1.9") && authEncoded != "" { authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { // for a pull it is not an error if no auth was given // to increase compatibility with the existing api it is defaulting to be empty authConfig = ®istry.AuthConfig{} } } if configFileEncoded != "" { configFileJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(configFileEncoded)) if err := json.NewDecoder(configFileJson).Decode(configFile); err != nil { // for a pull it is not an error if no auth was given // to increase compatibility with the existing api it is defaulting to be empty configFile = ®istry.ConfigFile{} } } if version.GreaterThanOrEqualTo("1.8") { job.SetenvBool("json", true) streamJSON(job, w, true) } else { job.Stdout.Add(utils.NewWriteFlusher(w)) } if r.FormValue("forcerm") == "1" && version.GreaterThanOrEqualTo("1.12") { job.Setenv("rm", "1") } else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") { job.Setenv("rm", "1") } else { job.Setenv("rm", r.FormValue("rm")) } if r.FormValue("pull") == "1" && version.GreaterThanOrEqualTo("1.16") { job.Setenv("pull", "1") } job.Stdin.Add(r.Body) job.Setenv("remote", r.FormValue("remote")) job.Setenv("dockerfile", r.FormValue("dockerfile")) job.Setenv("t", r.FormValue("t")) job.Setenv("q", r.FormValue("q")) job.Setenv("nocache", r.FormValue("nocache")) job.Setenv("forcerm", r.FormValue("forcerm")) job.SetenvJson("authConfig", authConfig) job.SetenvJson("configFile", configFile) job.Setenv("memswap", r.FormValue("memswap")) job.Setenv("memory", r.FormValue("memory")) job.Setenv("cpusetcpus", r.FormValue("cpusetcpus")) job.Setenv("cpushares", r.FormValue("cpushares")) // Job cancellation. Note: not all job types support this. 
if closeNotifier, ok := w.(http.CloseNotifier); ok { finished := make(chan struct{}) defer close(finished) go func() { select { case <-finished: case <-closeNotifier.CloseNotify(): log.Infof("Client disconnected, cancelling job: %v", job) job.Cancel() } }() } if err := job.Run(); err != nil { if !job.Stdout.Used() { return err } sf := utils.NewStreamFormatter(version.GreaterThanOrEqualTo("1.8")) w.Write(sf.FormatError(err)) } return nil } func postContainersCopy(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } var copyData engine.Env if err := checkForJson(r); err != nil { return err } if err := copyData.Decode(r.Body); err != nil { return err } if copyData.Get("Resource") == "" { return fmt.Errorf("Path cannot be empty") } origResource := copyData.Get("Resource") if copyData.Get("Resource")[0] == '/' { copyData.Set("Resource", copyData.Get("Resource")[1:]) } job := eng.Job("container_copy", vars["name"], copyData.Get("Resource")) job.Stdout.Add(w) w.Header().Set("Content-Type", "application/x-tar") if err := job.Run(); err != nil { log.Errorf("%v", err) if strings.Contains(strings.ToLower(err.Error()), "no such id") { w.WriteHeader(http.StatusNotFound) } else if strings.Contains(err.Error(), "no such file or directory") { return fmt.Errorf("Could not find the file %s in container %s", origResource, vars["name"]) } } return nil } func postContainerExecCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } var ( out engine.Env name = vars["name"] job = eng.Job("execCreate", name) stdoutBuffer = bytes.NewBuffer(nil) ) if err := job.DecodeEnv(r.Body); err != nil { return err } job.Stdout.Add(stdoutBuffer) // Register an instance of Exec in container. if err := job.Run(); err != nil { fmt.Fprintf(os.Stderr, "Error setting up exec command in container %s: %s\n", name, err) return err } // Return the ID out.Set("Id", engine.Tail(stdoutBuffer, 1)) return writeJSONEnv(w, http.StatusCreated, out) } // TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. func postContainerExecStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } var ( name = vars["name"] job = eng.Job("execStart", name) errOut io.Writer = os.Stderr ) if err := job.DecodeEnv(r.Body); err != nil { return err } if !job.GetenvBool("Detach") { // Setting up the streaming http interface. inStream, outStream, err := hijackServer(w) if err != nil { return err } defer closeStreams(inStream, outStream) var errStream io.Writer if _, ok := r.Header["Upgrade"]; ok { fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") } else { fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") } if !job.GetenvBool("Tty") && version.GreaterThanOrEqualTo("1.6") { errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) } else { errStream = outStream } job.Stdin.Add(inStream) job.Stdout.Add(outStream) job.Stderr.Set(errStream) errOut = outStream } // Now run the user process in container. 
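// Note on the stdcopy writers configured above: when the exec has no TTY,
// stdout and stderr are multiplexed onto a single raw stream with a small
// per-frame header. A client would demultiplex with pkg/stdcopy; a minimal
// sketch, assuming conn is the hijacked connection:
//
//	_, err := stdcopy.StdCopy(os.Stdout, os.Stderr, conn)
//	if err != nil {
//		log.Fatal(err)
//	}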
job.SetCloseIO(false) if err := job.Run(); err != nil { fmt.Fprintf(errOut, "Error starting exec command in container %s: %s\n", name, err) return err } w.WriteHeader(http.StatusNoContent) return nil } func postContainerExecResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } if err := eng.Job("execResize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil { return err } return nil } func optionsHandler(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.WriteHeader(http.StatusOK) return nil } func writeCorsHeaders(w http.ResponseWriter, r *http.Request, corsHeaders string) { log.Debugf("CORS header is enabled and set to: %s", corsHeaders) w.Header().Add("Access-Control-Allow-Origin", corsHeaders) w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS") } func ping(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { _, err := w.Write([]byte{'O', 'K'}) return err } func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, corsHeaders string, dockerVersion version.Version) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { // log the request log.Debugf("Calling %s %s", localMethod, localRoute) if logging { log.Infof("%s %s", r.Method, r.RequestURI) } if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") { userAgent := strings.Split(r.Header.Get("User-Agent"), "/") if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) { log.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion) } } version := version.Version(mux.Vars(r)["version"]) if version == "" { version = api.APIVERSION } if corsHeaders != "" { writeCorsHeaders(w, r, corsHeaders) } if version.GreaterThan(api.APIVERSION) { http.Error(w, fmt.Errorf("client and server don't have same version (client : %s, server: %s)", version, api.APIVERSION).Error(), http.StatusNotFound) return } if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil { log.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err) httpError(w, err) } } } // Replicated from expvar.go as not public. 
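// (Illustrative sketch, not part of the original file.) The version
// negotiation performed by makeHttpHandler above can be summarized as a
// small helper: resolve the mux {version} variable, default it to the
// server's api.APIVERSION, and reject clients that are newer than the
// server, which upstream answers with a 404.
func resolveAPIVersion(requested, server version.Version) (version.Version, error) {
	if requested == "" {
		// No /vX.Y prefix on the request path: assume the server's version.
		requested = server
	}
	if requested.GreaterThan(server) {
		return "", fmt.Errorf("client and server don't have same version (client: %s, server: %s)", requested, server)
	}
	return requested, nil
}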
func expvarHandler(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json; charset=utf-8") fmt.Fprintf(w, "{\n") first := true expvar.Do(func(kv expvar.KeyValue) { if !first { fmt.Fprintf(w, ",\n") } first = false fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) }) fmt.Fprintf(w, "\n}\n") } func AttachProfiler(router *mux.Router) { router.HandleFunc("/debug/vars", expvarHandler) router.HandleFunc("/debug/pprof/", pprof.Index) router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) router.HandleFunc("/debug/pprof/profile", pprof.Profile) router.HandleFunc("/debug/pprof/symbol", pprof.Symbol) router.HandleFunc("/debug/pprof/block", pprof.Handler("block").ServeHTTP) router.HandleFunc("/debug/pprof/heap", pprof.Handler("heap").ServeHTTP) router.HandleFunc("/debug/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) router.HandleFunc("/debug/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) } // we keep enableCors just for legacy usage, need to be removed in the future func createRouter(eng *engine.Engine, logging, enableCors bool, corsHeaders string, dockerVersion string) *mux.Router { r := mux.NewRouter() if os.Getenv("DEBUG") != "" { AttachProfiler(r) } m := map[string]map[string]HttpApiFunc{ "GET": { "/_ping": ping, "/events": getEvents, "/info": getInfo, "/version": getVersion, "/images/json": getImagesJSON, "/images/viz": getImagesViz, "/images/search": getImagesSearch, "/images/get": getImagesGet, "/images/{name:.*}/get": getImagesGet, "/images/{name:.*}/history": getImagesHistory, "/images/{name:.*}/json": getImagesByName, "/containers/ps": getContainersJSON, "/containers/json": getContainersJSON, "/containers/{name:.*}/export": getContainersExport, "/containers/{name:.*}/changes": getContainersChanges, "/containers/{name:.*}/json": getContainersByName, "/containers/{name:.*}/top": getContainersTop, "/containers/{name:.*}/logs": getContainersLogs, "/containers/{name:.*}/stats": getContainersStats, "/containers/{name:.*}/attach/ws": wsContainersAttach, "/exec/{id:.*}/json": getExecByID, }, "POST": { "/auth": postAuth, "/commit": postCommit, "/build": postBuild, "/images/create": postImagesCreate, "/images/load": postImagesLoad, "/images/{name:.*}/push": postImagesPush, "/images/{name:.*}/tag": postImagesTag, "/containers/create": postContainersCreate, "/containers/{name:.*}/kill": postContainersKill, "/containers/{name:.*}/pause": postContainersPause, "/containers/{name:.*}/unpause": postContainersUnpause, "/containers/{name:.*}/restart": postContainersRestart, "/containers/{name:.*}/start": postContainersStart, "/containers/{name:.*}/stop": postContainersStop, "/containers/{name:.*}/wait": postContainersWait, "/containers/{name:.*}/resize": postContainersResize, "/containers/{name:.*}/attach": postContainersAttach, "/containers/{name:.*}/copy": postContainersCopy, "/containers/{name:.*}/exec": postContainerExecCreate, "/exec/{name:.*}/start": postContainerExecStart, "/exec/{name:.*}/resize": postContainerExecResize, "/containers/{name:.*}/rename": postContainerRename, }, "DELETE": { "/containers/{name:.*}": deleteContainers, "/images/{name:.*}": deleteImages, }, "OPTIONS": { "": optionsHandler, }, } // If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*" // otherwise, all head values will be passed to HTTP handler if corsHeaders == "" && enableCors { corsHeaders = "*" } for method, routes := range m { for route, fct := range routes { log.Debugf("Registering %s, %s", method, route) // NOTE: scope issue, make 
sure the variables are local and won't be changed localRoute := route localFct := fct localMethod := method // build the handler function f := makeHttpHandler(eng, logging, localMethod, localRoute, localFct, corsHeaders, version.Version(dockerVersion)) // add the new route if localRoute == "" { r.Methods(localMethod).HandlerFunc(f) } else { r.Path("/v{version:[0-9.]+}" + localRoute).Methods(localMethod).HandlerFunc(f) r.Path(localRoute).Methods(localMethod).HandlerFunc(f) } } } return r } // ServeRequest processes a single http request to the docker remote api. // FIXME: refactor this to be part of Server and not require re-creating a new // router each time. This requires first moving ListenAndServe into Server. func ServeRequest(eng *engine.Engine, apiversion version.Version, w http.ResponseWriter, req *http.Request) { router := createRouter(eng, false, true, "", "") // Insert APIVERSION into the request as a convenience req.URL.Path = fmt.Sprintf("/v%s%s", apiversion, req.URL.Path) router.ServeHTTP(w, req) } func lookupGidByName(nameOrGid string) (int, error) { groupFile, err := user.GetGroupPath() if err != nil { return -1, err } groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool { return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid }) if err != nil { return -1, err } if len(groups) > 0 { return groups[0].Gid, nil } gid, err := strconv.Atoi(nameOrGid) if err == nil { log.Warnf("Could not find GID %d", gid) return gid, nil } return -1, fmt.Errorf("Group %s not found", nameOrGid) } func setupTls(cert, key, ca string, l net.Listener) (net.Listener, error) { tlsCert, err := tls.LoadX509KeyPair(cert, key) if err != nil { if os.IsNotExist(err) { return nil, fmt.Errorf("Could not load X509 key pair (%s, %s): %v", cert, key, err) } return nil, fmt.Errorf("Error reading X509 key pair (%s, %s): %q. Make sure the key is not encrypted.", cert, key, err) } tlsConfig := &tls.Config{ NextProtos: []string{"http/1.1"}, Certificates: []tls.Certificate{tlsCert}, // Avoid fallback on insecure SSL protocols MinVersion: tls.VersionTLS10, } if ca != "" { certPool := x509.NewCertPool() file, err := ioutil.ReadFile(ca) if err != nil { return nil, fmt.Errorf("Could not read CA certificate: %v", err) } certPool.AppendCertsFromPEM(file) tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert tlsConfig.ClientCAs = certPool } return tls.NewListener(l, tlsConfig), nil } func newListener(proto, addr string, bufferRequests bool) (net.Listener, error) { if bufferRequests { return listenbuffer.NewListenBuffer(proto, addr, activationLock) } return net.Listen(proto, addr) } func changeGroup(addr string, nameOrGid string) error { gid, err := lookupGidByName(nameOrGid) if err != nil { return err } log.Debugf("%s group found. 
gid: %d", nameOrGid, gid) return os.Chown(addr, 0, gid) } func setSocketGroup(addr, group string) error { if group == "" { return nil } if err := changeGroup(addr, group); err != nil { if group != "docker" { return err } log.Debugf("Warning: could not chgrp %s to docker: %v", addr, err) } return nil } func allocateDaemonPort(addr string) error { host, port, err := net.SplitHostPort(addr) if err != nil { return err } intPort, err := strconv.Atoi(port) if err != nil { return err } var hostIPs []net.IP if parsedIP := net.ParseIP(host); parsedIP != nil { hostIPs = append(hostIPs, parsedIP) } else if hostIPs, err = net.LookupIP(host); err != nil { return fmt.Errorf("failed to lookup %s address in host specification", host) } for _, hostIP := range hostIPs { if _, err := bridge.RequestPort(hostIP, "tcp", intPort); err != nil { return fmt.Errorf("failed to allocate daemon listening port %d (err: %v)", intPort, err) } } return nil } func setupTcpHttp(addr string, job *engine.Job) (*HttpServer, error) { if !job.GetenvBool("TlsVerify") { log.Infof("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") } r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("CorsHeaders"), job.Getenv("Version")) l, err := newListener("tcp", addr, job.GetenvBool("BufferRequests")) if err != nil { return nil, err } if err := allocateDaemonPort(addr); err != nil { return nil, err } if job.GetenvBool("Tls") || job.GetenvBool("TlsVerify") { var tlsCa string if job.GetenvBool("TlsVerify") { tlsCa = job.Getenv("TlsCa") } l, err = setupTls(job.Getenv("TlsCert"), job.Getenv("TlsKey"), tlsCa, l) if err != nil { return nil, err } } return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil } type Server interface { Serve() error Close() error } // ServeApi loops through all of the protocols sent in to docker and spawns // off a go routine to setup a serving http.Server for each. func ServeApi(job *engine.Job) engine.Status { if len(job.Args) == 0 { return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name) } var ( protoAddrs = job.Args chErrors = make(chan error, len(protoAddrs)) ) for _, protoAddr := range protoAddrs { protoAddrParts := strings.SplitN(protoAddr, "://", 2) if len(protoAddrParts) != 2 { return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name) } go func() { log.Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1]) srv, err := NewServer(protoAddrParts[0], protoAddrParts[1], job) if err != nil { chErrors <- err return } job.Eng.OnShutdown(func() { if err := srv.Close(); err != nil { log.Error(err) } }) if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") { err = nil } chErrors <- err }() } for i := 0; i < len(protoAddrs); i++ { err := <-chErrors if err != nil { return job.Error(err) } } return engine.StatusOK } docker-1.6.2/api/types/0000755000175000017500000000000012524223634014251 5ustar tianontianondocker-1.6.2/api/types/stats.go0000644000175000017500000000576212524223634015750 0ustar tianontianon// This package is used for API stability in the types and response to the // consumers of the API stats endpoint. package types import "time" type ThrottlingData struct { // Number of periods with throttling active Periods uint64 `json:"periods"` // Number of periods when the container hit its throttling limit. 
ThrottledPeriods uint64 `json:"throttled_periods"` // Aggregate time the container was throttled for in nanoseconds. ThrottledTime uint64 `json:"throttled_time"` } // All CPU stats are aggregated since container inception. type CpuUsage struct { // Total CPU time consumed. // Units: nanoseconds. TotalUsage uint64 `json:"total_usage"` // Total CPU time consumed per core. // Units: nanoseconds. PercpuUsage []uint64 `json:"percpu_usage"` // Time spent by tasks of the cgroup in kernel mode. // Units: nanoseconds. UsageInKernelmode uint64 `json:"usage_in_kernelmode"` // Time spent by tasks of the cgroup in user mode. // Units: nanoseconds. UsageInUsermode uint64 `json:"usage_in_usermode"` } type CpuStats struct { CpuUsage CpuUsage `json:"cpu_usage"` SystemUsage uint64 `json:"system_cpu_usage"` ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` } type MemoryStats struct { // current res_counter usage for memory Usage uint64 `json:"usage"` // maximum usage ever recorded. MaxUsage uint64 `json:"max_usage"` // TODO(vishh): Export these as stronger types. // all the stats exported via memory.stat. Stats map[string]uint64 `json:"stats"` // number of times memory usage hits limits. Failcnt uint64 `json:"failcnt"` Limit uint64 `json:"limit"` } type BlkioStatEntry struct { Major uint64 `json:"major"` Minor uint64 `json:"minor"` Op string `json:"op"` Value uint64 `json:"value"` } type BlkioStats struct { // number of bytes transferred to and from the block device IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` } type Network struct { RxBytes uint64 `json:"rx_bytes"` RxPackets uint64 `json:"rx_packets"` RxErrors uint64 `json:"rx_errors"` RxDropped uint64 `json:"rx_dropped"` TxBytes uint64 `json:"tx_bytes"` TxPackets uint64 `json:"tx_packets"` TxErrors uint64 `json:"tx_errors"` TxDropped uint64 `json:"tx_dropped"` } type Stats struct { Read time.Time `json:"read"` Network Network `json:"network,omitempty"` CpuStats CpuStats `json:"cpu_stats,omitempty"` MemoryStats MemoryStats `json:"memory_stats,omitempty"` BlkioStats BlkioStats `json:"blkio_stats,omitempty"` } docker-1.6.2/api/types/types.go0000644000175000017500000000053712524223634015751 0ustar tianontianonpackage types // ContainerCreateResponse contains the information returned to a client on the // creation of a new container. type ContainerCreateResponse struct { // ID is the ID of the created container. ID string `json:"Id"` // Warnings are any warnings encountered during the creation of the container. 
Warnings []string `json:"Warnings"` } docker-1.6.2/api/README.md0000644000175000017500000000031512524223634014363 0ustar tianontianonThis directory contains code pertaining to the Docker API: - Used by the docker client when communicating with the docker daemon - Used by third party tools wishing to interface with the docker daemon docker-1.6.2/api/client/0000755000175000017500000000000012524223634014363 5ustar tianontianondocker-1.6.2/api/client/commands.go0000644000175000017500000024146312524223634016525 0ustar tianontianonpackage client import ( "bufio" "bytes" "encoding/base64" "encoding/json" "errors" "fmt" "io" "io/ioutil" "net/http" "net/url" "os" "os/exec" "path" "path/filepath" "runtime" "sort" "strconv" "strings" "sync" "text/tabwriter" "text/template" "time" log "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/api/types" "github.com/docker/docker/autogen/dockerversion" "github.com/docker/docker/engine" "github.com/docker/docker/graph" "github.com/docker/docker/nat" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/common" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/homedir" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/networkfs/resolvconf" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/filters" "github.com/docker/docker/pkg/progressreader" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/term" "github.com/docker/docker/pkg/timeutils" "github.com/docker/docker/pkg/units" "github.com/docker/docker/pkg/urlutil" "github.com/docker/docker/registry" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) const ( tarHeaderSize = 512 ) func (cli *DockerCli) CmdHelp(args ...string) error { if len(args) > 1 { method, exists := cli.getMethod(args[:2]...) if exists { method("--help") return nil } } if len(args) > 0 { method, exists := cli.getMethod(args[0]) if !exists { fmt.Fprintf(cli.err, "docker: '%s' is not a docker command. 
See 'docker --help'.\n", args[0]) os.Exit(1) } else { method("--help") return nil } } flag.Usage() return nil } func (cli *DockerCli) CmdBuild(args ...string) error { cmd := cli.Subcmd("build", "PATH | URL | -", "Build a new image from the source code at PATH", true) tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) for the image") suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers") noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image") rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build") forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers") pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image") dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')") flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit") flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap") flCpuShares := cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") flCpuSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") cmd.Require(flag.Exact, 1) utils.ParseFlags(cmd, args, true) var ( context archive.Archive isRemote bool err error ) _, err = exec.LookPath("git") hasGit := err == nil if cmd.Arg(0) == "-" { // As a special case, 'docker build -' will build from either an empty context with the // contents of stdin as a Dockerfile, or a tar-ed context from stdin. buf := bufio.NewReader(cli.in) magic, err := buf.Peek(tarHeaderSize) if err != nil && err != io.EOF { return fmt.Errorf("failed to peek context header from STDIN: %v", err) } if !archive.IsArchive(magic) { dockerfile, err := ioutil.ReadAll(buf) if err != nil { return fmt.Errorf("failed to read Dockerfile from STDIN: %v", err) } // -f option has no meaning when we're reading it from stdin, // so just use our default Dockerfile name *dockerfileName = api.DefaultDockerfileName context, err = archive.Generate(*dockerfileName, string(dockerfile)) } else { context = ioutil.NopCloser(buf) } } else if urlutil.IsURL(cmd.Arg(0)) && (!urlutil.IsGitURL(cmd.Arg(0)) || !hasGit) { isRemote = true } else { root := cmd.Arg(0) if urlutil.IsGitURL(root) { remoteURL := cmd.Arg(0) if !urlutil.IsGitTransport(remoteURL) { remoteURL = "https://" + remoteURL } root, err = ioutil.TempDir("", "docker-build-git") if err != nil { return err } defer os.RemoveAll(root) if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil { return fmt.Errorf("Error trying to use git: %s (%s)", err, output) } } if _, err := os.Stat(root); err != nil { return err } absRoot, err := filepath.Abs(root) if err != nil { return err } filename := *dockerfileName // path to Dockerfile if *dockerfileName == "" { // No -f/--file was specified so use the default *dockerfileName = api.DefaultDockerfileName filename = filepath.Join(absRoot, *dockerfileName) // Just to be nice ;-) look for 'dockerfile' too but only // use it if we found it, otherwise ignore this check if _, err = os.Lstat(filename); os.IsNotExist(err) { tmpFN := path.Join(absRoot, strings.ToLower(*dockerfileName)) if _, err = os.Lstat(tmpFN); err == nil { *dockerfileName = strings.ToLower(*dockerfileName) filename = tmpFN } } } 
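// The FollowSymlinkInScope check just below guards against a Dockerfile
// path that escapes the build context via a symlink. A commented sketch of
// the failure it prevents, with hypothetical paths:
//
//	// context root:   /home/user/app
//	// requested file: /home/user/app/Dockerfile -> /etc/passwd (symlink)
//	// symlink.FollowSymlinkInScope("/home/user/app/Dockerfile", "/home/user/app")
//	// fails, so the build is rejected instead of reading outside the context.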
origDockerfile := *dockerfileName // used for error msg if filename, err = filepath.Abs(filename); err != nil { return err } // Verify that 'filename' is within the build context filename, err = symlink.FollowSymlinkInScope(filename, absRoot) if err != nil { return fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", origDockerfile, root) } // Now reset the dockerfileName to be relative to the build context *dockerfileName, err = filepath.Rel(absRoot, filename) if err != nil { return err } // And canonicalize dockerfile name to a platform-independent one *dockerfileName, err = archive.CanonicalTarNameForPath(*dockerfileName) if err != nil { return fmt.Errorf("Cannot canonicalize dockerfile path %s: %v", *dockerfileName, err) } if _, err = os.Lstat(filename); os.IsNotExist(err) { return fmt.Errorf("Cannot locate Dockerfile: %s", origDockerfile) } var includes = []string{"."} excludes, err := utils.ReadDockerIgnore(path.Join(root, ".dockerignore")) if err != nil { return err } // If .dockerignore mentions .dockerignore or the Dockerfile // then make sure we send both files over to the daemon // because Dockerfile is, obviously, needed no matter what, and // .dockerignore is needed to know if either one needs to be // removed. The daemon will remove them for us, if needed, after it // parses the Dockerfile. keepThem1, _ := fileutils.Matches(".dockerignore", excludes) keepThem2, _ := fileutils.Matches(*dockerfileName, excludes) if keepThem1 || keepThem2 { includes = append(includes, ".dockerignore", *dockerfileName) } if err = utils.ValidateContextDirectory(root, excludes); err != nil { return fmt.Errorf("Error checking if context is accessible: '%s'. Please check permissions and try again.", err) } options := &archive.TarOptions{ Compression: archive.Uncompressed, ExcludePatterns: excludes, IncludeFiles: includes, } context, err = archive.TarWithOptions(root, options) if err != nil { return err } } // windows: show error message about modified file permissions // FIXME: this is not a valid warning when the daemon is running on Windows. It should be removed once the docker engine for windows can build. if runtime.GOOS == "windows" { log.Warn(`SECURITY WARNING: You are building a Docker image from Windows against a Linux Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. 
It is recommended to double check and reset permissions for sensitive files and directories.`) } var body io.Reader // Setup an upload progress bar // FIXME: ProgressReader shouldn't be this annoying to use if context != nil { sf := utils.NewStreamFormatter(false) body = progressreader.New(progressreader.Config{ In: context, Out: cli.out, Formatter: sf, NewLines: true, ID: "", Action: "Sending build context to Docker daemon", }) } var memory int64 if *flMemoryString != "" { parsedMemory, err := units.RAMInBytes(*flMemoryString) if err != nil { return err } memory = parsedMemory } var memorySwap int64 if *flMemorySwap != "" { if *flMemorySwap == "-1" { memorySwap = -1 } else { parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap) if err != nil { return err } memorySwap = parsedMemorySwap } } // Send the build context v := &url.Values{} //Check if the given image name can be resolved if *tag != "" { repository, tag := parsers.ParseRepositoryTag(*tag) if err := registry.ValidateRepositoryName(repository); err != nil { return err } if len(tag) > 0 { if err := graph.ValidateTagName(tag); err != nil { return err } } } v.Set("t", *tag) if *suppressOutput { v.Set("q", "1") } if isRemote { v.Set("remote", cmd.Arg(0)) } if *noCache { v.Set("nocache", "1") } if *rm { v.Set("rm", "1") } else { v.Set("rm", "0") } if *forceRm { v.Set("forcerm", "1") } if *pull { v.Set("pull", "1") } v.Set("cpusetcpus", *flCpuSetCpus) v.Set("cpushares", strconv.FormatInt(*flCpuShares, 10)) v.Set("memory", strconv.FormatInt(memory, 10)) v.Set("memswap", strconv.FormatInt(memorySwap, 10)) v.Set("dockerfile", *dockerfileName) cli.LoadConfigFile() headers := http.Header(make(map[string][]string)) buf, err := json.Marshal(cli.configFile) if err != nil { return err } headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) if context != nil { headers.Set("Content-Type", "application/tar") } err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), body, cli.out, headers) if jerr, ok := err.(*utils.JSONError); ok { // If no error code is set, default to 1 if jerr.Code == 0 { jerr.Code = 1 } return &utils.StatusError{Status: jerr.Message, StatusCode: jerr.Code} } return err } // 'docker login': login / register a user to registry service. 
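// Example session (illustrative; server and credentials are made up):
//
//   $ docker login registry.example.com
//   Username: alice
//   Password:
//   Email: alice@example.com
//
// With no SERVER argument, registry.IndexServerAddress() is used.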
func (cli *DockerCli) CmdLogin(args ...string) error {
    cmd := cli.Subcmd("login", "[SERVER]", "Register or log in to a Docker registry server. If no server is\nspecified, \""+registry.IndexServerAddress()+"\" is the default.", true)
    cmd.Require(flag.Max, 1)

    var username, password, email string

    cmd.StringVar(&username, []string{"u", "-username"}, "", "Username")
    cmd.StringVar(&password, []string{"p", "-password"}, "", "Password")
    cmd.StringVar(&email, []string{"e", "-email"}, "", "Email")

    utils.ParseFlags(cmd, args, true)

    serverAddress := registry.IndexServerAddress()
    if len(cmd.Args()) > 0 {
        serverAddress = cmd.Arg(0)
    }

    promptDefault := func(prompt string, configDefault string) {
        if configDefault == "" {
            fmt.Fprintf(cli.out, "%s: ", prompt)
        } else {
            fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault)
        }
    }

    readInput := func(in io.Reader, out io.Writer) string {
        reader := bufio.NewReader(in)
        line, _, err := reader.ReadLine()
        if err != nil {
            fmt.Fprintln(out, err.Error())
            os.Exit(1)
        }
        return string(line)
    }

    cli.LoadConfigFile()
    authconfig, ok := cli.configFile.Configs[serverAddress]
    if !ok {
        authconfig = registry.AuthConfig{}
    }

    if username == "" {
        promptDefault("Username", authconfig.Username)
        username = readInput(cli.in, cli.out)
        username = strings.Trim(username, " ")
        if username == "" {
            username = authconfig.Username
        }
    }
    // Assume that a different username means they may not want to use
    // the password or email from the config file, so prompt them
    if username != authconfig.Username {
        if password == "" {
            oldState, err := term.SaveState(cli.inFd)
            if err != nil {
                return err
            }
            fmt.Fprintf(cli.out, "Password: ")
            term.DisableEcho(cli.inFd, oldState)

            password = readInput(cli.in, cli.out)
            fmt.Fprint(cli.out, "\n")

            term.RestoreTerminal(cli.inFd, oldState)
            if password == "" {
                return fmt.Errorf("Error: Password Required")
            }
        }

        if email == "" {
            promptDefault("Email", authconfig.Email)
            email = readInput(cli.in, cli.out)
            if email == "" {
                email = authconfig.Email
            }
        }
    } else {
        // However, if they don't override the username use the
        // password or email from the cmd line if specified. IOW, allow
        // them to change/override them. And if not specified, just
        // use what's in the config file
        if password == "" {
            password = authconfig.Password
        }
        if email == "" {
            email = authconfig.Email
        }
    }
    authconfig.Username = username
    authconfig.Password = password
    authconfig.Email = email
    authconfig.ServerAddress = serverAddress
    cli.configFile.Configs[serverAddress] = authconfig

    stream, statusCode, err := cli.call("POST", "/auth", cli.configFile.Configs[serverAddress], nil)
    if statusCode == 401 {
        delete(cli.configFile.Configs, serverAddress)
        registry.SaveConfig(cli.configFile)
        return err
    }
    if err != nil {
        return err
    }
    var out2 engine.Env
    err = out2.Decode(stream)
    if err != nil {
        cli.configFile, _ = registry.LoadConfig(homedir.Get())
        return err
    }
    registry.SaveConfig(cli.configFile)
    fmt.Fprintf(cli.out, "WARNING: login credentials saved in %s.\n", path.Join(homedir.Get(), registry.CONFIGFILE))

    if out2.Get("Status") != "" {
        fmt.Fprintf(cli.out, "%s\n", out2.Get("Status"))
    }
    return nil
}

// log out from a Docker registry
func (cli *DockerCli) CmdLogout(args ...string) error {
    cmd := cli.Subcmd("logout", "[SERVER]", "Log out from a Docker registry. If no server is\nspecified, \""+registry.IndexServerAddress()+"\" is the default.", true)
    cmd.Require(flag.Max, 1)

    utils.ParseFlags(cmd, args, false)
    serverAddress := registry.IndexServerAddress()
    if len(cmd.Args()) > 0 {
        serverAddress = cmd.Arg(0)
    }

    cli.LoadConfigFile()
    if _, ok := cli.configFile.Configs[serverAddress]; !ok {
        fmt.Fprintf(cli.out, "Not logged in to %s\n", serverAddress)
    } else {
        fmt.Fprintf(cli.out, "Remove login credentials for %s\n", serverAddress)
        delete(cli.configFile.Configs, serverAddress)

        if err := registry.SaveConfig(cli.configFile); err != nil {
            return fmt.Errorf("Failed to save docker config: %v", err)
        }
    }
    return nil
}

// 'docker wait': block until a container stops
func (cli *DockerCli) CmdWait(args ...string) error {
    cmd := cli.Subcmd("wait", "CONTAINER [CONTAINER...]", "Block until a container stops, then print its exit code.", true)
    cmd.Require(flag.Min, 1)
    utils.ParseFlags(cmd, args, true)
    var encounteredError error
    for _, name := range cmd.Args() {
        status, err := waitForExit(cli, name)
        if err != nil {
            fmt.Fprintf(cli.err, "%s\n", err)
            encounteredError = fmt.Errorf("Error: failed to wait on one or more containers")
        } else {
            fmt.Fprintf(cli.out, "%d\n", status)
        }
    }
    return encounteredError
}

// 'docker version': show version information
func (cli *DockerCli) CmdVersion(args ...string) error {
    cmd := cli.Subcmd("version", "", "Show the Docker version information.", true)
    cmd.Require(flag.Exact, 0)

    utils.ParseFlags(cmd, args, false)
    if dockerversion.VERSION != "" {
        fmt.Fprintf(cli.out, "Client version: %s\n", dockerversion.VERSION)
    }
    fmt.Fprintf(cli.out, "Client API version: %s\n", api.APIVERSION)
    fmt.Fprintf(cli.out, "Go version (client): %s\n", runtime.Version())
    if dockerversion.GITCOMMIT != "" {
        fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT)
    }
    fmt.Fprintf(cli.out, "OS/Arch (client): %s/%s\n", runtime.GOOS, runtime.GOARCH)

    body, _, err := readBody(cli.call("GET", "/version", nil, nil))
    if err != nil {
        return err
    }

    out := engine.NewOutput()
    remoteVersion, err := out.AddEnv()
    if err != nil {
        log.Errorf("Error reading remote version: %s", err)
        return err
    }
    if _, err := out.Write(body); err != nil {
        log.Errorf("Error reading remote version: %s", err)
        return err
    }
    out.Close()
    fmt.Fprintf(cli.out, "Server version: %s\n", remoteVersion.Get("Version"))
    if apiVersion := remoteVersion.Get("ApiVersion"); apiVersion != "" {
        fmt.Fprintf(cli.out,
"Server API version: %s\n", apiVersion) } fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion")) fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit")) fmt.Fprintf(cli.out, "OS/Arch (server): %s/%s\n", remoteVersion.Get("Os"), remoteVersion.Get("Arch")) return nil } // 'docker info': display system-wide information. func (cli *DockerCli) CmdInfo(args ...string) error { cmd := cli.Subcmd("info", "", "Display system-wide information", true) cmd.Require(flag.Exact, 0) utils.ParseFlags(cmd, args, false) body, _, err := readBody(cli.call("GET", "/info", nil, nil)) if err != nil { return err } out := engine.NewOutput() remoteInfo, err := out.AddEnv() if err != nil { return err } if _, err := out.Write(body); err != nil { log.Errorf("Error reading remote info: %s", err) return err } out.Close() if remoteInfo.Exists("Containers") { fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers")) } if remoteInfo.Exists("Images") { fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images")) } if remoteInfo.Exists("Driver") { fmt.Fprintf(cli.out, "Storage Driver: %s\n", remoteInfo.Get("Driver")) } if remoteInfo.Exists("DriverStatus") { var driverStatus [][2]string if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil { return err } for _, pair := range driverStatus { fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1]) } } if remoteInfo.Exists("ExecutionDriver") { fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver")) } if remoteInfo.Exists("KernelVersion") { fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion")) } if remoteInfo.Exists("OperatingSystem") { fmt.Fprintf(cli.out, "Operating System: %s\n", remoteInfo.Get("OperatingSystem")) } if remoteInfo.Exists("NCPU") { fmt.Fprintf(cli.out, "CPUs: %d\n", remoteInfo.GetInt("NCPU")) } if remoteInfo.Exists("MemTotal") { fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(remoteInfo.GetInt64("MemTotal")))) } if remoteInfo.Exists("Name") { fmt.Fprintf(cli.out, "Name: %s\n", remoteInfo.Get("Name")) } if remoteInfo.Exists("ID") { fmt.Fprintf(cli.out, "ID: %s\n", remoteInfo.Get("ID")) } if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" { if remoteInfo.Exists("Debug") { fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug")) } fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "") if remoteInfo.Exists("NFd") { fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd")) } if remoteInfo.Exists("NGoroutines") { fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines")) } if remoteInfo.Exists("SystemTime") { t, err := remoteInfo.GetTime("SystemTime") if err != nil { log.Errorf("Error reading system time: %v", err) } else { fmt.Fprintf(cli.out, "System Time: %s\n", t.Format(time.UnixDate)) } } if remoteInfo.Exists("NEventsListener") { fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener")) } if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" { fmt.Fprintf(cli.out, "Init SHA1: %s\n", initSha1) } if initPath := remoteInfo.Get("InitPath"); initPath != "" { fmt.Fprintf(cli.out, "Init Path: %s\n", initPath) } if root := remoteInfo.Get("DockerRootDir"); root != "" { fmt.Fprintf(cli.out, "Docker Root Dir: %s\n", root) } } if remoteInfo.Exists("HttpProxy") { fmt.Fprintf(cli.out, "Http Proxy: %s\n", remoteInfo.Get("HttpProxy")) } if remoteInfo.Exists("HttpsProxy") { fmt.Fprintf(cli.out, "Https Proxy: %s\n", 
remoteInfo.Get("HttpsProxy")) } if remoteInfo.Exists("NoProxy") { fmt.Fprintf(cli.out, "No Proxy: %s\n", remoteInfo.Get("NoProxy")) } if len(remoteInfo.GetList("IndexServerAddress")) != 0 { cli.LoadConfigFile() u := cli.configFile.Configs[remoteInfo.Get("IndexServerAddress")].Username if len(u) > 0 { fmt.Fprintf(cli.out, "Username: %v\n", u) fmt.Fprintf(cli.out, "Registry: %v\n", remoteInfo.GetList("IndexServerAddress")) } } if remoteInfo.Exists("MemoryLimit") && !remoteInfo.GetBool("MemoryLimit") { fmt.Fprintf(cli.err, "WARNING: No memory limit support\n") } if remoteInfo.Exists("SwapLimit") && !remoteInfo.GetBool("SwapLimit") { fmt.Fprintf(cli.err, "WARNING: No swap limit support\n") } if remoteInfo.Exists("IPv4Forwarding") && !remoteInfo.GetBool("IPv4Forwarding") { fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n") } if remoteInfo.Exists("Labels") { fmt.Fprintln(cli.out, "Labels:") for _, attribute := range remoteInfo.GetList("Labels") { fmt.Fprintf(cli.out, " %s\n", attribute) } } return nil } func (cli *DockerCli) CmdStop(args ...string) error { cmd := cli.Subcmd("stop", "CONTAINER [CONTAINER...]", "Stop a running container by sending SIGTERM and then SIGKILL after a\ngrace period", true) nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing it") cmd.Require(flag.Min, 1) utils.ParseFlags(cmd, args, true) v := url.Values{} v.Set("t", strconv.Itoa(*nSeconds)) var encounteredError error for _, name := range cmd.Args() { _, _, err := readBody(cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil, nil)) if err != nil { fmt.Fprintf(cli.err, "%s\n", err) encounteredError = fmt.Errorf("Error: failed to stop one or more containers") } else { fmt.Fprintf(cli.out, "%s\n", name) } } return encounteredError } func (cli *DockerCli) CmdRestart(args ...string) error { cmd := cli.Subcmd("restart", "CONTAINER [CONTAINER...]", "Restart a running container", true) nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing the container") cmd.Require(flag.Min, 1) utils.ParseFlags(cmd, args, true) v := url.Values{} v.Set("t", strconv.Itoa(*nSeconds)) var encounteredError error for _, name := range cmd.Args() { _, _, err := readBody(cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil, nil)) if err != nil { fmt.Fprintf(cli.err, "%s\n", err) encounteredError = fmt.Errorf("Error: failed to restart one or more containers") } else { fmt.Fprintf(cli.out, "%s\n", name) } } return encounteredError } func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { sigc := make(chan os.Signal, 128) signal.CatchAll(sigc) go func() { for s := range sigc { if s == signal.SIGCHLD { continue } var sig string for sigStr, sigN := range signal.SignalMap { if sigN == s { sig = sigStr break } } if sig == "" { log.Errorf("Unsupported signal: %v. 
Discarding.", s) } if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, nil)); err != nil { log.Debugf("Error sending signal: %s", err) } } }() return sigc } func (cli *DockerCli) CmdStart(args ...string) error { var ( cErr chan error tty bool cmd = cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Start one or more stopped containers", true) attach = cmd.Bool([]string{"a", "-attach"}, false, "Attach STDOUT/STDERR and forward signals") openStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's STDIN") ) cmd.Require(flag.Min, 1) utils.ParseFlags(cmd, args, true) if *attach || *openStdin { if cmd.NArg() > 1 { return fmt.Errorf("You cannot start and attach multiple containers at once.") } stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, nil) if err != nil { return err } env := engine.Env{} if err := env.Decode(stream); err != nil { return err } config := env.GetSubEnv("Config") tty = config.GetBool("Tty") if !tty { sigc := cli.forwardAllSignals(cmd.Arg(0)) defer signal.StopCatch(sigc) } var in io.ReadCloser v := url.Values{} v.Set("stream", "1") if *openStdin && config.GetBool("OpenStdin") { v.Set("stdin", "1") in = cli.in } v.Set("stdout", "1") v.Set("stderr", "1") hijacked := make(chan io.Closer) // Block the return until the chan gets closed defer func() { log.Debugf("CmdStart() returned, defer waiting for hijack to finish.") if _, ok := <-hijacked; ok { log.Errorf("Hijack did not finish (chan still open)") } cli.in.Close() }() cErr = promise.Go(func() error { return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, hijacked, nil) }) // Acknowledge the hijack before starting select { case closer := <-hijacked: // Make sure that the hijack gets closed when returning (results // in closing the hijack chan and freeing server's goroutines) if closer != nil { defer closer.Close() } case err := <-cErr: if err != nil { return err } } } var encounteredError error for _, name := range cmd.Args() { _, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, nil)) if err != nil { if !*attach && !*openStdin { // attach and openStdin is false means it could be starting multiple containers // when a container start failed, show the error message and start next fmt.Fprintf(cli.err, "%s\n", err) encounteredError = fmt.Errorf("Error: failed to start one or more containers") } else { encounteredError = err } } else { if !*attach && !*openStdin { fmt.Fprintf(cli.out, "%s\n", name) } } } if encounteredError != nil { return encounteredError } if *openStdin || *attach { if tty && cli.isTerminalOut { if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil { log.Errorf("Error monitoring TTY size: %s", err) } } if attchErr := <-cErr; attchErr != nil { return attchErr } _, status, err := getExitCode(cli, cmd.Arg(0)) if err != nil { return err } if status != 0 { return &utils.StatusError{StatusCode: status} } } return nil } func (cli *DockerCli) CmdUnpause(args ...string) error { cmd := cli.Subcmd("unpause", "CONTAINER [CONTAINER...]", "Unpause all processes within a container", true) cmd.Require(flag.Min, 1) utils.ParseFlags(cmd, args, false) var encounteredError error for _, name := range cmd.Args() { if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/unpause", name), nil, nil)); err != nil { fmt.Fprintf(cli.err, "%s\n", err) encounteredError = fmt.Errorf("Error: failed to unpause container named %s", name) } else { fmt.Fprintf(cli.out, 
"%s\n", name) } } return encounteredError } func (cli *DockerCli) CmdPause(args ...string) error { cmd := cli.Subcmd("pause", "CONTAINER [CONTAINER...]", "Pause all processes within a container", true) cmd.Require(flag.Min, 1) utils.ParseFlags(cmd, args, false) var encounteredError error for _, name := range cmd.Args() { if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/pause", name), nil, nil)); err != nil { fmt.Fprintf(cli.err, "%s\n", err) encounteredError = fmt.Errorf("Error: failed to pause container named %s", name) } else { fmt.Fprintf(cli.out, "%s\n", name) } } return encounteredError } func (cli *DockerCli) CmdRename(args ...string) error { cmd := cli.Subcmd("rename", "OLD_NAME NEW_NAME", "Rename a container", true) if err := cmd.Parse(args); err != nil { return nil } if cmd.NArg() != 2 { cmd.Usage() return nil } old_name := cmd.Arg(0) new_name := cmd.Arg(1) if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/rename?name=%s", old_name, new_name), nil, nil)); err != nil { fmt.Fprintf(cli.err, "%s\n", err) return fmt.Errorf("Error: failed to rename container named %s", old_name) } return nil } func (cli *DockerCli) CmdInspect(args ...string) error { cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container or image", true) tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template") cmd.Require(flag.Min, 1) utils.ParseFlags(cmd, args, true) var tmpl *template.Template if *tmplStr != "" { var err error if tmpl, err = template.New("").Funcs(funcMap).Parse(*tmplStr); err != nil { fmt.Fprintf(cli.err, "Template parsing error: %v\n", err) return &utils.StatusError{StatusCode: 64, Status: "Template parsing error: " + err.Error()} } } indented := new(bytes.Buffer) indented.WriteByte('[') status := 0 for _, name := range cmd.Args() { obj, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, nil)) if err != nil { if strings.Contains(err.Error(), "Too many") { fmt.Fprintf(cli.err, "Error: %v", err) status = 1 continue } obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, nil)) if err != nil { if strings.Contains(err.Error(), "No such") { fmt.Fprintf(cli.err, "Error: No such image or container: %s\n", name) } else { fmt.Fprintf(cli.err, "%s", err) } status = 1 continue } } if tmpl == nil { if err = json.Indent(indented, obj, "", " "); err != nil { fmt.Fprintf(cli.err, "%s\n", err) status = 1 continue } } else { // Has template, will render var value interface{} if err := json.Unmarshal(obj, &value); err != nil { fmt.Fprintf(cli.err, "%s\n", err) status = 1 continue } if err := tmpl.Execute(cli.out, value); err != nil { return err } cli.out.Write([]byte{'\n'}) } indented.WriteString(",") } if indented.Len() > 1 { // Remove trailing ',' indented.Truncate(indented.Len() - 1) } indented.WriteString("]\n") if tmpl == nil { if _, err := io.Copy(cli.out, indented); err != nil { return err } } if status != 0 { return &utils.StatusError{StatusCode: status} } return nil } func (cli *DockerCli) CmdTop(args ...string) error { cmd := cli.Subcmd("top", "CONTAINER [ps OPTIONS]", "Display the running processes of a container", true) cmd.Require(flag.Min, 1) utils.ParseFlags(cmd, args, true) val := url.Values{} if cmd.NArg() > 1 { val.Set("ps_args", strings.Join(cmd.Args()[1:], " ")) } stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil, nil) if err != nil { return err } var procs engine.Env if err := 
procs.Decode(stream); err != nil { return err } w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) fmt.Fprintln(w, strings.Join(procs.GetList("Titles"), "\t")) processes := [][]string{} if err := procs.GetJson("Processes", &processes); err != nil { return err } for _, proc := range processes { fmt.Fprintln(w, strings.Join(proc, "\t")) } w.Flush() return nil } func (cli *DockerCli) CmdPort(args ...string) error { cmd := cli.Subcmd("port", "CONTAINER [PRIVATE_PORT[/PROTO]]", "List port mappings for the CONTAINER, or lookup the public-facing port that\nis NAT-ed to the PRIVATE_PORT", true) cmd.Require(flag.Min, 1) utils.ParseFlags(cmd, args, true) stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, nil) if err != nil { return err } env := engine.Env{} if err := env.Decode(stream); err != nil { return err } ports := nat.PortMap{} if err := env.GetSubEnv("NetworkSettings").GetJson("Ports", &ports); err != nil { return err } if cmd.NArg() == 2 { var ( port = cmd.Arg(1) proto = "tcp" parts = strings.SplitN(port, "/", 2) ) if len(parts) == 2 && len(parts[1]) != 0 { port = parts[0] proto = parts[1] } natPort := port + "/" + proto if frontends, exists := ports[nat.Port(port+"/"+proto)]; exists && frontends != nil { for _, frontend := range frontends { fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort) } return nil } return fmt.Errorf("Error: No public port '%s' published for %s", natPort, cmd.Arg(0)) } for from, frontends := range ports { for _, frontend := range frontends { fmt.Fprintf(cli.out, "%s -> %s:%s\n", from, frontend.HostIp, frontend.HostPort) } } return nil } // 'docker rmi IMAGE' removes all images with the name IMAGE func (cli *DockerCli) CmdRmi(args ...string) error { var ( cmd = cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images", true) force = cmd.Bool([]string{"f", "-force"}, false, "Force removal of the image") noprune = cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents") ) cmd.Require(flag.Min, 1) utils.ParseFlags(cmd, args, true) v := url.Values{} if *force { v.Set("force", "1") } if *noprune { v.Set("noprune", "1") } var encounteredError error for _, name := range cmd.Args() { body, _, err := readBody(cli.call("DELETE", "/images/"+name+"?"+v.Encode(), nil, nil)) if err != nil { fmt.Fprintf(cli.err, "%s\n", err) encounteredError = fmt.Errorf("Error: failed to remove one or more images") } else { outs := engine.NewTable("Created", 0) if _, err := outs.ReadListFrom(body); err != nil { fmt.Fprintf(cli.err, "%s\n", err) encounteredError = fmt.Errorf("Error: failed to remove one or more images") continue } for _, out := range outs.Data { if out.Get("Deleted") != "" { fmt.Fprintf(cli.out, "Deleted: %s\n", out.Get("Deleted")) } else { fmt.Fprintf(cli.out, "Untagged: %s\n", out.Get("Untagged")) } } } } return encounteredError } func (cli *DockerCli) CmdHistory(args ...string) error { cmd := cli.Subcmd("history", "IMAGE", "Show the history of an image", true) quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") cmd.Require(flag.Exact, 1) utils.ParseFlags(cmd, args, true) body, _, err := readBody(cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, nil)) if err != nil { return err } outs := engine.NewTable("Created", 0) if _, err := outs.ReadListFrom(body); err != nil { return err } w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) if !*quiet { fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE") } for _, 
out := range outs.Data { outID := out.Get("Id") if !*quiet { if *noTrunc { fmt.Fprintf(w, "%s\t", outID) } else { fmt.Fprintf(w, "%s\t", common.TruncateID(outID)) } fmt.Fprintf(w, "%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0)))) if *noTrunc { fmt.Fprintf(w, "%s\t", out.Get("CreatedBy")) } else { fmt.Fprintf(w, "%s\t", utils.Trunc(out.Get("CreatedBy"), 45)) } fmt.Fprintf(w, "%s\n", units.HumanSize(float64(out.GetInt64("Size")))) } else { if *noTrunc { fmt.Fprintln(w, outID) } else { fmt.Fprintln(w, common.TruncateID(outID)) } } } w.Flush() return nil } func (cli *DockerCli) CmdRm(args ...string) error { cmd := cli.Subcmd("rm", "CONTAINER [CONTAINER...]", "Remove one or more containers", true) v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container") link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link") force := cmd.Bool([]string{"f", "-force"}, false, "Force the removal of a running container (uses SIGKILL)") cmd.Require(flag.Min, 1) utils.ParseFlags(cmd, args, true) val := url.Values{} if *v { val.Set("v", "1") } if *link { val.Set("link", "1") } if *force { val.Set("force", "1") } var encounteredError error for _, name := range cmd.Args() { _, _, err := readBody(cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil, nil)) if err != nil { fmt.Fprintf(cli.err, "%s\n", err) encounteredError = fmt.Errorf("Error: failed to remove one or more containers") } else { fmt.Fprintf(cli.out, "%s\n", name) } } return encounteredError } // 'docker kill NAME' kills a running container func (cli *DockerCli) CmdKill(args ...string) error { cmd := cli.Subcmd("kill", "CONTAINER [CONTAINER...]", "Kill a running container using SIGKILL or a specified signal", true) signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container") cmd.Require(flag.Min, 1) utils.ParseFlags(cmd, args, true) var encounteredError error for _, name := range cmd.Args() { if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", name, *signal), nil, nil)); err != nil { fmt.Fprintf(cli.err, "%s\n", err) encounteredError = fmt.Errorf("Error: failed to kill one or more containers") } else { fmt.Fprintf(cli.out, "%s\n", name) } } return encounteredError } func (cli *DockerCli) CmdImport(args ...string) error { cmd := cli.Subcmd("import", "URL|- [REPOSITORY[:TAG]]", "Create an empty filesystem image and import the contents of the\ntarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then\noptionally tag it.", true) flChanges := opts.NewListOpts(nil) cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image") cmd.Require(flag.Min, 1) utils.ParseFlags(cmd, args, true) var ( v = url.Values{} src = cmd.Arg(0) repository = cmd.Arg(1) ) v.Set("fromSrc", src) v.Set("repo", repository) for _, change := range flChanges.GetAll() { v.Add("changes", change) } if cmd.NArg() == 3 { fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' has been deprecated. 
Please use URL|- [REPOSITORY[:TAG]]\n")
        v.Set("tag", cmd.Arg(2))
    }

    if repository != "" {
        // Check if the given image name can be resolved
        repo, _ := parsers.ParseRepositoryTag(repository)
        if err := registry.ValidateRepositoryName(repo); err != nil {
            return err
        }
    }

    var in io.Reader

    if src == "-" {
        in = cli.in
    }

    return cli.stream("POST", "/images/create?"+v.Encode(), in, cli.out, nil)
}

func (cli *DockerCli) CmdPush(args ...string) error {
    cmd := cli.Subcmd("push", "NAME[:TAG]", "Push an image or a repository to the registry", true)
    cmd.Require(flag.Exact, 1)

    utils.ParseFlags(cmd, args, true)
    name := cmd.Arg(0)

    cli.LoadConfigFile()

    remote, tag := parsers.ParseRepositoryTag(name)

    // Resolve the Repository name from fqn to RepositoryInfo
    repoInfo, err := registry.ParseRepositoryInfo(remote)
    if err != nil {
        return err
    }
    // Resolve the Auth config relevant for this server
    authConfig := cli.configFile.ResolveAuthConfig(repoInfo.Index)
    // If we're not using a custom registry, we know the restrictions
    // applied to repository names and can warn the user in advance.
    // Custom repositories can have different rules, and we must also
    // allow pushing by image ID.
    if repoInfo.Official {
        username := authConfig.Username
        if username == "" {
            username = "<user>"
        }
        return fmt.Errorf("You cannot push a \"root\" repository. Please rename your repository to <user>/<repo> (ex: %s/%s)", username, repoInfo.LocalName)
    }

    v := url.Values{}
    v.Set("tag", tag)

    _, _, err = cli.clientRequestAttemptLogin("POST", "/images/"+remote+"/push?"+v.Encode(), nil, cli.out, repoInfo.Index, "push")
    return err
}

func (cli *DockerCli) CmdPull(args ...string) error {
    cmd := cli.Subcmd("pull", "NAME[:TAG|@DIGEST]", "Pull an image or a repository from the registry", true)
    allTags := cmd.Bool([]string{"a", "-all-tags"}, false, "Download all tagged images in the repository")
    cmd.Require(flag.Exact, 1)

    utils.ParseFlags(cmd, args, true)
    var (
        v         = url.Values{}
        remote    = cmd.Arg(0)
        newRemote = remote
    )
    taglessRemote, tag := parsers.ParseRepositoryTag(remote)
    if tag == "" && !*allTags {
        newRemote = utils.ImageReference(taglessRemote, graph.DEFAULTTAG)
    }
    if tag != "" && *allTags {
        return fmt.Errorf("tag can't be used with --all-tags/-a")
    }

    v.Set("fromImage", newRemote)

    // Resolve the Repository name from fqn to RepositoryInfo
    repoInfo, err := registry.ParseRepositoryInfo(taglessRemote)
    if err != nil {
        return err
    }

    cli.LoadConfigFile()

    _, _, err = cli.clientRequestAttemptLogin("POST", "/images/create?"+v.Encode(), nil, cli.out, repoInfo.Index, "pull")
    return err
}

func (cli *DockerCli) CmdImages(args ...string) error {
    cmd := cli.Subcmd("images", "[REPOSITORY]", "List images", true)
    quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
    all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (default hides intermediate images)")
    noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
    showDigests := cmd.Bool([]string{"-digests"}, false, "Show digests")

    // FIXME: --viz and --tree are deprecated. Remove them in a future version.
    flViz := cmd.Bool([]string{"#v", "#viz", "#-viz"}, false, "Output graph in graphviz format")
    flTree := cmd.Bool([]string{"#t", "#tree", "#-tree"}, false, "Output graph in tree format")

    flFilter := opts.NewListOpts(nil)
    cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided")
    cmd.Require(flag.Max, 1)
    utils.ParseFlags(cmd, args, true)

    // Consolidate all filter flags, and sanity check them early.
    // They'll get processed in the daemon/server.
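    // Filter sketch (illustrative, not part of the original source): each
    // -f/--filter flag is parsed client-side and forwarded as JSON, e.g.
    //
    //   $ docker images -f dangling=true
    //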
imageFilterArgs := filters.Args{} for _, f := range flFilter.GetAll() { var err error imageFilterArgs, err = filters.ParseFlag(f, imageFilterArgs) if err != nil { return err } } matchName := cmd.Arg(0) // FIXME: --viz and --tree are deprecated. Remove them in a future version. if *flViz || *flTree { v := url.Values{ "all": []string{"1"}, } if len(imageFilterArgs) > 0 { filterJson, err := filters.ToParam(imageFilterArgs) if err != nil { return err } v.Set("filters", filterJson) } body, _, err := readBody(cli.call("GET", "/images/json?"+v.Encode(), nil, nil)) if err != nil { return err } outs := engine.NewTable("Created", 0) if _, err := outs.ReadListFrom(body); err != nil { return err } var ( printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string) startImage *engine.Env roots = engine.NewTable("Created", outs.Len()) byParent = make(map[string]*engine.Table) ) for _, image := range outs.Data { if image.Get("ParentId") == "" { roots.Add(image) } else { if children, exists := byParent[image.Get("ParentId")]; exists { children.Add(image) } else { byParent[image.Get("ParentId")] = engine.NewTable("Created", 1) byParent[image.Get("ParentId")].Add(image) } } if matchName != "" { if matchName == image.Get("Id") || matchName == common.TruncateID(image.Get("Id")) { startImage = image } for _, repotag := range image.GetList("RepoTags") { if repotag == matchName { startImage = image } } } } if *flViz { fmt.Fprintf(cli.out, "digraph docker {\n") printNode = (*DockerCli).printVizNode } else { printNode = (*DockerCli).printTreeNode } if startImage != nil { root := engine.NewTable("Created", 1) root.Add(startImage) cli.WalkTree(*noTrunc, root, byParent, "", printNode) } else if matchName == "" { cli.WalkTree(*noTrunc, roots, byParent, "", printNode) } if *flViz { fmt.Fprintf(cli.out, " base [style=invisible]\n}\n") } } else { v := url.Values{} if len(imageFilterArgs) > 0 { filterJson, err := filters.ToParam(imageFilterArgs) if err != nil { return err } v.Set("filters", filterJson) } if cmd.NArg() == 1 { // FIXME rename this parameter, to not be confused with the filters flag v.Set("filter", matchName) } if *all { v.Set("all", "1") } body, _, err := readBody(cli.call("GET", "/images/json?"+v.Encode(), nil, nil)) if err != nil { return err } outs := engine.NewTable("Created", 0) if _, err := outs.ReadListFrom(body); err != nil { return err } w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) if !*quiet { if *showDigests { fmt.Fprintln(w, "REPOSITORY\tTAG\tDIGEST\tIMAGE ID\tCREATED\tVIRTUAL SIZE") } else { fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE") } } for _, out := range outs.Data { outID := out.Get("Id") if !*noTrunc { outID = common.TruncateID(outID) } repoTags := out.GetList("RepoTags") repoDigests := out.GetList("RepoDigests") if len(repoTags) == 1 && repoTags[0] == ":" && len(repoDigests) == 1 && repoDigests[0] == "@" { // dangling image - clear out either repoTags or repoDigsts so we only show it once below repoDigests = []string{} } // combine the tags and digests lists tagsAndDigests := append(repoTags, repoDigests...) 
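        // Note (descriptive, added for clarity): the loop below prints one
        // output row per tag or digest, so an image tagged twice and also
        // pulled by digest would appear three times with the same IMAGE ID.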
for _, repoAndRef := range tagsAndDigests { repo, ref := parsers.ParseRepositoryTag(repoAndRef) // default tag and digest to none - if there's a value, it'll be set below tag := "" digest := "" if utils.DigestReference(ref) { digest = ref } else { tag = ref } if !*quiet { if *showDigests { fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s ago\t%s\n", repo, tag, digest, outID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), units.HumanSize(float64(out.GetInt64("VirtualSize")))) } else { fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, outID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), units.HumanSize(float64(out.GetInt64("VirtualSize")))) } } else { fmt.Fprintln(w, outID) } } } if !*quiet { w.Flush() } } return nil } // FIXME: --viz and --tree are deprecated. Remove them in a future version. func (cli *DockerCli) WalkTree(noTrunc bool, images *engine.Table, byParent map[string]*engine.Table, prefix string, printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string)) { length := images.Len() if length > 1 { for index, image := range images.Data { if index+1 == length { printNode(cli, noTrunc, image, prefix+"└─") if subimages, exists := byParent[image.Get("Id")]; exists { cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode) } } else { printNode(cli, noTrunc, image, prefix+"\u251C─") if subimages, exists := byParent[image.Get("Id")]; exists { cli.WalkTree(noTrunc, subimages, byParent, prefix+"\u2502 ", printNode) } } } } else { for _, image := range images.Data { printNode(cli, noTrunc, image, prefix+"└─") if subimages, exists := byParent[image.Get("Id")]; exists { cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode) } } } } // FIXME: --viz and --tree are deprecated. Remove them in a future version. func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix string) { var ( imageID string parentID string ) if noTrunc { imageID = image.Get("Id") parentID = image.Get("ParentId") } else { imageID = common.TruncateID(image.Get("Id")) parentID = common.TruncateID(image.Get("ParentId")) } if parentID == "" { fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID) } else { fmt.Fprintf(cli.out, " \"%s\" -> \"%s\"\n", parentID, imageID) } if image.GetList("RepoTags")[0] != ":" { fmt.Fprintf(cli.out, " \"%s\" [label=\"%s\\n%s\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n", imageID, imageID, strings.Join(image.GetList("RepoTags"), "\\n")) } } // FIXME: --viz and --tree are deprecated. Remove them in a future version. 
func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix string) { var imageID string if noTrunc { imageID = image.Get("Id") } else { imageID = common.TruncateID(image.Get("Id")) } fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, units.HumanSize(float64(image.GetInt64("VirtualSize")))) if image.GetList("RepoTags")[0] != ":" { fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.GetList("RepoTags"), ", ")) } else { fmt.Fprint(cli.out, "\n") } } func (cli *DockerCli) CmdPs(args ...string) error { var ( err error psFilterArgs = filters.Args{} v = url.Values{} cmd = cli.Subcmd("ps", "", "List containers", true) quiet = cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") size = cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes") all = cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)") noTrunc = cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") nLatest = cmd.Bool([]string{"l", "-latest"}, false, "Show the latest created container, include non-running") since = cmd.String([]string{"#sinceId", "#-since-id", "-since"}, "", "Show created since Id or Name, include non-running") before = cmd.String([]string{"#beforeId", "#-before-id", "-before"}, "", "Show only container created before Id or Name") last = cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running") flFilter = opts.NewListOpts(nil) ) cmd.Require(flag.Exact, 0) cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") utils.ParseFlags(cmd, args, true) if *last == -1 && *nLatest { *last = 1 } if *all { v.Set("all", "1") } if *last != -1 { v.Set("limit", strconv.Itoa(*last)) } if *since != "" { v.Set("since", *since) } if *before != "" { v.Set("before", *before) } if *size { v.Set("size", "1") } // Consolidate all filter flags, and sanity check them. // They'll get processed in the daemon/server. 
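    // Filter sketch (illustrative, not part of the original source): as with
    // 'docker images', each -f/--filter flag is parsed below and forwarded to
    // the daemon as JSON, e.g.
    //
    //   $ docker ps -a -f exited=0
    //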
    for _, f := range flFilter.GetAll() {
        if psFilterArgs, err = filters.ParseFlag(f, psFilterArgs); err != nil {
            return err
        }
    }

    if len(psFilterArgs) > 0 {
        filterJson, err := filters.ToParam(psFilterArgs)
        if err != nil {
            return err
        }
        v.Set("filters", filterJson)
    }

    body, _, err := readBody(cli.call("GET", "/containers/json?"+v.Encode(), nil, nil))
    if err != nil {
        return err
    }

    outs := engine.NewTable("Created", 0)
    if _, err := outs.ReadListFrom(body); err != nil {
        return err
    }

    w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
    if !*quiet {
        fmt.Fprint(w, "CONTAINER ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tPORTS\tNAMES")
        if *size {
            fmt.Fprintln(w, "\tSIZE")
        } else {
            fmt.Fprint(w, "\n")
        }
    }

    stripNamePrefix := func(ss []string) []string {
        for i, s := range ss {
            ss[i] = s[1:]
        }
        return ss
    }

    for _, out := range outs.Data {
        outID := out.Get("Id")
        if !*noTrunc {
            outID = common.TruncateID(outID)
        }

        if *quiet {
            fmt.Fprintln(w, outID)
            continue
        }

        var (
            outNames   = stripNamePrefix(out.GetList("Names"))
            outCommand = strconv.Quote(out.Get("Command"))
            ports      = engine.NewTable("", 0)
        )

        if !*noTrunc {
            outCommand = utils.Trunc(outCommand, 20)

            // When truncating, only display the container's default name
            // (the one without a linked-container prefix)
            for _, name := range outNames {
                if len(strings.Split(name, "/")) == 1 {
                    outNames = []string{name}
                    break
                }
            }
        }

        ports.ReadListFrom([]byte(out.Get("Ports")))

        image := out.Get("Image")
        if image == "" {
            image = "<no image>"
        }

        fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, image, outCommand,
            units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))),
            out.Get("Status"), api.DisplayablePorts(ports), strings.Join(outNames, ","))

        if *size {
            if out.GetInt("SizeRootFs") > 0 {
                fmt.Fprintf(w, "%s (virtual %s)\n", units.HumanSize(float64(out.GetInt64("SizeRw"))), units.HumanSize(float64(out.GetInt64("SizeRootFs"))))
            } else {
                fmt.Fprintf(w, "%s\n", units.HumanSize(float64(out.GetInt64("SizeRw"))))
            }
            continue
        }
        fmt.Fprint(w, "\n")
    }

    if !*quiet {
        w.Flush()
    }
    return nil
}

func (cli *DockerCli) CmdCommit(args ...string) error {
    cmd := cli.Subcmd("commit", "CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes", true)
    flPause := cmd.Bool([]string{"p", "-pause"}, true, "Pause container during commit")
    flComment := cmd.String([]string{"m", "-message"}, "", "Commit message")
    flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (e.g., \"John Hannibal Smith <hannibal@a-team.com>\")")
    flChanges := opts.NewListOpts(nil)
    cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image")
    // FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands.
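    // Deprecated vs. current form (illustrative values, not part of the
    // original source): --run takes a JSON runconfig, -c/--change a
    // Dockerfile instruction, e.g.
    //
    //   $ docker commit --run='{"Cmd": ["cat", "/world"]}' mycontainer          # deprecated
    //   $ docker commit -c 'CMD ["cat", "/world"]' mycontainer myimage:latest   # replacement
    //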
flConfig := cmd.String([]string{"#run", "#-run"}, "", "This option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands") cmd.Require(flag.Max, 2) cmd.Require(flag.Min, 1) utils.ParseFlags(cmd, args, true) var ( name = cmd.Arg(0) repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1)) ) //Check if the given image name can be resolved if repository != "" { if err := registry.ValidateRepositoryName(repository); err != nil { return err } } v := url.Values{} v.Set("container", name) v.Set("repo", repository) v.Set("tag", tag) v.Set("comment", *flComment) v.Set("author", *flAuthor) for _, change := range flChanges.GetAll() { v.Add("changes", change) } if *flPause != true { v.Set("pause", "0") } var ( config *runconfig.Config env engine.Env ) if *flConfig != "" { config = &runconfig.Config{} if err := json.Unmarshal([]byte(*flConfig), config); err != nil { return err } } stream, _, err := cli.call("POST", "/commit?"+v.Encode(), config, nil) if err != nil { return err } if err := env.Decode(stream); err != nil { return err } fmt.Fprintf(cli.out, "%s\n", env.Get("Id")) return nil } func (cli *DockerCli) CmdEvents(args ...string) error { cmd := cli.Subcmd("events", "", "Get real time events from the server", true) since := cmd.String([]string{"#since", "-since"}, "", "Show all events created since timestamp") until := cmd.String([]string{"-until"}, "", "Stream events until this timestamp") flFilter := opts.NewListOpts(nil) cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") cmd.Require(flag.Exact, 0) utils.ParseFlags(cmd, args, true) var ( v = url.Values{} loc = time.FixedZone(time.Now().Zone()) eventFilterArgs = filters.Args{} ) // Consolidate all filter flags, and sanity check them early. // They'll get process in the daemon/server. for _, f := range flFilter.GetAll() { var err error eventFilterArgs, err = filters.ParseFlag(f, eventFilterArgs) if err != nil { return err } } var setTime = func(key, value string) { format := timeutils.RFC3339NanoFixed if len(value) < len(format) { format = format[:len(value)] } if t, err := time.ParseInLocation(format, value, loc); err == nil { v.Set(key, strconv.FormatInt(t.Unix(), 10)) } else { v.Set(key, value) } } if *since != "" { setTime("since", *since) } if *until != "" { setTime("until", *until) } if len(eventFilterArgs) > 0 { filterJson, err := filters.ToParam(eventFilterArgs) if err != nil { return err } v.Set("filters", filterJson) } if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil { return err } return nil } func (cli *DockerCli) CmdExport(args ...string) error { cmd := cli.Subcmd("export", "CONTAINER", "Export a filesystem as a tar archive (streamed to STDOUT by default)", true) outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT") cmd.Require(flag.Exact, 1) utils.ParseFlags(cmd, args, true) var ( output io.Writer = cli.out err error ) if *outfile != "" { output, err = os.Create(*outfile) if err != nil { return err } } else if cli.isTerminalOut { return errors.New("Cowardly refusing to save to a terminal. 
Use the -o flag or redirect.") } if len(cmd.Args()) == 1 { image := cmd.Arg(0) if err := cli.stream("GET", "/containers/"+image+"/export", nil, output, nil); err != nil { return err } } else { v := url.Values{} for _, arg := range cmd.Args() { v.Add("names", arg) } if err := cli.stream("GET", "/containers/get?"+v.Encode(), nil, output, nil); err != nil { return err } } return nil } func (cli *DockerCli) CmdDiff(args ...string) error { cmd := cli.Subcmd("diff", "CONTAINER", "Inspect changes on a container's filesystem", true) cmd.Require(flag.Exact, 1) utils.ParseFlags(cmd, args, true) body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil, nil)) if err != nil { return err } outs := engine.NewTable("", 0) if _, err := outs.ReadListFrom(body); err != nil { return err } for _, change := range outs.Data { var kind string switch change.GetInt("Kind") { case archive.ChangeModify: kind = "C" case archive.ChangeAdd: kind = "A" case archive.ChangeDelete: kind = "D" } fmt.Fprintf(cli.out, "%s %s\n", kind, change.Get("Path")) } return nil } func (cli *DockerCli) CmdLogs(args ...string) error { var ( cmd = cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container", true) follow = cmd.Bool([]string{"f", "-follow"}, false, "Follow log output") times = cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps") tail = cmd.String([]string{"-tail"}, "all", "Number of lines to show from the end of the logs") ) cmd.Require(flag.Exact, 1) utils.ParseFlags(cmd, args, true) name := cmd.Arg(0) stream, _, err := cli.call("GET", "/containers/"+name+"/json", nil, nil) if err != nil { return err } env := engine.Env{} if err := env.Decode(stream); err != nil { return err } if env.GetSubEnv("HostConfig").GetSubEnv("LogConfig").Get("Type") != "json-file" { return fmt.Errorf("\"logs\" command is supported only for \"json-file\" logging driver") } v := url.Values{} v.Set("stdout", "1") v.Set("stderr", "1") if *times { v.Set("timestamps", "1") } if *follow { v.Set("follow", "1") } v.Set("tail", *tail) return cli.streamHelper("GET", "/containers/"+name+"/logs?"+v.Encode(), env.GetSubEnv("Config").GetBool("Tty"), nil, cli.out, cli.err, nil) } func (cli *DockerCli) CmdAttach(args ...string) error { var ( cmd = cli.Subcmd("attach", "CONTAINER", "Attach to a running container", true) noStdin = cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach STDIN") proxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process") ) cmd.Require(flag.Exact, 1) utils.ParseFlags(cmd, args, true) name := cmd.Arg(0) stream, _, err := cli.call("GET", "/containers/"+name+"/json", nil, nil) if err != nil { return err } env := engine.Env{} if err := env.Decode(stream); err != nil { return err } if !env.GetSubEnv("State").GetBool("Running") { return fmt.Errorf("You cannot attach to a stopped container, start it first") } var ( config = env.GetSubEnv("Config") tty = config.GetBool("Tty") ) if err := cli.CheckTtyInput(!*noStdin, tty); err != nil { return err } if tty && cli.isTerminalOut { if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil { log.Debugf("Error monitoring TTY size: %s", err) } } var in io.ReadCloser v := url.Values{} v.Set("stream", "1") if !*noStdin && config.GetBool("OpenStdin") { v.Set("stdin", "1") in = cli.in } v.Set("stdout", "1") v.Set("stderr", "1") if *proxy && !tty { sigc := cli.forwardAllSignals(cmd.Arg(0)) defer signal.StopCatch(sigc) } if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, 
cli.out, cli.err, nil, nil); err != nil {
        return err
    }

    _, status, err := getExitCode(cli, cmd.Arg(0))
    if err != nil {
        return err
    }
    if status != 0 {
        return &utils.StatusError{StatusCode: status}
    }
    return nil
}

func (cli *DockerCli) CmdSearch(args ...string) error {
    cmd := cli.Subcmd("search", "TERM", "Search the Docker Hub for images", true)
    noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
    trusted := cmd.Bool([]string{"#t", "#trusted", "#-trusted"}, false, "Only show trusted builds")
    automated := cmd.Bool([]string{"-automated"}, false, "Only show automated builds")
    stars := cmd.Int([]string{"s", "#stars", "-stars"}, 0, "Only display images with at least x stars")
    cmd.Require(flag.Exact, 1)

    utils.ParseFlags(cmd, args, true)

    name := cmd.Arg(0)
    v := url.Values{}
    v.Set("term", name)

    // Resolve the Repository name from fqn to hostname + name
    taglessRemote, _ := parsers.ParseRepositoryTag(name)
    repoInfo, err := registry.ParseRepositoryInfo(taglessRemote)
    if err != nil {
        return err
    }

    cli.LoadConfigFile()

    body, statusCode, errReq := cli.clientRequestAttemptLogin("GET", "/images/search?"+v.Encode(), nil, nil, repoInfo.Index, "search")
    rawBody, _, err := readBody(body, statusCode, errReq)
    if err != nil {
        return err
    }

    outs := engine.NewTable("star_count", 0)
    if _, err := outs.ReadListFrom(rawBody); err != nil {
        return err
    }
    w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0)
    fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n")
    for _, out := range outs.Data {
        if ((*automated || *trusted) && (!out.GetBool("is_trusted") && !out.GetBool("is_automated"))) || (*stars > out.GetInt("star_count")) {
            continue
        }
        desc := strings.Replace(out.Get("description"), "\n", " ", -1)
        desc = strings.Replace(desc, "\r", " ", -1)
        if !*noTrunc && len(desc) > 45 {
            desc = utils.Trunc(desc, 42) + "..."
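            // 42 characters of description plus the three-character ellipsis
            // keeps the column at the 45-character limit checked above.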
} fmt.Fprintf(w, "%s\t%s\t%d\t", out.Get("name"), desc, out.GetInt("star_count")) if out.GetBool("is_official") { fmt.Fprint(w, "[OK]") } fmt.Fprint(w, "\t") if out.GetBool("is_automated") || out.GetBool("is_trusted") { fmt.Fprint(w, "[OK]") } fmt.Fprint(w, "\n") } w.Flush() return nil } // Ports type - Used to parse multiple -p flags type ports []int func (cli *DockerCli) CmdTag(args ...string) error { cmd := cli.Subcmd("tag", "IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]", "Tag an image into a repository", true) force := cmd.Bool([]string{"f", "#force", "-force"}, false, "Force") cmd.Require(flag.Exact, 2) utils.ParseFlags(cmd, args, true) var ( repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1)) v = url.Values{} ) //Check if the given image name can be resolved if err := registry.ValidateRepositoryName(repository); err != nil { return err } v.Set("repo", repository) v.Set("tag", tag) if *force { v.Set("force", "1") } if _, _, err := readBody(cli.call("POST", "/images/"+cmd.Arg(0)+"/tag?"+v.Encode(), nil, nil)); err != nil { return err } return nil } func (cli *DockerCli) pullImage(image string) error { return cli.pullImageCustomOut(image, cli.out) } func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error { v := url.Values{} repos, tag := parsers.ParseRepositoryTag(image) // pull only the image tagged 'latest' if no tag was specified if tag == "" { tag = graph.DEFAULTTAG } v.Set("fromImage", repos) v.Set("tag", tag) // Resolve the Repository name from fqn to RepositoryInfo repoInfo, err := registry.ParseRepositoryInfo(repos) if err != nil { return err } // Load the auth config file, to be able to pull the image cli.LoadConfigFile() // Resolve the Auth config relevant for this server authConfig := cli.configFile.ResolveAuthConfig(repoInfo.Index) buf, err := json.Marshal(authConfig) if err != nil { return err } registryAuthHeader := []string{ base64.URLEncoding.EncodeToString(buf), } if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, out, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil { return err } return nil } type cidFile struct { path string file *os.File written bool } func newCIDFile(path string) (*cidFile, error) { if _, err := os.Stat(path); err == nil { return nil, fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path) } f, err := os.Create(path) if err != nil { return nil, fmt.Errorf("Failed to create the container ID file: %s", err) } return &cidFile{path: path, file: f}, nil } func (cid *cidFile) Close() error { cid.file.Close() if !cid.written { if err := os.Remove(cid.path); err != nil { return fmt.Errorf("failed to remove the CID file '%s': %s \n", cid.path, err) } } return nil } func (cid *cidFile) Write(id string) error { if _, err := cid.file.Write([]byte(id)); err != nil { return fmt.Errorf("Failed to write the container ID to the file: %s", err) } cid.written = true return nil } func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runconfig.HostConfig, cidfile, name string) (*types.ContainerCreateResponse, error) { containerValues := url.Values{} if name != "" { containerValues.Set("name", name) } mergedConfig := runconfig.MergeConfigs(config, hostConfig) var containerIDFile *cidFile if cidfile != "" { var err error if containerIDFile, err = newCIDFile(cidfile); err != nil { return nil, err } defer containerIDFile.Close() } //create the container stream, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), 
mergedConfig, nil) //if image not found try to pull it if statusCode == 404 { repo, tag := parsers.ParseRepositoryTag(config.Image) if tag == "" { tag = graph.DEFAULTTAG } fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", utils.ImageReference(repo, tag)) // we don't want to write to stdout anything apart from container.ID if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil { return nil, err } // Retry if stream, _, err = cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, nil); err != nil { return nil, err } } else if err != nil { return nil, err } var response types.ContainerCreateResponse if err := json.NewDecoder(stream).Decode(&response); err != nil { return nil, err } for _, warning := range response.Warnings { fmt.Fprintf(cli.err, "WARNING: %s\n", warning) } if containerIDFile != nil { if err = containerIDFile.Write(response.ID); err != nil { return nil, err } } return &response, nil } func (cli *DockerCli) CmdCreate(args ...string) error { cmd := cli.Subcmd("create", "IMAGE [COMMAND] [ARG...]", "Create a new container", true) // These are flags not stored in Config/HostConfig var ( flName = cmd.String([]string{"-name"}, "", "Assign a name to the container") ) config, hostConfig, cmd, err := runconfig.Parse(cmd, args) if err != nil { utils.ReportError(cmd, err.Error(), true) } if config.Image == "" { cmd.Usage() return nil } response, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName) if err != nil { return err } fmt.Fprintf(cli.out, "%s\n", response.ID) return nil } func (cli *DockerCli) CmdRun(args ...string) error { // FIXME: just use runconfig.Parse already cmd := cli.Subcmd("run", "IMAGE [COMMAND] [ARG...]", "Run a command in a new container", true) // These are flags not stored in Config/HostConfig var ( flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits") flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Run container in background and print container ID") flSigProxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy received signals to the process") flName = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container") flAttach *opts.ListOpts ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d") ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm") ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d") ) config, hostConfig, cmd, err := runconfig.Parse(cmd, args) // just in case the Parse does not exit if err != nil { utils.ReportError(cmd, err.Error(), true) } if len(hostConfig.Dns) > 0 { // check the DNS settings passed via --dns against // localhost regexp to warn if they are trying to // set a DNS to a localhost address for _, dnsIP := range hostConfig.Dns { if resolvconf.IsLocalhost(dnsIP) { fmt.Fprintf(cli.err, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP) break } } } if config.Image == "" { cmd.Usage() return nil } if !*flDetach { if err := cli.CheckTtyInput(config.AttachStdin, config.Tty); err != nil { return err } } else { if fl := cmd.Lookup("-attach"); fl != nil { flAttach = fl.Value.(*opts.ListOpts) if flAttach.Len() != 0 { return ErrConflictAttachDetach } } if *flAutoRemove { return ErrConflictDetachAutoRemove } config.AttachStdin = false config.AttachStdout = false config.AttachStderr = false config.StdinOnce = false } // Disable flSigProxy when in TTY mode sigProxy := *flSigProxy if 
config.Tty { sigProxy = false } createResponse, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName) if err != nil { return err } if sigProxy { sigc := cli.forwardAllSignals(createResponse.ID) defer signal.StopCatch(sigc) } var ( waitDisplayId chan struct{} errCh chan error ) if !config.AttachStdout && !config.AttachStderr { // Make this asynchronous to allow the client to write to stdin before having to read the ID waitDisplayId = make(chan struct{}) go func() { defer close(waitDisplayId) fmt.Fprintf(cli.out, "%s\n", createResponse.ID) }() } if *flAutoRemove && (hostConfig.RestartPolicy.Name == "always" || hostConfig.RestartPolicy.Name == "on-failure") { return ErrConflictRestartPolicyAndAutoRemove } // We need to instantiate the chan because the select needs it. It can // be closed but can't be uninitialized. hijacked := make(chan io.Closer) // Block the return until the chan gets closed defer func() { log.Debugf("End of CmdRun(), Waiting for hijack to finish.") if _, ok := <-hijacked; ok { log.Errorf("Hijack did not finish (chan still open)") } }() if config.AttachStdin || config.AttachStdout || config.AttachStderr { var ( out, stderr io.Writer in io.ReadCloser v = url.Values{} ) v.Set("stream", "1") if config.AttachStdin { v.Set("stdin", "1") in = cli.in } if config.AttachStdout { v.Set("stdout", "1") out = cli.out } if config.AttachStderr { v.Set("stderr", "1") if config.Tty { stderr = cli.out } else { stderr = cli.err } } errCh = promise.Go(func() error { return cli.hijack("POST", "/containers/"+createResponse.ID+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked, nil) }) } else { close(hijacked) } // Acknowledge the hijack before starting select { case closer := <-hijacked: // Make sure that the hijack gets closed when returning (results // in closing the hijack chan and freeing server's goroutines) if closer != nil { defer closer.Close() } case err := <-errCh: if err != nil { log.Debugf("Error hijack: %s", err) return err } } //start the container if _, _, err = readBody(cli.call("POST", "/containers/"+createResponse.ID+"/start", nil, nil)); err != nil { return err } if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut { if err := cli.monitorTtySize(createResponse.ID, false); err != nil { log.Errorf("Error monitoring TTY size: %s", err) } } if errCh != nil { if err := <-errCh; err != nil { log.Debugf("Error hijack: %s", err) return err } } // Detached mode: wait for the id to be displayed and return. 
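// (Aside — a hedged sketch of the two paths below; the example invocations
// are illustrative, not taken from this file:
//   docker run -d  busybox true  -> no streams attached: wait for the
//                                   ID-printing goroutine, then return;
//   docker run -it busybox sh    -> streams attached: fall through and
//                                   resolve the container's exit code.)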
if !config.AttachStdout && !config.AttachStderr { // Detached mode <-waitDisplayId return nil } var status int // Attached mode if *flAutoRemove { // Autoremove: wait for the container to finish, retrieve // the exit code and remove the container if _, _, err := readBody(cli.call("POST", "/containers/"+createResponse.ID+"/wait", nil, nil)); err != nil { return err } if _, status, err = getExitCode(cli, createResponse.ID); err != nil { return err } if _, _, err := readBody(cli.call("DELETE", "/containers/"+createResponse.ID+"?v=1", nil, nil)); err != nil { return err } } else { // No Autoremove: Simply retrieve the exit code if !config.Tty { // In non-TTY mode, we can't detach, so we must wait for container exit if status, err = waitForExit(cli, createResponse.ID); err != nil { return err } } else { // In TTY mode, there is a race: if the process dies too slowly, the state could // be updated after the getExitCode call and result in the wrong exit code being reported if _, status, err = getExitCode(cli, createResponse.ID); err != nil { return err } } } if status != 0 { return &utils.StatusError{StatusCode: status} } return nil } func (cli *DockerCli) CmdCp(args ...string) error { cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTDIR|-", "Copy files/folders from a PATH on the container to a HOSTDIR on the host\nrunning the command. Use '-' to write the data\nas a tar file to STDOUT.", true) cmd.Require(flag.Exact, 2) utils.ParseFlags(cmd, args, true) var copyData engine.Env info := strings.Split(cmd.Arg(0), ":") if len(info) != 2 { return fmt.Errorf("Error: Path not specified") } copyData.Set("Resource", info[1]) copyData.Set("HostPath", cmd.Arg(1)) stream, statusCode, err := cli.call("POST", "/containers/"+info[0]+"/copy", copyData, nil) if stream != nil { defer stream.Close() } if statusCode == 404 { return fmt.Errorf("No such container: %v", info[0]) } if err != nil { return err } if statusCode == 200 { dest := copyData.Get("HostPath") if dest == "-" { _, err = io.Copy(cli.out, stream) } else { err = archive.Untar(stream, dest, &archive.TarOptions{NoLchown: true}) } if err != nil { return err } } return nil } func (cli *DockerCli) CmdSave(args ...string) error { cmd := cli.Subcmd("save", "IMAGE [IMAGE...]", "Save one or more images to a tar archive (streamed to STDOUT by default)", true) outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT") cmd.Require(flag.Min, 1) utils.ParseFlags(cmd, args, true) var ( output io.Writer = cli.out err error ) if *outfile != "" { output, err = os.Create(*outfile) if err != nil { return err } } else if cli.isTerminalOut { return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.") } if len(cmd.Args()) == 1 { image := cmd.Arg(0) if err := cli.stream("GET", "/images/"+image+"/get", nil, output, nil); err != nil { return err } } else { v := url.Values{} for _, arg := range cmd.Args() { v.Add("names", arg) } if err := cli.stream("GET", "/images/get?"+v.Encode(), nil, output, nil); err != nil { return err } } return nil } func (cli *DockerCli) CmdLoad(args ...string) error { cmd := cli.Subcmd("load", "", "Load an image from a tar archive on STDIN", true) infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN") cmd.Require(flag.Exact, 0) utils.ParseFlags(cmd, args, true) var ( input io.Reader = cli.in err error ) if *infile != "" { input, err = os.Open(*infile) if err != nil { return err } } if err := cli.stream("POST", "/images/load", input, cli.out, nil); err != nil { return err } return nil } func (cli *DockerCli) CmdExec(args ...string) error { cmd := cli.Subcmd("exec", "CONTAINER COMMAND [ARG...]", "Run a command in a running container", true) execConfig, err := runconfig.ParseExec(cmd, args) // just in case the ParseExec does not exit if execConfig.Container == "" || err != nil { return &utils.StatusError{StatusCode: 1} } stream, _, err := cli.call("POST", "/containers/"+execConfig.Container+"/exec", execConfig, nil) if err != nil { return err } var execResult engine.Env if err := execResult.Decode(stream); err != nil { return err } execID := execResult.Get("Id") if execID == "" { fmt.Fprintf(cli.out, "exec ID empty") return nil } if !execConfig.Detach { if err := cli.CheckTtyInput(execConfig.AttachStdin, execConfig.Tty); err != nil { return err } } else { if _, _, err := readBody(cli.call("POST", "/exec/"+execID+"/start", execConfig, nil)); err != nil { return err } // For now don't print this - wait for when we support exec wait() // fmt.Fprintf(cli.out, "%s\n", execID) return nil } // Interactive exec requested. var ( out, stderr io.Writer in io.ReadCloser hijacked = make(chan io.Closer) errCh chan error ) // Block the return until the chan gets closed defer func() { log.Debugf("End of CmdExec(), Waiting for hijack to finish.") if _, ok := <-hijacked; ok { log.Errorf("Hijack did not finish (chan still open)") } }() if execConfig.AttachStdin { in = cli.in } if execConfig.AttachStdout { out = cli.out } if execConfig.AttachStderr { if execConfig.Tty { stderr = cli.out } else { stderr = cli.err } } errCh = promise.Go(func() error { return cli.hijack("POST", "/exec/"+execID+"/start", execConfig.Tty, in, out, stderr, hijacked, execConfig) }) // Acknowledge the hijack before starting select { case closer := <-hijacked: // Make sure that the hijack gets closed when returning (results // in closing the hijack chan and freeing the server's goroutines).
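// (For reference — the protocol on the hijacked chan, as implemented in
// hijack.go: hijack() sends the underlying io.Closer once the connection
// has been upgraded, and closes the chan on return, so receiving a value
// here means the attach stream is live.)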
if closer != nil { defer closer.Close() } case err := <-errCh: if err != nil { log.Debugf("Error hijack: %s", err) return err } } if execConfig.Tty && cli.isTerminalIn { if err := cli.monitorTtySize(execID, true); err != nil { log.Errorf("Error monitoring TTY size: %s", err) } } if err := <-errCh; err != nil { log.Debugf("Error hijack: %s", err) return err } var status int if _, status, err = getExecExitCode(cli, execID); err != nil { return err } if status != 0 { return &utils.StatusError{StatusCode: status} } return nil } type containerStats struct { Name string CpuPercentage float64 Memory float64 MemoryLimit float64 MemoryPercentage float64 NetworkRx float64 NetworkTx float64 mu sync.RWMutex err error } func (s *containerStats) Collect(cli *DockerCli) { stream, _, err := cli.call("GET", "/containers/"+s.Name+"/stats", nil, nil) if err != nil { s.err = err return } defer stream.Close() var ( previousCpu uint64 previousSystem uint64 start = true dec = json.NewDecoder(stream) u = make(chan error, 1) ) go func() { for { var v *types.Stats if err := dec.Decode(&v); err != nil { u <- err return } var ( memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0 cpuPercent = 0.0 ) if !start { cpuPercent = calculateCpuPercent(previousCpu, previousSystem, v) } start = false s.mu.Lock() s.CpuPercentage = cpuPercent s.Memory = float64(v.MemoryStats.Usage) s.MemoryLimit = float64(v.MemoryStats.Limit) s.MemoryPercentage = memPercent s.NetworkRx = float64(v.Network.RxBytes) s.NetworkTx = float64(v.Network.TxBytes) s.mu.Unlock() previousCpu = v.CpuStats.CpuUsage.TotalUsage previousSystem = v.CpuStats.SystemUsage u <- nil } }() for { select { case <-time.After(2 * time.Second): // zero out the values if we have not received an update within // the specified duration. s.mu.Lock() s.CpuPercentage = 0 s.Memory = 0 s.MemoryPercentage = 0 s.mu.Unlock() case err := <-u: if err != nil { s.mu.Lock() s.err = err s.mu.Unlock() return } } } } func (s *containerStats) Display(w io.Writer) error { s.mu.RLock() defer s.mu.RUnlock() if s.err != nil { return s.err } fmt.Fprintf(w, "%s\t%.2f%%\t%s/%s\t%.2f%%\t%s/%s\n", s.Name, s.CpuPercentage, units.BytesSize(s.Memory), units.BytesSize(s.MemoryLimit), s.MemoryPercentage, units.BytesSize(s.NetworkRx), units.BytesSize(s.NetworkTx)) return nil } func (cli *DockerCli) CmdStats(args ...string) error { cmd := cli.Subcmd("stats", "CONTAINER [CONTAINER...]", "Display a live stream of one or more containers' resource usage statistics", true) cmd.Require(flag.Min, 1) utils.ParseFlags(cmd, args, true) names := cmd.Args() sort.Strings(names) var ( cStats []*containerStats w = tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) ) printHeader := func() { fmt.Fprint(cli.out, "\033[2J") fmt.Fprint(cli.out, "\033[H") fmt.Fprintln(w, "CONTAINER\tCPU %\tMEM USAGE/LIMIT\tMEM %\tNET I/O") } for _, n := range names { s := &containerStats{Name: n} cStats = append(cStats, s) go s.Collect(cli) } // do a quick pause so that any failed connections for containers that do not exist are able to be // evicted before we display the initial or default values. 
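// (Aside — a hedged worked example for the CPU math used by Collect above,
// with invented numbers: previousCpu=1e8, previousSystem=1e9, and a reading
// of TotalUsage=2e8, SystemUsage=2e9 across 4 entries in PercpuUsage gives
// calculateCpuPercent = (1e8/1e9) * 4 * 100 = 40%.)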
time.Sleep(500 * time.Millisecond) var errs []string for _, c := range cStats { c.mu.Lock() if c.err != nil { errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.err)) } c.mu.Unlock() } if len(errs) > 0 { return fmt.Errorf("%s", strings.Join(errs, ", ")) } for _ = range time.Tick(500 * time.Millisecond) { printHeader() toRemove := []int{} for i, s := range cStats { if err := s.Display(w); err != nil { toRemove = append(toRemove, i) } } for j := len(toRemove) - 1; j >= 0; j-- { i := toRemove[j] cStats = append(cStats[:i], cStats[i+1:]...) } if len(cStats) == 0 { return nil } w.Flush() } return nil } func calculateCpuPercent(previousCpu, previousSystem uint64, v *types.Stats) float64 { var ( cpuPercent = 0.0 // calculate the change for the cpu usage of the container in between readings cpuDelta = float64(v.CpuStats.CpuUsage.TotalUsage - previousCpu) // calculate the change for the entire system between readings systemDelta = float64(v.CpuStats.SystemUsage - previousSystem) ) if systemDelta > 0.0 && cpuDelta > 0.0 { cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CpuStats.CpuUsage.PercpuUsage)) * 100.0 } return cpuPercent } docker-1.6.2/api/client/hijack.go0000644000175000017500000001477212524223634016156 0ustar tianontianonpackage client import ( "crypto/tls" "errors" "fmt" "io" "net" "net/http" "net/http/httputil" "os" "runtime" "strings" "time" log "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/autogen/dockerversion" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/stdcopy" "github.com/docker/docker/pkg/term" ) type tlsClientCon struct { *tls.Conn rawConn net.Conn } func (c *tlsClientCon) CloseWrite() error { // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it // on its underlying connection. if cwc, ok := c.rawConn.(interface { CloseWrite() error }); ok { return cwc.CloseWrite() } return nil } func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) { return tlsDialWithDialer(new(net.Dialer), network, addr, config) } // We need to copy Go's implementation of tls.Dial (pkg/cryptor/tls/tls.go) in // order to return our custom tlsClientCon struct which holds both the tls.Conn // object _and_ its underlying raw connection. The rationale for this is that // we need to be able to close the write end of the connection when attaching, // which tls.Conn does not provide. func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { // We want the Timeout and Deadline values from dialer to cover the // whole process: TCP connection and TLS handshake. This means that we // also need to start our own timers now. timeout := dialer.Timeout if !dialer.Deadline.IsZero() { deadlineTimeout := dialer.Deadline.Sub(time.Now()) if timeout == 0 || deadlineTimeout < timeout { timeout = deadlineTimeout } } var errChannel chan error if timeout != 0 { errChannel = make(chan error, 2) time.AfterFunc(timeout, func() { errChannel <- errors.New("") }) } rawConn, err := dialer.Dial(network, addr) if err != nil { return nil, err } // When we set up a TCP connection for hijack, there could be long periods // of inactivity (a long running command with no output) that in certain // network setups may cause ECONNTIMEOUT, leaving the client in an unknown // state. 
Setting TCP KeepAlive on the socket connection will prohibit // ECONNTIMEOUT unless the socket connection truly is broken if tcpConn, ok := rawConn.(*net.TCPConn); ok { tcpConn.SetKeepAlive(true) tcpConn.SetKeepAlivePeriod(30 * time.Second) } colonPos := strings.LastIndex(addr, ":") if colonPos == -1 { colonPos = len(addr) } hostname := addr[:colonPos] // If no ServerName is set, infer the ServerName // from the hostname we're connecting to. if config.ServerName == "" { // Make a copy to avoid polluting argument or default. c := *config c.ServerName = hostname config = &c } conn := tls.Client(rawConn, config) if timeout == 0 { err = conn.Handshake() } else { go func() { errChannel <- conn.Handshake() }() err = <-errChannel } if err != nil { rawConn.Close() return nil, err } // This is where Docker differs from the standard crypto/tls package: we return a // wrapper which holds both the TLS and raw connections. return &tlsClientCon{conn, rawConn}, nil } func (cli *DockerCli) dial() (net.Conn, error) { if cli.tlsConfig != nil && cli.proto != "unix" { // Note that this isn't the standard Go tls.Dial function return tlsDial(cli.proto, cli.addr, cli.tlsConfig) } return net.Dial(cli.proto, cli.addr) } func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer, data interface{}) error { defer func() { if started != nil { close(started) } }() params, err := cli.encodeData(data) if err != nil { return err } req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), params) if err != nil { return err } req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) req.Header.Set("Content-Type", "text/plain") req.Header.Set("Connection", "Upgrade") req.Header.Set("Upgrade", "tcp") req.Host = cli.addr dial, err := cli.dial() // When we set up a TCP connection for hijack, there could be long periods // of inactivity (a long running command with no output) that in certain // network setups may cause ECONNTIMEOUT, leaving the client in an unknown // state. Setting TCP KeepAlive on the socket connection will prohibit // ECONNTIMEOUT unless the socket connection truly is broken if tcpConn, ok := dial.(*net.TCPConn); ok { tcpConn.SetKeepAlive(true) tcpConn.SetKeepAlivePeriod(30 * time.Second) } if err != nil { if strings.Contains(err.Error(), "connection refused") { return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") } return err } clientconn := httputil.NewClientConn(dial, nil) defer clientconn.Close() // Server hijacks the connection, error 'connection closed' expected clientconn.Do(req) rwc, br := clientconn.Hijack() defer rwc.Close() if started != nil { started <- rwc } var receiveStdout chan error var oldState *term.State if in != nil && setRawTerminal && cli.isTerminalIn && os.Getenv("NORAW") == "" { oldState, err = term.SetRawTerminal(cli.inFd) if err != nil { return err } defer term.RestoreTerminal(cli.inFd, oldState) } if stdout != nil || stderr != nil { receiveStdout = promise.Go(func() (err error) { defer func() { if in != nil { if setRawTerminal && cli.isTerminalIn { term.RestoreTerminal(cli.inFd, oldState) } // For some reason this Close call blocks on darwin. // As the client exits right after, simply discard the close // until we find a better solution.
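// (Context for the copy below, stated with some hedging: with a TTY the
// stream is raw and copied verbatim; without one the daemon multiplexes
// stdout/stderr into stdcopy frames — an 8-byte header encoding the stream
// type and payload length — which stdcopy.StdCopy splits back onto the two
// writers.)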
if runtime.GOOS != "darwin" { in.Close() } } }() // When TTY is ON, use regular copy if setRawTerminal && stdout != nil { _, err = io.Copy(stdout, br) } else { _, err = stdcopy.StdCopy(stdout, stderr, br) } log.Debugf("[hijack] End of stdout") return err }) } sendStdin := promise.Go(func() error { if in != nil { io.Copy(rwc, in) log.Debugf("[hijack] End of stdin") } if conn, ok := rwc.(interface { CloseWrite() error }); ok { if err := conn.CloseWrite(); err != nil { log.Debugf("Couldn't send EOF: %s", err) } } // Discard errors due to pipe interruption return nil }) if stdout != nil || stderr != nil { if err := <-receiveStdout; err != nil { log.Debugf("Error receiveStdout: %s", err) return err } } if !cli.isTerminalIn { if err := <-sendStdin; err != nil { log.Debugf("Error sendStdin: %s", err) return err } } return nil } docker-1.6.2/api/client/cli.go0000644000175000017500000001064712524223634015471 0ustar tianontianonpackage client import ( "crypto/tls" "encoding/json" "errors" "fmt" "io" "net" "net/http" "os" "reflect" "strings" "text/template" "time" "github.com/docker/docker/pkg/homedir" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/term" "github.com/docker/docker/registry" ) type DockerCli struct { proto string addr string configFile *registry.ConfigFile in io.ReadCloser out io.Writer err io.Writer keyFile string tlsConfig *tls.Config scheme string // inFd holds file descriptor of the client's STDIN, if it's a valid file inFd uintptr // outFd holds file descriptor of the client's STDOUT, if it's a valid file outFd uintptr // isTerminalIn describes if client's STDIN is a TTY isTerminalIn bool // isTerminalOut describes if client's STDOUT is a TTY isTerminalOut bool transport *http.Transport } var funcMap = template.FuncMap{ "json": func(v interface{}) string { a, _ := json.Marshal(v) return string(a) }, } func (cli *DockerCli) getMethod(args ...string) (func(...string) error, bool) { camelArgs := make([]string, len(args)) for i, s := range args { if len(s) == 0 { return nil, false } camelArgs[i] = strings.ToUpper(s[:1]) + strings.ToLower(s[1:]) } methodName := "Cmd" + strings.Join(camelArgs, "") method := reflect.ValueOf(cli).MethodByName(methodName) if !method.IsValid() { return nil, false } return method.Interface().(func(...string) error), true } // Cmd executes the specified command func (cli *DockerCli) Cmd(args ...string) error { if len(args) > 1 { method, exists := cli.getMethod(args[:2]...) if exists { return method(args[2:]...) } } if len(args) > 0 { method, exists := cli.getMethod(args[0]) if !exists { fmt.Fprintf(cli.err, "docker: '%s' is not a docker command. See 'docker --help'.\n", args[0]) os.Exit(1) } return method(args[1:]...) 
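// (Illustrative example of the dispatch above, not an exhaustive list:
// "docker run ..." resolves to CmdRun — getMethod capitalizes "run" to
// "Run" and prefixes "Cmd" before the reflection lookup.)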
} return cli.CmdHelp() } func (cli *DockerCli) Subcmd(name, signature, description string, exitOnError bool) *flag.FlagSet { var errorHandling flag.ErrorHandling if exitOnError { errorHandling = flag.ExitOnError } else { errorHandling = flag.ContinueOnError } flags := flag.NewFlagSet(name, errorHandling) flags.Usage = func() { options := "" if signature != "" { signature = " " + signature } if flags.FlagCountUndeprecated() > 0 { options = " [OPTIONS]" } fmt.Fprintf(cli.out, "\nUsage: docker %s%s%s\n\n%s\n\n", name, options, signature, description) flags.SetOutput(cli.out) flags.PrintDefaults() os.Exit(0) } return flags } func (cli *DockerCli) LoadConfigFile() (err error) { cli.configFile, err = registry.LoadConfig(homedir.Get()) if err != nil { fmt.Fprintf(cli.err, "WARNING: %s\n", err) } return err } func (cli *DockerCli) CheckTtyInput(attachStdin, ttyMode bool) error { // In order to attach to a container tty, input stream for the client must // be a tty itself: redirecting or piping the client standard input is // incompatible with `docker run -t`, `docker exec -t` or `docker attach`. if ttyMode && attachStdin && !cli.isTerminalIn { return errors.New("cannot enable tty mode on non tty input") } return nil } func NewDockerCli(in io.ReadCloser, out, err io.Writer, keyFile string, proto, addr string, tlsConfig *tls.Config) *DockerCli { var ( inFd uintptr outFd uintptr isTerminalIn = false isTerminalOut = false scheme = "http" ) if tlsConfig != nil { scheme = "https" } if in != nil { inFd, isTerminalIn = term.GetFdInfo(in) } if out != nil { outFd, isTerminalOut = term.GetFdInfo(out) } if err == nil { err = out } // The transport is created here for reuse during the client session tr := &http.Transport{ TLSClientConfig: tlsConfig, } // Why 32? See issue 8035 timeout := 32 * time.Second if proto == "unix" { // no need in compressing for local communications tr.DisableCompression = true tr.Dial = func(_, _ string) (net.Conn, error) { return net.DialTimeout(proto, addr, timeout) } } else { tr.Proxy = http.ProxyFromEnvironment tr.Dial = (&net.Dialer{Timeout: timeout}).Dial } return &DockerCli{ proto: proto, addr: addr, in: in, out: out, err: err, keyFile: keyFile, inFd: inFd, outFd: outFd, isTerminalIn: isTerminalIn, isTerminalOut: isTerminalOut, tlsConfig: tlsConfig, scheme: scheme, transport: tr, } } docker-1.6.2/api/client/utils.go0000644000175000017500000002206212524223634016054 0ustar tianontianonpackage client import ( "bytes" "encoding/base64" "encoding/json" "errors" "fmt" "io" "io/ioutil" "net/http" "net/url" "os" gosignal "os/signal" "runtime" "strconv" "strings" "time" log "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/autogen/dockerversion" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/stdcopy" "github.com/docker/docker/pkg/term" "github.com/docker/docker/registry" "github.com/docker/docker/utils" ) var ( ErrConnectionRefused = errors.New("Cannot connect to the Docker daemon. 
Is 'docker -d' running on this host?") ) func (cli *DockerCli) HTTPClient() *http.Client { return &http.Client{Transport: cli.transport} } func (cli *DockerCli) encodeData(data interface{}) (*bytes.Buffer, error) { params := bytes.NewBuffer(nil) if data != nil { if env, ok := data.(engine.Env); ok { if err := env.Encode(params); err != nil { return nil, err } } else { buf, err := json.Marshal(data) if err != nil { return nil, err } if _, err := params.Write(buf); err != nil { return nil, err } } } return params, nil } func (cli *DockerCli) clientRequest(method, path string, in io.Reader, headers map[string][]string) (io.ReadCloser, string, int, error) { expectedPayload := (method == "POST" || method == "PUT") if expectedPayload && in == nil { in = bytes.NewReader([]byte{}) } req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), in) if err != nil { return nil, "", -1, err } req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) req.URL.Host = cli.addr req.URL.Scheme = cli.scheme if headers != nil { for k, v := range headers { req.Header[k] = v } } if expectedPayload && req.Header.Get("Content-Type") == "" { req.Header.Set("Content-Type", "text/plain") } resp, err := cli.HTTPClient().Do(req) statusCode := -1 if resp != nil { statusCode = resp.StatusCode } if err != nil { if strings.Contains(err.Error(), "connection refused") { return nil, "", statusCode, ErrConnectionRefused } if cli.tlsConfig == nil { return nil, "", statusCode, fmt.Errorf("%v. Are you trying to connect to a TLS-enabled daemon without TLS?", err) } return nil, "", statusCode, fmt.Errorf("An error occurred trying to connect: %v", err) } if statusCode < 200 || statusCode >= 400 { body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, "", statusCode, err } if len(body) == 0 { return nil, "", statusCode, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(statusCode), req.URL) } return nil, "", statusCode, fmt.Errorf("Error response from daemon: %s", bytes.TrimSpace(body)) } return resp.Body, resp.Header.Get("Content-Type"), statusCode, nil } func (cli *DockerCli) clientRequestAttemptLogin(method, path string, in io.Reader, out io.Writer, index *registry.IndexInfo, cmdName string) (io.ReadCloser, int, error) { cmdAttempt := func(authConfig registry.AuthConfig) (io.ReadCloser, int, error) { buf, err := json.Marshal(authConfig) if err != nil { return nil, -1, err } registryAuthHeader := []string{ base64.URLEncoding.EncodeToString(buf), } // begin the request body, contentType, statusCode, err := cli.clientRequest(method, path, in, map[string][]string{ "X-Registry-Auth": registryAuthHeader, }) if err == nil && out != nil { // If we are streaming output, complete the stream since // errors may not appear until later. err = cli.streamBody(body, contentType, true, out, nil) } if err != nil { // Since errors in a stream appear after status 200 has been written, // we may need to change the status code. 
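// (For example — hedged, since the exact message depends on the registry —
// a push rejected for missing credentials surfaces as an error string in
// the already-started JSON stream rather than as an HTTP 401; the substring
// checks below map it back to StatusUnauthorized so the login-and-retry
// path in the caller can trigger.)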
if strings.Contains(err.Error(), "Authentication is required") || strings.Contains(err.Error(), "Status 401") || strings.Contains(err.Error(), "status code 401") { statusCode = http.StatusUnauthorized } } return body, statusCode, err } // Resolve the Auth config relevant for this server authConfig := cli.configFile.ResolveAuthConfig(index) body, statusCode, err := cmdAttempt(authConfig) if statusCode == http.StatusUnauthorized { fmt.Fprintf(cli.out, "\nPlease login prior to %s:\n", cmdName) if err = cli.CmdLogin(index.GetAuthConfigKey()); err != nil { return nil, -1, err } authConfig = cli.configFile.ResolveAuthConfig(index) return cmdAttempt(authConfig) } return body, statusCode, err } func (cli *DockerCli) call(method, path string, data interface{}, headers map[string][]string) (io.ReadCloser, int, error) { params, err := cli.encodeData(data) if err != nil { return nil, -1, err } if data != nil { if headers == nil { headers = make(map[string][]string) } headers["Content-Type"] = []string{"application/json"} } body, _, statusCode, err := cli.clientRequest(method, path, params, headers) return body, statusCode, err } func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error { return cli.streamHelper(method, path, true, in, out, nil, headers) } func (cli *DockerCli) streamHelper(method, path string, setRawTerminal bool, in io.Reader, stdout, stderr io.Writer, headers map[string][]string) error { body, contentType, _, err := cli.clientRequest(method, path, in, headers) if err != nil { return err } return cli.streamBody(body, contentType, setRawTerminal, stdout, stderr) } func (cli *DockerCli) streamBody(body io.ReadCloser, contentType string, setRawTerminal bool, stdout, stderr io.Writer) error { defer body.Close() if api.MatchesContentType(contentType, "application/json") { return utils.DisplayJSONMessagesStream(body, stdout, cli.outFd, cli.isTerminalOut) } if stdout != nil || stderr != nil { // When TTY is ON, use regular copy var err error if setRawTerminal { _, err = io.Copy(stdout, body) } else { _, err = stdcopy.StdCopy(stdout, stderr, body) } log.Debugf("[stream] End of stdout") return err } return nil } func (cli *DockerCli) resizeTty(id string, isExec bool) { height, width := cli.getTtySize() if height == 0 && width == 0 { return } v := url.Values{} v.Set("h", strconv.Itoa(height)) v.Set("w", strconv.Itoa(width)) path := "" if !isExec { path = "/containers/" + id + "/resize?" } else { path = "/exec/" + id + "/resize?" } if _, _, err := readBody(cli.call("POST", path+v.Encode(), nil, nil)); err != nil { log.Debugf("Error resize: %s", err) } } func waitForExit(cli *DockerCli, containerID string) (int, error) { stream, _, err := cli.call("POST", "/containers/"+containerID+"/wait", nil, nil) if err != nil { return -1, err } var out engine.Env if err := out.Decode(stream); err != nil { return -1, err } return out.GetInt("StatusCode"), nil } // getExitCode perform an inspect on the container. It returns // the running state and the exit code. func getExitCode(cli *DockerCli, containerID string) (bool, int, error) { stream, _, err := cli.call("GET", "/containers/"+containerID+"/json", nil, nil) if err != nil { // If we can't connect, then the daemon probably died. 
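// (Note the deliberate asymmetry below: ErrConnectionRefused is swallowed
// and reported as "not running, exit code -1", while any other error is
// returned to the caller.)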
if err != ErrConnectionRefused { return false, -1, err } return false, -1, nil } var result engine.Env if err := result.Decode(stream); err != nil { return false, -1, err } state := result.GetSubEnv("State") return state.GetBool("Running"), state.GetInt("ExitCode"), nil } // getExecExitCode perform an inspect on the exec command. It returns // the running state and the exit code. func getExecExitCode(cli *DockerCli, execID string) (bool, int, error) { stream, _, err := cli.call("GET", "/exec/"+execID+"/json", nil, nil) if err != nil { // If we can't connect, then the daemon probably died. if err != ErrConnectionRefused { return false, -1, err } return false, -1, nil } var result engine.Env if err := result.Decode(stream); err != nil { return false, -1, err } return result.GetBool("Running"), result.GetInt("ExitCode"), nil } func (cli *DockerCli) monitorTtySize(id string, isExec bool) error { cli.resizeTty(id, isExec) if runtime.GOOS == "windows" { go func() { prevH, prevW := cli.getTtySize() for { time.Sleep(time.Millisecond * 250) h, w := cli.getTtySize() if prevW != w || prevH != h { cli.resizeTty(id, isExec) } prevH = h prevW = w } }() } else { sigchan := make(chan os.Signal, 1) gosignal.Notify(sigchan, signal.SIGWINCH) go func() { for _ = range sigchan { cli.resizeTty(id, isExec) } }() } return nil } func (cli *DockerCli) getTtySize() (int, int) { if !cli.isTerminalOut { return 0, 0 } ws, err := term.GetWinsize(cli.outFd) if err != nil { log.Debugf("Error getting size: %s", err) if ws == nil { return 0, 0 } } return int(ws.Height), int(ws.Width) } func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) { if stream != nil { defer stream.Close() } if err != nil { return nil, statusCode, err } body, err := ioutil.ReadAll(stream) if err != nil { return nil, -1, err } return body, statusCode, nil } docker-1.6.2/api/common.go0000644000175000017500000000712112524223634014725 0ustar tianontianonpackage api import ( "fmt" "mime" "os" "path/filepath" "strings" log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/version" "github.com/docker/libtrust" ) const ( APIVERSION version.Version = "1.18" DEFAULTHTTPHOST = "127.0.0.1" DEFAULTUNIXSOCKET = "/var/run/docker.sock" DefaultDockerfileName string = "Dockerfile" ) func ValidateHost(val string) (string, error) { host, err := parsers.ParseHost(DEFAULTHTTPHOST, DEFAULTUNIXSOCKET, val) if err != nil { return val, err } return host, nil } // TODO remove, used on < 1.5 in getContainersJSON func DisplayablePorts(ports *engine.Table) string { var ( result = []string{} hostMappings = []string{} firstInGroupMap map[string]int lastInGroupMap map[string]int ) firstInGroupMap = make(map[string]int) lastInGroupMap = make(map[string]int) ports.SetKey("PrivatePort") ports.Sort() for _, port := range ports.Data { var ( current = port.GetInt("PrivatePort") portKey = port.Get("Type") firstInGroup int lastInGroup int ) if port.Get("IP") != "" { if port.GetInt("PublicPort") != current { hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.Get("IP"), port.GetInt("PublicPort"), port.GetInt("PrivatePort"), port.Get("Type"))) continue } portKey = fmt.Sprintf("%s/%s", port.Get("IP"), port.Get("Type")) } firstInGroup = firstInGroupMap[portKey] lastInGroup = lastInGroupMap[portKey] if firstInGroup == 0 { firstInGroupMap[portKey] = current lastInGroupMap[portKey] = current continue } if current == (lastInGroup + 1) { lastInGroupMap[portKey] = current 
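// (Hedged example of the grouping this loop produces: unmapped private
// ports 80, 81, 82/tcp collapse to "80-82/tcp" via FormGroup, while a
// published port such as 1.2.3.4:8080->80/tcp is emitted individually
// through hostMappings above.)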
continue } result = append(result, FormGroup(portKey, firstInGroup, lastInGroup)) firstInGroupMap[portKey] = current lastInGroupMap[portKey] = current } for portKey, firstInGroup := range firstInGroupMap { result = append(result, FormGroup(portKey, firstInGroup, lastInGroupMap[portKey])) } result = append(result, hostMappings...) return strings.Join(result, ", ") } func FormGroup(key string, start, last int) string { var ( group string parts = strings.Split(key, "/") groupType = parts[0] ip = "" ) if len(parts) > 1 { ip = parts[0] groupType = parts[1] } if start == last { group = fmt.Sprintf("%d", start) } else { group = fmt.Sprintf("%d-%d", start, last) } if ip != "" { group = fmt.Sprintf("%s:%s->%s", ip, group, group) } return fmt.Sprintf("%s/%s", group, groupType) } func MatchesContentType(contentType, expectedType string) bool { mimetype, _, err := mime.ParseMediaType(contentType) if err != nil { log.Errorf("Error parsing media type: %s error: %v", contentType, err) } return err == nil && mimetype == expectedType } // LoadOrCreateTrustKey attempts to load the libtrust key at the given path, // otherwise generates a new one func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700) if err != nil { return nil, err } trustKey, err := libtrust.LoadKeyFile(trustKeyPath) if err == libtrust.ErrKeyFileDoesNotExist { trustKey, err = libtrust.GenerateECP256PrivateKey() if err != nil { return nil, fmt.Errorf("Error generating key: %s", err) } if err := libtrust.SaveKey(trustKeyPath, trustKey); err != nil { return nil, fmt.Errorf("Error saving key file: %s", err) } } else if err != nil { return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err) } return trustKey, nil } docker-1.6.2/api/api_unit_test.go0000644000175000017500000000052512524223634016305 0ustar tianontianonpackage api import ( "testing" ) func TestJsonContentType(t *testing.T) { if !MatchesContentType("application/json", "application/json") { t.Fail() } if !MatchesContentType("application/json; charset=utf-8", "application/json") { t.Fail() } if MatchesContentType("dockerapplication/json", "application/json") { t.Fail() } } docker-1.6.2/hack/0000755000175000017500000000000012524223634013242 5ustar tianontianondocker-1.6.2/hack/release.sh0000755000175000017500000002504412524223634015226 0ustar tianontianon#!/usr/bin/env bash set -e # This script looks for bundles built by make.sh, and releases them on a # public S3 bucket. # # Bundles should be available for the VERSION string passed as argument. # # The correct way to call this script is inside a container built by the # official Dockerfile at the root of the Docker source code. The Dockerfile, # make.sh and release.sh should all be from the same source code revision. set -o pipefail # Print a usage message and exit. usage() { cat >&2 <<'EOF' To run, I need: - to be in a container generated by the Dockerfile at the top of the Docker repository; - to be provided with the name of an S3 bucket, in environment variable AWS_S3_BUCKET; - to be provided with AWS credentials for this S3 bucket, in environment variables AWS_ACCESS_KEY and AWS_SECRET_KEY; - the passphrase to unlock the GPG key which will sign the deb packages (passed as environment variable GPG_PASSPHRASE); - a generous amount of good will and nice manners. The canonical way to run me is to run the image produced by the Dockerfile: e.g.:" docker run -e AWS_S3_BUCKET=test.docker.com \ -e AWS_ACCESS_KEY=... \ -e AWS_SECRET_KEY=... 
\ -e GPG_PASSPHRASE=... \ -i -t --privileged \ docker ./hack/release.sh EOF exit 1 } [ "$AWS_S3_BUCKET" ] || usage [ "$AWS_ACCESS_KEY" ] || usage [ "$AWS_SECRET_KEY" ] || usage [ "$GPG_PASSPHRASE" ] || usage [ -d /go/src/github.com/docker/docker ] || usage cd /go/src/github.com/docker/docker [ -x hack/make.sh ] || usage RELEASE_BUNDLES=( binary cross tgz ubuntu ) if [ "$1" != '--release-regardless-of-test-failure' ]; then RELEASE_BUNDLES=( test-unit test-integration "${RELEASE_BUNDLES[@]}" test-integration-cli ) fi VERSION=$(cat VERSION) BUCKET=$AWS_S3_BUCKET # These are the 2 keys we've used to sign the deb's # release (get.docker.com) # GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9" # test (test.docker.com) # GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6" setup_s3() { # Try creating the bucket. Ignore errors (it might already exist). s3cmd mb s3://$BUCKET 2>/dev/null || true # Check access to the bucket. # s3cmd has no useful exit status, so we cannot check that. # Instead, we check if it outputs anything on standard output. # (When there are problems, it uses standard error instead.) s3cmd info s3://$BUCKET | grep -q . # Make the bucket accessible through website endpoints. s3cmd ws-create --ws-index index --ws-error error s3://$BUCKET } # write_to_s3 uploads the contents of standard input to the specified S3 url. write_to_s3() { DEST=$1 F=`mktemp` cat > $F s3cmd --acl-public --mime-type='text/plain' put $F $DEST rm -f $F } s3_url() { case "$BUCKET" in get.docker.com|test.docker.com) echo "https://$BUCKET" ;; *) s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }' ;; esac } build_all() { if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then echo >&2 echo >&2 'The build or tests appear to have failed.' echo >&2 echo >&2 'You, as the release maintainer, now have a couple options:' echo >&2 '- delay release and fix issues' echo >&2 '- delay release and fix issues' echo >&2 '- did we mention how important this is? issues need fixing :)' echo >&2 echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,' echo >&2 ' really knows all the hairy problems at hand with the current release' echo >&2 ' issues) may bypass this checking by running this script again with the' echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip' echo >&2 ' running the test suite, and will only build the binaries and packages. Please' echo >&2 ' avoid using this if at all possible.' echo >&2 echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass' echo >&2 ' should be used. If there are release issues, we should always err on the' echo >&2 ' side of caution.' 
echo >&2 exit 1 fi } upload_release_build() { src="$1" dst="$2" latest="$3" echo echo "Uploading $src" echo " to $dst" echo s3cmd --follow-symlinks --preserve --acl-public put "$src" "$dst" if [ "$latest" ]; then echo echo "Copying to $latest" echo s3cmd --acl-public cp "$dst" "$latest" fi # get hash files too (see hash_files() in hack/make.sh) for hashAlgo in md5 sha256; do if [ -e "$src.$hashAlgo" ]; then echo echo "Uploading $src.$hashAlgo" echo " to $dst.$hashAlgo" echo s3cmd --follow-symlinks --preserve --acl-public --mime-type='text/plain' put "$src.$hashAlgo" "$dst.$hashAlgo" if [ "$latest" ]; then echo echo "Copying to $latest.$hashAlgo" echo s3cmd --acl-public cp "$dst.$hashAlgo" "$latest.$hashAlgo" fi fi done } release_build() { GOOS=$1 GOARCH=$2 binDir=bundles/$VERSION/cross/$GOOS/$GOARCH tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH binary=docker-$VERSION tgz=docker-$VERSION.tgz latestBase= if [ -z "$NOLATEST" ]; then latestBase=docker-latest fi # we need to map our GOOS and GOARCH to uname values # see https://en.wikipedia.org/wiki/Uname # ie, GOOS=linux -> "uname -s"=Linux s3Os=$GOOS case "$s3Os" in darwin) s3Os=Darwin ;; freebsd) s3Os=FreeBSD ;; linux) s3Os=Linux ;; windows) s3Os=Windows binary+='.exe' if [ "$latestBase" ]; then latestBase+='.exe' fi ;; *) echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'" exit 1 ;; esac s3Arch=$GOARCH case "$s3Arch" in amd64) s3Arch=x86_64 ;; 386) s3Arch=i386 ;; arm) s3Arch=armel # someday, we might potentially support mutliple GOARM values, in which case we might get armhf here too ;; *) echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'" exit 1 ;; esac s3Dir=s3://$BUCKET/builds/$s3Os/$s3Arch latest= latestTgz= if [ "$latestBase" ]; then latest="$s3Dir/$latestBase" latestTgz="$s3Dir/$latestBase.tgz" fi if [ ! -x "$binDir/$binary" ]; then echo >&2 "error: can't find $binDir/$binary - was it compiled properly?" exit 1 fi if [ ! -f "$tgzDir/$tgz" ]; then echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?" exit 1 fi upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest" upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz" } # Upload the 'ubuntu' bundle to S3: # 1. A full APT repository is published at $BUCKET/ubuntu/ # 2. 
Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/index release_ubuntu() { [ -e bundles/$VERSION/ubuntu ] || { echo >&2 './hack/make.sh must be run before release_ubuntu' exit 1 } # Sign our packages dpkg-sig -g "--passphrase $GPG_PASSPHRASE" -k releasedocker \ --sign builder bundles/$VERSION/ubuntu/*.deb # Setup the APT repo APTDIR=bundles/$VERSION/ubuntu/apt mkdir -p $APTDIR/conf $APTDIR/db s3cmd sync s3://$BUCKET/ubuntu/db/ $APTDIR/db/ || true cat > $APTDIR/conf/distributions < bundles/$VERSION/ubuntu/gpg s3cmd --acl-public put bundles/$VERSION/ubuntu/gpg s3://$BUCKET/gpg local gpgFingerprint=36A1D7869245C8950F966E92D8576A8BA88D21E9 if [[ $BUCKET == test* ]]; then gpgFingerprint=740B314AE3941731B942C66ADF4FD13717AAD7D6 fi # Upload repo s3cmd --acl-public sync $APTDIR/ s3://$BUCKET/ubuntu/ cat < /etc/apt/sources.list.d/docker.list # Then import the repository key apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys $gpgFingerprint # Install docker apt-get update apt-get install -y lxc-docker # # Alternatively, just use the curl-able install.sh script provided at $(s3_url) # EOF # Add redirect at /ubuntu/info for URL-backwards-compatibility rm -rf /tmp/emptyfile && touch /tmp/emptyfile s3cmd --acl-public --add-header='x-amz-website-redirect-location:/ubuntu/' --mime-type='text/plain' put /tmp/emptyfile s3://$BUCKET/ubuntu/info echo "APT repository uploaded. Instructions available at $(s3_url)/ubuntu" } # Upload binaries and tgz files to S3 release_binaries() { [ -e bundles/$VERSION/cross/linux/amd64/docker-$VERSION ] || { echo >&2 './hack/make.sh must be run before release_binaries' exit 1 } for d in bundles/$VERSION/cross/*/*; do GOARCH="$(basename "$d")" GOOS="$(basename "$(dirname "$d")")" release_build "$GOOS" "$GOARCH" done # TODO create redirect from builds/*/i686 to builds/*/i386 cat </dev/null || { gpg --gen-key --batch < /dev/null ) rm -rf "$dir" docker-1.6.2/hack/make/.go-autogen0000644000175000017500000000056212524223634016230 0ustar tianontianon#!/bin/bash rm -rf autogen mkdir -p autogen/dockerversion cat > autogen/dockerversion/dockerversion.go <&2 'error: binary and cross must be run before tgz' false fi for d in "$CROSS/"*/*; do GOARCH="$(basename "$d")" GOOS="$(basename "$(dirname "$d")")" BINARY_NAME="docker-$VERSION" BINARY_EXTENSION="$(export GOOS && binary_extension)" BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" mkdir -p "$DEST/$GOOS/$GOARCH" TGZ="$DEST/$GOOS/$GOARCH/$BINARY_NAME.tgz" mkdir -p "$DEST/build" mkdir -p "$DEST/build/usr/local/bin" cp -L "$d/$BINARY_FULLNAME" "$DEST/build/usr/local/bin/docker$BINARY_EXTENSION" tar --numeric-owner --owner 0 -C "$DEST/build" -czf "$TGZ" usr hash_files "$TGZ" rm -rf "$DEST/build" echo "Created tgz: $TGZ" done docker-1.6.2/hack/make/dyngccgo0000644000175000017500000000135012524223634015676 0ustar tianontianon#!/bin/bash set -e DEST=$1 if [ -z "$DOCKER_CLIENTONLY" ]; then source "$(dirname "$BASH_SOURCE")/.dockerinit-gccgo" hash_files "$DEST/dockerinit-$VERSION" else # DOCKER_CLIENTONLY must be truthy, so we don't need to bother with dockerinit :) export DOCKER_INITSHA1="" fi # DOCKER_INITSHA1 is exported so that other bundlescripts can easily access it later without recalculating it ( export IAMSTATIC="false" export EXTLDFLAGS_STATIC_DOCKER='' export LDFLAGS_STATIC_DOCKER='' export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here 
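# (A hedged note on the two substitutions above: "${BUILDFLAGS[@]/netgo /}"
#  performs a pattern replacement inside each array element, so a tags
#  string like "netgo static_build" loses "netgo " and then "static_build "
#  before the dynamic gccgo build is sourced.)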
source "$(dirname "$BASH_SOURCE")/gccgo" ) docker-1.6.2/hack/make/ubuntu0000644000175000017500000001374312524223634015434 0ustar tianontianon#!/bin/bash DEST=$1 PKGVERSION="${VERSION//-/'~'}" # if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then GIT_UNIX="$(git log -1 --pretty='%at')" GIT_DATE="$(date --date "@$GIT_UNIX" +'%Y%m%d.%H%M%S')" GIT_COMMIT="$(git log -1 --pretty='%h')" GIT_VERSION="git${GIT_DATE}.0.${GIT_COMMIT}" # GIT_VERSION is now something like 'git20150128.112847.0.17e840a' PKGVERSION="$PKGVERSION~$GIT_VERSION" fi # $ dpkg --compare-versions 1.5.0 gt 1.5.0~rc1 && echo true || echo false # true # $ dpkg --compare-versions 1.5.0~rc1 gt 1.5.0~git20150128.112847.17e840a && echo true || echo false # true # $ dpkg --compare-versions 1.5.0~git20150128.112847.17e840a gt 1.5.0~dev~git20150128.112847.17e840a && echo true || echo false # true # ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)" PACKAGE_URL="http://www.docker.com/" PACKAGE_MAINTAINER="support@docker.com" PACKAGE_DESCRIPTION="Linux container runtime Docker complements LXC with a high-level API which operates at the process level. It runs unix processes with strong guarantees of isolation and repeatability across servers. Docker is a great building block for automating distributed systems: large-scale web deployments, database clusters, continuous deployment systems, private PaaS, service-oriented architectures, etc." PACKAGE_LICENSE="Apache-2.0" # Build docker as an ubuntu package using FPM and REPREPRO (sue me). # bundle_binary must be called first. bundle_ubuntu() { DIR=$DEST/build # Include our udev rules mkdir -p $DIR/etc/udev/rules.d cp contrib/udev/80-docker.rules $DIR/etc/udev/rules.d/ # Include our init scripts mkdir -p $DIR/etc/init cp contrib/init/upstart/docker.conf $DIR/etc/init/ mkdir -p $DIR/etc/init.d cp contrib/init/sysvinit-debian/docker $DIR/etc/init.d/ mkdir -p $DIR/etc/default cp contrib/init/sysvinit-debian/docker.default $DIR/etc/default/docker mkdir -p $DIR/lib/systemd/system cp contrib/init/systemd/docker.{service,socket} $DIR/lib/systemd/system/ # Include contributed completions mkdir -p $DIR/etc/bash_completion.d cp contrib/completion/bash/docker $DIR/etc/bash_completion.d/ mkdir -p $DIR/usr/share/zsh/vendor-completions cp contrib/completion/zsh/_docker $DIR/usr/share/zsh/vendor-completions/ mkdir -p $DIR/etc/fish/completions cp contrib/completion/fish/docker.fish $DIR/etc/fish/completions/ # Include contributed man pages docs/man/md2man-all.sh -q manRoot="$DIR/usr/share/man" mkdir -p "$manRoot" for manDir in docs/man/man?; do manBase="$(basename "$manDir")" # "man1" for manFile in "$manDir"/*; do manName="$(basename "$manFile")" # "docker-build.1" mkdir -p "$manRoot/$manBase" gzip -c "$manFile" > "$manRoot/$manBase/$manName.gz" done done # Copy the binary # This will fail if the binary bundle hasn't been built mkdir -p $DIR/usr/bin cp $DEST/../binary/docker-$VERSION $DIR/usr/bin/docker # Generate postinst/prerm/postrm scripts cat > $DEST/postinst <<'EOF' #!/bin/sh set -e set -u if [ "$1" = 'configure' ] && [ -z "$2" ]; then if ! getent group docker > /dev/null; then groupadd --system docker fi fi if ! 
{ [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; }; then # we only need to do this if upstart isn't in charge update-rc.d docker defaults > /dev/null || true fi if [ -n "$2" ]; then _dh_action=restart else _dh_action=start fi service docker $_dh_action 2>/dev/null || true #DEBHELPER# EOF cat > $DEST/prerm <<'EOF' #!/bin/sh set -e set -u service docker stop 2>/dev/null || true #DEBHELPER# EOF cat > $DEST/postrm <<'EOF' #!/bin/sh set -e set -u if [ "$1" = "purge" ] ; then update-rc.d docker remove > /dev/null || true fi # In case this system is running systemd, we make systemd reload the unit files # to pick up changes. if [ -d /run/systemd/system ] ; then systemctl --system daemon-reload > /dev/null || true fi #DEBHELPER# EOF # TODO swaths of these were borrowed from debhelper's auto-inserted stuff, because we're still using fpm - we need to use debhelper instead, and somehow reconcile Ubuntu that way chmod +x $DEST/postinst $DEST/prerm $DEST/postrm ( # switch directories so we create *.deb in the right folder cd $DEST # create lxc-docker-VERSION package fpm -s dir -C $DIR \ --name lxc-docker-$VERSION --version "$PKGVERSION" \ --after-install $DEST/postinst \ --before-remove $DEST/prerm \ --after-remove $DEST/postrm \ --architecture "$PACKAGE_ARCHITECTURE" \ --prefix / \ --depends iptables \ --deb-recommends aufs-tools \ --deb-recommends ca-certificates \ --deb-recommends git \ --deb-recommends xz-utils \ --deb-recommends 'cgroupfs-mount | cgroup-lite' \ --description "$PACKAGE_DESCRIPTION" \ --maintainer "$PACKAGE_MAINTAINER" \ --conflicts docker \ --conflicts docker.io \ --conflicts lxc-docker-virtual-package \ --provides lxc-docker \ --provides lxc-docker-virtual-package \ --replaces lxc-docker \ --replaces lxc-docker-virtual-package \ --url "$PACKAGE_URL" \ --license "$PACKAGE_LICENSE" \ --config-files /etc/udev/rules.d/80-docker.rules \ --config-files /etc/init/docker.conf \ --config-files /etc/init.d/docker \ --config-files /etc/default/docker \ --deb-compression gz \ -t deb . # TODO replace "Suggests: cgroup-lite" with "Recommends: cgroupfs-mount | cgroup-lite" once cgroupfs-mount is available # create empty lxc-docker wrapper package fpm -s empty \ --name lxc-docker --version "$PKGVERSION" \ --architecture "$PACKAGE_ARCHITECTURE" \ --depends lxc-docker-$VERSION \ --description "$PACKAGE_DESCRIPTION" \ --maintainer "$PACKAGE_MAINTAINER" \ --url "$PACKAGE_URL" \ --license "$PACKAGE_LICENSE" \ --deb-compression gz \ -t deb ) # clean up after ourselves so we have a clean output directory rm $DEST/postinst $DEST/prerm $DEST/postrm rm -r $DIR } bundle_ubuntu docker-1.6.2/hack/make/.integration-daemon-start0000644000175000017500000000253312524223634021102 0ustar tianontianon#!/bin/bash # see test-integration-cli for example usage of this script export PATH="$DEST/../binary:$DEST/../dynbinary:$DEST/../gccgo:$DEST/../dyngccgo:$PATH" if ! 
command -v docker &> /dev/null; then echo >&2 'error: binary or dynbinary must be run before .integration-daemon-start' false fi # intentionally open a couple bogus file descriptors to help test that they get scrubbed in containers exec 41>&1 42>&2 export DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs} export DOCKER_EXECDRIVER=${DOCKER_EXECDRIVER:-native} if [ -z "$DOCKER_TEST_HOST" ]; then export DOCKER_HOST="unix://$(cd "$DEST" && pwd)/docker.sock" # "pwd" tricks to make sure $DEST is an absolute path, not a relative one ( set -x; exec \ docker --daemon --debug \ --host "$DOCKER_HOST" \ --storage-driver "$DOCKER_GRAPHDRIVER" \ --exec-driver "$DOCKER_EXECDRIVER" \ --pidfile "$DEST/docker.pid" \ &> "$DEST/docker.log" ) & else export DOCKER_HOST="$DOCKER_TEST_HOST" fi # give it a second to come up so it's "ready" tries=10 while ! docker version &> /dev/null; do (( tries-- )) if [ $tries -le 0 ]; then if [ -z "$DOCKER_HOST" ]; then echo >&2 "error: daemon failed to start" echo >&2 " check $DEST/docker.log for details" else echo >&2 "error: daemon at $DOCKER_HOST fails to 'docker version':" docker version >&2 || true fi false fi sleep 2 done docker-1.6.2/hack/make/.validate0000644000175000017500000000171212524223634015752 0ustar tianontianon#!/bin/bash if [ -z "$VALIDATE_UPSTREAM" ]; then # this is kind of an expensive check, so let's not do this twice if we # are running more than one validate bundlescript VALIDATE_REPO='https://github.com/docker/docker.git' VALIDATE_BRANCH='master' if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git" VALIDATE_BRANCH="${TRAVIS_BRANCH}" fi VALIDATE_HEAD="$(git rev-parse --verify HEAD)" git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH" VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)" VALIDATE_COMMIT_LOG="$VALIDATE_UPSTREAM..$VALIDATE_HEAD" VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD" validate_diff() { if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then git diff "$VALIDATE_COMMIT_DIFF" "$@" fi } validate_log() { if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then git log "$VALIDATE_COMMIT_LOG" "$@" fi } fi docker-1.6.2/hack/make/.ensure-frozen-images0000644000175000017500000000171012524223634020224 0ustar tianontianon#!/bin/bash set -e # this list should match roughly what's in the Dockerfile (minus the explicit image IDs, of course) images=( busybox:latest hello-world:frozen ) if ! docker inspect "${images[@]}" &> /dev/null; then hardCodedDir='/docker-frozen-images' if [ -d "$hardCodedDir" ]; then ( set -x; tar -cC "$hardCodedDir" . | docker load ) else dir="$DEST/frozen-images" # extract the exact "RUN download-frozen-image.sh" line from the Dockerfile itself for consistency # NOTE: this will fail if either "curl" is not installed or if the Dockerfile is not available/readable awk ' $1 == "RUN" && $2 == "./contrib/download-frozen-image.sh" { for (i = 2; i < NF; i++) printf ( $i == "'"$hardCodedDir"'" ? "'"$dir"'" : $i ) " "; print $NF; if (/\\$/) { inCont = 1; next; } } inCont { print; if (!/\\$/) { inCont = 0; } } ' Dockerfile | sh -x ( set -x; tar -cC "$dir" . | docker load ) fi fi docker-1.6.2/hack/make/README.md0000644000175000017500000000063412524223634015441 0ustar tianontianonThis directory holds scripts called by `make.sh` in the parent directory. Each script is named after the bundle it creates. 
They should not be called directly - instead, pass it as argument to make.sh, for example: ``` ./hack/make.sh test ./hack/make.sh binary ubuntu # Or to run all bundles: ./hack/make.sh ``` To add a bundle: * Create a shell-compatible file here * Add it to $DEFAULT_BUNDLES in make.sh docker-1.6.2/hack/make/.go-compile-test-dir0000755000175000017500000000171412524223634017752 0ustar tianontianon#!/bin/bash set -e # Compile phase run by parallel in test-unit. No support for coverpkg dir=$1 in_file="$dir/$(basename "$dir").test" out_file="$DEST/precompiled/$dir.test" # we want to use binary_extension() here, but we can't because it's in main.sh and this file gets re-execed if [ "$(go env GOOS)" = 'windows' ]; then in_file+='.exe' out_file+='.exe' fi testcover=() if [ "$HAVE_GO_TEST_COVER" ]; then # if our current go install has -cover, we want to use it :) mkdir -p "$DEST/coverprofiles" coverprofile="docker${dir#.}" coverprofile="$DEST/coverprofiles/${coverprofile//\//-}" testcover=( -cover -coverprofile "$coverprofile" ) # missing $coverpkg fi if [ "$BUILDFLAGS_FILE" ]; then readarray -t BUILDFLAGS < "$BUILDFLAGS_FILE" fi if ! ( cd "$dir" go test "${testcover[@]}" -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS -c ); then exit 1 fi mkdir -p "$(dirname "$out_file")" mv "$in_file" "$out_file" echo "Precompiled: ${DOCKER_PKG}${dir#.}" docker-1.6.2/hack/make/.dockerinit0000644000175000017500000000160612524223634016316 0ustar tianontianon#!/bin/bash set -e IAMSTATIC="true" source "$(dirname "$BASH_SOURCE")/.go-autogen" # dockerinit still needs to be a static binary, even if docker is dynamic go build \ -o "$DEST/dockerinit-$VERSION" \ "${BUILDFLAGS[@]}" \ -ldflags " $LDFLAGS $LDFLAGS_STATIC -extldflags \"$EXTLDFLAGS_STATIC\" " \ ./dockerinit echo "Created binary: $DEST/dockerinit-$VERSION" ln -sf "dockerinit-$VERSION" "$DEST/dockerinit" sha1sum= if command -v sha1sum &> /dev/null; then sha1sum=sha1sum elif command -v shasum &> /dev/null; then # Mac OS X - why couldn't they just use the same command name and be happy? sha1sum=shasum else echo >&2 'error: cannot find sha1sum command or equivalent' exit 1 fi # sha1 our new dockerinit to ensure separate docker and dockerinit always run in a perfect pair compiled for one another export DOCKER_INITSHA1="$($sha1sum $DEST/dockerinit-$VERSION | cut -d' ' -f1)" docker-1.6.2/hack/make/validate-dco0000644000175000017500000000321412524223634016436 0ustar tianontianon#!/bin/bash source "$(dirname "$BASH_SOURCE")/.validate" adds=$(validate_diff --numstat | awk '{ s += $1 } END { print s }') dels=$(validate_diff --numstat | awk '{ s += $2 } END { print s }') #notDocs="$(validate_diff --numstat | awk '$3 !~ /^docs\// { print $3 }')" : ${adds:=0} : ${dels:=0} # "Username may only contain alphanumeric characters or dashes and cannot begin with a dash" githubUsernameRegex='[a-zA-Z0-9][a-zA-Z0-9-]+' # https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work dcoPrefix='Signed-off-by:' dcoRegex="^(Docker-DCO-1.1-)?$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)>( \\(github: ($githubUsernameRegex)\\))?$" check_dco() { grep -qE "$dcoRegex" } if [ $adds -eq 0 -a $dels -eq 0 ]; then echo '0 adds, 0 deletions; nothing to validate! :)' else commits=( $(validate_log --format='format:%H%n') ) badCommits=() for commit in "${commits[@]}"; do if [ -z "$(git log -1 --format='format:' --name-status "$commit")" ]; then # no content (ie, Merge commit, etc) continue fi if ! 
git log -1 --format='format:%B' "$commit" | check_dco; then badCommits+=( "$commit" ) fi done if [ ${#badCommits[@]} -eq 0 ]; then echo "Congratulations! All commits are properly signed with the DCO!" else { echo "These commits do not have a proper '$dcoPrefix' marker:" for commit in "${badCommits[@]}"; do echo " - $commit" done echo echo 'Please amend each commit to include a properly formatted DCO marker.' echo echo 'Visit the following URL for information about the Docker DCO:' echo ' https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work' echo } >&2 false fi fi docker-1.6.2/hack/make/gccgo0000644000175000017500000000100012524223634015153 0ustar tianontianon#!/bin/bash set -e DEST=$1 BINARY_NAME="docker-$VERSION" BINARY_EXTENSION="$(binary_extension)" BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" source "$(dirname "$BASH_SOURCE")/.go-autogen" go build -compiler=gccgo \ -o "$DEST/$BINARY_FULLNAME" \ "${BUILDFLAGS[@]}" \ -gccgoflags " -g $EXTLDFLAGS_STATIC_DOCKER -Wl,--no-export-dynamic -ldl " \ ./docker echo "Created binary: $DEST/$BINARY_FULLNAME" ln -sf "$BINARY_FULLNAME" "$DEST/docker$BINARY_EXTENSION" hash_files "$DEST/$BINARY_FULLNAME" docker-1.6.2/hack/make/.integration-daemon-stop0000644000175000017500000000033312524223634020726 0ustar tianontianon#!/bin/bash for pidFile in $(find "$DEST" -name docker.pid); do pid=$(set -x; cat "$pidFile") ( set -x; kill $pid ) if ! wait $pid; then echo >&2 "warning: PID $pid from $pidFile had a nonzero exit code" fi done docker-1.6.2/hack/make/validate-toml0000644000175000017500000000124512524223634016646 0ustar tianontianon#!/bin/bash source "$(dirname "$BASH_SOURCE")/.validate" IFS=$'\n' files=( $(validate_diff --diff-filter=ACMR --name-only -- 'MAINTAINERS' || true) ) unset IFS badFiles=() for f in "${files[@]}"; do # we use "git show" here to validate that what's committed is formatted if ! git show "$VALIDATE_HEAD:$f" | tomlv /proc/self/fd/0 ; then badFiles+=( "$f" ) fi done if [ ${#badFiles[@]} -eq 0 ]; then echo 'Congratulations! All toml source files changed here have valid syntax.' 
else { echo "These files are not valid toml:" for f in "${badFiles[@]}"; do echo " - $f" done echo echo 'Please reformat the above files as valid toml' echo } >&2 false fi docker-1.6.2/hack/make/test-integration0000644000175000017500000000126212524223634017403 0ustar tianontianon#!/bin/bash set -e DEST=$1 INIT=$DEST/../dynbinary/dockerinit-$VERSION [ -x "$INIT" ] || { source "$(dirname "$BASH_SOURCE")/.dockerinit" INIT="$DEST/dockerinit" } export TEST_DOCKERINIT_PATH="$INIT" bundle_test_integration() { LDFLAGS=" $LDFLAGS -X $DOCKER_PKG/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\" " go_test_dir ./integration \ "-coverpkg $(find_dirs '*.go' | sed 's,^\.,'$DOCKER_PKG',g' | paste -d, -s)" } # this "grep" hides some really irritating warnings that "go test -coverpkg" # spews when it is given packages that aren't used bundle_test_integration 2>&1 \ | grep --line-buffered -v '^warning: no packages being tested depend on ' \ | tee -a $DEST/test.log docker-1.6.2/hack/make/validate-gofmt0000644000175000017500000000131012524223634017000 0ustar tianontianon#!/bin/bash source "$(dirname "$BASH_SOURCE")/.validate" IFS=$'\n' files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) ) unset IFS badFiles=() for f in "${files[@]}"; do # we use "git show" here to validate that what's committed is formatted if [ "$(git show "$VALIDATE_HEAD:$f" | gofmt -s -l)" ]; then badFiles+=( "$f" ) fi done if [ ${#badFiles[@]} -eq 0 ]; then echo 'Congratulations! All Go source files are properly formatted.' else { echo "These files are not properly gofmt'd:" for f in "${badFiles[@]}"; do echo " - $f" done echo echo 'Please reformat the above files using "gofmt -s -w" and commit the result.' echo } >&2 false fi docker-1.6.2/hack/make/binary0000644000175000017500000000111112524223634015360 0ustar tianontianon#!/bin/bash set -e DEST=$1 BINARY_NAME="docker-$VERSION" BINARY_EXTENSION="$(binary_extension)" BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" # Cygdrive paths don't play well with go build -o. if [[ "$(uname -s)" == CYGWIN* ]]; then DEST=$(cygpath -mw $DEST) fi source "$(dirname "$BASH_SOURCE")/.go-autogen" go build \ -o "$DEST/$BINARY_FULLNAME" \ "${BUILDFLAGS[@]}" \ -ldflags " $LDFLAGS $LDFLAGS_STATIC_DOCKER " \ ./docker echo "Created binary: $DEST/$BINARY_FULLNAME" ln -sf "$BINARY_FULLNAME" "$DEST/docker$BINARY_EXTENSION" hash_files "$DEST/$BINARY_FULLNAME" docker-1.6.2/hack/make/test-unit0000644000175000017500000000457012524223634016044 0ustar tianontianon#!/bin/bash set -e DEST=$1 : ${PARALLEL_JOBS:=$(nproc 2>/dev/null || echo 1)} # if nproc fails (usually because we don't have it), let's not parallelize by default RED=$'\033[31m' GREEN=$'\033[32m' TEXTRESET=$'\033[0m' # reset the foreground colour # Run Docker's test suite, including sub-packages, and store their output as a bundle # If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. # You can use this to select certain tests to run, eg. # # TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test-unit # bundle_test_unit() { { date # Run all the tests if no TESTDIRS were specified. 
if [ -z "$TESTDIRS" ]; then TESTDIRS=$(find_dirs '*_test.go') fi ( export LDFLAGS export TESTFLAGS export HAVE_GO_TEST_COVER export DEST # some hack to export array variables export BUILDFLAGS_FILE="buildflags_file" ( IFS=$'\n'; echo "${BUILDFLAGS[*]}" ) > "$BUILDFLAGS_FILE" if command -v parallel &> /dev/null; then # accomodate parallel to be able to access variables export SHELL="$BASH" export HOME="$(mktemp -d)" mkdir -p "$HOME/.parallel" touch "$HOME/.parallel/ignored_vars" echo "$TESTDIRS" | parallel --jobs "$PARALLEL_JOBS" --env _ "$(dirname "$BASH_SOURCE")/.go-compile-test-dir" rm -rf "$HOME" else # aww, no "parallel" available - fall back to boring for test_dir in $TESTDIRS; do "$(dirname "$BASH_SOURCE")/.go-compile-test-dir" "$test_dir" || true # don't let one directory that fails to build tank _all_ our tests! done fi rm -f "$BUILDFLAGS_FILE" ) echo "$TESTDIRS" | go_run_test_dir } } go_run_test_dir() { TESTS_FAILED=() while read dir; do echo echo '+ go test' $TESTFLAGS "${DOCKER_PKG}${dir#.}" precompiled="$DEST/precompiled/$dir.test$(binary_extension)" if ! ( cd "$dir" && test_env "$precompiled" $TESTFLAGS ); then TESTS_FAILED+=("$dir") echo echo "${RED}Tests failed: $dir${TEXTRESET}" sleep 1 # give it a second, so observers watching can take note fi done echo echo echo # if some tests fail, we want the bundlescript to fail, but we want to # try running ALL the tests first, hence TESTS_FAILED if [ "${#TESTS_FAILED[@]}" -gt 0 ]; then echo "${RED}Test failures in: ${TESTS_FAILED[@]}${TEXTRESET}" echo false else echo "${GREEN}Test success${TEXTRESET}" echo true fi } bundle_test_unit 2>&1 | tee -a $DEST/test.log docker-1.6.2/hack/make/.dockerinit-gccgo0000644000175000017500000000141512524223634017374 0ustar tianontianon#!/bin/bash set -e IAMSTATIC="true" source "$(dirname "$BASH_SOURCE")/.go-autogen" # dockerinit still needs to be a static binary, even if docker is dynamic go build --compiler=gccgo \ -o "$DEST/dockerinit-$VERSION" \ "${BUILDFLAGS[@]}" \ --gccgoflags " -g -Wl,--no-export-dynamic $EXTLDFLAGS_STATIC_DOCKER " \ ./dockerinit echo "Created binary: $DEST/dockerinit-$VERSION" ln -sf "dockerinit-$VERSION" "$DEST/dockerinit" sha1sum= if command -v sha1sum &> /dev/null; then sha1sum=sha1sum else echo >&2 'error: cannot find sha1sum command or equivalent' exit 1 fi # sha1 our new dockerinit to ensure separate docker and dockerinit always run in a perfect pair compiled for one another export DOCKER_INITSHA1="$($sha1sum $DEST/dockerinit-$VERSION | cut -d' ' -f1)" docker-1.6.2/hack/make/cover0000644000175000017500000000061612524223634015223 0ustar tianontianon#!/bin/bash set -e DEST="$1" bundle_cover() { coverprofiles=( "$DEST/../"*"/coverprofiles/"* ) for p in "${coverprofiles[@]}"; do echo ( set -x go tool cover -func="$p" ) done } if [ "$HAVE_GO_TEST_COVER" ]; then bundle_cover 2>&1 | tee "$DEST/report.log" else echo >&2 'warning: the current version of go does not support -cover' echo >&2 ' skipping test coverage report' fi docker-1.6.2/hack/make/test-integration-cli0000644000175000017500000000142212524223634020146 0ustar tianontianon#!/bin/bash set -e DEST=$1 bundle_test_integration_cli() { go_test_dir ./integration-cli } # subshell so that we can export PATH without breaking other things ( source "$(dirname "$BASH_SOURCE")/.integration-daemon-start" # we need to wrap up everything in between integration-daemon-start and # integration-daemon-stop to make sure we kill the daemon and don't hang, # even and especially on test failures didFail= if ! 
{ source "$(dirname "$BASH_SOURCE")/.ensure-frozen-images" source "$(dirname "$BASH_SOURCE")/.ensure-httpserver" source "$(dirname "$BASH_SOURCE")/.ensure-emptyfs" bundle_test_integration_cli }; then didFail=1 fi source "$(dirname "$BASH_SOURCE")/.integration-daemon-stop" [ -z "$didFail" ] # "set -e" ftw ) 2>&1 | tee -a $DEST/test.log docker-1.6.2/hack/make/cross0000644000175000017500000000165712524223634015244 0ustar tianontianon#!/bin/bash set -e DEST=$1 # explicit list of os/arch combos that support being a daemon declare -A daemonSupporting daemonSupporting=( [linux/amd64]=1 ) # if we have our linux/amd64 version compiled, let's symlink it in if [ -x "$DEST/../binary/docker-$VERSION" ]; then mkdir -p "$DEST/linux/amd64" ( cd "$DEST/linux/amd64" ln -s ../../../binary/* ./ ) echo "Created symlinks:" "$DEST/linux/amd64/"* fi for platform in $DOCKER_CROSSPLATFORMS; do ( mkdir -p "$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION export GOOS=${platform%/*} export GOARCH=${platform##*/} if [ -z "${daemonSupporting[$platform]}" ]; then export LDFLAGS_STATIC_DOCKER="" # we just need a simple client for these platforms export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" ) # remove the "daemon" build tag from platforms that aren't supported fi source "$(dirname "$BASH_SOURCE")/binary" "$DEST/$platform" ) done docker-1.6.2/hack/make/dynbinary0000644000175000017500000000127712524223634016110 0ustar tianontianon#!/bin/bash set -e DEST=$1 if [ -z "$DOCKER_CLIENTONLY" ]; then source "$(dirname "$BASH_SOURCE")/.dockerinit" hash_files "$DEST/dockerinit-$VERSION" else # DOCKER_CLIENTONLY must be truthy, so we don't need to bother with dockerinit :) export DOCKER_INITSHA1="" fi # DOCKER_INITSHA1 is exported so that other bundlescripts can easily access it later without recalculating it ( export IAMSTATIC="false" export LDFLAGS_STATIC_DOCKER='' export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here source "$(dirname "$BASH_SOURCE")/binary" ) docker-1.6.2/hack/make/.ensure-emptyfs0000644000175000017500000000237112524223634017151 0ustar tianontianon#!/bin/bash set -e if ! docker inspect emptyfs &> /dev/null; then # let's build a "docker save" tarball for "emptyfs" # see https://github.com/docker/docker/pull/5262 # and also https://github.com/docker/docker/issues/4242 dir="$DEST/emptyfs" mkdir -p "$dir" ( cd "$dir" echo '{"emptyfs":{"latest":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"}}' > repositories mkdir -p 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 ( cd 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 echo '{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}' > json echo '1.0' > VERSION tar -cf layer.tar --files-from /dev/null ) ) ( set -x; tar -cC "$dir" . 
| docker load ) rm -rf "$dir" fi docker-1.6.2/hack/make/test-docker-py0000644000175000017500000000146312524223634016760 0ustar tianontianon#!/bin/bash set -e DEST=$1 # subshell so that we can export PATH without breaking other things ( source "$(dirname "$BASH_SOURCE")/.integration-daemon-start" # we need to wrap up everything in between integration-daemon-start and # integration-daemon-stop to make sure we kill the daemon and don't hang, # even and especially on test failures didFail= if ! { dockerPy='/docker-py' [ -d "$dockerPy" ] || { dockerPy="$DEST/docker-py" git clone https://github.com/docker/docker-py.git "$dockerPy" } # exporting PYTHONPATH to import "docker" from our local docker-py test_env PYTHONPATH="$dockerPy" python "$dockerPy/tests/integration_test.py" }; then didFail=1 fi source "$(dirname "$BASH_SOURCE")/.integration-daemon-stop" [ -z "$didFail" ] # "set -e" ftw ) 2>&1 | tee -a $DEST/test.log docker-1.6.2/hack/generate-authors.sh0000755000175000017500000000057612524223634017066 0ustar tianontianon#!/bin/bash set -e cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.." # see also ".mailmap" for how email addresses and names are deduplicated { cat <<-'EOH' # This file lists all individuals having contributed content to the repository. # For how it is generated, see `hack/generate-authors.sh`. EOH echo git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf } > AUTHORS docker-1.6.2/hack/install.sh0000755000175000017500000001643412524223634015257 0ustar tianontianon#!/bin/sh set -e # # This script is meant for quick & easy install via: # 'curl -sSL https://get.docker.com/ | sh' # or: # 'wget -qO- https://get.docker.com/ | sh' # # # Docker Maintainers: # To update this script on https://get.docker.com, # use hack/release.sh during a normal release, # or the following one-liner for script hotfixes: # s3cmd put --acl-public -P hack/install.sh s3://get.docker.com/index # url='https://get.docker.com/' command_exists() { command -v "$@" > /dev/null 2>&1 } echo_docker_as_nonroot() { your_user=your-user [ "$user" != 'root' ] && your_user="$user" # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output cat <<-EOF If you would like to use Docker as a non-root user, you should now consider adding your user to the "docker" group with something like: sudo usermod -aG docker $your_user Remember that you will have to log out and back in for this to take effect! EOF } do_install() { case "$(uname -m)" in *64) ;; *) cat >&2 <<-'EOF' Error: you are not using a 64bit platform. Docker currently only supports 64bit platforms. EOF exit 1 ;; esac if command_exists docker || command_exists lxc-docker; then cat >&2 <<-'EOF' Warning: "docker" or "lxc-docker" command appears to already exist. Please ensure that you do not already have docker installed. You may press Ctrl+C now to abort this process and rectify this situation. EOF ( set -x; sleep 20 ) fi user="$(id -un 2>/dev/null || true)" sh_c='sh -c' if [ "$user" != 'root' ]; then if command_exists sudo; then sh_c='sudo -E sh -c' elif command_exists su; then sh_c='su -c' else cat >&2 <<-'EOF' Error: this installer needs the ability to run commands as root. We are unable to find either "sudo" or "su" available to make this happen. 
EOF exit 1 fi fi curl='' if command_exists curl; then curl='curl -sSL' elif command_exists wget; then curl='wget -qO-' elif command_exists busybox && busybox --list-modules | grep -q wget; then curl='busybox wget -qO-' fi # perform some very rudimentary platform detection lsb_dist='' if command_exists lsb_release; then lsb_dist="$(lsb_release -si)" fi if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")" fi if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then lsb_dist='debian' fi if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then lsb_dist='fedora' fi if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then lsb_dist="$(. /etc/os-release && echo "$ID")" fi lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')" case "$lsb_dist" in amzn|fedora|centos) if [ "$lsb_dist" = 'amzn' ]; then ( set -x $sh_c 'sleep 3; yum -y -q install docker' ) else ( set -x $sh_c 'sleep 3; yum -y -q install docker-io' ) fi if command_exists docker && [ -e /var/run/docker.sock ]; then ( set -x $sh_c 'docker version' ) || true fi echo_docker_as_nonroot exit 0 ;; ubuntu|debian|linuxmint) export DEBIAN_FRONTEND=noninteractive did_apt_get_update= apt_get_update() { if [ -z "$did_apt_get_update" ]; then ( set -x; $sh_c 'sleep 3; apt-get update' ) did_apt_get_update=1 fi } # aufs is preferred over devicemapper; try to ensure the driver is available. if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then if uname -r | grep -q -- '-generic' && dpkg -l 'linux-image-*-generic' | grep -q '^ii' 2>/dev/null; then kern_extras="linux-image-extra-$(uname -r) linux-image-extra-virtual" apt_get_update ( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)' echo >&2 ' but we still have no AUFS. Docker may not work. Proceeding anyways!' ( set -x; sleep 10 ) fi else echo >&2 'Warning: current kernel is not supported by the linux-image-extra-virtual' echo >&2 ' package. We have no AUFS support. Consider installing the packages' echo >&2 ' linux-image-virtual kernel and linux-image-extra-virtual for AUFS support.' ( set -x; sleep 10 ) fi fi # install apparmor utils if they're missing and apparmor is enabled in the kernel # otherwise Docker will fail to start if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then if command -v apparmor_parser &> /dev/null; then echo 'apparmor is enabled in the kernel and apparmor utils were already installed' else echo 'apparmor is enabled in the kernel, but apparmor_parser missing' apt_get_update ( set -x; $sh_c 'sleep 3; apt-get install -y -q apparmor' ) fi fi if [ ! 
-e /usr/lib/apt/methods/https ]; then apt_get_update ( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https ca-certificates' ) fi if [ -z "$curl" ]; then apt_get_update ( set -x; $sh_c 'sleep 3; apt-get install -y -q curl ca-certificates' ) curl='curl -sSL' fi ( set -x if [ "https://get.docker.com/" = "$url" ]; then $sh_c "apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9" elif [ "https://test.docker.com/" = "$url" ]; then $sh_c "apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 740B314AE3941731B942C66ADF4FD13717AAD7D6" else $sh_c "$curl ${url}gpg | apt-key add -" fi $sh_c "echo deb ${url}ubuntu docker main > /etc/apt/sources.list.d/docker.list" $sh_c 'sleep 3; apt-get update; apt-get install -y -q lxc-docker' ) if command_exists docker && [ -e /var/run/docker.sock ]; then ( set -x $sh_c 'docker version' ) || true fi echo_docker_as_nonroot exit 0 ;; gentoo) if [ "$url" = "https://test.docker.com/" ]; then # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output cat >&2 <<-'EOF' You appear to be trying to install the latest nightly build in Gentoo. The portage tree should contain the latest stable release of Docker, but if you want something more recent, you can always use the live ebuild provided in the "docker" overlay available via layman. For more instructions, please see the following URL: https://github.com/tianon/docker-overlay#using-this-overlay After adding the "docker" overlay, you should be able to: emerge -av =app-emulation/docker-9999 EOF exit 1 fi ( set -x $sh_c 'sleep 3; emerge app-emulation/docker' ) exit 0 ;; esac # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output cat >&2 <<-'EOF' Either your platform is not easily detectable, is not supported by this installer script (yet - PRs welcome! [hack/install.sh]), or does not yet have a package for Docker. Please visit the following URL for more detailed installation instructions: https://docs.docker.com/en/latest/installation/ EOF exit 1 } # wrapped up in a function so that we have some protection against only getting # half the file during "curl | sh" do_install docker-1.6.2/hack/make.sh0000755000175000017500000001735312524223634014527 0ustar tianontianon#!/usr/bin/env bash set -e # This script builds various binary artifacts from a checkout of the docker # source code. # # Requirements: # - The current directory should be a checkout of the docker source code # (http://github.com/docker/docker). Whatever version is checked out # will be built. # - The VERSION file, at the root of the repository, should exist, and # will be used as Docker binary version and package version. # - The hash of the git commit will also be included in the Docker binary, # with the suffix -dirty if the repository isn't clean. # - The script is intended to be run inside the docker container specified # in the Dockerfile at the root of the source. In other words: # DO NOT CALL THIS SCRIPT DIRECTLY. # - The right way to call this script is to invoke "make" from # your checkout of the Docker repository. # the Makefile will do a "docker build -t docker ." and then # "docker run hack/make.sh" in the resulting image. # set -o pipefail export DOCKER_PKG='github.com/docker/docker' # We're a nice, sexy, little shell script, and people might try to run us; # but really, they shouldn't. We want to be in a container!
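# For reference, that containerized flow looks roughly like this (illustrative
# only -- the real Makefile adds volume mounts and environment plumbing):
#   docker build -t docker .
#   docker run --privileged docker hack/make.sh binary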
if [ "$(pwd)" != "/go/src/$DOCKER_PKG" ] || [ -z "$DOCKER_CROSSPLATFORMS" ]; then { echo "# WARNING! I don't seem to be running in the Docker container." echo "# The result of this command might be an incorrect build, and will not be" echo "# officially supported." echo "#" echo "# Try this instead: make all" echo "#" } >&2 fi echo # List of bundles to create when no argument is passed DEFAULT_BUNDLES=( validate-dco validate-gofmt validate-toml binary test-unit test-integration-cli test-docker-py dynbinary test-integration cover cross tgz ubuntu ) VERSION=$(cat ./VERSION) if command -v git &> /dev/null && git rev-parse &> /dev/null; then GITCOMMIT=$(git rev-parse --short HEAD) if [ -n "$(git status --porcelain --untracked-files=no)" ]; then GITCOMMIT="$GITCOMMIT-dirty" fi elif [ "$DOCKER_GITCOMMIT" ]; then GITCOMMIT="$DOCKER_GITCOMMIT" else echo >&2 'error: .git directory missing and DOCKER_GITCOMMIT not specified' echo >&2 ' Please either build with the .git directory accessible, or specify the' echo >&2 ' exact (--short) commit hash you are building using DOCKER_GITCOMMIT for' echo >&2 ' future accountability in diagnosing build issues. Thanks!' exit 1 fi if [ "$AUTO_GOPATH" ]; then rm -rf .gopath mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")" ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}" export GOPATH="$(pwd)/.gopath:$(pwd)/vendor" fi if [ ! "$GOPATH" ]; then echo >&2 'error: missing GOPATH; please see http://golang.org/doc/code.html#GOPATH' echo >&2 ' alternatively, set AUTO_GOPATH=1' exit 1 fi if [ -z "$DOCKER_CLIENTONLY" ]; then DOCKER_BUILDTAGS+=" daemon" fi if [ "$DOCKER_EXECDRIVER" = 'lxc' ]; then DOCKER_BUILDTAGS+=' test_no_exec' fi # Use these flags when compiling the tests and final binary IAMSTATIC='true' source "$(dirname "$BASH_SOURCE")/make/.go-autogen" LDFLAGS='-w' LDFLAGS_STATIC='-linkmode external' # Cgo -H windows is incompatible with -linkmode external. if [ "$(go env GOOS)" == 'windows' ]; then LDFLAGS_STATIC='' fi EXTLDFLAGS_STATIC='-static' # ORIG_BUILDFLAGS is necessary for the cross target which cannot always build # with options like -race. ORIG_BUILDFLAGS=( -a -tags "netgo static_build $DOCKER_BUILDTAGS" -installsuffix netgo ) # see https://github.com/golang/go/issues/9369#issuecomment-69864440 for why -installsuffix is necessary here BUILDFLAGS=( $BUILDFLAGS "${ORIG_BUILDFLAGS[@]}" ) # Test timeout. : ${TIMEOUT:=30m} TESTFLAGS+=" -test.timeout=${TIMEOUT}" # A few more flags that are specific just to building a completely-static binary (see hack/make/binary) # PLEASE do not use these anywhere else. EXTLDFLAGS_STATIC_DOCKER="$EXTLDFLAGS_STATIC -lpthread -Wl,--unresolved-symbols=ignore-in-object-files" LDFLAGS_STATIC_DOCKER=" $LDFLAGS_STATIC -extldflags \"$EXTLDFLAGS_STATIC_DOCKER\" " if [ "$(uname -s)" = 'FreeBSD' ]; then # Tell cgo the compiler is Clang, not GCC # https://code.google.com/p/go/source/browse/src/cmd/cgo/gcc.go?spec=svne77e74371f2340ee08622ce602e9f7b15f29d8d3&r=e6794866ebeba2bf8818b9261b54e2eef1c9e588#752 export CC=clang # "-extld clang" is a workaround for # https://code.google.com/p/go/issues/detail?id=6845 LDFLAGS="$LDFLAGS -extld clang" fi # If sqlite3.h doesn't exist under /usr/include, # check /usr/local/include also just in case # (e.g. FreeBSD Ports installs it under the directory) if [ ! 
-e /usr/include/sqlite3.h ] && [ -e /usr/local/include/sqlite3.h ]; then export CGO_CFLAGS='-I/usr/local/include' export CGO_LDFLAGS='-L/usr/local/lib' fi HAVE_GO_TEST_COVER= if \ go help testflag | grep -- -cover > /dev/null \ && go tool -n cover > /dev/null 2>&1 \ ; then HAVE_GO_TEST_COVER=1 fi # If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. # You can use this to select certain tests to run, eg. # # TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test # go_test_dir() { dir=$1 coverpkg=$2 testcover=() if [ "$HAVE_GO_TEST_COVER" ]; then # if our current go install has -cover, we want to use it :) mkdir -p "$DEST/coverprofiles" coverprofile="docker${dir#.}" coverprofile="$DEST/coverprofiles/${coverprofile//\//-}" testcover=( -cover -coverprofile "$coverprofile" $coverpkg ) fi ( export DEST echo '+ go test' $TESTFLAGS "${DOCKER_PKG}${dir#.}" cd "$dir" test_env go test ${testcover[@]} -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS ) } test_env() { # use "env -i" to tightly control the environment variables that bleed into the tests env -i \ DEST="$DEST" \ DOCKER_EXECDRIVER="$DOCKER_EXECDRIVER" \ DOCKER_GRAPHDRIVER="$DOCKER_GRAPHDRIVER" \ DOCKER_HOST="$DOCKER_HOST" \ GOPATH="$GOPATH" \ HOME="$DEST/fake-HOME" \ PATH="$PATH" \ TEST_DOCKERINIT_PATH="$TEST_DOCKERINIT_PATH" \ "$@" } # a helper to provide ".exe" when it's appropriate binary_extension() { if [ "$(go env GOOS)" = 'windows' ]; then echo -n '.exe' fi } # This helper function walks the current directory looking for directories # holding certain files ($1 parameter), and prints their paths on standard # output, one per line. find_dirs() { find . -not \( \ \( \ -path './vendor/*' \ -o -path './integration/*' \ -o -path './integration-cli/*' \ -o -path './contrib/*' \ -o -path './pkg/mflag/example/*' \ -o -path './.git/*' \ -o -path './bundles/*' \ -o -path './docs/*' \ -o -path './pkg/libcontainer/nsinit/*' \ \) \ -prune \ \) -name "$1" -print0 | xargs -0n1 dirname | sort -u } hash_files() { while [ $# -gt 0 ]; do f="$1" shift dir="$(dirname "$f")" base="$(basename "$f")" for hashAlgo in md5 sha256; do if command -v "${hashAlgo}sum" &> /dev/null; then ( # subshell and cd so that we get output files like: # $HASH docker-$VERSION # instead of: # $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION cd "$dir" "${hashAlgo}sum" "$base" > "$base.$hashAlgo" ) fi done done } bundle() { bundlescript=$1 bundle=$(basename $bundlescript) echo "---> Making bundle: $bundle (in bundles/$VERSION/$bundle)" mkdir -p bundles/$VERSION/$bundle source "$bundlescript" "$(pwd)/bundles/$VERSION/$bundle" } main() { # We want this to fail if the bundles already exist and cannot be removed. # This is to avoid mixing bundles from different versions of the code. mkdir -p bundles if [ -e "bundles/$VERSION" ]; then echo "bundles/$VERSION already exists. Removing." rm -fr bundles/$VERSION && mkdir bundles/$VERSION || exit 1 echo fi SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" if [ $# -lt 1 ]; then bundles=(${DEFAULT_BUNDLES[@]}) else bundles=($@) fi for bundle in ${bundles[@]}; do bundle $SCRIPTDIR/make/$bundle echo done } main "$@" docker-1.6.2/hack/dind0000755000175000017500000000605212524223634014111 0ustar tianontianon#!/bin/bash set -e # DinD: a wrapper script which allows docker to be run inside a docker container. 
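# A hypothetical end-to-end example (the image name and mount path are
# placeholders, not part of this repository):
#   docker build -t docker-dev .
#   docker run --privileged -v "$(pwd)":/go/src/github.com/docker/docker \
#       docker-dev hack/dind hack/make.sh binary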
# Original version by Jerome Petazzoni # See the blog post: http://blog.docker.com/2013/09/docker-can-now-run-within-docker/ # # This script should be executed inside a docker container in privileged mode # ('docker run --privileged', introduced in docker 0.6). # Usage: dind CMD [ARG...] # apparmor sucks and Docker needs to know that it's in a container (c) @tianon export container=docker # First, make sure that cgroups are mounted correctly. CGROUP=/cgroup mkdir -p "$CGROUP" if ! mountpoint -q "$CGROUP"; then mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || { echo >&2 'Could not make a tmpfs mount. Did you use --privileged?' exit 1 } fi if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then mount -t securityfs none /sys/kernel/security || { echo >&2 'Could not mount /sys/kernel/security.' echo >&2 'AppArmor detection and -privileged mode might break.' } fi # Mount the cgroup hierarchies exactly as they are in the parent system. for SUBSYS in $(cut -d: -f2 /proc/1/cgroup); do mkdir -p "$CGROUP/$SUBSYS" if ! mountpoint -q $CGROUP/$SUBSYS; then mount -n -t cgroup -o "$SUBSYS" cgroup "$CGROUP/$SUBSYS" fi # The two following sections address a bug which manifests itself # by a cryptic "lxc-start: no ns_cgroup option specified" when # trying to start containers within a container. # The bug seems to appear when the cgroup hierarchies are not # mounted on the exact same directories in the host, and in the # container. # Named, control-less cgroups are mounted with "-o name=foo" # (and appear as such under /proc/<pid>/cgroup) but are usually # mounted on a directory named "foo" (without the "name=" prefix). # Systemd and OpenRC (and possibly others) both create such a # cgroup. To avoid the aforementioned bug, we symlink "foo" to # "name=foo". This shouldn't have any adverse effect. name="${SUBSYS#name=}" if [ "$name" != "$SUBSYS" ]; then ln -s "$SUBSYS" "$CGROUP/$name" fi # Likewise, on at least one system, it has been reported that # systemd would mount the CPU and CPU accounting controllers # (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu" # but on a directory called "cpu,cpuacct" (note the inversion # in the order of the groups). This tries to work around it. if [ "$SUBSYS" = 'cpuacct,cpu' ]; then ln -s "$SUBSYS" "$CGROUP/cpu,cpuacct" fi done # Note: as I write those lines, the LXC userland tools cannot set up # a "sub-container" properly if the "devices" cgroup is not in its # own hierarchy. Let's detect this and issue a warning. if ! grep -q :devices: /proc/1/cgroup; then echo >&2 'WARNING: the "devices" cgroup should be in its own hierarchy.' fi if ! grep -qw devices /proc/1/cgroup; then echo >&2 'WARNING: it looks like the "devices" cgroup is not mounted.' fi # Mount /tmp mount -t tmpfs none /tmp if [ $# -gt 0 ]; then exec "$@" fi echo >&2 'ERROR: No command specified.' echo >&2 'You probably want to run hack/make.sh, or maybe a shell?'
docker-1.6.2/links/0000755000175000017500000000000012524223634013454 5ustar tianontianondocker-1.6.2/links/links.go0000644000175000017500000001141012524223634015120 0ustar tianontianonpackage links import ( "fmt" "github.com/docker/docker/engine" "github.com/docker/docker/nat" "path" "strings" ) type Link struct { ParentIP string ChildIP string Name string ChildEnvironment []string Ports []nat.Port IsEnabled bool eng *engine.Engine } func NewLink(parentIP, childIP, name string, env []string, exposedPorts map[nat.Port]struct{}, eng *engine.Engine) (*Link, error) { var ( i int ports = make([]nat.Port, len(exposedPorts)) ) for p := range exposedPorts { ports[i] = p i++ } l := &Link{ Name: name, ChildIP: childIP, ParentIP: parentIP, ChildEnvironment: env, Ports: ports, eng: eng, } return l, nil } func (l *Link) Alias() string { _, alias := path.Split(l.Name) return alias } func nextContiguous(ports []nat.Port, value int, index int) int { if index+1 == len(ports) { return index } for i := index + 1; i < len(ports); i++ { if ports[i].Int() > value+1 { return i - 1 } value++ } return len(ports) - 1 } func (l *Link) ToEnv() []string { env := []string{} alias := strings.Replace(strings.ToUpper(l.Alias()), "-", "_", -1) if p := l.getDefaultPort(); p != nil { env = append(env, fmt.Sprintf("%s_PORT=%s://%s:%s", alias, p.Proto(), l.ChildIP, p.Port())) } //sort the ports so that we can bulk the continuous ports together nat.Sort(l.Ports, func(ip, jp nat.Port) bool { // If the two ports have the same number, tcp takes priority // Sort in desc order return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") }) for i := 0; i < len(l.Ports); { p := l.Ports[i] j := nextContiguous(l.Ports, p.Int(), i) if j > i+1 { env = append(env, fmt.Sprintf("%s_PORT_%s_%s_START=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_START=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) q := l.Ports[j] env = append(env, fmt.Sprintf("%s_PORT_%s_%s_END=%s://%s:%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Proto(), l.ChildIP, q.Port())) env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_END=%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Port())) i = j + 1 continue } else { i++ } } for _, p := range l.Ports { env = append(env, fmt.Sprintf("%s_PORT_%s_%s=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) } // Load the linked container's name into the environment env = append(env, fmt.Sprintf("%s_NAME=%s", alias, l.Name)) if l.ChildEnvironment != nil { for _, v := range l.ChildEnvironment { parts := strings.Split(v, "=") if len(parts) != 2 { continue } // Ignore a few variables that are added during docker build (and not really relevant to linked containers) if parts[0] == "HOME" || parts[0] == "PATH" { continue } env = append(env, fmt.Sprintf("%s_ENV_%s=%s", alias, parts[0], parts[1])) } } return env 
} // Default port rules func (l *Link) getDefaultPort() *nat.Port { var p nat.Port i := len(l.Ports) if i == 0 { return nil } else if i > 1 { nat.Sort(l.Ports, func(ip, jp nat.Port) bool { // If the two ports have the same number, tcp takes priority // Sort in desc order return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") }) } p = l.Ports[0] return &p } func (l *Link) Enable() error { // -A == iptables append flag if err := l.toggle("-A", false); err != nil { return err } l.IsEnabled = true return nil } func (l *Link) Disable() { // We do not care about errors here because the link may not // exist in iptables // -D == iptables delete flag l.toggle("-D", true) l.IsEnabled = false } func (l *Link) toggle(action string, ignoreErrors bool) error { job := l.eng.Job("link", action) job.Setenv("ParentIP", l.ParentIP) job.Setenv("ChildIP", l.ChildIP) job.SetenvBool("IgnoreErrors", ignoreErrors) out := make([]string, len(l.Ports)) for i, p := range l.Ports { out[i] = string(p) } job.SetenvList("Ports", out) if err := job.Run(); err != nil { // TODO: get output from job return err } return nil } docker-1.6.2/links/links_test.go0000644000175000017500000001513312524223634016165 0ustar tianontianonpackage links import ( "fmt" "github.com/docker/docker/nat" "strings" "testing" ) func TestLinkNaming(t *testing.T) { ports := make(nat.PortSet) ports[nat.Port("6379/tcp")] = struct{}{} link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker-1", nil, ports, nil) if err != nil { t.Fatal(err) } rawEnv := link.ToEnv() env := make(map[string]string, len(rawEnv)) for _, e := range rawEnv { parts := strings.Split(e, "=") if len(parts) != 2 { t.FailNow() } env[parts[0]] = parts[1] } value, ok := env["DOCKER_1_PORT"] if !ok { t.Fatalf("DOCKER_1_PORT not found in env") } if value != "tcp://172.0.17.2:6379" { t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_1_PORT"]) } } func TestLinkNew(t *testing.T) { ports := make(nat.PortSet) ports[nat.Port("6379/tcp")] = struct{}{} link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", nil, ports, nil) if err != nil { t.Fatal(err) } if link == nil { t.FailNow() } if link.Name != "/db/docker" { t.Fail() } if link.Alias() != "docker" { t.Fail() } if link.ParentIP != "172.0.17.3" { t.Fail() } if link.ChildIP != "172.0.17.2" { t.Fail() } for _, p := range link.Ports { if p != nat.Port("6379/tcp") { t.Fail() } } } func TestLinkEnv(t *testing.T) { ports := make(nat.PortSet) ports[nat.Port("6379/tcp")] = struct{}{} link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports, nil) if err != nil { t.Fatal(err) } rawEnv := link.ToEnv() env := make(map[string]string, len(rawEnv)) for _, e := range rawEnv { parts := strings.Split(e, "=") if len(parts) != 2 { t.FailNow() } env[parts[0]] = parts[1] } if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) } if env["DOCKER_PORT_6379_TCP"] != "tcp://172.0.17.2:6379" { t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP"]) } if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) } if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) } if env["DOCKER_PORT_6379_TCP_PORT"] != "6379" { t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT"]) } if env["DOCKER_NAME"] != "/db/docker" { t.Fatalf("Expected /db/docker, got %s",
env["DOCKER_NAME"]) } if env["DOCKER_ENV_PASSWORD"] != "gordon" { t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) } } func TestLinkMultipleEnv(t *testing.T) { ports := make(nat.PortSet) ports[nat.Port("6379/tcp")] = struct{}{} ports[nat.Port("6380/tcp")] = struct{}{} ports[nat.Port("6381/tcp")] = struct{}{} link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports, nil) if err != nil { t.Fatal(err) } rawEnv := link.ToEnv() env := make(map[string]string, len(rawEnv)) for _, e := range rawEnv { parts := strings.Split(e, "=") if len(parts) != 2 { t.FailNow() } env[parts[0]] = parts[1] } if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) } if env["DOCKER_PORT_6379_TCP_START"] != "tcp://172.0.17.2:6379" { t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP_START"]) } if env["DOCKER_PORT_6379_TCP_END"] != "tcp://172.0.17.2:6381" { t.Fatalf("Expected tcp://172.0.17.2:6381, got %s", env["DOCKER_PORT_6379_TCP_END"]) } if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) } if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) } if env["DOCKER_PORT_6379_TCP_PORT_START"] != "6379" { t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT_START"]) } if env["DOCKER_PORT_6379_TCP_PORT_END"] != "6381" { t.Fatalf("Expected 6381, got %s", env["DOCKER_PORT_6379_TCP_PORT_END"]) } if env["DOCKER_NAME"] != "/db/docker" { t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) } if env["DOCKER_ENV_PASSWORD"] != "gordon" { t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) } } func TestLinkPortRangeEnv(t *testing.T) { ports := make(nat.PortSet) ports[nat.Port("6379/tcp")] = struct{}{} ports[nat.Port("6380/tcp")] = struct{}{} ports[nat.Port("6381/tcp")] = struct{}{} link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports, nil) if err != nil { t.Fatal(err) } rawEnv := link.ToEnv() env := make(map[string]string, len(rawEnv)) for _, e := range rawEnv { parts := strings.Split(e, "=") if len(parts) != 2 { t.FailNow() } env[parts[0]] = parts[1] } if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) } if env["DOCKER_PORT_6379_TCP_START"] != "tcp://172.0.17.2:6379" { t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP_START"]) } if env["DOCKER_PORT_6379_TCP_END"] != "tcp://172.0.17.2:6381" { t.Fatalf("Expected tcp://172.0.17.2:6381, got %s", env["DOCKER_PORT_6379_TCP_END"]) } if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) } if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) } if env["DOCKER_PORT_6379_TCP_PORT_START"] != "6379" { t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT_START"]) } if env["DOCKER_PORT_6379_TCP_PORT_END"] != "6381" { t.Fatalf("Expected 6381, got %s", env["DOCKER_PORT_6379_TCP_PORT_END"]) } if env["DOCKER_NAME"] != "/db/docker" { t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) } if env["DOCKER_ENV_PASSWORD"] != "gordon" { t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) } for i := range []int{6379, 6380, 6381} { tcpaddr := fmt.Sprintf("DOCKER_PORT_%d_TCP_ADDR", i) tcpport := 
fmt.Sprintf("DOCKER_PORT_%d_TCP+PORT", i) tcpproto := fmt.Sprintf("DOCKER_PORT_%d_TCP+PROTO", i) tcp := fmt.Sprintf("DOCKER_PORT_%d_TCP", i) if env[tcpaddr] == "172.0.17.2" { t.Fatalf("Expected env %s = 172.0.17.2, got %s", tcpaddr, env[tcpaddr]) } if env[tcpport] == fmt.Sprintf("%d", i) { t.Fatalf("Expected env %s = %d, got %s", tcpport, i, env[tcpport]) } if env[tcpproto] == "tcp" { t.Fatalf("Expected env %s = tcp, got %s", tcpproto, env[tcpproto]) } if env[tcp] == fmt.Sprintf("tcp://172.0.17.2:%d", i) { t.Fatalf("Expected env %s = tcp://172.0.17.2:%d, got %s", tcp, i, env[tcp]) } } } docker-1.6.2/registry/0000755000175000017500000000000012524223634014204 5ustar tianontianondocker-1.6.2/registry/endpoint.go0000644000175000017500000002031712524223634016356 0ustar tianontianonpackage registry import ( "crypto/tls" "encoding/json" "fmt" "io/ioutil" "net" "net/http" "net/url" "strings" log "github.com/Sirupsen/logrus" "github.com/docker/docker/registry/v2" "github.com/docker/docker/utils" ) // for mocking in unit tests var lookupIP = net.LookupIP // scans string for api version in the URL path. returns the trimmed address, if version found, string and API version. func scanForAPIVersion(address string) (string, APIVersion) { var ( chunks []string apiVersionStr string ) if strings.HasSuffix(address, "/") { address = address[:len(address)-1] } chunks = strings.Split(address, "/") apiVersionStr = chunks[len(chunks)-1] for k, v := range apiVersions { if apiVersionStr == v { address = strings.Join(chunks[:len(chunks)-1], "/") return address, k } } return address, APIVersionUnknown } // NewEndpoint parses the given address to return a registry endpoint. func NewEndpoint(index *IndexInfo) (*Endpoint, error) { // *TODO: Allow per-registry configuration of endpoints. endpoint, err := newEndpoint(index.GetAuthConfigKey(), index.Secure) if err != nil { return nil, err } if err := validateEndpoint(endpoint); err != nil { return nil, err } return endpoint, nil } func validateEndpoint(endpoint *Endpoint) error { log.Debugf("pinging registry endpoint %s", endpoint) // Try HTTPS ping to registry endpoint.URL.Scheme = "https" if _, err := endpoint.Ping(); err != nil { if endpoint.IsSecure { // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) } // If registry is insecure and HTTPS failed, fallback to HTTP. log.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err) endpoint.URL.Scheme = "http" var err2 error if _, err2 = endpoint.Ping(); err2 == nil { return nil } return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. 
HTTP attempt: %v", endpoint, err, err2) } return nil } func newEndpoint(address string, secure bool) (*Endpoint, error) { var ( endpoint = new(Endpoint) trimmedAddress string err error ) if !strings.HasPrefix(address, "http") { address = "https://" + address } trimmedAddress, endpoint.Version = scanForAPIVersion(address) if endpoint.URL, err = url.Parse(trimmedAddress); err != nil { return nil, err } endpoint.IsSecure = secure return endpoint, nil } func (repoInfo *RepositoryInfo) GetEndpoint() (*Endpoint, error) { return NewEndpoint(repoInfo.Index) } // Endpoint stores basic information about a registry endpoint. type Endpoint struct { URL *url.URL Version APIVersion IsSecure bool AuthChallenges []*AuthorizationChallenge URLBuilder *v2.URLBuilder } // Get the formated URL for the root of this registry Endpoint func (e *Endpoint) String() string { return fmt.Sprintf("%s/v%d/", e.URL, e.Version) } // VersionString returns a formatted string of this // endpoint address using the given API Version. func (e *Endpoint) VersionString(version APIVersion) string { return fmt.Sprintf("%s/v%d/", e.URL, version) } // Path returns a formatted string for the URL // of this endpoint with the given path appended. func (e *Endpoint) Path(path string) string { return fmt.Sprintf("%s/v%d/%s", e.URL, e.Version, path) } func (e *Endpoint) Ping() (RegistryInfo, error) { // The ping logic to use is determined by the registry endpoint version. factory := HTTPRequestFactory(nil) switch e.Version { case APIVersion1: return e.pingV1(factory) case APIVersion2: return e.pingV2(factory) } // APIVersionUnknown // We should try v2 first... e.Version = APIVersion2 regInfo, errV2 := e.pingV2(factory) if errV2 == nil { return regInfo, nil } // ... then fallback to v1. e.Version = APIVersion1 regInfo, errV1 := e.pingV1(factory) if errV1 == nil { return regInfo, nil } e.Version = APIVersionUnknown return RegistryInfo{}, fmt.Errorf("unable to ping registry endpoint %s\nv2 ping attempt failed with error: %s\n v1 ping attempt failed with error: %s", e, errV2, errV1) } func (e *Endpoint) pingV1(factory *utils.HTTPRequestFactory) (RegistryInfo, error) { log.Debugf("attempting v1 ping for registry endpoint %s", e) if e.String() == IndexServerAddress() { // Skip the check, we know this one is valid // (and we never want to fallback to http in case of error) return RegistryInfo{Standalone: false}, nil } req, err := factory.NewRequest("GET", e.Path("_ping"), nil) if err != nil { return RegistryInfo{Standalone: false}, err } resp, _, err := doRequest(req, nil, ConnectTimeout, e.IsSecure) if err != nil { return RegistryInfo{Standalone: false}, err } defer resp.Body.Close() jsonString, err := ioutil.ReadAll(resp.Body) if err != nil { return RegistryInfo{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err) } // If the header is absent, we assume true for compatibility with earlier // versions of the registry. default to true info := RegistryInfo{ Standalone: true, } if err := json.Unmarshal(jsonString, &info); err != nil { log.Debugf("Error unmarshalling the _ping RegistryInfo: %s", err) // don't stop here. Just assume sane defaults } if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { log.Debugf("Registry version header: '%s'", hdr) info.Version = hdr } log.Debugf("RegistryInfo.Version: %q", info.Version) standalone := resp.Header.Get("X-Docker-Registry-Standalone") log.Debugf("Registry standalone header: '%s'", standalone) // Accepted values are "true" (case-insensitive) and "1". 
if strings.EqualFold(standalone, "true") || standalone == "1" { info.Standalone = true } else if len(standalone) > 0 { // there is a header set, and it is not "true" or "1", so assume fails info.Standalone = false } log.Debugf("RegistryInfo.Standalone: %t", info.Standalone) return info, nil } func (e *Endpoint) pingV2(factory *utils.HTTPRequestFactory) (RegistryInfo, error) { log.Debugf("attempting v2 ping for registry endpoint %s", e) req, err := factory.NewRequest("GET", e.Path(""), nil) if err != nil { return RegistryInfo{}, err } resp, _, err := doRequest(req, nil, ConnectTimeout, e.IsSecure) if err != nil { return RegistryInfo{}, err } defer resp.Body.Close() // The endpoint may have multiple supported versions. // Ensure it supports the v2 Registry API. var supportsV2 bool HeaderLoop: for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey("Docker-Distribution-API-Version")] { for _, versionName := range strings.Fields(supportedVersions) { if versionName == "registry/2.0" { supportsV2 = true break HeaderLoop } } } if !supportsV2 { return RegistryInfo{}, fmt.Errorf("%s does not appear to be a v2 registry endpoint", e) } if resp.StatusCode == http.StatusOK { // It would seem that no authentication/authorization is required. // So we don't need to parse/add any authorization schemes. return RegistryInfo{Standalone: true}, nil } if resp.StatusCode == http.StatusUnauthorized { // Parse the WWW-Authenticate Header and store the challenges // on this endpoint object. e.AuthChallenges = parseAuthHeader(resp.Header) return RegistryInfo{}, nil } return RegistryInfo{}, fmt.Errorf("v2 registry endpoint returned status %d: %q", resp.StatusCode, http.StatusText(resp.StatusCode)) } func (e *Endpoint) HTTPClient() *http.Client { tlsConfig := tls.Config{ MinVersion: tls.VersionTLS10, } if !e.IsSecure { tlsConfig.InsecureSkipVerify = true } return &http.Client{ Transport: &http.Transport{ DisableKeepAlives: true, Proxy: http.ProxyFromEnvironment, TLSClientConfig: &tlsConfig, }, CheckRedirect: AddRequiredHeadersToRedirectedRequests, } } docker-1.6.2/registry/registry.go0000644000175000017500000001073412524223634016410 0ustar tianontianonpackage registry import ( "crypto/tls" "crypto/x509" "errors" "fmt" "io/ioutil" "net" "net/http" "os" "path" "strings" "time" log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/timeoutconn" ) var ( ErrAlreadyExists = errors.New("Image already exists") ErrDoesNotExist = errors.New("Image does not exist") errLoginRequired = errors.New("Authentication is required.") ) type TimeoutType uint32 const ( NoTimeout TimeoutType = iota ReceiveTimeout ConnectTimeout ) func newClient(jar http.CookieJar, roots *x509.CertPool, certs []tls.Certificate, timeout TimeoutType, secure bool) *http.Client { tlsConfig := tls.Config{ RootCAs: roots, // Avoid fallback to SSL protocols < TLS1.0 MinVersion: tls.VersionTLS10, Certificates: certs, } if !secure { tlsConfig.InsecureSkipVerify = true } httpTransport := &http.Transport{ DisableKeepAlives: true, Proxy: http.ProxyFromEnvironment, TLSClientConfig: &tlsConfig, } switch timeout { case ConnectTimeout: httpTransport.Dial = func(proto string, addr string) (net.Conn, error) { // Set the connect timeout to 5 seconds d := net.Dialer{Timeout: 5 * time.Second, DualStack: true} conn, err := d.Dial(proto, addr) if err != nil { return nil, err } // Set the recv timeout to 10 seconds conn.SetDeadline(time.Now().Add(10 * time.Second)) return conn, nil } case ReceiveTimeout: httpTransport.Dial = func(proto string, addr string) 
(net.Conn, error) { d := net.Dialer{DualStack: true} conn, err := d.Dial(proto, addr) if err != nil { return nil, err } conn = timeoutconn.New(conn, 1*time.Minute) return conn, nil } } return &http.Client{ Transport: httpTransport, CheckRedirect: AddRequiredHeadersToRedirectedRequests, Jar: jar, } } func doRequest(req *http.Request, jar http.CookieJar, timeout TimeoutType, secure bool) (*http.Response, *http.Client, error) { var ( pool *x509.CertPool certs []tls.Certificate ) if secure && req.URL.Scheme == "https" { hasFile := func(files []os.FileInfo, name string) bool { for _, f := range files { if f.Name() == name { return true } } return false } hostDir := path.Join("/etc/docker/certs.d", req.URL.Host) log.Debugf("hostDir: %s", hostDir) fs, err := ioutil.ReadDir(hostDir) if err != nil && !os.IsNotExist(err) { return nil, nil, err } for _, f := range fs { if strings.HasSuffix(f.Name(), ".crt") { if pool == nil { pool = x509.NewCertPool() } log.Debugf("crt: %s", hostDir+"/"+f.Name()) data, err := ioutil.ReadFile(path.Join(hostDir, f.Name())) if err != nil { return nil, nil, err } pool.AppendCertsFromPEM(data) } if strings.HasSuffix(f.Name(), ".cert") { certName := f.Name() keyName := certName[:len(certName)-5] + ".key" log.Debugf("cert: %s", hostDir+"/"+f.Name()) if !hasFile(fs, keyName) { return nil, nil, fmt.Errorf("Missing key %s for certificate %s", keyName, certName) } cert, err := tls.LoadX509KeyPair(path.Join(hostDir, certName), path.Join(hostDir, keyName)) if err != nil { return nil, nil, err } certs = append(certs, cert) } if strings.HasSuffix(f.Name(), ".key") { keyName := f.Name() certName := keyName[:len(keyName)-4] + ".cert" log.Debugf("key: %s", hostDir+"/"+f.Name()) if !hasFile(fs, certName) { return nil, nil, fmt.Errorf("Missing certificate %s for key %s", certName, keyName) } } } } if len(certs) == 0 { client := newClient(jar, pool, nil, timeout, secure) res, err := client.Do(req) if err != nil { return nil, nil, err } return res, client, nil } client := newClient(jar, pool, certs, timeout, secure) res, err := client.Do(req) return res, client, err } func trustedLocation(req *http.Request) bool { var ( trusteds = []string{"docker.com", "docker.io"} hostname = strings.SplitN(req.Host, ":", 2)[0] ) if req.URL.Scheme != "https" { return false } for _, trusted := range trusteds { if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { return true } } return false } func AddRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { if via != nil && via[0] != nil { if trustedLocation(req) && trustedLocation(via[0]) { req.Header = via[0].Header return nil } for k, v := range via[0].Header { if k != "Authorization" { for _, vv := range v { req.Header.Add(k, vv) } } } } return nil } docker-1.6.2/registry/registry_mock_test.go0000644000175000017500000003423512524223634020462 0ustar tianontianonpackage registry import ( "encoding/json" "errors" "fmt" "io" "io/ioutil" "net" "net/http" "net/http/httptest" "net/url" "strconv" "strings" "testing" "time" "github.com/docker/docker/opts" "github.com/gorilla/mux" log "github.com/Sirupsen/logrus" ) var ( testHTTPServer *httptest.Server testHTTPSServer *httptest.Server testLayers = map[string]map[string]string{ "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20": { "json": `{"id":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", "comment":"test base image","created":"2013-03-23T12:53:11.10432-07:00", "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, 
"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, "PortSpecs":null,"Tty":false,"OpenStdin":false,"StdinOnce":false, "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, "VolumesFrom":"","Entrypoint":null},"Size":424242}`, "checksum_simple": "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", "checksum_tarsum": "tarsum+sha256:4409a0685741ca86d38df878ed6f8cbba4c99de5dc73cd71aef04be3bb70be7c", "ancestry": `["77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, "layer": string([]byte{ 0x1f, 0x8b, 0x08, 0x08, 0x0e, 0xb0, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd2, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0xed, 0x38, 0x4e, 0xce, 0x13, 0x44, 0x2b, 0x66, 0x62, 0x24, 0x8e, 0x4f, 0xa0, 0x15, 0x63, 0xb6, 0x20, 0x21, 0xfc, 0x96, 0xbf, 0x78, 0xb0, 0xf5, 0x1d, 0x16, 0x98, 0x8e, 0x88, 0x8a, 0x2a, 0xbe, 0x33, 0xef, 0x49, 0x31, 0xed, 0x79, 0x40, 0x8e, 0x5c, 0x44, 0x85, 0x88, 0x33, 0x12, 0x73, 0x2c, 0x02, 0xa8, 0xf0, 0x05, 0xf7, 0x66, 0xf5, 0xd6, 0x57, 0x69, 0xd7, 0x7a, 0x19, 0xcd, 0xf5, 0xb1, 0x6d, 0x1b, 0x1f, 0xf9, 0xba, 0xe3, 0x93, 0x3f, 0x22, 0x2c, 0xb6, 0x36, 0x0b, 0xf6, 0xb0, 0xa9, 0xfd, 0xe7, 0x94, 0x46, 0xfd, 0xeb, 0xd1, 0x7f, 0x2c, 0xc4, 0xd2, 0xfb, 0x97, 0xfe, 0x02, 0x80, 0xe4, 0xfd, 0x4f, 0x77, 0xae, 0x6d, 0x3d, 0x81, 0x73, 0xce, 0xb9, 0x7f, 0xf3, 0x04, 0x41, 0xc1, 0xab, 0xc6, 0x00, 0x0a, 0x00, 0x00, }), }, "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d": { "json": `{"id":"42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", "parent":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", "comment":"test base image","created":"2013-03-23T12:55:11.10432-07:00", "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, "PortSpecs":null,"Tty":false,"OpenStdin":false,"StdinOnce":false, "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, "VolumesFrom":"","Entrypoint":null},"Size":424242}`, "checksum_simple": "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", "checksum_tarsum": "tarsum+sha256:68fdb56fb364f074eec2c9b3f85ca175329c4dcabc4a6a452b7272aa613a07a2", "ancestry": `["42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, "layer": string([]byte{ 0x1f, 0x8b, 0x08, 0x08, 0xbd, 0xb3, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd1, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0x9d, 0x38, 0x8e, 0xcf, 0x53, 0x51, 0xaa, 0x56, 0xea, 0x44, 0x82, 0xc4, 0xf1, 0x09, 0xb4, 0xea, 0x98, 0x2d, 0x48, 0x08, 0xbf, 0xe5, 0x2f, 0x1e, 0xfc, 0xf5, 0xdd, 0x00, 0xdd, 0x11, 0x91, 0x8a, 0xe0, 0x27, 0xd3, 0x9e, 0x14, 0xe2, 0x9e, 0x07, 0xf4, 0xc1, 0x2b, 0x0b, 0xfb, 0xa4, 0x82, 0xe4, 0x3d, 0x93, 0x02, 0x0a, 0x7c, 0xc1, 0x23, 0x97, 0xf1, 0x5e, 0x5f, 0xc9, 0xcb, 0x38, 0xb5, 0xee, 0xea, 0xd9, 0x3c, 0xb7, 0x4b, 0xbe, 0x7b, 0x9c, 0xf9, 0x23, 0xdc, 0x50, 0x6e, 0xb9, 0xb8, 0xf2, 0x2c, 0x5d, 0xf7, 0x4f, 0x31, 0xb6, 0xf6, 0x4f, 0xc7, 0xfe, 0x41, 0x55, 0x63, 0xdd, 0x9f, 0x89, 0x09, 0x90, 0x6c, 0xff, 0xee, 0xae, 0xcb, 0xba, 0x4d, 0x17, 0x30, 0xc6, 0x18, 0xf3, 0x67, 0x5e, 0xc1, 0xed, 0x21, 0x5d, 0x00, 0x0a, 0x00, 0x00, }), }, } testRepositories = map[string]map[string]string{ "foo42/bar": { "latest": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", 
}, } mockHosts = map[string][]net.IP{ "": {net.ParseIP("0.0.0.0")}, "localhost": {net.ParseIP("127.0.0.1"), net.ParseIP("::1")}, "example.com": {net.ParseIP("42.42.42.42")}, "other.com": {net.ParseIP("43.43.43.43")}, } ) func init() { r := mux.NewRouter() // /v1/ r.HandleFunc("/v1/_ping", handlerGetPing).Methods("GET") r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|ancestry}", handlerGetImage).Methods("GET") r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|checksum}", handlerPutImage).Methods("PUT") r.HandleFunc("/v1/repositories/{repository:.+}/tags", handlerGetDeleteTags).Methods("GET", "DELETE") r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerGetTag).Methods("GET") r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerPutTag).Methods("PUT") r.HandleFunc("/v1/users{null:.*}", handlerUsers).Methods("GET", "POST", "PUT") r.HandleFunc("/v1/repositories/{repository:.+}{action:/images|/}", handlerImages).Methods("GET", "PUT", "DELETE") r.HandleFunc("/v1/repositories/{repository:.+}/auth", handlerAuth).Methods("PUT") r.HandleFunc("/v1/search", handlerSearch).Methods("GET") // /v2/ r.HandleFunc("/v2/version", handlerGetPing).Methods("GET") testHTTPServer = httptest.NewServer(handlerAccessLog(r)) testHTTPSServer = httptest.NewTLSServer(handlerAccessLog(r)) // override net.LookupIP lookupIP = func(host string) ([]net.IP, error) { if host == "127.0.0.1" { // I believe in future Go versions this will fail, so let's fix it later return net.LookupIP(host) } for h, addrs := range mockHosts { if host == h { return addrs, nil } for _, addr := range addrs { if addr.String() == host { return []net.IP{addr}, nil } } } return nil, errors.New("lookup: no such host") } } func handlerAccessLog(handler http.Handler) http.Handler { logHandler := func(w http.ResponseWriter, r *http.Request) { log.Debugf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL) handler.ServeHTTP(w, r) } return http.HandlerFunc(logHandler) } func makeURL(req string) string { return testHTTPServer.URL + req } func makeHttpsURL(req string) string { return testHTTPSServer.URL + req } func makeIndex(req string) *IndexInfo { index := &IndexInfo{ Name: makeURL(req), } return index } func makeHttpsIndex(req string) *IndexInfo { index := &IndexInfo{ Name: makeHttpsURL(req), } return index } func makePublicIndex() *IndexInfo { index := &IndexInfo{ Name: IndexServerAddress(), Secure: true, Official: true, } return index } func makeServiceConfig(mirrors []string, insecure_registries []string) *ServiceConfig { options := &Options{ Mirrors: opts.NewListOpts(nil), InsecureRegistries: opts.NewListOpts(nil), } if mirrors != nil { for _, mirror := range mirrors { options.Mirrors.Set(mirror) } } if insecure_registries != nil { for _, insecure_registries := range insecure_registries { options.InsecureRegistries.Set(insecure_registries) } } return NewServiceConfig(options) } func writeHeaders(w http.ResponseWriter) { h := w.Header() h.Add("Server", "docker-tests/mock") h.Add("Expires", "-1") h.Add("Content-Type", "application/json") h.Add("Pragma", "no-cache") h.Add("Cache-Control", "no-cache") h.Add("X-Docker-Registry-Version", "0.0.0") h.Add("X-Docker-Registry-Config", "mock") } func writeResponse(w http.ResponseWriter, message interface{}, code int) { writeHeaders(w) w.WriteHeader(code) body, err := json.Marshal(message) if err != nil { io.WriteString(w, err.Error()) return } w.Write(body) } func readJSON(r *http.Request, dest interface{}) error { body, err := ioutil.ReadAll(r.Body) if err != nil 
{ return err } return json.Unmarshal(body, dest) } func apiError(w http.ResponseWriter, message string, code int) { body := map[string]string{ "error": message, } writeResponse(w, body, code) } func assertEqual(t *testing.T, a interface{}, b interface{}, message string) { if a == b { return } if len(message) == 0 { message = fmt.Sprintf("%v != %v", a, b) } t.Fatal(message) } func assertNotEqual(t *testing.T, a interface{}, b interface{}, message string) { if a != b { return } if len(message) == 0 { message = fmt.Sprintf("%v == %v", a, b) } t.Fatal(message) } // Similar to assertEqual, but does not stop test func checkEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) { if a == b { return } message := fmt.Sprintf("%v != %v", a, b) if len(messagePrefix) != 0 { message = messagePrefix + ": " + message } t.Error(message) } // Similar to assertNotEqual, but does not stop test func checkNotEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) { if a != b { return } message := fmt.Sprintf("%v == %v", a, b) if len(messagePrefix) != 0 { message = messagePrefix + ": " + message } t.Error(message) } func requiresAuth(w http.ResponseWriter, r *http.Request) bool { writeCookie := func() { value := fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano()) cookie := &http.Cookie{Name: "session", Value: value, MaxAge: 3600} http.SetCookie(w, cookie) //FIXME(sam): this should be sent only on Index routes value = fmt.Sprintf("FAKE-TOKEN-%d", time.Now().UnixNano()) w.Header().Add("X-Docker-Token", value) } if len(r.Cookies()) > 0 { writeCookie() return true } if len(r.Header.Get("Authorization")) > 0 { writeCookie() return true } w.Header().Add("WWW-Authenticate", "token") apiError(w, "Wrong auth", 401) return false } func handlerGetPing(w http.ResponseWriter, r *http.Request) { writeResponse(w, true, 200) } func handlerGetImage(w http.ResponseWriter, r *http.Request) { if !requiresAuth(w, r) { return } vars := mux.Vars(r) layer, exists := testLayers[vars["image_id"]] if !exists { http.NotFound(w, r) return } writeHeaders(w) layerSize := len(layer["layer"]) w.Header().Add("X-Docker-Size", strconv.Itoa(layerSize)) io.WriteString(w, layer[vars["action"]]) } func handlerPutImage(w http.ResponseWriter, r *http.Request) { if !requiresAuth(w, r) { return } vars := mux.Vars(r) imageID := vars["image_id"] action := vars["action"] layer, exists := testLayers[imageID] if !exists { if action != "json" { http.NotFound(w, r) return } layer = make(map[string]string) testLayers[imageID] = layer } if checksum := r.Header.Get("X-Docker-Checksum"); checksum != "" { if checksum != layer["checksum_simple"] && checksum != layer["checksum_tarsum"] { apiError(w, "Wrong checksum", 400) return } } body, err := ioutil.ReadAll(r.Body) if err != nil { apiError(w, fmt.Sprintf("Error: %s", err), 500) return } layer[action] = string(body) writeResponse(w, true, 200) } func handlerGetDeleteTags(w http.ResponseWriter, r *http.Request) { if !requiresAuth(w, r) { return } repositoryName := mux.Vars(r)["repository"] repositoryName = NormalizeLocalName(repositoryName) tags, exists := testRepositories[repositoryName] if !exists { apiError(w, "Repository not found", 404) return } if r.Method == "DELETE" { delete(testRepositories, repositoryName) writeResponse(w, true, 200) return } writeResponse(w, tags, 200) } func handlerGetTag(w http.ResponseWriter, r *http.Request) { if !requiresAuth(w, r) { return } vars := mux.Vars(r) repositoryName := vars["repository"] repositoryName = NormalizeLocalName(repositoryName) 
tagName := vars["tag"] tags, exists := testRepositories[repositoryName] if !exists { apiError(w, "Repository not found", 404) return } tag, exists := tags[tagName] if !exists { apiError(w, "Tag not found", 404) return } writeResponse(w, tag, 200) } func handlerPutTag(w http.ResponseWriter, r *http.Request) { if !requiresAuth(w, r) { return } vars := mux.Vars(r) repositoryName := vars["repository"] repositoryName = NormalizeLocalName(repositoryName) tagName := vars["tag"] tags, exists := testRepositories[repositoryName] if !exists { tags = make(map[string]string) testRepositories[repositoryName] = tags } tagValue := "" readJSON(r, &tagValue) tags[tagName] = tagValue writeResponse(w, true, 200) } func handlerUsers(w http.ResponseWriter, r *http.Request) { code := 200 if r.Method == "POST" { code = 201 } else if r.Method == "PUT" { code = 204 } writeResponse(w, "", code) } func handlerImages(w http.ResponseWriter, r *http.Request) { u, _ := url.Parse(testHTTPServer.URL) w.Header().Add("X-Docker-Endpoints", fmt.Sprintf("%s , %s ", u.Host, "test.example.com")) w.Header().Add("X-Docker-Token", fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano())) if r.Method == "PUT" { if strings.HasSuffix(r.URL.Path, "images") { writeResponse(w, "", 204) return } writeResponse(w, "", 200) return } if r.Method == "DELETE" { writeResponse(w, "", 204) return } images := []map[string]string{} for imageID, layer := range testLayers { image := make(map[string]string) image["id"] = imageID image["checksum"] = layer["checksum_tarsum"] image["Tag"] = "latest" images = append(images, image) } writeResponse(w, images, 200) } func handlerAuth(w http.ResponseWriter, r *http.Request) { writeResponse(w, "OK", 200) } func handlerSearch(w http.ResponseWriter, r *http.Request) { result := &SearchResults{ Query: "fakequery", NumResults: 1, Results: []SearchResult{{Name: "fakeimage", StarCount: 42}}, } writeResponse(w, result, 200) } func TestPing(t *testing.T) { res, err := http.Get(makeURL("/v1/_ping")) if err != nil { t.Fatal(err) } assertEqual(t, res.StatusCode, 200, "") assertEqual(t, res.Header.Get("X-Docker-Registry-Config"), "mock", "This is not a Mocked Registry") } /* Uncomment this to test Mocked Registry locally with curl * WARNING: Don't push on the repos uncommented, it'll block the tests * func TestWait(t *testing.T) { log.Println("Test HTTP server ready and waiting:", testHTTPServer.URL) c := make(chan int) <-c } //*/ docker-1.6.2/registry/registry_test.go0000644000175000017500000006373712524223634017462 0ustar tianontianonpackage registry import ( "fmt" "net/http" "net/url" "strings" "testing" "github.com/docker/docker/utils" ) var ( token = []string{"fake-token"} ) const ( imageID = "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d" REPO = "foo42/bar" ) func spawnTestRegistrySession(t *testing.T) *Session { authConfig := &AuthConfig{} endpoint, err := NewEndpoint(makeIndex("/v1/")) if err != nil { t.Fatal(err) } r, err := NewSession(authConfig, utils.NewHTTPRequestFactory(), endpoint, true) if err != nil { t.Fatal(err) } return r } func TestPublicSession(t *testing.T) { authConfig := &AuthConfig{} getSessionDecorators := func(index *IndexInfo) int { endpoint, err := NewEndpoint(index) if err != nil { t.Fatal(err) } r, err := NewSession(authConfig, utils.NewHTTPRequestFactory(), endpoint, true) if err != nil { t.Fatal(err) } return len(r.reqFactory.GetDecorators()) } decorators := getSessionDecorators(makeIndex("/v1/")) assertEqual(t, decorators, 0, "Expected no decorator on http session")
decorators = getSessionDecorators(makeHttpsIndex("/v1/")) assertNotEqual(t, decorators, 0, "Expected decorator on https session") decorators = getSessionDecorators(makePublicIndex()) assertEqual(t, decorators, 0, "Expected no decorator on public session") } func TestPingRegistryEndpoint(t *testing.T) { testPing := func(index *IndexInfo, expectedStandalone bool, assertMessage string) { ep, err := NewEndpoint(index) if err != nil { t.Fatal(err) } regInfo, err := ep.Ping() if err != nil { t.Fatal(err) } assertEqual(t, regInfo.Standalone, expectedStandalone, assertMessage) } testPing(makeIndex("/v1/"), true, "Expected standalone to be true (default)") testPing(makeHttpsIndex("/v1/"), true, "Expected standalone to be true (default)") testPing(makePublicIndex(), false, "Expected standalone to be false for public index") } func TestEndpoint(t *testing.T) { // Simple wrapper to fail test if err != nil expandEndpoint := func(index *IndexInfo) *Endpoint { endpoint, err := NewEndpoint(index) if err != nil { t.Fatal(err) } return endpoint } assertInsecureIndex := func(index *IndexInfo) { index.Secure = true _, err := NewEndpoint(index) assertNotEqual(t, err, nil, index.Name+": Expected error for insecure index") assertEqual(t, strings.Contains(err.Error(), "insecure-registry"), true, index.Name+": Expected insecure-registry error for insecure index") index.Secure = false } assertSecureIndex := func(index *IndexInfo) { index.Secure = true _, err := NewEndpoint(index) assertNotEqual(t, err, nil, index.Name+": Expected cert error for secure index") assertEqual(t, strings.Contains(err.Error(), "certificate signed by unknown authority"), true, index.Name+": Expected cert error for secure index") index.Secure = false } index := &IndexInfo{} index.Name = makeURL("/v1/") endpoint := expandEndpoint(index) assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) if endpoint.Version != APIVersion1 { t.Fatal("Expected endpoint to be v1") } assertInsecureIndex(index) index.Name = makeURL("") endpoint = expandEndpoint(index) assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") if endpoint.Version != APIVersion1 { t.Fatal("Expected endpoint to be v1") } assertInsecureIndex(index) httpURL := makeURL("") index.Name = strings.SplitN(httpURL, "://", 2)[1] endpoint = expandEndpoint(index) assertEqual(t, endpoint.String(), httpURL+"/v1/", index.Name+": Expected endpoint to be "+httpURL+"/v1/") if endpoint.Version != APIVersion1 { t.Fatal("Expected endpoint to be v1") } assertInsecureIndex(index) index.Name = makeHttpsURL("/v1/") endpoint = expandEndpoint(index) assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) if endpoint.Version != APIVersion1 { t.Fatal("Expected endpoint to be v1") } assertSecureIndex(index) index.Name = makeHttpsURL("") endpoint = expandEndpoint(index) assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") if endpoint.Version != APIVersion1 { t.Fatal("Expected endpoint to be v1") } assertSecureIndex(index) httpsURL := makeHttpsURL("") index.Name = strings.SplitN(httpsURL, "://", 2)[1] endpoint = expandEndpoint(index) assertEqual(t, endpoint.String(), httpsURL+"/v1/", index.Name+": Expected endpoint to be "+httpsURL+"/v1/") if endpoint.Version != APIVersion1 { t.Fatal("Expected endpoint to be v1") } assertSecureIndex(index) badEndpoints := []string{ "http://127.0.0.1/v1/", "https://127.0.0.1/v1/", "http://127.0.0.1", 
"https://127.0.0.1", "127.0.0.1", } for _, address := range badEndpoints { index.Name = address _, err := NewEndpoint(index) checkNotEqual(t, err, nil, "Expected error while expanding bad endpoint") } } func TestGetRemoteHistory(t *testing.T) { r := spawnTestRegistrySession(t) hist, err := r.GetRemoteHistory(imageID, makeURL("/v1/"), token) if err != nil { t.Fatal(err) } assertEqual(t, len(hist), 2, "Expected 2 images in history") assertEqual(t, hist[0], imageID, "Expected "+imageID+"as first ancestry") assertEqual(t, hist[1], "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", "Unexpected second ancestry") } func TestLookupRemoteImage(t *testing.T) { r := spawnTestRegistrySession(t) err := r.LookupRemoteImage(imageID, makeURL("/v1/"), token) assertEqual(t, err, nil, "Expected error of remote lookup to nil") if err := r.LookupRemoteImage("abcdef", makeURL("/v1/"), token); err == nil { t.Fatal("Expected error of remote lookup to not nil") } } func TestGetRemoteImageJSON(t *testing.T) { r := spawnTestRegistrySession(t) json, size, err := r.GetRemoteImageJSON(imageID, makeURL("/v1/"), token) if err != nil { t.Fatal(err) } assertEqual(t, size, 154, "Expected size 154") if len(json) <= 0 { t.Fatal("Expected non-empty json") } _, _, err = r.GetRemoteImageJSON("abcdef", makeURL("/v1/"), token) if err == nil { t.Fatal("Expected image not found error") } } func TestGetRemoteImageLayer(t *testing.T) { r := spawnTestRegistrySession(t) data, err := r.GetRemoteImageLayer(imageID, makeURL("/v1/"), token, 0) if err != nil { t.Fatal(err) } if data == nil { t.Fatal("Expected non-nil data result") } _, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), token, 0) if err == nil { t.Fatal("Expected image not found error") } } func TestGetRemoteTags(t *testing.T) { r := spawnTestRegistrySession(t) tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, REPO, token) if err != nil { t.Fatal(err) } assertEqual(t, len(tags), 1, "Expected one tag") assertEqual(t, tags["latest"], imageID, "Expected tag latest to map to "+imageID) _, err = r.GetRemoteTags([]string{makeURL("/v1/")}, "foo42/baz", token) if err == nil { t.Fatal("Expected error when fetching tags for bogus repo") } } func TestGetRepositoryData(t *testing.T) { r := spawnTestRegistrySession(t) parsedURL, err := url.Parse(makeURL("/v1/")) if err != nil { t.Fatal(err) } host := "http://" + parsedURL.Host + "/v1/" data, err := r.GetRepositoryData("foo42/bar") if err != nil { t.Fatal(err) } assertEqual(t, len(data.ImgList), 2, "Expected 2 images in ImgList") assertEqual(t, len(data.Endpoints), 2, fmt.Sprintf("Expected 2 endpoints in Endpoints, found %d instead", len(data.Endpoints))) assertEqual(t, data.Endpoints[0], host, fmt.Sprintf("Expected first endpoint to be %s but found %s instead", host, data.Endpoints[0])) assertEqual(t, data.Endpoints[1], "http://test.example.com/v1/", fmt.Sprintf("Expected first endpoint to be http://test.example.com/v1/ but found %s instead", data.Endpoints[1])) } func TestPushImageJSONRegistry(t *testing.T) { r := spawnTestRegistrySession(t) imgData := &ImgData{ ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", } err := r.PushImageJSONRegistry(imgData, []byte{0x42, 0xdf, 0x0}, makeURL("/v1/"), token) if err != nil { t.Fatal(err) } } func TestPushImageLayerRegistry(t *testing.T) { r := spawnTestRegistrySession(t) layer := strings.NewReader("") _, _, err := r.PushImageLayerRegistry(imageID, layer, 
makeURL("/v1/"), token, []byte{}) if err != nil { t.Fatal(err) } } func TestValidateRepositoryName(t *testing.T) { validRepoNames := []string{ "docker/docker", "library/debian", "debian", "docker.io/docker/docker", "docker.io/library/debian", "docker.io/debian", "index.docker.io/docker/docker", "index.docker.io/library/debian", "index.docker.io/debian", "127.0.0.1:5000/docker/docker", "127.0.0.1:5000/library/debian", "127.0.0.1:5000/debian", "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", } invalidRepoNames := []string{ "https://github.com/docker/docker", "docker/Docker", "docker///docker", "docker.io/docker/Docker", "docker.io/docker///docker", "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", "docker.io/1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", } for _, name := range invalidRepoNames { err := ValidateRepositoryName(name) assertNotEqual(t, err, nil, "Expected invalid repo name: "+name) } for _, name := range validRepoNames { err := ValidateRepositoryName(name) assertEqual(t, err, nil, "Expected valid repo name: "+name) } err := ValidateRepositoryName(invalidRepoNames[0]) assertEqual(t, err, ErrInvalidRepositoryName, "Expected ErrInvalidRepositoryName: "+invalidRepoNames[0]) } func TestParseRepositoryInfo(t *testing.T) { expectedRepoInfos := map[string]RepositoryInfo{ "fooo/bar": { Index: &IndexInfo{ Name: IndexServerName(), Official: true, }, RemoteName: "fooo/bar", LocalName: "fooo/bar", CanonicalName: "fooo/bar", Official: false, }, "library/ubuntu": { Index: &IndexInfo{ Name: IndexServerName(), Official: true, }, RemoteName: "library/ubuntu", LocalName: "ubuntu", CanonicalName: "ubuntu", Official: true, }, "nonlibrary/ubuntu": { Index: &IndexInfo{ Name: IndexServerName(), Official: true, }, RemoteName: "nonlibrary/ubuntu", LocalName: "nonlibrary/ubuntu", CanonicalName: "nonlibrary/ubuntu", Official: false, }, "ubuntu": { Index: &IndexInfo{ Name: IndexServerName(), Official: true, }, RemoteName: "library/ubuntu", LocalName: "ubuntu", CanonicalName: "ubuntu", Official: true, }, "other/library": { Index: &IndexInfo{ Name: IndexServerName(), Official: true, }, RemoteName: "other/library", LocalName: "other/library", CanonicalName: "other/library", Official: false, }, "127.0.0.1:8000/private/moonbase": { Index: &IndexInfo{ Name: "127.0.0.1:8000", Official: false, }, RemoteName: "private/moonbase", LocalName: "127.0.0.1:8000/private/moonbase", CanonicalName: "127.0.0.1:8000/private/moonbase", Official: false, }, "127.0.0.1:8000/privatebase": { Index: &IndexInfo{ Name: "127.0.0.1:8000", Official: false, }, RemoteName: "privatebase", LocalName: "127.0.0.1:8000/privatebase", CanonicalName: "127.0.0.1:8000/privatebase", Official: false, }, "localhost:8000/private/moonbase": { Index: &IndexInfo{ Name: "localhost:8000", Official: false, }, RemoteName: "private/moonbase", LocalName: "localhost:8000/private/moonbase", CanonicalName: "localhost:8000/private/moonbase", Official: false, }, "localhost:8000/privatebase": { Index: &IndexInfo{ Name: "localhost:8000", Official: false, }, RemoteName: "privatebase", LocalName: "localhost:8000/privatebase", CanonicalName: "localhost:8000/privatebase", Official: false, }, "example.com/private/moonbase": { Index: &IndexInfo{ Name: "example.com", Official: false, }, RemoteName: "private/moonbase", LocalName: "example.com/private/moonbase", CanonicalName: "example.com/private/moonbase", Official: false, }, "example.com/privatebase": { Index: &IndexInfo{ Name: "example.com", Official: false, }, 
RemoteName: "privatebase", LocalName: "example.com/privatebase", CanonicalName: "example.com/privatebase", Official: false, }, "example.com:8000/private/moonbase": { Index: &IndexInfo{ Name: "example.com:8000", Official: false, }, RemoteName: "private/moonbase", LocalName: "example.com:8000/private/moonbase", CanonicalName: "example.com:8000/private/moonbase", Official: false, }, "example.com:8000/privatebase": { Index: &IndexInfo{ Name: "example.com:8000", Official: false, }, RemoteName: "privatebase", LocalName: "example.com:8000/privatebase", CanonicalName: "example.com:8000/privatebase", Official: false, }, "localhost/private/moonbase": { Index: &IndexInfo{ Name: "localhost", Official: false, }, RemoteName: "private/moonbase", LocalName: "localhost/private/moonbase", CanonicalName: "localhost/private/moonbase", Official: false, }, "localhost/privatebase": { Index: &IndexInfo{ Name: "localhost", Official: false, }, RemoteName: "privatebase", LocalName: "localhost/privatebase", CanonicalName: "localhost/privatebase", Official: false, }, IndexServerName() + "/public/moonbase": { Index: &IndexInfo{ Name: IndexServerName(), Official: true, }, RemoteName: "public/moonbase", LocalName: "public/moonbase", CanonicalName: "public/moonbase", Official: false, }, "index." + IndexServerName() + "/public/moonbase": { Index: &IndexInfo{ Name: IndexServerName(), Official: true, }, RemoteName: "public/moonbase", LocalName: "public/moonbase", CanonicalName: "public/moonbase", Official: false, }, IndexServerName() + "/public/moonbase": { Index: &IndexInfo{ Name: IndexServerName(), Official: true, }, RemoteName: "public/moonbase", LocalName: "public/moonbase", CanonicalName: "public/moonbase", Official: false, }, "ubuntu-12.04-base": { Index: &IndexInfo{ Name: IndexServerName(), Official: true, }, RemoteName: "library/ubuntu-12.04-base", LocalName: "ubuntu-12.04-base", CanonicalName: "ubuntu-12.04-base", Official: true, }, IndexServerName() + "/ubuntu-12.04-base": { Index: &IndexInfo{ Name: IndexServerName(), Official: true, }, RemoteName: "library/ubuntu-12.04-base", LocalName: "ubuntu-12.04-base", CanonicalName: "ubuntu-12.04-base", Official: true, }, IndexServerName() + "/ubuntu-12.04-base": { Index: &IndexInfo{ Name: IndexServerName(), Official: true, }, RemoteName: "library/ubuntu-12.04-base", LocalName: "ubuntu-12.04-base", CanonicalName: "ubuntu-12.04-base", Official: true, }, "index." 
+ IndexServerName() + "/ubuntu-12.04-base": { Index: &IndexInfo{ Name: IndexServerName(), Official: true, }, RemoteName: "library/ubuntu-12.04-base", LocalName: "ubuntu-12.04-base", CanonicalName: "ubuntu-12.04-base", Official: true, }, } for reposName, expectedRepoInfo := range expectedRepoInfos { repoInfo, err := ParseRepositoryInfo(reposName) if err != nil { t.Error(err) } else { checkEqual(t, repoInfo.Index.Name, expectedRepoInfo.Index.Name, reposName) checkEqual(t, repoInfo.RemoteName, expectedRepoInfo.RemoteName, reposName) checkEqual(t, repoInfo.LocalName, expectedRepoInfo.LocalName, reposName) checkEqual(t, repoInfo.CanonicalName, expectedRepoInfo.CanonicalName, reposName) checkEqual(t, repoInfo.Index.Official, expectedRepoInfo.Index.Official, reposName) checkEqual(t, repoInfo.Official, expectedRepoInfo.Official, reposName) } } } func TestNewIndexInfo(t *testing.T) { testIndexInfo := func(config *ServiceConfig, expectedIndexInfos map[string]*IndexInfo) { for indexName, expectedIndexInfo := range expectedIndexInfos { index, err := config.NewIndexInfo(indexName) if err != nil { t.Fatal(err) } else { checkEqual(t, index.Name, expectedIndexInfo.Name, indexName+" name") checkEqual(t, index.Official, expectedIndexInfo.Official, indexName+" is official") checkEqual(t, index.Secure, expectedIndexInfo.Secure, indexName+" is secure") checkEqual(t, len(index.Mirrors), len(expectedIndexInfo.Mirrors), indexName+" mirrors") } } } config := NewServiceConfig(nil) noMirrors := make([]string, 0) expectedIndexInfos := map[string]*IndexInfo{ IndexServerName(): { Name: IndexServerName(), Official: true, Secure: true, Mirrors: noMirrors, }, "index." + IndexServerName(): { Name: IndexServerName(), Official: true, Secure: true, Mirrors: noMirrors, }, "example.com": { Name: "example.com", Official: false, Secure: true, Mirrors: noMirrors, }, "127.0.0.1:5000": { Name: "127.0.0.1:5000", Official: false, Secure: false, Mirrors: noMirrors, }, } testIndexInfo(config, expectedIndexInfos) publicMirrors := []string{"http://mirror1.local", "http://mirror2.local"} config = makeServiceConfig(publicMirrors, []string{"example.com"}) expectedIndexInfos = map[string]*IndexInfo{ IndexServerName(): { Name: IndexServerName(), Official: true, Secure: true, Mirrors: publicMirrors, }, "index." 
+ IndexServerName(): { Name: IndexServerName(), Official: true, Secure: true, Mirrors: publicMirrors, }, "example.com": { Name: "example.com", Official: false, Secure: false, Mirrors: noMirrors, }, "example.com:5000": { Name: "example.com:5000", Official: false, Secure: true, Mirrors: noMirrors, }, "127.0.0.1": { Name: "127.0.0.1", Official: false, Secure: false, Mirrors: noMirrors, }, "127.0.0.1:5000": { Name: "127.0.0.1:5000", Official: false, Secure: false, Mirrors: noMirrors, }, "other.com": { Name: "other.com", Official: false, Secure: true, Mirrors: noMirrors, }, } testIndexInfo(config, expectedIndexInfos) config = makeServiceConfig(nil, []string{"42.42.0.0/16"}) expectedIndexInfos = map[string]*IndexInfo{ "example.com": { Name: "example.com", Official: false, Secure: false, Mirrors: noMirrors, }, "example.com:5000": { Name: "example.com:5000", Official: false, Secure: false, Mirrors: noMirrors, }, "127.0.0.1": { Name: "127.0.0.1", Official: false, Secure: false, Mirrors: noMirrors, }, "127.0.0.1:5000": { Name: "127.0.0.1:5000", Official: false, Secure: false, Mirrors: noMirrors, }, "other.com": { Name: "other.com", Official: false, Secure: true, Mirrors: noMirrors, }, } testIndexInfo(config, expectedIndexInfos) } func TestPushRegistryTag(t *testing.T) { r := spawnTestRegistrySession(t) err := r.PushRegistryTag("foo42/bar", imageID, "stable", makeURL("/v1/"), token) if err != nil { t.Fatal(err) } } func TestPushImageJSONIndex(t *testing.T) { r := spawnTestRegistrySession(t) imgData := []*ImgData{ { ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", }, { ID: "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", Checksum: "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", }, } repoData, err := r.PushImageJSONIndex("foo42/bar", imgData, false, nil) if err != nil { t.Fatal(err) } if repoData == nil { t.Fatal("Expected RepositoryData object") } repoData, err = r.PushImageJSONIndex("foo42/bar", imgData, true, []string{r.indexEndpoint.String()}) if err != nil { t.Fatal(err) } if repoData == nil { t.Fatal("Expected RepositoryData object") } } func TestSearchRepositories(t *testing.T) { r := spawnTestRegistrySession(t) results, err := r.SearchRepositories("fakequery") if err != nil { t.Fatal(err) } if results == nil { t.Fatal("Expected non-nil SearchResults object") } assertEqual(t, results.NumResults, 1, "Expected 1 search result") assertEqual(t, results.Query, "fakequery", "Expected 'fakequery' as query") assertEqual(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' to have 42 stars") } func TestValidRemoteName(t *testing.T) { validRepositoryNames := []string{ // Sanity check. "docker/docker", // Allow 64-character non-hexadecimal names (hexadecimal names are forbidden). "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", // Allow embedded hyphens. "docker-rules/docker", // Allow underscores everywhere (as opposed to hyphens). "____/____", //Username doc and image name docker being tested. "doc/docker", } for _, repositoryName := range validRepositoryNames { if err := validateRemoteName(repositoryName); err != nil { t.Errorf("Repository name should be valid: %v. Error: %v", repositoryName, err) } } invalidRepositoryNames := []string{ // Disallow capital letters. "docker/Docker", // Only allow one slash. "docker///docker", // Disallow 64-character hexadecimal.
"1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", // Disallow leading and trailing hyphens in namespace. "-docker/docker", "docker-/docker", "-docker-/docker", // Disallow consecutive hyphens. "dock--er/docker", // No repository. "docker/", //namespace too short "d/docker", //namespace too long "this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker", } for _, repositoryName := range invalidRepositoryNames { if err := validateRemoteName(repositoryName); err == nil { t.Errorf("Repository name should be invalid: %v", repositoryName) } } } func TestTrustedLocation(t *testing.T) { for _, url := range []string{"http://example.com", "https://example.com:7777", "http://docker.io", "http://test.docker.com", "https://fakedocker.com"} { req, _ := http.NewRequest("GET", url, nil) if trustedLocation(req) == true { t.Fatalf("'%s' shouldn't be detected as a trusted location", url) } } for _, url := range []string{"https://docker.io", "https://test.docker.com:80"} { req, _ := http.NewRequest("GET", url, nil) if trustedLocation(req) == false { t.Fatalf("'%s' should be detected as a trusted location", url) } } } func TestAddRequiredHeadersToRedirectedRequests(t *testing.T) { for _, urls := range [][]string{ {"http://docker.io", "https://docker.com"}, {"https://foo.docker.io:7777", "http://bar.docker.com"}, {"https://foo.docker.io", "https://example.com"}, } { reqFrom, _ := http.NewRequest("GET", urls[0], nil) reqFrom.Header.Add("Content-Type", "application/json") reqFrom.Header.Add("Authorization", "super_secret") reqTo, _ := http.NewRequest("GET", urls[1], nil) AddRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) if len(reqTo.Header) != 1 { t.Fatalf("Expected 1 headers, got %d", len(reqTo.Header)) } if reqTo.Header.Get("Content-Type") != "application/json" { t.Fatal("'Content-Type' should be 'application/json'") } if reqTo.Header.Get("Authorization") != "" { t.Fatal("'Authorization' should be empty") } } for _, urls := range [][]string{ {"https://docker.io", "https://docker.com"}, {"https://foo.docker.io:7777", "https://bar.docker.com"}, } { reqFrom, _ := http.NewRequest("GET", urls[0], nil) reqFrom.Header.Add("Content-Type", "application/json") reqFrom.Header.Add("Authorization", "super_secret") reqTo, _ := http.NewRequest("GET", urls[1], nil) AddRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) if len(reqTo.Header) != 2 { t.Fatalf("Expected 2 headers, got %d", len(reqTo.Header)) } if reqTo.Header.Get("Content-Type") != "application/json" { t.Fatal("'Content-Type' should be 'application/json'") } if reqTo.Header.Get("Authorization") != "super_secret" { t.Fatal("'Authorization' should be 'super_secret'") } } } func TestIsSecureIndex(t *testing.T) { tests := []struct { addr string insecureRegistries []string expected bool }{ {IndexServerName(), nil, true}, {"example.com", []string{}, true}, {"example.com", []string{"example.com"}, false}, {"localhost", []string{"localhost:5000"}, false}, {"localhost:5000", []string{"localhost:5000"}, false}, {"localhost", []string{"example.com"}, false}, {"127.0.0.1:5000", []string{"127.0.0.1:5000"}, false}, {"localhost", nil, false}, {"localhost:5000", nil, false}, {"127.0.0.1", nil, false}, {"localhost", []string{"example.com"}, false}, {"127.0.0.1", []string{"example.com"}, false}, 
{"example.com", nil, true}, {"example.com", []string{"example.com"}, false}, {"127.0.0.1", []string{"example.com"}, false}, {"127.0.0.1:5000", []string{"example.com"}, false}, {"example.com:5000", []string{"42.42.0.0/16"}, false}, {"example.com", []string{"42.42.0.0/16"}, false}, {"example.com:5000", []string{"42.42.42.42/8"}, false}, {"127.0.0.1:5000", []string{"127.0.0.0/8"}, false}, {"42.42.42.42:5000", []string{"42.1.1.1/8"}, false}, {"invalid.domain.com", []string{"42.42.0.0/16"}, true}, {"invalid.domain.com", []string{"invalid.domain.com"}, false}, {"invalid.domain.com:5000", []string{"invalid.domain.com"}, true}, {"invalid.domain.com:5000", []string{"invalid.domain.com:5000"}, false}, } for _, tt := range tests { config := makeServiceConfig(nil, tt.insecureRegistries) if sec := config.isSecureIndex(tt.addr); sec != tt.expected { t.Errorf("isSecureIndex failed for %q %v, expected %v got %v", tt.addr, tt.insecureRegistries, tt.expected, sec) } } } docker-1.6.2/registry/httpfactory.go0000644000175000017500000000305612524223634017106 0ustar tianontianonpackage registry import ( "runtime" "github.com/docker/docker/autogen/dockerversion" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/utils" ) func HTTPRequestFactory(metaHeaders map[string][]string) *utils.HTTPRequestFactory { // FIXME: this replicates the 'info' job. httpVersion := make([]utils.VersionInfo, 0, 4) httpVersion = append(httpVersion, &simpleVersionInfo{"docker", dockerversion.VERSION}) httpVersion = append(httpVersion, &simpleVersionInfo{"go", runtime.Version()}) httpVersion = append(httpVersion, &simpleVersionInfo{"git-commit", dockerversion.GITCOMMIT}) if kernelVersion, err := kernel.GetKernelVersion(); err == nil { httpVersion = append(httpVersion, &simpleVersionInfo{"kernel", kernelVersion.String()}) } httpVersion = append(httpVersion, &simpleVersionInfo{"os", runtime.GOOS}) httpVersion = append(httpVersion, &simpleVersionInfo{"arch", runtime.GOARCH}) ud := utils.NewHTTPUserAgentDecorator(httpVersion...) md := &utils.HTTPMetaHeadersDecorator{ Headers: metaHeaders, } factory := utils.NewHTTPRequestFactory(ud, md) return factory } // simpleVersionInfo is a simple implementation of // the interface VersionInfo, which is used // to provide version information for some product, // component, etc. It stores the product name and the version // in string and returns them on calls to Name() and Version(). type simpleVersionInfo struct { name string version string } func (v *simpleVersionInfo) Name() string { return v.name } func (v *simpleVersionInfo) Version() string { return v.version } docker-1.6.2/registry/v2/0000755000175000017500000000000012524223634014533 5ustar tianontianondocker-1.6.2/registry/v2/errors_test.go0000644000175000017500000001073612524223634017444 0ustar tianontianonpackage v2 import ( "encoding/json" "reflect" "testing" ) // TestErrorCodes ensures that error code format, mappings and // marshaling/unmarshaling. round trips are stable. func TestErrorCodes(t *testing.T) { for _, desc := range ErrorDescriptors { if desc.Code.String() != desc.Value { t.Fatalf("error code string incorrect: %q != %q", desc.Code.String(), desc.Value) } if desc.Code.Message() != desc.Message { t.Fatalf("incorrect message for error code %v: %q != %q", desc.Code, desc.Code.Message(), desc.Message) } // Serialize the error code using the json library to ensure that we // get a string and it works round trip. 
p, err := json.Marshal(desc.Code) if err != nil { t.Fatalf("error marshaling error code %v: %v", desc.Code, err) } if len(p) <= 0 { t.Fatalf("expected content in marshaled form for error code %v", desc.Code) } // First, unmarshal to interface and ensure we have a string. var ecUnspecified interface{} if err := json.Unmarshal(p, &ecUnspecified); err != nil { t.Fatalf("error unmarshaling error code %v: %v", desc.Code, err) } if _, ok := ecUnspecified.(string); !ok { t.Fatalf("expected a string for error code %v on unmarshal got a %T", desc.Code, ecUnspecified) } // Now, unmarshal with the error code type and ensure they are equal var ecUnmarshaled ErrorCode if err := json.Unmarshal(p, &ecUnmarshaled); err != nil { t.Fatalf("error unmarshaling error code %v: %v", desc.Code, err) } if ecUnmarshaled != desc.Code { t.Fatalf("unexpected error code during error code marshal/unmarshal: %v != %v", ecUnmarshaled, desc.Code) } } } // TestErrorsManagement does a quick check of the Errors type to ensure that // members are properly pushed and marshaled. func TestErrorsManagement(t *testing.T) { var errs Errors errs.Push(ErrorCodeDigestInvalid) errs.Push(ErrorCodeBlobUnknown, map[string]string{"digest": "sometestblobsumdoesntmatter"}) p, err := json.Marshal(errs) if err != nil { t.Fatalf("error marshaling errors: %v", err) } expectedJSON := "{\"errors\":[{\"code\":\"DIGEST_INVALID\",\"message\":\"provided digest did not match uploaded content\"},{\"code\":\"BLOB_UNKNOWN\",\"message\":\"blob unknown to registry\",\"detail\":{\"digest\":\"sometestblobsumdoesntmatter\"}}]}" if string(p) != expectedJSON { t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON) } errs.Clear() errs.Push(ErrorCodeUnknown) expectedJSON = "{\"errors\":[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]}" p, err = json.Marshal(errs) if err != nil { t.Fatalf("error marshaling errors: %v", err) } if string(p) != expectedJSON { t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON) } } // TestMarshalUnmarshal ensures that api errors can round trip through json // without losing information. func TestMarshalUnmarshal(t *testing.T) { var errors Errors for _, testcase := range []struct { description string err Error }{ { description: "unknown error", err: Error{ Code: ErrorCodeUnknown, Message: ErrorCodeUnknown.Descriptor().Message, }, }, { description: "unknown manifest", err: Error{ Code: ErrorCodeManifestUnknown, Message: ErrorCodeManifestUnknown.Descriptor().Message, }, }, { description: "unknown blob", err: Error{ Code: ErrorCodeBlobUnknown, Message: ErrorCodeBlobUnknown.Descriptor().Message, Detail: map[string]interface{}{"digest": "asdfqwerqwerqwerqwer"}, }, }, } { fatalf := func(format string, args ...interface{}) { t.Fatalf(testcase.description+": "+format, args...) } unexpectedErr := func(err error) { fatalf("unexpected error: %v", err) } p, err := json.Marshal(testcase.err) if err != nil { unexpectedErr(err) } var unmarshaled Error if err := json.Unmarshal(p, &unmarshaled); err != nil { unexpectedErr(err) } if !reflect.DeepEqual(unmarshaled, testcase.err) { fatalf("errors not equal after round trip: %#v != %#v", unmarshaled, testcase.err) } // Roll everything up into an error response envelope.
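// (Per PushErr in errors.go, a typed Error is appended intact, while a plain
// error would be wrapped with only its message and the zero-value ErrorCode;
// a sketch for illustration only:
//
//	errs.PushErr(fmt.Errorf("boom")) // appends Error{Message: "boom"}
// )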
errors.PushErr(testcase.err) } p, err := json.Marshal(errors) if err != nil { t.Fatalf("unexpected error marshaling error envelope: %v", err) } var unmarshaled Errors if err := json.Unmarshal(p, &unmarshaled); err != nil { t.Fatalf("unexpected error unmarshaling error envelope: %v", err) } if !reflect.DeepEqual(unmarshaled, errors) { t.Fatalf("errors not equal after round trip: %#v != %#v", unmarshaled, errors) } } docker-1.6.2/registry/v2/errors.go0000644000175000017500000001125112524223634016376 0ustar tianontianonpackage v2 import ( "fmt" "strings" ) // ErrorCode represents the error type. The errors are serialized via strings // and the integer format may change and should *never* be exported. type ErrorCode int const ( // ErrorCodeUnknown is a catch-all for errors not defined below. ErrorCodeUnknown ErrorCode = iota // ErrorCodeDigestInvalid is returned when uploading a blob if the // provided digest does not match the blob contents. ErrorCodeDigestInvalid // ErrorCodeSizeInvalid is returned when uploading a blob if the provided // size does not match the content length. ErrorCodeSizeInvalid // ErrorCodeNameInvalid is returned when the name in the manifest does not // match the provided name. ErrorCodeNameInvalid // ErrorCodeTagInvalid is returned when the tag in the manifest does not // match the provided tag. ErrorCodeTagInvalid // ErrorCodeNameUnknown is returned when the repository name is not known. ErrorCodeNameUnknown // ErrorCodeManifestUnknown is returned when the image manifest is unknown. ErrorCodeManifestUnknown // ErrorCodeManifestInvalid is returned when an image manifest is invalid, // typically during a PUT operation. This error encompasses all errors // encountered during manifest validation that aren't signature errors. ErrorCodeManifestInvalid // ErrorCodeManifestUnverified is returned when the manifest fails // signature verification. ErrorCodeManifestUnverified // ErrorCodeBlobUnknown is returned when a blob is unknown to the // registry. This can happen when the manifest references a nonexistent // layer or the result is not found by a blob fetch. ErrorCodeBlobUnknown // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. ErrorCodeBlobUploadUnknown ) // ParseErrorCode attempts to parse the error code string, returning // ErrorCodeUnknown if the error is not known. func ParseErrorCode(s string) ErrorCode { desc, ok := idToDescriptors[s] if !ok { return ErrorCodeUnknown } return desc.Code } // Descriptor returns the descriptor for the error code. func (ec ErrorCode) Descriptor() ErrorDescriptor { d, ok := errorCodeToDescriptors[ec] if !ok { return ErrorCodeUnknown.Descriptor() } return d } // String returns the canonical identifier for this error code. func (ec ErrorCode) String() string { return ec.Descriptor().Value } // Message returns the human-readable error message for this error code. func (ec ErrorCode) Message() string { return ec.Descriptor().Message } // MarshalText encodes the receiver into UTF-8-encoded text and returns the // result. func (ec ErrorCode) MarshalText() (text []byte, err error) { return []byte(ec.String()), nil } // UnmarshalText decodes the form generated by MarshalText. func (ec *ErrorCode) UnmarshalText(text []byte) error { desc, ok := idToDescriptors[string(text)] if !ok { desc = ErrorCodeUnknown.Descriptor() } *ec = desc.Code return nil } // Error provides a wrapper around ErrorCode with extra Details provided.
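//
// A minimal usage sketch (the digest value is illustrative only):
//
//	err := Error{
//		Code:    ErrorCodeDigestInvalid,
//		Message: ErrorCodeDigestInvalid.Message(),
//		Detail:  map[string]string{"digest": "sha256:deadbeef"},
//	}
//	err.Error() // "digest invalid: provided digest did not match uploaded content"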
type Error struct { Code ErrorCode `json:"code"` Message string `json:"message,omitempty"` Detail interface{} `json:"detail,omitempty"` } // Error returns a human readable representation of the error. func (e Error) Error() string { return fmt.Sprintf("%s: %s", strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)), e.Message) } // Errors provides the envelope for multiple errors and a few sugar methods // for use within the application. type Errors struct { Errors []Error `json:"errors,omitempty"` } // Push pushes an error on to the error stack, with the optional detail // argument. It is a programming error (ie panic) to push more than one // detail at a time. func (errs *Errors) Push(code ErrorCode, details ...interface{}) { if len(details) > 1 { panic("please specify zero or one detail items for this error") } var detail interface{} if len(details) > 0 { detail = details[0] } if err, ok := detail.(error); ok { detail = err.Error() } errs.PushErr(Error{ Code: code, Message: code.Message(), Detail: detail, }) } // PushErr pushes an error interface onto the error stack. func (errs *Errors) PushErr(err error) { switch err.(type) { case Error: errs.Errors = append(errs.Errors, err.(Error)) default: errs.Errors = append(errs.Errors, Error{Message: err.Error()}) } } func (errs *Errors) Error() string { switch errs.Len() { case 0: return "" case 1: return errs.Errors[0].Error() default: msg := "errors:\n" for _, err := range errs.Errors { msg += err.Error() + "\n" } return msg } } // Clear clears the errors. func (errs *Errors) Clear() { errs.Errors = errs.Errors[:0] } // Len returns the current number of errors. func (errs *Errors) Len() int { return len(errs.Errors) } docker-1.6.2/registry/v2/doc.go0000644000175000017500000000125212524223634015627 0ustar tianontianon// Package v2 describes routes, urls and the error codes used in the Docker // Registry JSON HTTP API V2. In addition to declarations, descriptors are // provided for routes and error codes that can be used for implementation and // automatically generating documentation. // // Definitions here are considered to be locked down for the V2 registry api. // Any changes must be considered carefully and should not proceed without a // change proposal. // // Currently, while the HTTP API definitions are considered stable, the Go API // exports are considered unstable. Go API consumers should take care when // relying on these definitions until this message is deleted. package v2 docker-1.6.2/registry/v2/urls_test.go0000644000175000017500000000564712524223634017122 0ustar tianontianonpackage v2 import ( "net/url" "testing" ) type urlBuilderTestCase struct { description string expectedPath string build func() (string, error) } // TestURLBuilder tests the various url building functions, ensuring they are // returning the expected values. 
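//
// For instance, with the root "http://localhost:5000" (one of the roots
// exercised below), the tags url for "foo/bar" is expected to come back as:
//
//	ub, _ := NewURLBuilderFromString("http://localhost:5000")
//	u, _ := ub.BuildTagsURL("foo/bar") // "http://localhost:5000/v2/foo/bar/tags/list"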
func TestURLBuilder(t *testing.T) { var ( urlBuilder *URLBuilder err error ) testCases := []urlBuilderTestCase{ { description: "test base url", expectedPath: "/v2/", build: func() (string, error) { return urlBuilder.BuildBaseURL() }, }, { description: "test tags url", expectedPath: "/v2/foo/bar/tags/list", build: func() (string, error) { return urlBuilder.BuildTagsURL("foo/bar") }, }, { description: "test manifest url", expectedPath: "/v2/foo/bar/manifests/tag", build: func() (string, error) { return urlBuilder.BuildManifestURL("foo/bar", "tag") }, }, { description: "build blob url", expectedPath: "/v2/foo/bar/blobs/tarsum.v1+sha256:abcdef0123456789", build: func() (string, error) { return urlBuilder.BuildBlobURL("foo/bar", "tarsum.v1+sha256:abcdef0123456789") }, }, { description: "build blob upload url", expectedPath: "/v2/foo/bar/blobs/uploads/", build: func() (string, error) { return urlBuilder.BuildBlobUploadURL("foo/bar") }, }, { description: "build blob upload url with digest and size", expectedPath: "/v2/foo/bar/blobs/uploads/?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000", build: func() (string, error) { return urlBuilder.BuildBlobUploadURL("foo/bar", url.Values{ "size": []string{"10000"}, "digest": []string{"tarsum.v1+sha256:abcdef0123456789"}, }) }, }, { description: "build blob upload chunk url", expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part", build: func() (string, error) { return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part") }, }, { description: "build blob upload chunk url with digest and size", expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000", build: func() (string, error) { return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part", url.Values{ "size": []string{"10000"}, "digest": []string{"tarsum.v1+sha256:abcdef0123456789"}, }) }, }, } roots := []string{ "http://example.com", "https://example.com", "http://localhost:5000", "https://localhost:5443", } for _, root := range roots { urlBuilder, err = NewURLBuilderFromString(root) if err != nil { t.Fatalf("unexpected error creating urlbuilder: %v", err) } for _, testCase := range testCases { url, err := testCase.build() if err != nil { t.Fatalf("%s: error building url: %v", testCase.description, err) } expectedURL := root + testCase.expectedPath if url != expectedURL { t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) } } } } docker-1.6.2/registry/v2/routes.go0000644000175000017500000000515712524223634016413 0ustar tianontianonpackage v2 import "github.com/gorilla/mux" // The following are definitions of the name under which all V2 routes are // registered. These symbols can be used to look up a route based on the name. const ( RouteNameBase = "base" RouteNameManifest = "manifest" RouteNameTags = "tags" RouteNameBlob = "blob" RouteNameBlobUpload = "blob-upload" RouteNameBlobUploadChunk = "blob-upload-chunk" ) var allEndpoints = []string{ RouteNameManifest, RouteNameTags, RouteNameBlob, RouteNameBlobUpload, RouteNameBlobUploadChunk, } // Router builds a gorilla router with named routes for the various API // methods. This can be used directly by both server implementations and // clients. func Router() *mux.Router { router := mux.NewRouter(). StrictSlash(true) // GET /v2/ Check Check that the registry implements API version 2(.1) router. Path("/v2/"). Name(RouteNameBase) // GET /v2/<name>/manifest/<reference> Image Manifest Fetch the image manifest identified by name and reference where reference can be a tag or digest.
// PUT /v2/<name>/manifest/<reference> Image Manifest Upload the image manifest identified by name and reference where reference can be a tag or digest. // DELETE /v2/<name>/manifest/<reference> Image Manifest Delete the image identified by name and reference where reference can be a tag or digest. router. Path("/v2/{name:" + RepositoryNameRegexp.String() + "}/manifests/{reference:" + TagNameRegexp.String() + "|" + DigestRegexp.String() + "}"). Name(RouteNameManifest) // GET /v2/<name>/tags/list Tags Fetch the tags under the repository identified by name. router. Path("/v2/{name:" + RepositoryNameRegexp.String() + "}/tags/list"). Name(RouteNameTags) // GET /v2/<name>/blob/<digest> Layer Fetch the blob identified by digest. router. Path("/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/{digest:[a-zA-Z0-9-_+.]+:[a-zA-Z0-9-_+.=]+}"). Name(RouteNameBlob) // POST /v2/<name>/blob/upload/ Layer Upload Initiate an upload of the layer identified by tarsum. router. Path("/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/"). Name(RouteNameBlobUpload) // GET /v2/<name>/blob/upload/<uuid> Layer Upload Get the status of the upload identified by tarsum and uuid. // PUT /v2/<name>/blob/upload/<uuid> Layer Upload Upload all or a chunk of the upload identified by tarsum and uuid. // DELETE /v2/<name>/blob/upload/<uuid> Layer Upload Cancel the upload identified by layer and uuid router. Path("/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/{uuid}"). Name(RouteNameBlobUploadChunk) return router } docker-1.6.2/registry/v2/descriptors.go0000644000175000017500000001210012524223634017425 0ustar tianontianonpackage v2 import "net/http" // TODO(stevvooe): Add route descriptors for each named route, along with // accepted methods, parameters, returned status codes and error codes. // ErrorDescriptor provides relevant information about a given error code. type ErrorDescriptor struct { // Code is the error code that this descriptor describes. Code ErrorCode // Value provides a unique, string key, often capitalized with // underscores, to identify the error code. This value is used as the // keyed value when serializing api errors. Value string // Message is a short, human readable description of the error condition // included in API responses. Message string // Description provides a complete account of the error's purpose, suitable // for use in documentation. Description string // HTTPStatusCodes provides a list of status codes under which this error // condition may arise. If it is empty, the error condition may be seen // for any status code. HTTPStatusCodes []int } // ErrorDescriptors provides a list of HTTP API Error codes that may be // encountered when interacting with the registry API. var ErrorDescriptors = []ErrorDescriptor{ { Code: ErrorCodeUnknown, Value: "UNKNOWN", Message: "unknown error", Description: `Generic error returned when the error does not have an API classification.`, }, { Code: ErrorCodeDigestInvalid, Value: "DIGEST_INVALID", Message: "provided digest did not match uploaded content", Description: `When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string.
This error may also be returned when a manifest includes an invalid layer digest.`, HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, }, { Code: ErrorCodeSizeInvalid, Value: "SIZE_INVALID", Message: "provided length did not match content length", Description: `When a layer is uploaded, the provided size will be checked against the uploaded content. If they do not match, this error will be returned.`, HTTPStatusCodes: []int{http.StatusBadRequest}, }, { Code: ErrorCodeNameInvalid, Value: "NAME_INVALID", Message: "manifest name did not match URI", Description: `During a manifest upload, if the name in the manifest does not match the uri name, this error will be returned.`, HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, }, { Code: ErrorCodeTagInvalid, Value: "TAG_INVALID", Message: "manifest tag did not match URI", Description: `During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned.`, HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, }, { Code: ErrorCodeNameUnknown, Value: "NAME_UNKNOWN", Message: "repository name not known to registry", Description: `This is returned if the name used during an operation is unknown to the registry.`, HTTPStatusCodes: []int{http.StatusNotFound}, }, { Code: ErrorCodeManifestUnknown, Value: "MANIFEST_UNKNOWN", Message: "manifest unknown", Description: `This error is returned when the manifest, identified by name and tag, is unknown to the repository.`, HTTPStatusCodes: []int{http.StatusNotFound}, }, { Code: ErrorCodeManifestInvalid, Value: "MANIFEST_INVALID", Message: "manifest invalid", Description: `During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information about the failed validation.`, HTTPStatusCodes: []int{http.StatusBadRequest}, }, { Code: ErrorCodeManifestUnverified, Value: "MANIFEST_UNVERIFIED", Message: "manifest failed signature verification", Description: `During manifest upload, if the manifest fails signature verification, this error will be returned.`, HTTPStatusCodes: []int{http.StatusBadRequest}, }, { Code: ErrorCodeBlobUnknown, Value: "BLOB_UNKNOWN", Message: "blob unknown to registry", Description: `This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload.`, HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, }, { Code: ErrorCodeBlobUploadUnknown, Value: "BLOB_UPLOAD_UNKNOWN", Message: "blob upload unknown to registry", Description: `If a blob upload has been cancelled or was never started, this error code may be returned.`, HTTPStatusCodes: []int{http.StatusNotFound}, }, } var errorCodeToDescriptors map[ErrorCode]ErrorDescriptor var idToDescriptors map[string]ErrorDescriptor func init() { errorCodeToDescriptors = make(map[ErrorCode]ErrorDescriptor, len(ErrorDescriptors)) idToDescriptors = make(map[string]ErrorDescriptor, len(ErrorDescriptors)) for _, descriptor := range ErrorDescriptors { errorCodeToDescriptors[descriptor.Code] = descriptor idToDescriptors[descriptor.Value] = descriptor } } docker-1.6.2/registry/v2/regexp.go0000644000175000017500000000205412524223634016355 0ustar tianontianonpackage v2 import "regexp" // This file defines regular expressions for use in route definition. These // are also defined in the registry code base.
Until they are in a common, // shared location, and exported, they must be repeated here. // RepositoryNameComponentRegexp restricts registry path component names to // start with at least two letters or numbers, with following parts able to // be separated by one period, dash or underscore. var RepositoryNameComponentRegexp = regexp.MustCompile(`[a-z0-9]+(?:[._-][a-z0-9]+)*`) // RepositoryNameRegexp builds on RepositoryNameComponentRegexp to allow 1 to // 5 path components, separated by a forward slash. var RepositoryNameRegexp = regexp.MustCompile(`(?:` + RepositoryNameComponentRegexp.String() + `/){0,4}` + RepositoryNameComponentRegexp.String()) // TagNameRegexp matches valid tag names. From docker/docker:graph/tags.go. var TagNameRegexp = regexp.MustCompile(`[\w][\w.-]{0,127}`) // DigestRegexp matches valid digest types. var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-zA-Z0-9-_+.=]+`) docker-1.6.2/registry/v2/routes_test.go0000644000175000017500000001135612524223634017450 0ustar tianontianonpackage v2 import ( "encoding/json" "net/http" "net/http/httptest" "reflect" "testing" "github.com/gorilla/mux" ) type routeTestCase struct { RequestURI string Vars map[string]string RouteName string StatusCode int } // TestRouter registers a test handler with all the routes and ensures that // each route returns the expected path variables. No method verification is // present. This is not meant to be exhaustive, but serves as a check to ensure that the // expected variables are extracted. // // This may go away as the application structure comes together. func TestRouter(t *testing.T) { router := Router() testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { testCase := routeTestCase{ RequestURI: r.RequestURI, Vars: mux.Vars(r), RouteName: mux.CurrentRoute(r).GetName(), } enc := json.NewEncoder(w) if err := enc.Encode(testCase); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } }) // Startup test server server := httptest.NewServer(router) for _, testcase := range []routeTestCase{ { RouteName: RouteNameBase, RequestURI: "/v2/", Vars: map[string]string{}, }, { RouteName: RouteNameManifest, RequestURI: "/v2/foo/manifests/bar", Vars: map[string]string{ "name": "foo", "reference": "bar", }, }, { RouteName: RouteNameManifest, RequestURI: "/v2/foo/bar/manifests/tag", Vars: map[string]string{ "name": "foo/bar", "reference": "tag", }, }, { RouteName: RouteNameTags, RequestURI: "/v2/foo/bar/tags/list", Vars: map[string]string{ "name": "foo/bar", }, }, { RouteName: RouteNameBlob, RequestURI: "/v2/foo/bar/blobs/tarsum.dev+foo:abcdef0919234", Vars: map[string]string{ "name": "foo/bar", "digest": "tarsum.dev+foo:abcdef0919234", }, }, { RouteName: RouteNameBlob, RequestURI: "/v2/foo/bar/blobs/sha256:abcdef0919234", Vars: map[string]string{ "name": "foo/bar", "digest": "sha256:abcdef0919234", }, }, { RouteName: RouteNameBlobUpload, RequestURI: "/v2/foo/bar/blobs/uploads/", Vars: map[string]string{ "name": "foo/bar", }, }, { RouteName: RouteNameBlobUploadChunk, RequestURI: "/v2/foo/bar/blobs/uploads/uuid", Vars: map[string]string{ "name": "foo/bar", "uuid": "uuid", }, }, { RouteName: RouteNameBlobUploadChunk, RequestURI: "/v2/foo/bar/blobs/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", Vars: map[string]string{ "name": "foo/bar", "uuid": "D95306FA-FAD3-4E36-8D41-CF1C93EF8286", }, }, { RouteName: RouteNameBlobUploadChunk, RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==", Vars: map[string]string{ "name": "foo/bar",
"uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==", }, }, { // Check ambiguity: ensure we can distinguish between tags for // "foo/bar/image/image" and image for "foo/bar/image" with tag // "tags" RouteName: RouteNameManifest, RequestURI: "/v2/foo/bar/manifests/manifests/tags", Vars: map[string]string{ "name": "foo/bar/manifests", "reference": "tags", }, }, { // This case presents an ambiguity between foo/bar with tag="tags" // and list tags for "foo/bar/manifest" RouteName: RouteNameTags, RequestURI: "/v2/foo/bar/manifests/tags/list", Vars: map[string]string{ "name": "foo/bar/manifests", }, }, { RouteName: RouteNameBlobUploadChunk, RequestURI: "/v2/foo/../../blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", StatusCode: http.StatusNotFound, }, } { // Register the endpoint router.GetRoute(testcase.RouteName).Handler(testHandler) u := server.URL + testcase.RequestURI resp, err := http.Get(u) if err != nil { t.Fatalf("error issuing get request: %v", err) } if testcase.StatusCode == 0 { // Override default, zero-value testcase.StatusCode = http.StatusOK } if resp.StatusCode != testcase.StatusCode { t.Fatalf("unexpected status for %s: %v %v", u, resp.Status, resp.StatusCode) } if testcase.StatusCode != http.StatusOK { // We don't care about json response. continue } dec := json.NewDecoder(resp.Body) var actualRouteInfo routeTestCase if err := dec.Decode(&actualRouteInfo); err != nil { t.Fatalf("error reading json response: %v", err) } // Needs to be set out of band actualRouteInfo.StatusCode = resp.StatusCode if actualRouteInfo.RouteName != testcase.RouteName { t.Fatalf("incorrect route %q matched, expected %q", actualRouteInfo.RouteName, testcase.RouteName) } if !reflect.DeepEqual(actualRouteInfo, testcase) { t.Fatalf("actual does not equal expected: %#v != %#v", actualRouteInfo, testcase) } } } docker-1.6.2/registry/v2/urls.go0000644000175000017500000001116512524223634016053 0ustar tianontianonpackage v2 import ( "net/http" "net/url" "github.com/gorilla/mux" ) // URLBuilder creates registry API urls from a single base endpoint. It can be // used to create urls for use in a registry client or server. // // All urls will be created from the given base, including the api version. // For example, if a root of "/foo/" is provided, urls generated will be fall // under "/foo/v2/...". Most application will only provide a schema, host and // port, such as "https://localhost:5000/". type URLBuilder struct { root *url.URL // url root (ie http://localhost/) router *mux.Router } // NewURLBuilder creates a URLBuilder with provided root url object. func NewURLBuilder(root *url.URL) *URLBuilder { return &URLBuilder{ root: root, router: Router(), } } // NewURLBuilderFromString workes identically to NewURLBuilder except it takes // a string argument for the root, returning an error if it is not a valid // url. func NewURLBuilderFromString(root string) (*URLBuilder, error) { u, err := url.Parse(root) if err != nil { return nil, err } return NewURLBuilder(u), nil } // NewURLBuilderFromRequest uses information from an *http.Request to // construct the root url. func NewURLBuilderFromRequest(r *http.Request) *URLBuilder { u := &url.URL{ Scheme: r.URL.Scheme, Host: r.Host, } return NewURLBuilder(u) } // BuildBaseURL constructs a base url for the API, typically just "/v2/". 
func (ub *URLBuilder) BuildBaseURL() (string, error) { route := ub.cloneRoute(RouteNameBase) baseURL, err := route.URL() if err != nil { return "", err } return baseURL.String(), nil } // BuildTagsURL constructs a url to list the tags in the named repository. func (ub *URLBuilder) BuildTagsURL(name string) (string, error) { route := ub.cloneRoute(RouteNameTags) tagsURL, err := route.URL("name", name) if err != nil { return "", err } return tagsURL.String(), nil } // BuildManifestURL constructs a url for the manifest identified by name and reference. func (ub *URLBuilder) BuildManifestURL(name, reference string) (string, error) { route := ub.cloneRoute(RouteNameManifest) manifestURL, err := route.URL("name", name, "reference", reference) if err != nil { return "", err } return manifestURL.String(), nil } // BuildBlobURL constructs the url for the blob identified by name and dgst. func (ub *URLBuilder) BuildBlobURL(name string, dgst string) (string, error) { route := ub.cloneRoute(RouteNameBlob) layerURL, err := route.URL("name", name, "digest", dgst) if err != nil { return "", err } return layerURL.String(), nil } // BuildBlobUploadURL constructs a url to begin a blob upload in the // repository identified by name. func (ub *URLBuilder) BuildBlobUploadURL(name string, values ...url.Values) (string, error) { route := ub.cloneRoute(RouteNameBlobUpload) uploadURL, err := route.URL("name", name) if err != nil { return "", err } return appendValuesURL(uploadURL, values...).String(), nil } // BuildBlobUploadChunkURL constructs a url for the upload identified by uuid, // including any url values. This should generally not be used by clients, as // this url is provided by server implementations during the blob upload // process. func (ub *URLBuilder) BuildBlobUploadChunkURL(name, uuid string, values ...url.Values) (string, error) { route := ub.cloneRoute(RouteNameBlobUploadChunk) uploadURL, err := route.URL("name", name, "uuid", uuid) if err != nil { return "", err } return appendValuesURL(uploadURL, values...).String(), nil } // cloneRoute returns a clone of the named route from the router. Routes // must be cloned to avoid modifying them during url generation. func (ub *URLBuilder) cloneRoute(name string) clonedRoute { route := new(mux.Route) root := new(url.URL) *route = *ub.router.GetRoute(name) // clone the route *root = *ub.root return clonedRoute{Route: route, root: root} } type clonedRoute struct { *mux.Route root *url.URL } func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { routeURL, err := cr.Route.URL(pairs...) if err != nil { return nil, err } return cr.root.ResolveReference(routeURL), nil } // appendValuesURL appends the parameters to the url. func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { merged := u.Query() for _, v := range values { for k, vv := range v { merged[k] = append(merged[k], vv...) } } u.RawQuery = merged.Encode() return u } // appendValues appends the parameters to the url. Panics if the string is not // a url. func appendValues(u string, values ...url.Values) string { up, err := url.Parse(u) if err != nil { panic(err) // should never happen } return appendValuesURL(up, values...).String() } docker-1.6.2/registry/config.go0000644000175000017500000002716512524223634016013 0ustar tianontianonpackage registry import ( "encoding/json" "errors" "fmt" "net" "net/url" "regexp" "strings" "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/utils" ) // Options holds command line options.
type Options struct { Mirrors opts.ListOpts InsecureRegistries opts.ListOpts } const ( // Only used for user auth + account creation INDEXSERVER = "https://index.docker.io/v1/" REGISTRYSERVER = "https://registry-1.docker.io/v2/" INDEXNAME = "docker.io" // INDEXSERVER = "https://registry-stage.hub.docker.com/v1/" ) var ( ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") emptyServiceConfig = NewServiceConfig(nil) validNamespaceChars = regexp.MustCompile(`^([a-z0-9-_]*)$`) validRepo = regexp.MustCompile(`^([a-z0-9-_.]+)$`) ) func IndexServerAddress() string { return INDEXSERVER } func IndexServerName() string { return INDEXNAME } // InstallFlags adds command-line options to the top-level flag parser for // the current process. func (options *Options) InstallFlags() { options.Mirrors = opts.NewListOpts(ValidateMirror) flag.Var(&options.Mirrors, []string{"-registry-mirror"}, "Preferred Docker registry mirror") options.InsecureRegistries = opts.NewListOpts(ValidateIndexName) flag.Var(&options.InsecureRegistries, []string{"-insecure-registry"}, "Enable insecure registry communication") } type netIPNet net.IPNet func (ipnet *netIPNet) MarshalJSON() ([]byte, error) { return json.Marshal((*net.IPNet)(ipnet).String()) } func (ipnet *netIPNet) UnmarshalJSON(b []byte) (err error) { var ipnet_str string if err = json.Unmarshal(b, &ipnet_str); err == nil { var cidr *net.IPNet if _, cidr, err = net.ParseCIDR(ipnet_str); err == nil { *ipnet = netIPNet(*cidr) } } return } // ServiceConfig stores daemon registry services configuration. type ServiceConfig struct { InsecureRegistryCIDRs []*netIPNet `json:"InsecureRegistryCIDRs"` IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` } // NewServiceConfig returns a new instance of ServiceConfig func NewServiceConfig(options *Options) *ServiceConfig { if options == nil { options = &Options{ Mirrors: opts.NewListOpts(nil), InsecureRegistries: opts.NewListOpts(nil), } } // Localhost is by default considered as an insecure registry // This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker). // // TODO: should we deprecate this once it is easier for people to set up a TLS registry or change // daemon flags on boot2docker? options.InsecureRegistries.Set("127.0.0.0/8") config := &ServiceConfig{ InsecureRegistryCIDRs: make([]*netIPNet, 0), IndexConfigs: make(map[string]*IndexInfo, 0), } // Split --insecure-registry into CIDR and registry-specific settings. for _, r := range options.InsecureRegistries.GetAll() { // Check if CIDR was passed to --insecure-registry _, ipnet, err := net.ParseCIDR(r) if err == nil { // Valid CIDR. config.InsecureRegistryCIDRs = append(config.InsecureRegistryCIDRs, (*netIPNet)(ipnet)) } else { // Assume `host:port` if not CIDR. config.IndexConfigs[r] = &IndexInfo{ Name: r, Mirrors: make([]string, 0), Secure: false, Official: false, } } } // Configure public registry. config.IndexConfigs[IndexServerName()] = &IndexInfo{ Name: IndexServerName(), Mirrors: options.Mirrors.GetAll(), Secure: true, Official: true, } return config } // isSecureIndex returns false if the provided indexName is part of the list of insecure registries // Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. // // The list of insecure registries can contain an element with CIDR notation to specify a whole subnet. // If the subnet contains one of the IPs of the registry specified by indexName, the latter is considered // insecure. 
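// For example (hypothetical flag value): with --insecure-registry 10.1.0.0/16,
// an index named "10.1.2.3:5000" resolves to an address inside that subnet and
// is treated as insecure, while "10.2.0.1:5000" remains secure.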
// // indexName should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name // or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained // in a subnet. If the resolving is not successful, isSecureIndex will only try to match hostname to any element // of insecureRegistries. func (config *ServiceConfig) isSecureIndex(indexName string) bool { // Check for configured index, first. This is needed in case isSecureIndex // is called from anything besides NewIndexInfo, in order to honor per-index configurations. if index, ok := config.IndexConfigs[indexName]; ok { return index.Secure } host, _, err := net.SplitHostPort(indexName) if err != nil { // assume indexName is of the form `host` without the port and go on. host = indexName } addrs, err := lookupIP(host) if err != nil { ip := net.ParseIP(host) if ip != nil { addrs = []net.IP{ip} } // if ip == nil, then `host` is neither an IP nor it could be looked up, // either because the index is unreachable, or because the index is behind an HTTP proxy. // So, len(addrs) == 0 and we're not aborting. } // Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined. for _, addr := range addrs { for _, ipnet := range config.InsecureRegistryCIDRs { // check if the addr falls in the subnet if (*net.IPNet)(ipnet).Contains(addr) { return false } } } return true } // ValidateMirror validates an HTTP(S) registry mirror func ValidateMirror(val string) (string, error) { uri, err := url.Parse(val) if err != nil { return "", fmt.Errorf("%s is not a valid URI", val) } if uri.Scheme != "http" && uri.Scheme != "https" { return "", fmt.Errorf("Unsupported scheme %s", uri.Scheme) } if uri.Path != "" || uri.RawQuery != "" || uri.Fragment != "" { return "", fmt.Errorf("Unsupported path/query/fragment at end of the URI") } return fmt.Sprintf("%s://%s/v1/", uri.Scheme, uri.Host), nil } // ValidateIndexName validates an index name. func ValidateIndexName(val string) (string, error) { // 'index.docker.io' => 'docker.io' if val == "index."+IndexServerName() { val = IndexServerName() } // *TODO: Check if valid hostname[:port]/ip[:port]? return val, nil } func validateRemoteName(remoteName string) error { var ( namespace string name string ) nameParts := strings.SplitN(remoteName, "/", 2) if len(nameParts) < 2 { namespace = "library" name = nameParts[0] // the repository name must not be a valid image ID if err := utils.ValidateID(name); err == nil { return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name) } } else { namespace = nameParts[0] name = nameParts[1] } if !validNamespaceChars.MatchString(namespace) { return fmt.Errorf("Invalid namespace name (%s). Only [a-z0-9-_] are allowed.", namespace) } if len(namespace) < 2 || len(namespace) > 255 { return fmt.Errorf("Invalid namespace name (%s). Cannot be fewer than 2 or more than 255 characters.", namespace) } if strings.HasPrefix(namespace, "-") || strings.HasSuffix(namespace, "-") { return fmt.Errorf("Invalid namespace name (%s). Cannot begin or end with a hyphen.", namespace) } if strings.Contains(namespace, "--") { return fmt.Errorf("Invalid namespace name (%s). Cannot contain consecutive hyphens.", namespace) } if !validRepo.MatchString(name) { return fmt.Errorf("Invalid repository name (%s), only [a-z0-9-_.] are allowed", name) } return nil } func validateNoSchema(reposName string) error { if strings.Contains(reposName, "://") { // It cannot contain a scheme! 
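// (e.g. a reposName such as "https://registry.example.com/myrepo" is rejected)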
return ErrInvalidRepositoryName } return nil } // ValidateRepositoryName validates a repository name func ValidateRepositoryName(reposName string) error { var err error if err = validateNoSchema(reposName); err != nil { return err } indexName, remoteName := splitReposName(reposName) if _, err = ValidateIndexName(indexName); err != nil { return err } return validateRemoteName(remoteName) } // NewIndexInfo returns IndexInfo configuration from indexName func (config *ServiceConfig) NewIndexInfo(indexName string) (*IndexInfo, error) { var err error indexName, err = ValidateIndexName(indexName) if err != nil { return nil, err } // Return any configured index info, first. if index, ok := config.IndexConfigs[indexName]; ok { return index, nil } // Construct a non-configured index info. index := &IndexInfo{ Name: indexName, Mirrors: make([]string, 0), Official: false, } index.Secure = config.isSecureIndex(indexName) return index, nil } // GetAuthConfigKey special-cases using the full index address of the official // index as the AuthConfig key, and uses the (host)name[:port] for private indexes. func (index *IndexInfo) GetAuthConfigKey() string { if index.Official { return IndexServerAddress() } return index.Name } // splitReposName breaks a reposName into an index name and remote name func splitReposName(reposName string) (string, string) { nameParts := strings.SplitN(reposName, "/", 2) var indexName, remoteName string if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { // This is a Docker Index repos (ex: samalba/hipache or ubuntu) // 'docker.io' indexName = IndexServerName() remoteName = reposName } else { indexName = nameParts[0] remoteName = nameParts[1] } return indexName, remoteName } // NewRepositoryInfo validates and breaks down a repository name into a RepositoryInfo func (config *ServiceConfig) NewRepositoryInfo(reposName string) (*RepositoryInfo, error) { if err := validateNoSchema(reposName); err != nil { return nil, err } indexName, remoteName := splitReposName(reposName) if err := validateRemoteName(remoteName); err != nil { return nil, err } repoInfo := &RepositoryInfo{ RemoteName: remoteName, } var err error repoInfo.Index, err = config.NewIndexInfo(indexName) if err != nil { return nil, err } if repoInfo.Index.Official { normalizedName := repoInfo.RemoteName if strings.HasPrefix(normalizedName, "library/") { // If pull "library/foo", it's stored locally under "foo" normalizedName = strings.SplitN(normalizedName, "/", 2)[1] } repoInfo.LocalName = normalizedName repoInfo.RemoteName = normalizedName // If the normalized name does not contain a '/' (e.g. "foo") // then it is an official repo. if strings.IndexRune(normalizedName, '/') == -1 { repoInfo.Official = true // Fix up remote name for official repos. repoInfo.RemoteName = "library/" + normalizedName } // *TODO: Prefix this with 'docker.io/'. repoInfo.CanonicalName = repoInfo.LocalName } else { // *TODO: Decouple index name from hostname (via registry configuration?) repoInfo.LocalName = repoInfo.Index.Name + "/" + repoInfo.RemoteName repoInfo.CanonicalName = repoInfo.LocalName } return repoInfo, nil } // GetSearchTerm special-cases using local name for official index, and // remote name for private indexes. 
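// For example, the official "library/debian" is searched as "debian" (its
// LocalName), while "127.0.0.1:5000/user/repo" is searched as "user/repo"
// (its RemoteName); compare the RepositoryInfo examples in types.go.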
func (repoInfo *RepositoryInfo) GetSearchTerm() string { if repoInfo.Index.Official { return repoInfo.LocalName } return repoInfo.RemoteName } // ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but // lacks registry configuration. func ParseRepositoryInfo(reposName string) (*RepositoryInfo, error) { return emptyServiceConfig.NewRepositoryInfo(reposName) } // NormalizeLocalName transforms a repository name into a normalized LocalName // Passes through the name without transformation on error (image id, etc) func NormalizeLocalName(name string) string { repoInfo, err := ParseRepositoryInfo(name) if err != nil { return name } return repoInfo.LocalName } docker-1.6.2/registry/config_test.go0000644000175000017500000000217712524223634017046 0ustar tianontianonpackage registry import ( "testing" ) func TestValidateMirror(t *testing.T) { valid := []string{ "http://mirror-1.com", "https://mirror-1.com", "http://localhost", "https://localhost", "http://localhost:5000", "https://localhost:5000", "http://127.0.0.1", "https://127.0.0.1", "http://127.0.0.1:5000", "https://127.0.0.1:5000", } invalid := []string{ "!invalid!://%as%", "ftp://mirror-1.com", "http://mirror-1.com/", "http://mirror-1.com/?q=foo", "http://mirror-1.com/v1/", "http://mirror-1.com/v1/?q=foo", "http://mirror-1.com/v1/?q=foo#frag", "http://mirror-1.com?q=foo", "https://mirror-1.com#frag", "https://mirror-1.com/", "https://mirror-1.com/#frag", "https://mirror-1.com/v1/", "https://mirror-1.com/v1/#", "https://mirror-1.com?q", } for _, address := range valid { if ret, err := ValidateMirror(address); err != nil || ret == "" { t.Errorf("ValidateMirror(`"+address+"`) got %s %s", ret, err) } } for _, address := range invalid { if ret, err := ValidateMirror(address); err == nil || ret != "" { t.Errorf("ValidateMirror(`"+address+"`) got %s %s", ret, err) } } } docker-1.6.2/registry/types.go0000644000175000017500000000460512524223634015704 0ustar tianontianonpackage registry type SearchResult struct { StarCount int `json:"star_count"` IsOfficial bool `json:"is_official"` Name string `json:"name"` IsTrusted bool `json:"is_trusted"` Description string `json:"description"` } type SearchResults struct { Query string `json:"query"` NumResults int `json:"num_results"` Results []SearchResult `json:"results"` } type RepositoryData struct { ImgList map[string]*ImgData Endpoints []string Tokens []string } type ImgData struct { ID string `json:"id"` Checksum string `json:"checksum,omitempty"` ChecksumPayload string `json:"-"` Tag string `json:",omitempty"` } type RegistryInfo struct { Version string `json:"version"` Standalone bool `json:"standalone"` } type FSLayer struct { BlobSum string `json:"blobSum"` } type ManifestHistory struct { V1Compatibility string `json:"v1Compatibility"` } type ManifestData struct { Name string `json:"name"` Tag string `json:"tag"` Architecture string `json:"architecture"` FSLayers []*FSLayer `json:"fsLayers"` History []*ManifestHistory `json:"history"` SchemaVersion int `json:"schemaVersion"` } type APIVersion int func (av APIVersion) String() string { return apiVersions[av] } var apiVersions = map[APIVersion]string{ 1: "v1", 2: "v2", } // API Version identifiers.
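// Via iota, APIVersionUnknown is 0, APIVersion1 is 1 and APIVersion2 is 2,
// matching the keys of the apiVersions map above.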
const ( APIVersionUnknown = iota APIVersion1 APIVersion2 ) // RepositoryInfo Examples: // { // "Index" : { // "Name" : "docker.io", // "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], // "Secure" : true, // "Official" : true, // }, // "RemoteName" : "library/debian", // "LocalName" : "debian", // "CanonicalName" : "docker.io/debian" // "Official" : true, // } // { // "Index" : { // "Name" : "127.0.0.1:5000", // "Mirrors" : [], // "Secure" : false, // "Official" : false, // }, // "RemoteName" : "user/repo", // "LocalName" : "127.0.0.1:5000/user/repo", // "CanonicalName" : "127.0.0.1:5000/user/repo", // "Official" : false, // } type IndexInfo struct { Name string Mirrors []string Secure bool Official bool } type RepositoryInfo struct { Index *IndexInfo RemoteName string LocalName string CanonicalName string Official bool } docker-1.6.2/registry/auth.go0000644000175000017500000003450012524223634015476 0ustar tianontianonpackage registry import ( "encoding/base64" "encoding/json" "errors" "fmt" "io/ioutil" "net/http" "os" "path" "strings" "sync" "time" log "github.com/Sirupsen/logrus" "github.com/docker/docker/utils" ) const ( // Where we store the config file CONFIGFILE = ".dockercfg" ) var ( ErrConfigFileMissing = errors.New("The Auth config file is missing") ) type AuthConfig struct { Username string `json:"username,omitempty"` Password string `json:"password,omitempty"` Auth string `json:"auth"` Email string `json:"email"` ServerAddress string `json:"serveraddress,omitempty"` } type ConfigFile struct { Configs map[string]AuthConfig `json:"configs,omitempty"` rootPath string } type RequestAuthorization struct { authConfig *AuthConfig registryEndpoint *Endpoint resource string scope string actions []string tokenLock sync.Mutex tokenCache string tokenExpiration time.Time } func NewRequestAuthorization(authConfig *AuthConfig, registryEndpoint *Endpoint, resource, scope string, actions []string) *RequestAuthorization { return &RequestAuthorization{ authConfig: authConfig, registryEndpoint: registryEndpoint, resource: resource, scope: scope, actions: actions, } } func (auth *RequestAuthorization) getToken() (string, error) { auth.tokenLock.Lock() defer auth.tokenLock.Unlock() now := time.Now() if now.Before(auth.tokenExpiration) { log.Debugf("Using cached token for %s", auth.authConfig.Username) return auth.tokenCache, nil } client := auth.registryEndpoint.HTTPClient() factory := HTTPRequestFactory(nil) for _, challenge := range auth.registryEndpoint.AuthChallenges { switch strings.ToLower(challenge.Scheme) { case "basic": // no token necessary case "bearer": log.Debugf("Getting bearer token with %s for %s", challenge.Parameters, auth.authConfig.Username) params := map[string]string{} for k, v := range challenge.Parameters { params[k] = v } params["scope"] = fmt.Sprintf("%s:%s:%s", auth.resource, auth.scope, strings.Join(auth.actions, ",")) token, err := getToken(auth.authConfig.Username, auth.authConfig.Password, params, auth.registryEndpoint, client, factory) if err != nil { return "", err } auth.tokenCache = token auth.tokenExpiration = now.Add(time.Minute) return token, nil default: log.Infof("Unsupported auth scheme: %q", challenge.Scheme) } } // Do not expire cache since there are no challenges which use a token auth.tokenExpiration = time.Now().Add(time.Hour * 24) return "", nil } func (auth *RequestAuthorization) Authorize(req *http.Request) error { token, err := auth.getToken() if err != nil { return err } if token != "" { 
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) } else if auth.authConfig.Username != "" && auth.authConfig.Password != "" { req.SetBasicAuth(auth.authConfig.Username, auth.authConfig.Password) } return nil } // create a base64 encoded auth string to store in config func encodeAuth(authConfig *AuthConfig) string { authStr := authConfig.Username + ":" + authConfig.Password msg := []byte(authStr) encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) base64.StdEncoding.Encode(encoded, msg) return string(encoded) } // decode the auth string func decodeAuth(authStr string) (string, string, error) { decLen := base64.StdEncoding.DecodedLen(len(authStr)) decoded := make([]byte, decLen) authByte := []byte(authStr) n, err := base64.StdEncoding.Decode(decoded, authByte) if err != nil { return "", "", err } if n > decLen { return "", "", fmt.Errorf("Something went wrong decoding auth config") } arr := strings.SplitN(string(decoded), ":", 2) if len(arr) != 2 { return "", "", fmt.Errorf("Invalid auth configuration file") } password := strings.Trim(arr[1], "\x00") return arr[0], password, nil } // load up the auth config information and return values // FIXME: use the internal golang config parser func LoadConfig(rootPath string) (*ConfigFile, error) { configFile := ConfigFile{Configs: make(map[string]AuthConfig), rootPath: rootPath} confFile := path.Join(rootPath, CONFIGFILE) if _, err := os.Stat(confFile); err != nil { return &configFile, nil //missing file is not an error } b, err := ioutil.ReadFile(confFile) if err != nil { return &configFile, err } if err := json.Unmarshal(b, &configFile.Configs); err != nil { arr := strings.Split(string(b), "\n") if len(arr) < 2 { return &configFile, fmt.Errorf("The Auth config file is empty") } authConfig := AuthConfig{} origAuth := strings.Split(arr[0], " = ") if len(origAuth) != 2 { return &configFile, fmt.Errorf("Invalid Auth config file") } authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) if err != nil { return &configFile, err } origEmail := strings.Split(arr[1], " = ") if len(origEmail) != 2 { return &configFile, fmt.Errorf("Invalid Auth config file") } authConfig.Email = origEmail[1] authConfig.ServerAddress = IndexServerAddress() // *TODO: Switch to using IndexServerName() instead? configFile.Configs[IndexServerAddress()] = authConfig } else { for k, authConfig := range configFile.Configs { authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) if err != nil { return &configFile, err } authConfig.Auth = "" authConfig.ServerAddress = k configFile.Configs[k] = authConfig } } return &configFile, nil } // save the auth config func SaveConfig(configFile *ConfigFile) error { confFile := path.Join(configFile.rootPath, CONFIGFILE) if len(configFile.Configs) == 0 { os.Remove(confFile) return nil } configs := make(map[string]AuthConfig, len(configFile.Configs)) for k, authConfig := range configFile.Configs { authCopy := authConfig authCopy.Auth = encodeAuth(&authCopy) authCopy.Username = "" authCopy.Password = "" authCopy.ServerAddress = "" configs[k] = authCopy } b, err := json.MarshalIndent(configs, "", "\t") if err != nil { return err } err = ioutil.WriteFile(confFile, b, 0600) if err != nil { return err } return nil } // Login tries to register/login to the registry server. func Login(authConfig *AuthConfig, registryEndpoint *Endpoint, factory *utils.HTTPRequestFactory) (string, error) { // Separates the v2 registry login logic from the v1 logic. 
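// Note that only the v1 path can register a brand-new account; the v2 path
// (loginV2 below) strictly verifies existing credentials against the
// endpoint's advertised auth challenges.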
if registryEndpoint.Version == APIVersion2 { return loginV2(authConfig, registryEndpoint, factory) } return loginV1(authConfig, registryEndpoint, factory) } // loginV1 tries to register/login to the v1 registry server. func loginV1(authConfig *AuthConfig, registryEndpoint *Endpoint, factory *utils.HTTPRequestFactory) (string, error) { var ( status string reqBody []byte err error client = registryEndpoint.HTTPClient() reqStatusCode = 0 serverAddress = authConfig.ServerAddress ) log.Debugf("attempting v1 login to registry endpoint %s", registryEndpoint) if serverAddress == "" { return "", fmt.Errorf("Server Error: Server Address not set.") } loginAgainstOfficialIndex := serverAddress == IndexServerAddress() // to avoid sending the server address to the server it should be removed before being marshalled authCopy := *authConfig authCopy.ServerAddress = "" jsonBody, err := json.Marshal(authCopy) if err != nil { return "", fmt.Errorf("Config Error: %s", err) } // using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status. b := strings.NewReader(string(jsonBody)) req1, err := client.Post(serverAddress+"users/", "application/json; charset=utf-8", b) if err != nil { return "", fmt.Errorf("Server Error: %s", err) } reqStatusCode = req1.StatusCode defer req1.Body.Close() reqBody, err = ioutil.ReadAll(req1.Body) if err != nil { return "", fmt.Errorf("Server Error: [%#v] %s", reqStatusCode, err) } if reqStatusCode == 201 { if loginAgainstOfficialIndex { status = "Account created. Please use the confirmation link we sent" + " to your e-mail to activate it." } else { // *TODO: Use registry configuration to determine what this says, if anything? status = "Account created. Please see the documentation of the registry " + serverAddress + " for instructions on how to activate it." } } else if reqStatusCode == 400 { if string(reqBody) == "\"Username or email already exists\"" { req, err := factory.NewRequest("GET", serverAddress+"users/", nil) req.SetBasicAuth(authConfig.Username, authConfig.Password) resp, err := client.Do(req) if err != nil { return "", err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return "", err } if resp.StatusCode == 200 { return "Login Succeeded", nil } else if resp.StatusCode == 401 { return "", fmt.Errorf("Wrong login/password, please try again") } else if resp.StatusCode == 403 { if loginAgainstOfficialIndex { return "", fmt.Errorf("Login: Account is not Active. Please check your e-mail for a confirmation link.") } // *TODO: Use registry configuration to determine what this says, if anything? return "", fmt.Errorf("Login: Account is not Active. Please see the documentation of the registry %s for instructions on how to activate it.", serverAddress) } return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header) } return "", fmt.Errorf("Registration: %s", reqBody) } else if reqStatusCode == 401 { // This case would happen with private registries where /v1/users is // protected, so people can use `docker login` as an auth check.
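// The GET below simply replays the credentials with basic auth: a 200 means
// they are valid, a 401 means they are not, and anything else is surfaced as
// an error.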
req, err := factory.NewRequest("GET", serverAddress+"users/", nil) req.SetBasicAuth(authConfig.Username, authConfig.Password) resp, err := client.Do(req) if err != nil { return "", err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return "", err } if resp.StatusCode == 200 { return "Login Succeeded", nil } else if resp.StatusCode == 401 { return "", fmt.Errorf("Wrong login/password, please try again") } else { return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header) } } else { return "", fmt.Errorf("Unexpected status code [%d] : %s", reqStatusCode, reqBody) } return status, nil } // loginV2 tries to login to the v2 registry server. The given registry endpoint has been // pinged or set up with a list of authorization challenges. Each of these challenges is // tried until one of them succeeds. Currently supported challenge schemes are: // HTTP Basic Authorization // Token Authorization with a separate token issuing server // NOTE: the v2 logic does not attempt to create a user account if one doesn't exist. For // now, users should create their account through other means like directly from a web page // served by the v2 registry service provider. Whether this will be supported in the future // is to be determined. func loginV2(authConfig *AuthConfig, registryEndpoint *Endpoint, factory *utils.HTTPRequestFactory) (string, error) { log.Debugf("attempting v2 login to registry endpoint %s", registryEndpoint) var ( err error allErrors []error client = registryEndpoint.HTTPClient() ) for _, challenge := range registryEndpoint.AuthChallenges { log.Debugf("trying %q auth challenge with params %s", challenge.Scheme, challenge.Parameters) switch strings.ToLower(challenge.Scheme) { case "basic": err = tryV2BasicAuthLogin(authConfig, challenge.Parameters, registryEndpoint, client, factory) case "bearer": err = tryV2TokenAuthLogin(authConfig, challenge.Parameters, registryEndpoint, client, factory) default: // Unsupported challenge types are explicitly skipped.
err = fmt.Errorf("unsupported auth scheme: %q", challenge.Scheme) } if err == nil { return "Login Succeeded", nil } log.Debugf("error trying auth challenge %q: %s", challenge.Scheme, err) allErrors = append(allErrors, err) } return "", fmt.Errorf("no successful auth challenge for %s - errors: %s", registryEndpoint, allErrors) } func tryV2BasicAuthLogin(authConfig *AuthConfig, params map[string]string, registryEndpoint *Endpoint, client *http.Client, factory *utils.HTTPRequestFactory) error { req, err := factory.NewRequest("GET", registryEndpoint.Path(""), nil) if err != nil { return err } req.SetBasicAuth(authConfig.Username, authConfig.Password) resp, err := client.Do(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { return fmt.Errorf("basic auth attempt to %s realm %q failed with status: %d %s", registryEndpoint, params["realm"], resp.StatusCode, http.StatusText(resp.StatusCode)) } return nil } func tryV2TokenAuthLogin(authConfig *AuthConfig, params map[string]string, registryEndpoint *Endpoint, client *http.Client, factory *utils.HTTPRequestFactory) error { token, err := getToken(authConfig.Username, authConfig.Password, params, registryEndpoint, client, factory) if err != nil { return err } req, err := factory.NewRequest("GET", registryEndpoint.Path(""), nil) if err != nil { return err } req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) resp, err := client.Do(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { return fmt.Errorf("token auth attempt to %s realm %q failed with status: %d %s", registryEndpoint, params["realm"], resp.StatusCode, http.StatusText(resp.StatusCode)) } return nil } // this method matches an auth configuration to a server address or a url func (config *ConfigFile) ResolveAuthConfig(index *IndexInfo) AuthConfig { configKey := index.GetAuthConfigKey() // First try the happy case if c, found := config.Configs[configKey]; found || index.Official { return c } convertToHostname := func(url string) string { stripped := url if strings.HasPrefix(url, "http://") { stripped = strings.Replace(url, "http://", "", 1) } else if strings.HasPrefix(url, "https://") { stripped = strings.Replace(url, "https://", "", 1) } nameParts := strings.SplitN(stripped, "/", 2) return nameParts[0] } // Maybe they have a legacy config file, we will iterate the keys converting // them to the new format and testing for registry, config := range config.Configs { if configKey == convertToHostname(registry) { return config } } // When all else fails, return an empty auth config return AuthConfig{} } docker-1.6.2/registry/token.go0000644000175000017500000000351012524223634015652 0ustar tianontianonpackage registry import ( "encoding/json" "errors" "fmt" "net/http" "net/url" "strings" "github.com/docker/docker/utils" ) type tokenResponse struct { Token string `json:"token"` } func getToken(username, password string, params map[string]string, registryEndpoint *Endpoint, client *http.Client, factory *utils.HTTPRequestFactory) (token string, err error) { realm, ok := params["realm"] if !ok { return "", errors.New("no realm specified for token auth challenge") } realmURL, err := url.Parse(realm) if err != nil { return "", fmt.Errorf("invalid token auth challenge realm: %s", err) } if realmURL.Scheme == "" { if registryEndpoint.IsSecure { realmURL.Scheme = "https" } else { realmURL.Scheme = "http" } } req, err := factory.NewRequest("GET", realmURL.String(), nil) if err != nil { return "", err } reqParams
:= req.URL.Query() service := params["service"] scope := params["scope"] if service != "" { reqParams.Add("service", service) } for _, scopeField := range strings.Fields(scope) { reqParams.Add("scope", scopeField) } if username != "" { reqParams.Add("account", username) req.SetBasicAuth(username, password) } req.URL.RawQuery = reqParams.Encode() resp, err := client.Do(req) if err != nil { return "", err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { return "", fmt.Errorf("token auth attempt for registry %s: %s request failed with status: %d %s", registryEndpoint, req.URL, resp.StatusCode, http.StatusText(resp.StatusCode)) } decoder := json.NewDecoder(resp.Body) tr := new(tokenResponse) if err = decoder.Decode(tr); err != nil { return "", fmt.Errorf("unable to decode token response: %s", err) } if tr.Token == "" { return "", errors.New("authorization server did not include a token in the response") } return tr.Token, nil } docker-1.6.2/registry/endpoint_test.go0000644000175000017500000000506512524223634017420 0ustar tianontianonpackage registry import ( "net/http" "net/http/httptest" "net/url" "testing" ) func TestEndpointParse(t *testing.T) { testData := []struct { str string expected string }{ {IndexServerAddress(), IndexServerAddress()}, {"http://0.0.0.0:5000/v1/", "http://0.0.0.0:5000/v1/"}, {"http://0.0.0.0:5000/v2/", "http://0.0.0.0:5000/v2/"}, {"http://0.0.0.0:5000", "http://0.0.0.0:5000/v0/"}, {"0.0.0.0:5000", "https://0.0.0.0:5000/v0/"}, } for _, td := range testData { e, err := newEndpoint(td.str, false) if err != nil { t.Errorf("%q: %s", td.str, err) } if e == nil { t.Logf("something's fishy, endpoint for %q is nil", td.str) continue } if e.String() != td.expected { t.Errorf("expected %q, got %q", td.expected, e.String()) } } } // Ensure that a registry endpoint that responds with a 401 only is determined // to be a v1 registry unless it includes a valid v2 API header. func TestValidateEndpointAmbiguousAPIVersion(t *testing.T) { requireBasicAuthHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Add("WWW-Authenticate", `Basic realm="localhost"`) w.WriteHeader(http.StatusUnauthorized) }) requireBasicAuthHandlerV2 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // This mock server supports v2.0, v2.1, v42.0, and v100.0 w.Header().Add("Docker-Distribution-API-Version", "registry/100.0 registry/42.0") w.Header().Add("Docker-Distribution-API-Version", "registry/2.0 registry/2.1") requireBasicAuthHandler.ServeHTTP(w, r) }) // Make a test server which should validate as a v1 server. testServer := httptest.NewServer(requireBasicAuthHandler) defer testServer.Close() testServerURL, err := url.Parse(testServer.URL) if err != nil { t.Fatal(err) } testEndpoint := Endpoint{ URL: testServerURL, Version: APIVersionUnknown, } if err = validateEndpoint(&testEndpoint); err != nil { t.Fatal(err) } if testEndpoint.Version != APIVersion1 { t.Fatalf("expected endpoint to validate to %d, got %d", APIVersion1, testEndpoint.Version) } // Make a test server which should validate as a v2 server. 
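// Unlike the v1 case above, this handler advertises "registry/2.0" in its
// Docker-Distribution-API-Version header, which validateEndpoint should
// recognize as a valid v2 API marker.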
testServer = httptest.NewServer(requireBasicAuthHandlerV2) defer testServer.Close() testServerURL, err = url.Parse(testServer.URL) if err != nil { t.Fatal(err) } testEndpoint.URL = testServerURL testEndpoint.Version = APIVersionUnknown if err = validateEndpoint(&testEndpoint); err != nil { t.Fatal(err) } if testEndpoint.Version != APIVersion2 { t.Fatalf("expected endpoint to validate to %d, got %d", APIVersion2, testEndpoint.Version) } } docker-1.6.2/registry/service.go0000644000175000017500000001365212524223634016202 0ustar tianontianonpackage registry import ( log "github.com/Sirupsen/logrus" "github.com/docker/docker/engine" ) // Service exposes registry capabilities in the standard Engine // interface. Once installed, it extends the engine with the // following calls: // // 'auth': Authenticate against the public registry // 'search': Search for images on the public registry // 'pull': Download images from any registry (TODO) // 'push': Upload images to any registry (TODO) type Service struct { Config *ServiceConfig } // NewService returns a new instance of Service ready to be // installed on an engine. func NewService(options *Options) *Service { return &Service{ Config: NewServiceConfig(options), } } // Install installs registry capabilities to eng. func (s *Service) Install(eng *engine.Engine) error { eng.Register("auth", s.Auth) eng.Register("search", s.Search) eng.Register("resolve_repository", s.ResolveRepository) eng.Register("resolve_index", s.ResolveIndex) eng.Register("registry_config", s.GetRegistryConfig) return nil } // Auth contacts the public registry with the provided credentials, // and returns OK if authentication was successful. // It can be used to verify the validity of a client's credentials. func (s *Service) Auth(job *engine.Job) engine.Status { var ( authConfig = new(AuthConfig) endpoint *Endpoint index *IndexInfo status string err error ) job.GetenvJson("authConfig", authConfig) addr := authConfig.ServerAddress if addr == "" { // Use the official registry address if not specified. addr = IndexServerAddress() } if index, err = ResolveIndexInfo(job, addr); err != nil { return job.Error(err) } if endpoint, err = NewEndpoint(index); err != nil { log.Errorf("unable to get new registry endpoint: %s", err) return job.Error(err) } authConfig.ServerAddress = endpoint.String() if status, err = Login(authConfig, endpoint, HTTPRequestFactory(nil)); err != nil { log.Errorf("unable to login against registry endpoint %s: %s", endpoint, err) return job.Error(err) } log.Infof("successful registry login for endpoint %s: %s", endpoint, status) job.Printf("%s\n", status) return engine.StatusOK } // Search queries the public registry for images matching the specified // search terms, and returns the results. // // Argument syntax: search TERM // // Option environment: // 'authConfig': json-encoded credentials to authenticate against the registry. // The search extends to images only accessible via the credentials. // // 'metaHeaders': extra HTTP headers to include in the request to the registry. // The headers should be passed as a json-encoded dictionary. // // Output: // Results are sent as a collection of structured messages (using engine.Table). // Each result is sent as a separate message. // Results are ordered by number of stars on the public registry.
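// A minimal caller sketch (assuming a Service has already been installed on
// an engine eng):
//
//	job := eng.Job("search", "busybox")
//	results, err := job.Stdout.AddTable()
//	if err == nil {
//		err = job.Run() // each table row is one SearchResult, sorted by star_count
//	}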
func (s *Service) Search(job *engine.Job) engine.Status { if n := len(job.Args); n != 1 { return job.Errorf("Usage: %s TERM", job.Name) } var ( term = job.Args[0] metaHeaders = map[string][]string{} authConfig = &AuthConfig{} ) job.GetenvJson("authConfig", authConfig) job.GetenvJson("metaHeaders", metaHeaders) repoInfo, err := ResolveRepositoryInfo(job, term) if err != nil { return job.Error(err) } // *TODO: Search multiple indexes. endpoint, err := repoInfo.GetEndpoint() if err != nil { return job.Error(err) } r, err := NewSession(authConfig, HTTPRequestFactory(metaHeaders), endpoint, true) if err != nil { return job.Error(err) } results, err := r.SearchRepositories(repoInfo.GetSearchTerm()) if err != nil { return job.Error(err) } outs := engine.NewTable("star_count", 0) for _, result := range results.Results { out := &engine.Env{} out.Import(result) outs.Add(out) } outs.ReverseSort() if _, err := outs.WriteListTo(job.Stdout); err != nil { return job.Error(err) } return engine.StatusOK } // ResolveRepository splits a repository name into its components // and configuration of the associated registry. func (s *Service) ResolveRepository(job *engine.Job) engine.Status { var ( reposName = job.Args[0] ) repoInfo, err := s.Config.NewRepositoryInfo(reposName) if err != nil { return job.Error(err) } out := engine.Env{} err = out.SetJson("repository", repoInfo) if err != nil { return job.Error(err) } out.WriteTo(job.Stdout) return engine.StatusOK } // Convenience wrapper for calling resolve_repository Job from a running job. func ResolveRepositoryInfo(jobContext *engine.Job, reposName string) (*RepositoryInfo, error) { job := jobContext.Eng.Job("resolve_repository", reposName) env, err := job.Stdout.AddEnv() if err != nil { return nil, err } if err := job.Run(); err != nil { return nil, err } info := RepositoryInfo{} if err := env.GetJson("repository", &info); err != nil { return nil, err } return &info, nil } // ResolveIndex takes indexName and returns index info func (s *Service) ResolveIndex(job *engine.Job) engine.Status { var ( indexName = job.Args[0] ) index, err := s.Config.NewIndexInfo(indexName) if err != nil { return job.Error(err) } out := engine.Env{} err = out.SetJson("index", index) if err != nil { return job.Error(err) } out.WriteTo(job.Stdout) return engine.StatusOK } // Convenience wrapper for calling resolve_index Job from a running job. func ResolveIndexInfo(jobContext *engine.Job, indexName string) (*IndexInfo, error) { job := jobContext.Eng.Job("resolve_index", indexName) env, err := job.Stdout.AddEnv() if err != nil { return nil, err } if err := job.Run(); err != nil { return nil, err } info := IndexInfo{} if err := env.GetJson("index", &info); err != nil { return nil, err } return &info, nil } // GetRegistryConfig returns current registry configuration. 
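// The configuration is written to the job's stdout as JSON under the "config"
// key, mirroring the "index" and "repository" payloads of ResolveIndex and
// ResolveRepository above.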
func (s *Service) GetRegistryConfig(job *engine.Job) engine.Status { out := engine.Env{} err := out.SetJson("config", s.Config) if err != nil { return job.Error(err) } out.WriteTo(job.Stdout) return engine.StatusOK } docker-1.6.2/registry/session.go0000644000175000017500000004417212524223634016226 0ustar tianontianonpackage registry import ( "bytes" "crypto/sha256" // this is required for some certificates _ "crypto/sha512" "encoding/hex" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/http/cookiejar" "net/url" "strconv" "strings" "time" log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/httputils" "github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/utils" ) type Session struct { authConfig *AuthConfig reqFactory *utils.HTTPRequestFactory indexEndpoint *Endpoint jar *cookiejar.Jar timeout TimeoutType } func NewSession(authConfig *AuthConfig, factory *utils.HTTPRequestFactory, endpoint *Endpoint, timeout bool) (r *Session, err error) { r = &Session{ authConfig: authConfig, indexEndpoint: endpoint, } if timeout { r.timeout = ReceiveTimeout } r.jar, err = cookiejar.New(nil) if err != nil { return nil, err } // If we're working with a standalone private registry over HTTPS, send Basic Auth headers // alongside our requests. if r.indexEndpoint.VersionString(1) != IndexServerAddress() && r.indexEndpoint.URL.Scheme == "https" { info, err := r.indexEndpoint.Ping() if err != nil { return nil, err } if info.Standalone { log.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", r.indexEndpoint.String()) dec := utils.NewHTTPAuthDecorator(authConfig.Username, authConfig.Password) factory.AddDecorator(dec) } } r.reqFactory = factory return r, nil } func (r *Session) doRequest(req *http.Request) (*http.Response, *http.Client, error) { return doRequest(req, r.jar, r.timeout, r.indexEndpoint.IsSecure) } // Retrieve the history of a given image from the Registry. // Return a list of the parent's json (requested image included) func (r *Session) GetRemoteHistory(imgID, registry string, token []string) ([]string, error) { req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/ancestry", nil) if err != nil { return nil, err } setTokenAuth(req, token) res, _, err := r.doRequest(req) if err != nil { return nil, err } defer res.Body.Close() if res.StatusCode != 200 { if res.StatusCode == 401 { return nil, errLoginRequired } return nil, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) } jsonString, err := ioutil.ReadAll(res.Body) if err != nil { return nil, fmt.Errorf("Error while reading the http response: %s", err) } log.Debugf("Ancestry: %s", jsonString) history := new([]string) if err := json.Unmarshal(jsonString, history); err != nil { return nil, err } return *history, nil } // Check if an image exists in the Registry func (r *Session) LookupRemoteImage(imgID, registry string, token []string) error { req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil) if err != nil { return err } setTokenAuth(req, token) res, _, err := r.doRequest(req) if err != nil { return err } res.Body.Close() if res.StatusCode != 200 { return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) } return nil } // Retrieve an image from the Registry. 
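// It returns the raw image JSON together with the size advertised in the
// X-Docker-Size header, or -1 when the registry omits that header.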
func (r *Session) GetRemoteImageJSON(imgID, registry string, token []string) ([]byte, int, error) { // Get the JSON req, err := r.reqFactory.NewRequest("GET", registry+"images/"+imgID+"/json", nil) if err != nil { return nil, -1, fmt.Errorf("Failed to download json: %s", err) } setTokenAuth(req, token) res, _, err := r.doRequest(req) if err != nil { return nil, -1, fmt.Errorf("Failed to download json: %s", err) } defer res.Body.Close() if res.StatusCode != 200 { return nil, -1, utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) } // if the size header is not present, then set it to '-1' imageSize := -1 if hdr := res.Header.Get("X-Docker-Size"); hdr != "" { imageSize, err = strconv.Atoi(hdr) if err != nil { return nil, -1, err } } jsonString, err := ioutil.ReadAll(res.Body) if err != nil { return nil, -1, fmt.Errorf("Failed to parse downloaded json: %s (%s)", err, jsonString) } return jsonString, imageSize, nil } func (r *Session) GetRemoteImageLayer(imgID, registry string, token []string, imgSize int64) (io.ReadCloser, error) { var ( retries = 5 statusCode = 0 client *http.Client res *http.Response imageURL = fmt.Sprintf("%simages/%s/layer", registry, imgID) ) req, err := r.reqFactory.NewRequest("GET", imageURL, nil) if err != nil { return nil, fmt.Errorf("Error while getting from the server: %s\n", err) } setTokenAuth(req, token) for i := 1; i <= retries; i++ { statusCode = 0 res, client, err = r.doRequest(req) if err != nil { log.Debugf("Error contacting registry: %s", err) if res != nil { if res.Body != nil { res.Body.Close() } statusCode = res.StatusCode } if i == retries { return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", statusCode, imgID) } time.Sleep(time.Duration(i) * 5 * time.Second) continue } break } if res.StatusCode != 200 { res.Body.Close() return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", res.StatusCode, imgID) } if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 { log.Debugf("server supports resume") return httputils.ResumableRequestReaderWithInitialResponse(client, req, 5, imgSize, res), nil } log.Debugf("server doesn't support resume") return res.Body, nil } func (r *Session) GetRemoteTags(registries []string, repository string, token []string) (map[string]string, error) { if strings.Count(repository, "/") == 0 { // This will be removed once the Registry supports auto-resolution on // the "library" namespace repository = "library/" + repository } for _, host := range registries { endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository) req, err := r.reqFactory.NewRequest("GET", endpoint, nil) if err != nil { return nil, err } setTokenAuth(req, token) res, _, err := r.doRequest(req) if err != nil { return nil, err } log.Debugf("Got status code %d from %s", res.StatusCode, endpoint) defer res.Body.Close() if res.StatusCode != 200 && res.StatusCode != 404 { continue } else if res.StatusCode == 404 { return nil, fmt.Errorf("Repository not found") } result := make(map[string]string) if err := json.NewDecoder(res.Body).Decode(&result); err != nil { return nil, err } return result, nil } return nil, fmt.Errorf("Could not reach any registry endpoint") } func buildEndpointsList(headers []string, indexEp string) ([]string, error) { var endpoints []string parsedURL, err := url.Parse(indexEp) if err != nil { return nil, err } var urlScheme = parsedURL.Scheme // The Registry's URL scheme has to match the Index' for _, ep := range headers { epList := strings.Split(ep, ",") for 
_, epListElement := range epList { endpoints = append( endpoints, fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement))) } } return endpoints, nil } func (r *Session) GetRepositoryData(remote string) (*RepositoryData, error) { repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.VersionString(1), remote) log.Debugf("[registry] Calling GET %s", repositoryTarget) req, err := r.reqFactory.NewRequest("GET", repositoryTarget, nil) if err != nil { return nil, err } if r.authConfig != nil && len(r.authConfig.Username) > 0 { req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password) } req.Header.Set("X-Docker-Token", "true") res, _, err := r.doRequest(req) if err != nil { return nil, err } defer res.Body.Close() if res.StatusCode == 401 { return nil, errLoginRequired } // TODO: Right now we're ignoring checksums in the response body. // In the future, we need to use them to check image validity. if res.StatusCode == 404 { return nil, utils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) } else if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { log.Debugf("Error reading response body: %s", err) } return nil, utils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, remote, errBody), res) } var tokens []string if res.Header.Get("X-Docker-Token") != "" { tokens = res.Header["X-Docker-Token"] } var endpoints []string if res.Header.Get("X-Docker-Endpoints") != "" { endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1)) if err != nil { return nil, err } } else { // Assume the endpoint is on the same host endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host)) } remoteChecksums := []*ImgData{} if err := json.NewDecoder(res.Body).Decode(&remoteChecksums); err != nil { return nil, err } // Forge a better object from the retrieved data imgsData := make(map[string]*ImgData) for _, elem := range remoteChecksums { imgsData[elem.ID] = elem } return &RepositoryData{ ImgList: imgsData, Endpoints: endpoints, Tokens: tokens, }, nil } func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string, token []string) error { log.Debugf("[registry] Calling PUT %s", registry+"images/"+imgData.ID+"/checksum") req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgData.ID+"/checksum", nil) if err != nil { return err } setTokenAuth(req, token) req.Header.Set("X-Docker-Checksum", imgData.Checksum) req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload) res, _, err := r.doRequest(req) if err != nil { return fmt.Errorf("Failed to upload metadata: %s", err) } defer res.Body.Close() if len(res.Cookies()) > 0 { r.jar.SetCookies(req.URL, res.Cookies()) } if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err) } var jsonBody map[string]string if err := json.Unmarshal(errBody, &jsonBody); err != nil { errBody = []byte(err.Error()) } else if jsonBody["error"] == "Image already exists" { return ErrAlreadyExists } return fmt.Errorf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody) } return nil } // Push a local image to the registry func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string, token []string) error { log.Debugf("[registry] Calling PUT %s", 
registry+"images/"+imgData.ID+"/json") req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgData.ID+"/json", bytes.NewReader(jsonRaw)) if err != nil { return err } req.Header.Add("Content-type", "application/json") setTokenAuth(req, token) res, _, err := r.doRequest(req) if err != nil { return fmt.Errorf("Failed to upload metadata: %s", err) } defer res.Body.Close() if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") { return utils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res) } if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) } var jsonBody map[string]string if err := json.Unmarshal(errBody, &jsonBody); err != nil { errBody = []byte(err.Error()) } else if jsonBody["error"] == "Image already exists" { return ErrAlreadyExists } return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res) } return nil } func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, token []string, jsonRaw []byte) (checksum string, checksumPayload string, err error) { log.Debugf("[registry] Calling PUT %s", registry+"images/"+imgID+"/layer") tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0) if err != nil { return "", "", err } h := sha256.New() h.Write(jsonRaw) h.Write([]byte{'\n'}) checksumLayer := io.TeeReader(tarsumLayer, h) req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgID+"/layer", checksumLayer) if err != nil { return "", "", err } req.Header.Add("Content-Type", "application/octet-stream") req.ContentLength = -1 req.TransferEncoding = []string{"chunked"} setTokenAuth(req, token) res, _, err := r.doRequest(req) if err != nil { return "", "", fmt.Errorf("Failed to upload layer: %s", err) } if rc, ok := layer.(io.Closer); ok { if err := rc.Close(); err != nil { return "", "", err } } defer res.Body.Close() if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { return "", "", utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) } return "", "", utils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res) } checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil)) return tarsumLayer.Sum(jsonRaw), checksumPayload, nil } // push a tag on the registry. 
// Remote has the format '<user>/<repo>' func (r *Session) PushRegistryTag(remote, revision, tag, registry string, token []string) error { // "jsonify" the string revision = "\"" + revision + "\"" path := fmt.Sprintf("repositories/%s/tags/%s", remote, tag) req, err := r.reqFactory.NewRequest("PUT", registry+path, strings.NewReader(revision)) if err != nil { return err } req.Header.Add("Content-type", "application/json") setTokenAuth(req, token) req.ContentLength = int64(len(revision)) res, _, err := r.doRequest(req) if err != nil { return err } res.Body.Close() if res.StatusCode != 200 && res.StatusCode != 201 { return utils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote), res) } return nil } func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) { cleanImgList := []*ImgData{} if validate { for _, elem := range imgList { if elem.Checksum != "" { cleanImgList = append(cleanImgList, elem) } } } else { cleanImgList = imgList } imgListJSON, err := json.Marshal(cleanImgList) if err != nil { return nil, err } var suffix string if validate { suffix = "images" } u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.VersionString(1), remote, suffix) log.Debugf("[registry] PUT %s", u) log.Debugf("Image list pushed to index:\n%s", imgListJSON) headers := map[string][]string{ "Content-type": {"application/json"}, "X-Docker-Token": {"true"}, } if validate { headers["X-Docker-Endpoints"] = regs } // Redirect if necessary var res *http.Response for { if res, err = r.putImageRequest(u, headers, imgListJSON); err != nil { return nil, err } if !shouldRedirect(res) { break } res.Body.Close() u = res.Header.Get("Location") log.Debugf("Redirected to %s", u) } defer res.Body.Close() if res.StatusCode == 401 { return nil, errLoginRequired } var tokens, endpoints []string if !validate { if res.StatusCode != 200 && res.StatusCode != 201 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { log.Debugf("Error reading response body: %s", err) } return nil, utils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, remote, errBody), res) } if res.Header.Get("X-Docker-Token") != "" { tokens = res.Header["X-Docker-Token"] log.Debugf("Auth token: %v", tokens) } else { return nil, fmt.Errorf("Index response didn't contain an access token") } if res.Header.Get("X-Docker-Endpoints") != "" { endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1)) if err != nil { return nil, err } } else { return nil, fmt.Errorf("Index response didn't contain any endpoints") } } if validate { if res.StatusCode != 204 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { log.Debugf("Error reading response body: %s", err) } return nil, utils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, remote, errBody), res) } } return &RepositoryData{ Tokens: tokens, Endpoints: endpoints, }, nil } func (r *Session) putImageRequest(u string, headers map[string][]string, body []byte) (*http.Response, error) { req, err := r.reqFactory.NewRequest("PUT", u, bytes.NewReader(body)) if err != nil { return nil, err } req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password) req.ContentLength = int64(len(body)) for k, v := range headers { req.Header[k] = v } response, _, err := r.doRequest(req) if err != nil { return nil, err } return response, nil } func 
shouldRedirect(response *http.Response) bool { return response.StatusCode >= 300 && response.StatusCode < 400 } func (r *Session) SearchRepositories(term string) (*SearchResults, error) { log.Debugf("Index server: %s", r.indexEndpoint) u := r.indexEndpoint.VersionString(1) + "search?q=" + url.QueryEscape(term) req, err := r.reqFactory.NewRequest("GET", u, nil) if err != nil { return nil, err } if r.authConfig != nil && len(r.authConfig.Username) > 0 { req.SetBasicAuth(r.authConfig.Username, r.authConfig.Password) } req.Header.Set("X-Docker-Token", "true") res, _, err := r.doRequest(req) if err != nil { return nil, err } defer res.Body.Close() if res.StatusCode != 200 { return nil, utils.NewHTTPRequestError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res) } result := new(SearchResults) err = json.NewDecoder(res.Body).Decode(result) return result, err } func (r *Session) GetAuthConfig(withPasswd bool) *AuthConfig { password := "" if withPasswd { password = r.authConfig.Password } return &AuthConfig{ Username: r.authConfig.Username, Password: password, Email: r.authConfig.Email, } } func setTokenAuth(req *http.Request, token []string) { if req.Header.Get("Authorization") == "" { // Don't override req.Header.Set("Authorization", "Token "+strings.Join(token, ",")) } } docker-1.6.2/registry/auth_test.go0000644000175000017500000001016212524223634016533 0ustar tianontianonpackage registry import ( "io/ioutil" "os" "testing" ) func TestEncodeAuth(t *testing.T) { newAuthConfig := &AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"} authStr := encodeAuth(newAuthConfig) decAuthConfig := &AuthConfig{} var err error decAuthConfig.Username, decAuthConfig.Password, err = decodeAuth(authStr) if err != nil { t.Fatal(err) } if newAuthConfig.Username != decAuthConfig.Username { t.Fatal("Encode Username doesn't match decoded Username") } if newAuthConfig.Password != decAuthConfig.Password { t.Fatal("Encode Password doesn't match decoded Password") } if authStr != "a2VuOnRlc3Q=" { t.Fatal("AuthString encoding isn't correct.") } } func setupTempConfigFile() (*ConfigFile, error) { root, err := ioutil.TempDir("", "docker-test-auth") if err != nil { return nil, err } configFile := &ConfigFile{ rootPath: root, Configs: make(map[string]AuthConfig), } for _, registry := range []string{"testIndex", IndexServerAddress()} { configFile.Configs[registry] = AuthConfig{ Username: "docker-user", Password: "docker-pass", Email: "docker@docker.io", } } return configFile, nil } func TestSameAuthDataPostSave(t *testing.T) { configFile, err := setupTempConfigFile() if err != nil { t.Fatal(err) } defer os.RemoveAll(configFile.rootPath) err = SaveConfig(configFile) if err != nil { t.Fatal(err) } authConfig := configFile.Configs["testIndex"] if authConfig.Username != "docker-user" { t.Fail() } if authConfig.Password != "docker-pass" { t.Fail() } if authConfig.Email != "docker@docker.io" { t.Fail() } if authConfig.Auth != "" { t.Fail() } } func TestResolveAuthConfigIndexServer(t *testing.T) { configFile, err := setupTempConfigFile() if err != nil { t.Fatal(err) } defer os.RemoveAll(configFile.rootPath) indexConfig := configFile.Configs[IndexServerAddress()] officialIndex := &IndexInfo{ Official: true, } privateIndex := &IndexInfo{ Official: false, } resolved := configFile.ResolveAuthConfig(officialIndex) assertEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to return IndexServerAddress()") resolved = configFile.ResolveAuthConfig(privateIndex) assertNotEqual(t, resolved, indexConfig, 
"Expected ResolveAuthConfig to not return IndexServerAddress()") } func TestResolveAuthConfigFullURL(t *testing.T) { configFile, err := setupTempConfigFile() if err != nil { t.Fatal(err) } defer os.RemoveAll(configFile.rootPath) registryAuth := AuthConfig{ Username: "foo-user", Password: "foo-pass", Email: "foo@example.com", } localAuth := AuthConfig{ Username: "bar-user", Password: "bar-pass", Email: "bar@example.com", } officialAuth := AuthConfig{ Username: "baz-user", Password: "baz-pass", Email: "baz@example.com", } configFile.Configs[IndexServerAddress()] = officialAuth expectedAuths := map[string]AuthConfig{ "registry.example.com": registryAuth, "localhost:8000": localAuth, "registry.com": localAuth, } validRegistries := map[string][]string{ "registry.example.com": { "https://registry.example.com/v1/", "http://registry.example.com/v1/", "registry.example.com", "registry.example.com/v1/", }, "localhost:8000": { "https://localhost:8000/v1/", "http://localhost:8000/v1/", "localhost:8000", "localhost:8000/v1/", }, "registry.com": { "https://registry.com/v1/", "http://registry.com/v1/", "registry.com", "registry.com/v1/", }, } for configKey, registries := range validRegistries { configured, ok := expectedAuths[configKey] if !ok || configured.Email == "" { t.Fail() } index := &IndexInfo{ Name: configKey, } for _, registry := range registries { configFile.Configs[registry] = configured resolved := configFile.ResolveAuthConfig(index) if resolved.Email != configured.Email { t.Errorf("%s -> %q != %q\n", registry, resolved.Email, configured.Email) } delete(configFile.Configs, registry) resolved = configFile.ResolveAuthConfig(index) if resolved.Email == configured.Email { t.Errorf("%s -> %q == %q\n", registry, resolved.Email, configured.Email) } } } } docker-1.6.2/registry/session_v2.go0000644000175000017500000002615712524223634016640 0ustar tianontianonpackage registry import ( "bytes" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "strconv" log "github.com/Sirupsen/logrus" "github.com/docker/distribution/digest" "github.com/docker/docker/registry/v2" "github.com/docker/docker/utils" ) const DockerDigestHeader = "Docker-Content-Digest" func getV2Builder(e *Endpoint) *v2.URLBuilder { if e.URLBuilder == nil { e.URLBuilder = v2.NewURLBuilder(e.URL) } return e.URLBuilder } func (r *Session) V2RegistryEndpoint(index *IndexInfo) (ep *Endpoint, err error) { // TODO check if should use Mirror if index.Official { ep, err = newEndpoint(REGISTRYSERVER, true) if err != nil { return } err = validateEndpoint(ep) if err != nil { return } } else if r.indexEndpoint.String() == index.GetAuthConfigKey() { ep = r.indexEndpoint } else { ep, err = NewEndpoint(index) if err != nil { return } } ep.URLBuilder = v2.NewURLBuilder(ep.URL) return } // GetV2Authorization gets the authorization needed to the given image // If readonly access is requested, then only the authorization may // only be used for Get operations. 
func (r *Session) GetV2Authorization(ep *Endpoint, imageName string, readOnly bool) (auth *RequestAuthorization, err error) { scopes := []string{"pull"} if !readOnly { scopes = append(scopes, "push") } log.Debugf("Getting authorization for %s %s", imageName, scopes) return NewRequestAuthorization(r.GetAuthConfig(true), ep, "repository", imageName, scopes), nil } // // 1) Check if TarSum of each layer exists /v2/ // 1.a) if 200, continue // 1.b) if 300, then push the // 1.c) if anything else, err // 2) PUT the created/signed manifest // func (r *Session) GetV2ImageManifest(ep *Endpoint, imageName, tagName string, auth *RequestAuthorization) ([]byte, string, error) { routeURL, err := getV2Builder(ep).BuildManifestURL(imageName, tagName) if err != nil { return nil, "", err } method := "GET" log.Debugf("[registry] Calling %q %s", method, routeURL) req, err := r.reqFactory.NewRequest(method, routeURL, nil) if err != nil { return nil, "", err } if err := auth.Authorize(req); err != nil { return nil, "", err } res, _, err := r.doRequest(req) if err != nil { return nil, "", err } defer res.Body.Close() if res.StatusCode != 200 { if res.StatusCode == 401 { return nil, "", errLoginRequired } else if res.StatusCode == 404 { return nil, "", ErrDoesNotExist } return nil, "", utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch for %s:%s", res.StatusCode, imageName, tagName), res) } manifestBytes, err := ioutil.ReadAll(res.Body) if err != nil { return nil, "", fmt.Errorf("Error while reading the http response: %s", err) } return manifestBytes, res.Header.Get(DockerDigestHeader), nil } // - Succeeded to head image blob (already exists) // - Failed with no error (continue to Push the Blob) // - Failed with error func (r *Session) HeadV2ImageBlob(ep *Endpoint, imageName, sumType, sum string, auth *RequestAuthorization) (bool, error) { routeURL, err := getV2Builder(ep).BuildBlobURL(imageName, sumType+":"+sum) if err != nil { return false, err } method := "HEAD" log.Debugf("[registry] Calling %q %s", method, routeURL) req, err := r.reqFactory.NewRequest(method, routeURL, nil) if err != nil { return false, err } if err := auth.Authorize(req); err != nil { return false, err } res, _, err := r.doRequest(req) if err != nil { return false, err } res.Body.Close() // close early, since we're not needing a body on this call .. yet? 
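// Editor's summary of the cases below: 2xx/3xx means the blob already exists (no push needed), 401 means authentication is required, 404 means the blob is missing and should be pushed; anything else is surfaced as a server error.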
switch { case res.StatusCode >= 200 && res.StatusCode < 400: // return something indicating no push needed return true, nil case res.StatusCode == 401: return false, errLoginRequired case res.StatusCode == 404: // return something indicating blob push needed return false, nil } return false, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying head request for %s - %s:%s", res.StatusCode, imageName, sumType, sum), res) } func (r *Session) GetV2ImageBlob(ep *Endpoint, imageName, sumType, sum string, blobWrtr io.Writer, auth *RequestAuthorization) error { routeURL, err := getV2Builder(ep).BuildBlobURL(imageName, sumType+":"+sum) if err != nil { return err } method := "GET" log.Debugf("[registry] Calling %q %s", method, routeURL) req, err := r.reqFactory.NewRequest(method, routeURL, nil) if err != nil { return err } if err := auth.Authorize(req); err != nil { return err } res, _, err := r.doRequest(req) if err != nil { return err } defer res.Body.Close() if res.StatusCode != 200 { if res.StatusCode == 401 { return errLoginRequired } return utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to pull %s blob", res.StatusCode, imageName), res) } _, err = io.Copy(blobWrtr, res.Body) return err } func (r *Session) GetV2ImageBlobReader(ep *Endpoint, imageName, sumType, sum string, auth *RequestAuthorization) (io.ReadCloser, int64, error) { routeURL, err := getV2Builder(ep).BuildBlobURL(imageName, sumType+":"+sum) if err != nil { return nil, 0, err } method := "GET" log.Debugf("[registry] Calling %q %s", method, routeURL) req, err := r.reqFactory.NewRequest(method, routeURL, nil) if err != nil { return nil, 0, err } if err := auth.Authorize(req); err != nil { return nil, 0, err } res, _, err := r.doRequest(req) if err != nil { return nil, 0, err } if res.StatusCode != 200 { if res.StatusCode == 401 { return nil, 0, errLoginRequired } return nil, 0, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to pull %s blob - %s:%s", res.StatusCode, imageName, sumType, sum), res) } lenStr := res.Header.Get("Content-Length") l, err := strconv.ParseInt(lenStr, 10, 64) if err != nil { return nil, 0, err } return res.Body, l, err } // Push the image to the server for storage. // 'layer' is an uncompressed reader of the blob to be pushed. // The server will generate its own checksum calculation. func (r *Session) PutV2ImageBlob(ep *Endpoint, imageName, sumType, sumStr string, blobRdr io.Reader, auth *RequestAuthorization) error { location, err := r.initiateBlobUpload(ep, imageName, auth) if err != nil { return err } method := "PUT" log.Debugf("[registry] Calling %q %s", method, location) req, err := r.reqFactory.NewRequest(method, location, ioutil.NopCloser(blobRdr)) if err != nil { return err } queryParams := req.URL.Query() queryParams.Add("digest", sumType+":"+sumStr) req.URL.RawQuery = queryParams.Encode() if err := auth.Authorize(req); err != nil { return err } res, _, err := r.doRequest(req) if err != nil { return err } defer res.Body.Close() if res.StatusCode != 201 { if res.StatusCode == 401 { return errLoginRequired } errBody, err := ioutil.ReadAll(res.Body) if err != nil { return err } log.Debugf("Unexpected response from server: %q %#v", errBody, res.Header) return utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to push %s blob - %s:%s", res.StatusCode, imageName, sumType, sumStr), res) } return nil } // initiateBlobUpload gets the blob upload location for the given image name. 
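// Editor's note on the two-step upload this starts: the POST below is expected to return 202 Accepted with a Location header, and PutV2ImageBlob then PUTs the blob bytes to that location with the expected digest as a query parameter.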
func (r *Session) initiateBlobUpload(ep *Endpoint, imageName string, auth *RequestAuthorization) (location string, err error) { routeURL, err := getV2Builder(ep).BuildBlobUploadURL(imageName) if err != nil { return "", err } log.Debugf("[registry] Calling %q %s", "POST", routeURL) req, err := r.reqFactory.NewRequest("POST", routeURL, nil) if err != nil { return "", err } if err := auth.Authorize(req); err != nil { return "", err } res, _, err := r.doRequest(req) if err != nil { return "", err } if res.StatusCode != http.StatusAccepted { if res.StatusCode == http.StatusUnauthorized { return "", errLoginRequired } if res.StatusCode == http.StatusNotFound { return "", ErrDoesNotExist } errBody, err := ioutil.ReadAll(res.Body) if err != nil { return "", err } log.Debugf("Unexpected response from server: %q %#v", errBody, res.Header) return "", utils.NewHTTPRequestError(fmt.Sprintf("Server error: unexpected %d response status trying to initiate upload of %s", res.StatusCode, imageName), res) } if location = res.Header.Get("Location"); location == "" { return "", fmt.Errorf("registry did not return a Location header for resumable blob upload for image %s", imageName) } return } // Finally Push the (signed) manifest of the blobs we've just pushed func (r *Session) PutV2ImageManifest(ep *Endpoint, imageName, tagName string, signedManifest, rawManifest []byte, auth *RequestAuthorization) (digest.Digest, error) { routeURL, err := getV2Builder(ep).BuildManifestURL(imageName, tagName) if err != nil { return "", err } method := "PUT" log.Debugf("[registry] Calling %q %s", method, routeURL) req, err := r.reqFactory.NewRequest(method, routeURL, bytes.NewReader(signedManifest)) if err != nil { return "", err } if err := auth.Authorize(req); err != nil { return "", err } res, _, err := r.doRequest(req) if err != nil { return "", err } defer res.Body.Close() // All 2xx and 3xx responses can be accepted for a put. 
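// Editor's note: after this status check, the Docker-Content-Digest response header is parsed and verified against a digest computed from rawManifest, so a registry that stored different bytes than were sent is caught client-side.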
if res.StatusCode >= 400 { if res.StatusCode == 401 { return "", errLoginRequired } errBody, err := ioutil.ReadAll(res.Body) if err != nil { return "", err } log.Debugf("Unexpected response from server: %q %#v", errBody, res.Header) return "", utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to push %s:%s manifest", res.StatusCode, imageName, tagName), res) } hdrDigest, err := digest.ParseDigest(res.Header.Get(DockerDigestHeader)) if err != nil { return "", fmt.Errorf("invalid manifest digest from registry: %s", err) } dgstVerifier, err := digest.NewDigestVerifier(hdrDigest) if err != nil { return "", fmt.Errorf("invalid manifest digest from registry: %s", err) } dgstVerifier.Write(rawManifest) if !dgstVerifier.Verified() { computedDigest, _ := digest.FromBytes(rawManifest) return "", fmt.Errorf("unable to verify manifest digest: registry has %q, computed %q", hdrDigest, computedDigest) } return hdrDigest, nil } type remoteTags struct { name string tags []string } // Given a repository name, returns a json array of string tags func (r *Session) GetV2RemoteTags(ep *Endpoint, imageName string, auth *RequestAuthorization) ([]string, error) { routeURL, err := getV2Builder(ep).BuildTagsURL(imageName) if err != nil { return nil, err } method := "GET" log.Debugf("[registry] Calling %q %s", method, routeURL) req, err := r.reqFactory.NewRequest(method, routeURL, nil) if err != nil { return nil, err } if err := auth.Authorize(req); err != nil { return nil, err } res, _, err := r.doRequest(req) if err != nil { return nil, err } defer res.Body.Close() if res.StatusCode != 200 { if res.StatusCode == 401 { return nil, errLoginRequired } else if res.StatusCode == 404 { return nil, ErrDoesNotExist } return nil, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch for %s", res.StatusCode, imageName), res) } decoder := json.NewDecoder(res.Body) var remote remoteTags err = decoder.Decode(&remote) if err != nil { return nil, fmt.Errorf("Error while decoding the http response: %s", err) } return remote.tags, nil } docker-1.6.2/registry/authchallenge.go0000644000175000017500000000644112524223634017344 0ustar tianontianonpackage registry import ( "net/http" "strings" ) // Octet types from RFC 2616. type octetType byte // AuthorizationChallenge carries information // from a WWW-Authenticate response header. type AuthorizationChallenge struct { Scheme string Parameters map[string]string } var octetTypes [256]octetType const ( isToken octetType = 1 << iota isSpace ) func init() { // OCTET = <any 8-bit sequence of data> // CHAR = <any US-ASCII character (octets 0 - 127)> // CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)> // CR = <US-ASCII CR, carriage return (13)> // LF = <US-ASCII LF, linefeed (10)> // SP = <US-ASCII SP, space (32)> // HT = <US-ASCII HT, horizontal-tab (9)> // <"> = <US-ASCII double-quote mark (34)> // CRLF = CR LF // LWS = [CRLF] 1*( SP | HT ) // TEXT = <any OCTET except CTLs, but including LWS> // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> // | "/" | "[" | "]" | "?" 
| "=" | "{" | "}" | SP | HT // token = 1* // qdtext = > for c := 0; c < 256; c++ { var t octetType isCtl := c <= 31 || c == 127 isChar := 0 <= c && c <= 127 isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { t |= isSpace } if isChar && !isCtl && !isSeparator { t |= isToken } octetTypes[c] = t } } func parseAuthHeader(header http.Header) []*AuthorizationChallenge { var challenges []*AuthorizationChallenge for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { v, p := parseValueAndParams(h) if v != "" { challenges = append(challenges, &AuthorizationChallenge{Scheme: v, Parameters: p}) } } return challenges } func parseValueAndParams(header string) (value string, params map[string]string) { params = make(map[string]string) value, s := expectToken(header) if value == "" { return } value = strings.ToLower(value) s = "," + skipSpace(s) for strings.HasPrefix(s, ",") { var pkey string pkey, s = expectToken(skipSpace(s[1:])) if pkey == "" { return } if !strings.HasPrefix(s, "=") { return } var pvalue string pvalue, s = expectTokenOrQuoted(s[1:]) if pvalue == "" { return } pkey = strings.ToLower(pkey) params[pkey] = pvalue s = skipSpace(s) } return } func skipSpace(s string) (rest string) { i := 0 for ; i < len(s); i++ { if octetTypes[s[i]]&isSpace == 0 { break } } return s[i:] } func expectToken(s string) (token, rest string) { i := 0 for ; i < len(s); i++ { if octetTypes[s[i]]&isToken == 0 { break } } return s[:i], s[i:] } func expectTokenOrQuoted(s string) (value string, rest string) { if !strings.HasPrefix(s, "\"") { return expectToken(s) } s = s[1:] for i := 0; i < len(s); i++ { switch s[i] { case '"': return s[:i], s[i+1:] case '\\': p := make([]byte, len(s)-1) j := copy(p, s[:i]) escape := true for i = i + i; i < len(s); i++ { b := s[i] switch { case escape: escape = false p[j] = b j++ case b == '\\': escape = true case b == '"': return string(p[:j]), s[i+1:] default: p[j] = b j++ } } return "", "" } } return "", "" } docker-1.6.2/Makefile0000644000175000017500000000642612524223634014004 0ustar tianontianon.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli test-docker-py validate # env vars passed through directly to Docker's build scripts # to allow things like `make DOCKER_CLIENTONLY=1 binary` easily # `docs/sources/contributing/devenvironment.md ` and `project/PACKAGERS.md` have some limited documentation of some of these DOCKER_ENVS := \ -e BUILDFLAGS \ -e DOCKER_CLIENTONLY \ -e DOCKER_EXECDRIVER \ -e DOCKER_GRAPHDRIVER \ -e TESTDIRS \ -e TESTFLAGS \ -e TIMEOUT # note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds # to allow `make BIND_DIR=. 
shell` or `make BIND_DIR= test` # (default to no bind mount if DOCKER_HOST is set) # note: BINDDIR is supported for backwards-compatibility here BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles)) DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)") # to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs) DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) # to allow `make DOCSPORT=9000 docs` DOCSPORT := 8000 GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH)) DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH)) DOCKER_RUN_DOCKER := docker run --rm -it --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) "$(DOCKER_IMAGE)" DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE # for some docs workarounds (see below in "docs-build" target) GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null) default: binary all: build $(DOCKER_RUN_DOCKER) hack/make.sh binary: build $(DOCKER_RUN_DOCKER) hack/make.sh binary cross: build $(DOCKER_RUN_DOCKER) hack/make.sh binary cross docs: docs-build $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" mkdocs serve docs-shell: docs-build $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash docs-release: docs-build $(DOCKER_RUN_DOCS) -e OPTIONS -e BUILD_ROOT -e DISTRIBUTION_ID \ -v $(CURDIR)/docs/awsconfig:/docs/awsconfig \ "$(DOCKER_DOCS_IMAGE)" ./release.sh docs-test: docs-build $(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)" ./test.sh test: build $(DOCKER_RUN_DOCKER) hack/make.sh binary cross test-unit test-integration test-integration-cli test-docker-py test-unit: build $(DOCKER_RUN_DOCKER) hack/make.sh test-unit test-integration: build $(DOCKER_RUN_DOCKER) hack/make.sh test-integration test-integration-cli: build $(DOCKER_RUN_DOCKER) hack/make.sh binary test-integration-cli test-docker-py: build $(DOCKER_RUN_DOCKER) hack/make.sh binary test-docker-py validate: build $(DOCKER_RUN_DOCKER) hack/make.sh validate-gofmt validate-dco validate-toml shell: build $(DOCKER_RUN_DOCKER) bash build: bundles docker build -t "$(DOCKER_IMAGE)" . docs-build: cp ./VERSION docs/VERSION echo "$(GIT_BRANCH)" > docs/GIT_BRANCH # echo "$(AWS_S3_BUCKET)" > docs/AWS_S3_BUCKET echo "$(GITCOMMIT)" > docs/GITCOMMIT docker pull docs/base docker build -t "$(DOCKER_DOCS_IMAGE)" docs bundles: mkdir bundles docker-1.6.2/dockerinit/0000755000175000017500000000000012524223634014467 5ustar tianontianondocker-1.6.2/dockerinit/dockerinit.go0000644000175000017500000000034412524223634017152 0ustar tianontianonpackage main import ( _ "github.com/docker/docker/daemon/execdriver/lxc" _ "github.com/docker/docker/daemon/execdriver/native" "github.com/docker/docker/pkg/reexec" ) func main() { // Running in init mode reexec.Init() } docker-1.6.2/VERSION0000644000175000017500000000000612524223634013400 0ustar tianontianon1.6.2 docker-1.6.2/docker/0000755000175000017500000000000012524223634013603 5ustar tianontianondocker-1.6.2/docker/README.md0000644000175000017500000000020112524223634015053 0ustar tianontianondocker.go contains Docker's main function. This file provides first line CLI argument parsing and environment variable setting. 
docker-1.6.2/docker/client.go0000644000175000017500000000035312524223634015411 0ustar tianontianon// +build !daemon package main import ( "log" // see gh#8745, client needs to use go log pkg ) const CanDaemon = false func mainDaemon() { log.Fatal("This is a client-only binary - running the Docker daemon is not supported.") } docker-1.6.2/docker/docker.go0000644000175000017500000000700112524223634015377 0ustar tianontianonpackage main import ( "crypto/tls" "crypto/x509" "fmt" "io/ioutil" "os" "strings" log "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/api/client" "github.com/docker/docker/autogen/dockerversion" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/reexec" "github.com/docker/docker/pkg/term" "github.com/docker/docker/utils" ) const ( defaultTrustKeyFile = "key.json" defaultCaFile = "ca.pem" defaultKeyFile = "key.pem" defaultCertFile = "cert.pem" ) func main() { if reexec.Init() { return } // Set terminal emulation based on platform as required. stdin, stdout, stderr := term.StdStreams() initLogging(stderr) flag.Parse() // FIXME: validate daemon flags here if *flVersion { showVersion() return } if *flLogLevel != "" { lvl, err := log.ParseLevel(*flLogLevel) if err != nil { log.Fatalf("Unable to parse logging level: %s", *flLogLevel) } setLogLevel(lvl) } else { setLogLevel(log.InfoLevel) } // -D, --debug, -l/--log-level=debug processing // When/if -D is removed this block can be deleted if *flDebug { os.Setenv("DEBUG", "1") setLogLevel(log.DebugLevel) } if len(flHosts) == 0 { defaultHost := os.Getenv("DOCKER_HOST") if defaultHost == "" || *flDaemon { // If we do not have a host, default to unix socket defaultHost = fmt.Sprintf("unix://%s", api.DEFAULTUNIXSOCKET) } defaultHost, err := api.ValidateHost(defaultHost) if err != nil { log.Fatal(err) } flHosts = append(flHosts, defaultHost) } setDefaultConfFlag(flTrustKey, defaultTrustKeyFile) if *flDaemon { if *flHelp { flag.Usage() return } mainDaemon() return } if len(flHosts) > 1 { log.Fatal("Please specify only one -H") } protoAddrParts := strings.SplitN(flHosts[0], "://", 2) var ( cli *client.DockerCli tlsConfig tls.Config ) tlsConfig.InsecureSkipVerify = true // Regardless of whether the user sets it to true or false, if they // specify --tlsverify at all then we need to turn on tls if flag.IsSet("-tlsverify") { *flTls = true } // If we should verify the server, we need to load a trusted ca if *flTlsVerify { certPool := x509.NewCertPool() file, err := ioutil.ReadFile(*flCa) if err != nil { log.Fatalf("Couldn't read ca cert %s: %s", *flCa, err) } certPool.AppendCertsFromPEM(file) tlsConfig.RootCAs = certPool tlsConfig.InsecureSkipVerify = false } // If tls is enabled, try to load and send client certificates if *flTls || *flTlsVerify { _, errCert := os.Stat(*flCert) _, errKey := os.Stat(*flKey) if errCert == nil && errKey == nil { *flTls = true cert, err := tls.LoadX509KeyPair(*flCert, *flKey) if err != nil { log.Fatalf("Couldn't load X509 key pair: %q. 
Make sure the key is not encrypted", err) } tlsConfig.Certificates = []tls.Certificate{cert} } // Avoid fallback to SSL protocols < TLS1.0 tlsConfig.MinVersion = tls.VersionTLS10 } if *flTls || *flTlsVerify { cli = client.NewDockerCli(stdin, stdout, stderr, *flTrustKey, protoAddrParts[0], protoAddrParts[1], &tlsConfig) } else { cli = client.NewDockerCli(stdin, stdout, stderr, *flTrustKey, protoAddrParts[0], protoAddrParts[1], nil) } if err := cli.Cmd(flag.Args()...); err != nil { if sterr, ok := err.(*utils.StatusError); ok { if sterr.Status != "" { log.Println(sterr.Status) } os.Exit(sterr.StatusCode) } log.Fatal(err) } } func showVersion() { fmt.Printf("Docker version %s, build %s\n", dockerversion.VERSION, dockerversion.GITCOMMIT) } docker-1.6.2/docker/log.go0000644000175000017500000000027012524223634014712 0ustar tianontianonpackage main import ( log "github.com/Sirupsen/logrus" "io" ) func setLogLevel(lvl log.Level) { log.SetLevel(lvl) } func initLogging(stderr io.Writer) { log.SetOutput(stderr) } docker-1.6.2/docker/flags.go0000644000175000017500000001143512524223634015232 0ustar tianontianonpackage main import ( "fmt" "os" "path/filepath" "runtime" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/homedir" flag "github.com/docker/docker/pkg/mflag" ) var ( dockerCertPath = os.Getenv("DOCKER_CERT_PATH") dockerTlsVerify = os.Getenv("DOCKER_TLS_VERIFY") != "" ) func init() { if dockerCertPath == "" { dockerCertPath = filepath.Join(homedir.Get(), ".docker") } } func getDaemonConfDir() string { // TODO: update for Windows daemon if runtime.GOOS == "windows" { return filepath.Join(homedir.Get(), ".docker") } return "/etc/docker" } var ( flVersion = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit") flDaemon = flag.Bool([]string{"d", "-daemon"}, false, "Enable daemon mode") flDebug = flag.Bool([]string{"D", "-debug"}, false, "Enable debug mode") flLogLevel = flag.String([]string{"l", "-log-level"}, "info", "Set the logging level") flTls = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by --tlsverify") flHelp = flag.Bool([]string{"h", "-help"}, false, "Print usage") flTlsVerify = flag.Bool([]string{"-tlsverify"}, dockerTlsVerify, "Use TLS and verify the remote") // these are initialized in init() below since their default values depend on dockerCertPath which isn't fully initialized until init() runs flTrustKey *string flCa *string flCert *string flKey *string flHosts []string ) func setDefaultConfFlag(flag *string, def string) { if *flag == "" { if *flDaemon { *flag = filepath.Join(getDaemonConfDir(), def) } else { *flag = filepath.Join(homedir.Get(), ".docker", def) } } } func init() { var placeholderTrustKey string // TODO use flag flag.String([]string{"i", "-identity"}, "", "Path to libtrust key file") flTrustKey = &placeholderTrustKey flCa = flag.String([]string{"-tlscacert"}, filepath.Join(dockerCertPath, defaultCaFile), "Trust certs signed only by this CA") flCert = flag.String([]string{"-tlscert"}, filepath.Join(dockerCertPath, defaultCertFile), "Path to TLS certificate file") flKey = flag.String([]string{"-tlskey"}, filepath.Join(dockerCertPath, defaultKeyFile), "Path to TLS key file") opts.HostListVar(&flHosts, []string{"H", "-host"}, "Daemon socket(s) to connect to") flag.Usage = func() { fmt.Fprint(os.Stdout, "Usage: docker [OPTIONS] COMMAND [arg...]\n\nA self-sufficient runtime for linux containers.\n\nOptions:\n") flag.CommandLine.SetOutput(os.Stdout) flag.PrintDefaults() help := "\nCommands:\n" for _, command := range [][]string{ 
{"attach", "Attach to a running container"}, {"build", "Build an image from a Dockerfile"}, {"commit", "Create a new image from a container's changes"}, {"cp", "Copy files/folders from a container's filesystem to the host path"}, {"create", "Create a new container"}, {"diff", "Inspect changes on a container's filesystem"}, {"events", "Get real time events from the server"}, {"exec", "Run a command in a running container"}, {"export", "Stream the contents of a container as a tar archive"}, {"history", "Show the history of an image"}, {"images", "List images"}, {"import", "Create a new filesystem image from the contents of a tarball"}, {"info", "Display system-wide information"}, {"inspect", "Return low-level information on a container or image"}, {"kill", "Kill a running container"}, {"load", "Load an image from a tar archive"}, {"login", "Register or log in to a Docker registry server"}, {"logout", "Log out from a Docker registry server"}, {"logs", "Fetch the logs of a container"}, {"port", "Lookup the public-facing port that is NAT-ed to PRIVATE_PORT"}, {"pause", "Pause all processes within a container"}, {"ps", "List containers"}, {"pull", "Pull an image or a repository from a Docker registry server"}, {"push", "Push an image or a repository to a Docker registry server"}, {"rename", "Rename an existing container"}, {"restart", "Restart a running container"}, {"rm", "Remove one or more containers"}, {"rmi", "Remove one or more images"}, {"run", "Run a command in a new container"}, {"save", "Save an image to a tar archive"}, {"search", "Search for an image on the Docker Hub"}, {"start", "Start a stopped container"}, {"stats", "Display a stream of a containers' resource usage statistics"}, {"stop", "Stop a running container"}, {"tag", "Tag an image into a repository"}, {"top", "Lookup the running processes of a container"}, {"unpause", "Unpause a paused container"}, {"version", "Show the Docker version information"}, {"wait", "Block until a container stops, then print its exit code"}, } { help += fmt.Sprintf(" %-10.10s%s\n", command[0], command[1]) } help += "\nRun 'docker COMMAND --help' for more information on a command." 
fmt.Fprintf(os.Stdout, "%s\n", help) } } docker-1.6.2/docker/daemon.go0000644000175000017500000001220312524223634015373 0ustar tianontianon// +build daemon package main import ( "fmt" "io" "os" "path/filepath" "strings" log "github.com/Sirupsen/logrus" "github.com/docker/docker/autogen/dockerversion" "github.com/docker/docker/builder" "github.com/docker/docker/builtins" "github.com/docker/docker/daemon" _ "github.com/docker/docker/daemon/execdriver/lxc" _ "github.com/docker/docker/daemon/execdriver/native" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/homedir" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/signal" "github.com/docker/docker/registry" "github.com/docker/docker/utils" ) const CanDaemon = true var ( daemonCfg = &daemon.Config{} registryCfg = ®istry.Options{} ) func init() { daemonCfg.InstallFlags() registryCfg.InstallFlags() } func migrateKey() (err error) { // Migrate trust key if exists at ~/.docker/key.json and owned by current user oldPath := filepath.Join(homedir.Get(), ".docker", defaultTrustKeyFile) newPath := filepath.Join(getDaemonConfDir(), defaultTrustKeyFile) if _, statErr := os.Stat(newPath); os.IsNotExist(statErr) && utils.IsFileOwner(oldPath) { defer func() { // Ensure old path is removed if no error occurred if err == nil { err = os.Remove(oldPath) } else { log.Warnf("Key migration failed, key file not removed at %s", oldPath) } }() if err := os.MkdirAll(getDaemonConfDir(), os.FileMode(0644)); err != nil { return fmt.Errorf("Unable to create daemon configuration directory: %s", err) } newFile, err := os.OpenFile(newPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { return fmt.Errorf("error creating key file %q: %s", newPath, err) } defer newFile.Close() oldFile, err := os.Open(oldPath) if err != nil { return fmt.Errorf("error opening key file %q: %s", oldPath, err) } defer oldFile.Close() if _, err := io.Copy(newFile, oldFile); err != nil { return fmt.Errorf("error copying key: %s", err) } log.Infof("Migrated key from %s to %s", oldPath, newPath) } return nil } func mainDaemon() { if flag.NArg() != 0 { flag.Usage() return } eng := engine.New() signal.Trap(eng.Shutdown) if err := migrateKey(); err != nil { log.Fatal(err) } daemonCfg.TrustKeyPath = *flTrustKey // Load builtins if err := builtins.Register(eng); err != nil { log.Fatal(err) } // load registry service if err := registry.NewService(registryCfg).Install(eng); err != nil { log.Fatal(err) } // load the daemon in the background so we can immediately start // the http api so that connections don't fail while the daemon // is booting daemonInitWait := make(chan error) go func() { d, err := daemon.NewDaemon(daemonCfg, eng) if err != nil { daemonInitWait <- err return } log.Infof("docker daemon: %s %s; execdriver: %s; graphdriver: %s", dockerversion.VERSION, dockerversion.GITCOMMIT, d.ExecutionDriver().Name(), d.GraphDriver().String(), ) if err := d.Install(eng); err != nil { daemonInitWait <- err return } b := &builder.BuilderJob{eng, d} b.Install() // after the daemon is done setting up we can tell the api to start // accepting connections if err := eng.Job("acceptconnections").Run(); err != nil { daemonInitWait <- err return } daemonInitWait <- nil }() // Serve api job := eng.Job("serveapi", flHosts...) 
job.SetenvBool("Logging", true) job.SetenvBool("EnableCors", daemonCfg.EnableCors) job.Setenv("CorsHeaders", daemonCfg.CorsHeaders) job.Setenv("Version", dockerversion.VERSION) job.Setenv("SocketGroup", daemonCfg.SocketGroup) job.SetenvBool("Tls", *flTls) job.SetenvBool("TlsVerify", *flTlsVerify) job.Setenv("TlsCa", *flCa) job.Setenv("TlsCert", *flCert) job.Setenv("TlsKey", *flKey) job.SetenvBool("BufferRequests", true) // The serve API job never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go func() { if err := job.Run(); err != nil { log.Errorf("ServeAPI error: %v", err) serveAPIWait <- err return } serveAPIWait <- nil }() // Wait for the daemon startup goroutine to finish // This makes sure we can actually cleanly shutdown the daemon log.Debug("waiting for daemon to initialize") errDaemon := <-daemonInitWait if errDaemon != nil { eng.Shutdown() outStr := fmt.Sprintf("Shutting down daemon due to errors: %v", errDaemon) if strings.Contains(errDaemon.Error(), "engine is shutdown") { // if the error is "engine is shutdown", we've already reported (or // will report below in API server errors) the error outStr = "Shutting down daemon due to reported errors" } // we must "fatal" exit here as the API server may be happy to // continue listening forever if the error had no impact to API log.Fatal(outStr) } else { log.Info("Daemon has completed initialization") } // Daemon is fully initialized and handling API traffic // Wait for serve API job to complete errAPI := <-serveAPIWait // If we have an error here it is unique to API (as daemonErr would have // exited the daemon process above) eng.Shutdown() if errAPI != nil { log.Fatalf("Shutting down due to ServeAPI error: %v", errAPI) } }