diff --git a/CHANGELOG.md b/CHANGELOG.md index d29275b3c9..e245a79c3e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,180 @@ +### v1.13.3 -> v1.13.4 + +#### Fixes + + - [`c63ade6`](https://github.com/deis/deis/commit/c63ade6027870308f046f17ce3aab268d2fdc61e) Godeps,deisctl: serialize a unit test and fix a package import + - [`7fd534c`](https://github.com/deis/deis/commit/7fd534c0b16389960290eb149c97c869331140ad) models: prune old containers before deploy + - [`49b008c`](https://github.com/deis/deis/commit/49b008ccaaeca4a778dbf8010ec3cde41f9b2913) contrib/azure: update template to current API and images + - [`d5d51cd`](https://github.com/deis/deis/commit/d5d51cdbb5caec564877f514ed86b9547b856c6a) client: do not add top level command to cmdArgs + - [`4ade016`](https://github.com/deis/deis/commit/4ade01607b1cbe9c583e6491fd4cd3b8b9599a65) perms: user-only settings file permissions + - [`2381530`](https://github.com/deis/deis/commit/2381530543d3fec4cb472bae24a646a8c76cce0d) database/build.sh: refer to stable pv package location + +#### Documentation + + - [`68fb9b4`](https://github.com/deis/deis/commit/68fb9b4ca0d73339324766f44343441017ce1309) layout: Update notice with URL for published v2 docs + +#### Maintenance + + - [`9b83d00`](https://github.com/deis/deis/commit/9b83d00d5910bb6e98a9a7aec6a0667b8e195cb8) buildpacks: update several buildpacks + - [`b5a1c38`](https://github.com/deis/deis/commit/b5a1c3844eeff06f0d0a5ae0420aef17b81c0f31) community: add latest community meeting + - [`e5f349d`](https://github.com/deis/deis/commit/e5f349dd9fb090aa993ad74758b3bbf038d44d23) community: september 2016 community meeting + - [`3ce03a4`](https://github.com/deis/deis/commit/3ce03a4f6ccc07719ffe98b5522de2b8e422e1c8) router: upgrade nginx to 1.10.1 stable + - [`014ba61`](https://github.com/deis/deis/commit/014ba61294ffd9e3430e5f745cc1835e4c1524b4) buildpacks: update heroku-buildpack-go to v46 + +### v1.13.2 -> v1.13.3 + +#### Features + + - 
[`d369fd7`](https://github.com/deis/deis/commit/d369fd7e811c24d9da4eb907fd1237400cc15114) contrib: Add script to restart all apps. + - [`ef80d81`](https://github.com/deis/deis/commit/ef80d818b01de5e69888192fb855c63374fce9cc) client: Sort listings + - [`968c48c`](https://github.com/deis/deis/commit/968c48c859e92539173032f26aa95afe6478a455) router: Add server_tokens option + +#### Fixes + + - [`d143b4b`](https://github.com/deis/deis/commit/d143b4bf7ba7cb14236748dc6e421554c95ecba8) logspout: Ignore events with short ID + +#### Documentation + + - [`6da7204`](https://github.com/deis/deis/commit/6da72045a36a49d6e6c8b4dbe59ff66695cc927d) dockerfiles: Make Bash requirement explicit in the documentation + +#### Maintenance + + - [`e30db13`](https://github.com/deis/deis/commit/e30db1391800284edf0937d5abfa7956179802af) buildpacks: update go, scala, and php buildpacks + - [`fc1518a`](https://github.com/deis/deis/commit/fc1518a66b9d28d29626696d11be07749cc121a1) (all): update base to alpine:3.4 + - [`fd35c66`](https://github.com/deis/deis/commit/fd35c66ab16c4a621e42f2f42da5b66cb2c63cda) planning: add July and August, up next, September + - [`3ef983f`](https://github.com/deis/deis/commit/3ef983f8c5c7c2d57f3261789a15443fb021f22e) (all): bump CoreOS to 1068.8.0 + - [`167a44a`](https://github.com/deis/deis/commit/167a44a43880ce47509651c3ee2507e9c4e3abca) buildpacks: update go, php, nodejs, python, and scala buildpacks + - [`8342efc`](https://github.com/deis/deis/commit/8342efc2b78b7691e2578e1ac719953739fea908) roadmap: clarify Deis v1 roadmap status + +### v1.13.1 -> v1.13.2 + +#### Fixes + - [`5888a46`](https://github.com/deis/deis/commit/5888a464c39b10ec236e51d784d7a8752d99980a) deisctl: replace -c with --cpu-shares + - [`bd827ce`](https://github.com/deis/deis/commit/bd827cea49e5ad61b127b0ca824ee81319b6f73e) Makefile: remove old "go vet" install command + +#### Documentation + + - [`62f1696`](https://github.com/deis/deis/commit/62f169691c3b1f6fe7591d88162f63255f866b03) upgrading: 
clarify AWS-specific instructions + +#### Maintenance + + - [`b795d8d`](https://github.com/deis/deis/commit/b795d8d52975e07164a0628dce14f180888d946c) planning: update for june/july community meetings + - [`6ca0414`](https://github.com/deis/deis/commit/6ca04149b068a166170031991b491b0bcb845b44) buildpacks: update heroku-buildpack-scala to v70 + - [`1d89269`](https://github.com/deis/deis/commit/1d892694cbf3a3f176dc90ad66f4d3efa76abcdc) buildpacks: update heroku-buildpack-php to v105 + - [`750e5e9`](https://github.com/deis/deis/commit/750e5e9e0f9e810ed5ecc2f1f08ba9473cafa7d5) buildpacks: update heroku-buildpack-go to v41 + - [`db9542a`](https://github.com/deis/deis/commit/db9542a6c7e1e87e9bd52b3e87b2ed92e1c19dc4) Revert (all): bump CoreOS to 1010.5.0 + - [`c89e903`](https://github.com/deis/deis/commit/c89e903d28674922d926dea52cef8031d508b6d3) (all): bump CoreOS to 1010.5.0 + - [`885d7ef`](https://github.com/deis/deis/commit/885d7ef297eeffc4343b887120d1bca0a0a04568) buildpacks: update heroku-buildpack-python to v80 + - [`955dcea`](https://github.com/deis/deis/commit/955dcea900d6822590c8bb01bb30d9fbf9190ac6) planning: updates for May planning + +### v1.13.0 -> v1.13.1 + +#### Fixes + + - [`2a4b9ff`](https://github.com/deis/deis/commit/2a4b9ffd511340ca8a6121a4b11357ebe961e8db) controller: only load latest config + +#### Maintenance + + - [`eb32fb2`](https://github.com/deis/deis/commit/eb32fb2a651fad862003eb9e0093411fefbcd79c) buildpacks: update heroku-buildpack-ruby to v146 + - [`297f885`](https://github.com/deis/deis/commit/297f88518a628094c14a8faed1cb82ff898f567e) buildpacks: update heroku-buildpack-php to v102 + - [`b53d38c`](https://github.com/deis/deis/commit/b53d38cdb2f58d18afd68d64c3bf718d2202e480) buildpacks: update heroku-buildpack-nodejs to v90 + - [`cfff045`](https://github.com/deis/deis/commit/cfff0454649a77ec1640ad4ee1d32b4c6c1cfa39) tests: update test-etcd to v2.2.3 + - [`8257291`](https://github.com/deis/deis/commit/8257291e0c9c03791e105f5d58a2e40c01a190ff) 
buildpacks: update heroku-buildpack-go to v34 + - [`c8957fc`](https://github.com/deis/deis/commit/c8957fcd69b2248110abad1c3933979c1f88fdb1) (all): bump CoreOS to 899.17.0 + - [`705f736`](https://github.com/deis/deis/commit/705f7363d67d676685bdfaa2dfbb6d0acf475bd6) roadmap: update for May planning meeting + - [`fc65359`](https://github.com/deis/deis/commit/fc65359c2ba5541aadf35836940bf312007956de) (all): bump CoreOS to 899.15.0 + +### v1.12.3 -> v1.13.0 + +#### Features + + - [`3181b2d`](https://github.com/deis/deis/commit/3181b2d4c70c8827bc7b5e9bf6bba4950f8a7b37) client: document deis version + - [`6ec3e06`](https://github.com/deis/deis/commit/6ec3e06702714f4529d4383e3ab063d062d927af) controller: add simple health check view + - [`42464a2`](https://github.com/deis/deis/commit/42464a2d951c1890d384b0c6a2be5ed1939955da) contrib: graceful shutdown for non-ceph nodes + - [`d7fe142`](https://github.com/deis/deis/commit/d7fe142a78b020e6b78e1397e9a2f04123a9960b) contrib/linode: allow for cluster expansion and standardize scripts + - [`3f4e25a`](https://github.com/deis/deis/commit/3f4e25ab269eb32b7d48cea7b234a727a2555986) router: make vhost_traffic_status_zone configurable + +#### Fixes + + - [`e2aeace`](https://github.com/deis/deis/commit/e2aeaced2303172cac196ceab3075d2a37c5edc5) logspout: discover logger connection continuously + - [`3052fe6`](https://github.com/deis/deis/commit/3052fe6707708b8207cbba3e7437142545f87caf) controller: prevent overlapping config:set operations + - [`040f90d`](https://github.com/deis/deis/commit/040f90df79648a959d1bd339163784a45f20bd89) client: only delete local ~/.deis/client.json if cancelling logged in user + - [`67090ae`](https://github.com/deis/deis/commit/67090ae58c5cd2c3c5640162a3d2c3f542c59e10) controller: use django HttpResponse for logs + - [`17da397`](https://github.com/deis/deis/commit/17da397b3fc9b961381ca34baed639f9e54746df) controller: use double quotes to escape ENV values + - 
[`d481c4c`](https://github.com/deis/deis/commit/d481c4c35a36dd5258283636127869aef0c18a60) api: disable adding wildcard certificates + - [`ad558e4`](https://github.com/deis/deis/commit/ad558e4bec7d3f17785e78d647267350316bba18) auth: return a 409 if a user is cancelled that has apps + - [`2cf9205`](https://github.com/deis/deis/commit/2cf92051ee86146884c4d9bc968d3783585cde3d) setup-node.sh: update packages for a Jenkins node + - [`46bbba7`](https://github.com/deis/deis/commit/46bbba731280cbe381b914cdd39b42b3dc3bddfb) router: Allow for comma-delimited X-Forwarded-Proto + - [`8f0119f`](https://github.com/deis/deis/commit/8f0119f9303ad83df6a795832da163f72a89f9b0) controller: tag keys can be lowercase or capital + - [`ba38edc`](https://github.com/deis/deis/commit/ba38edc8e4f320996780fd6c8d76336364870c55) builder: fail on piped command + - [`78a16fb`](https://github.com/deis/deis/commit/78a16fbeb793310a55b44336df862b7662704ccd) builder: source proxy envvars + - [`35a97b1`](https://github.com/deis/deis/commit/35a97b1a08d98b21cec4d0f022979a5db8b7e355) builder: remove temporary build dir on success + - [`f7099c7`](https://github.com/deis/deis/commit/f7099c70a54b9dd6bf29d393e7e404d2e43c6e3b) builder: log env tcp requests information to debug + - [`1cb11dd`](https://github.com/deis/deis/commit/1cb11ddd5d03468abefacd7bbb2494570749a934) client: simplify URL prefixing + - [`4a9f791`](https://github.com/deis/deis/commit/4a9f791ea39f366a4941b18e0b81472d8ab2dbf3) vagrant: fix Vagrantfile to handle spaces + - [`3275683`](https://github.com/deis/deis/commit/3275683a1a47119232f1171ded579608477fda4f) builder: demote handshake failure log to debug + - [`e0aa2b1`](https://github.com/deis/deis/commit/e0aa2b138c558ce3372983801595f2a19e5a46b6) client: strip controller port when parsing git remotes + - [`112f513`](https://github.com/deis/deis/commit/112f513cecf731cb22c61fe84c4c753af5429c52) controller: do not require slash at the end of the `GET /v1/users` + - 
[`a450965`](https://github.com/deis/deis/commit/a4509659cb1f9352f619d2fa027e3cccb3b3a434) builder: remove empty newline + - [`7290dd0`](https://github.com/deis/deis/commit/7290dd019d9d228a57a83f7ede812f46f13d79f3) logspout: Truncate lines too big for a single UDP packet + - [`0700028`](https://github.com/deis/deis/commit/07000288742edd43d43a5457ca9e457341be2b80) router: Fix issues establishing real end-client IP + - [`05e4b57`](https://github.com/deis/deis/commit/05e4b57f35cc4fb736a53f54311212474978bcb8) controller: legacy fix should modify dict instead of dict view + - [`51c6861`](https://github.com/deis/deis/commit/51c686151ce57670f493a95da3d69e2956d96de1) database: bail out if unable to check for existing backups + - [`8b824df`](https://github.com/deis/deis/commit/8b824df1f866491fb55dd166198ee277121b6968) client: backport deis/workflow#280 to v1 + - [`f46b967`](https://github.com/deis/deis/commit/f46b967e9fd7c2260cddad656393d7817bcd0897) tests: remove reference to python client + - [`455e6b0`](https://github.com/deis/deis/commit/455e6b04e30c9ebcd56e25e9e2b6198e3c95efc5) tests: use and patch known "good" version of mock-s3 + - [`481fe6b`](https://github.com/deis/deis/commit/481fe6b1ce0405ef4f0eda73dcc1669edbfe2577) registry: disallow dots in s3 buckets + +#### Documentation + + - [`7703d12`](https://github.com/deis/deis/commit/7703d1287d6bd879fc65465958902b6fd6ca7e56) managing_deis: recommend m3.medium for RDS + - [`1b9facd`](https://github.com/deis/deis/commit/1b9facd36044b59abe4d9ddbd33bebb85fde220a) roadmap: add March meeting archive + - [`48eb886`](https://github.com/deis/deis/commit/48eb88630b442f4e08c67b2c1307bd346e8b0582) contrib: add link to deis-phppgadmin + - [`4009ffd`](https://github.com/deis/deis/commit/4009ffdec6c326c1fa9c01a5a21583548582616e) reference: remove sidenote about pushing only to master + - [`a9a7f33`](https://github.com/deis/deis/commit/a9a7f33ac63085434e3c3aab583b2484c77a9d77) roadmap: add March release planning meeting + - 
[`997e082`](https://github.com/deis/deis/commit/997e0826392ec8c03d0bcaad04c1092fc3513bc1) Add/Remove hosts: fix wrong filepath to user-data.example + - [`ada6c9c`](https://github.com/deis/deis/commit/ada6c9c96798e96f4cba683b1282c4b6d4605a73) managing_deis: add workaround for cephless cluster + - [`f04ed12`](https://github.com/deis/deis/commit/f04ed124f857a52c4b1e37be943e11e7839203a0) managing_deis: update Sematext agent name and URL + - [`3faf421`](https://github.com/deis/deis/commit/3faf42182004056423039a65b3a2f8fa4470d619) installing_deis: Add parameter description about publicDomainName. + - [`2520e54`](https://github.com/deis/deis/commit/2520e543be81f8a9f6ac1bf9f61e0eaf6d2b10f7) roadmap: add January 2016 meeting + archive + - [`ee77077`](https://github.com/deis/deis/commit/ee77077de6ef7a979a91a63749e5659b43ef2589) roadmap: add December 2015 and January 2016 meetings + +#### Maintenance + + - [`5c8d0c4`](https://github.com/deis/deis/commit/5c8d0c4bda4c3de31dd315b98561f0966115a614) reqs: update docker-py to 1.7.2 + - [`a5e065f`](https://github.com/deis/deis/commit/a5e065f6e483d7139088dc8f60c2c8f671ca5718) build.sh: remove unused git install + - [`4ad3329`](https://github.com/deis/deis/commit/4ad3329ef6ef61f193b7da227072b7a2bbc51dbd) requirements: remove obsolete marathon lib + - [`52e96a5`](https://github.com/deis/deis/commit/52e96a5c193677976c4809292bfa464889f55de2) (all): update base to alpine:3.3 + - [`1a79431`](https://github.com/deis/deis/commit/1a794318d0bbafa4755beb1578cd0a983774c19f) (all): bump CoreOS to 899.13.0 + - [`a7ee6cc`](https://github.com/deis/deis/commit/a7ee6cc9ad8e640ef83f63e01f46692e03ded3a4) buildpacks: update heroku-buildpack-java to v44 + - [`02fbaf1`](https://github.com/deis/deis/commit/02fbaf19df415384603b0e528e567b3c6fa7b735) buildpacks: update heroku-buildpack-php to v97 + - [`452a028`](https://github.com/deis/deis/commit/452a02824255ffd71cb41aed0898f73772b31ec6) buildpacks: update heroku-buildpack-ruby to v145 + - 
[`ab88aaf`](https://github.com/deis/deis/commit/ab88aafd405ac54d98de12ddd23a9526d1c90611) buildpacks: update heroku-buildpack-grails to v20 + - [`408f053`](https://github.com/deis/deis/commit/408f053ce1225ef83f8c48d9adde250be1a7bbd4) buildpacks: update heroku-buildpack-scala to v67 + - [`c23dbff`](https://github.com/deis/deis/commit/c23dbff0ed5f54c4d2710ef965d1a1e93ee59bd8) buildpacks: update heroku-buildpack-multi to v1.0.0 + - [`79d68a3`](https://github.com/deis/deis/commit/79d68a37ed5abda13bc1056099cd7eeea13503b1) buildpacks: update heroku-buildpack-nodejs to v89 + - [`4ef40e6`](https://github.com/deis/deis/commit/4ef40e6923521f313c67993e1ec96e3ae220d1c8) buildpacks: update heroku-buildpack-python to v78 + - [`ee22d78`](https://github.com/deis/deis/commit/ee22d78376f4aa90840834f5085c4ce51f2ddd59) buildpacks: update heroku-buildpack-php to v94 + - [`f448577`](https://github.com/deis/deis/commit/f44857728875cc603fa0fe820a90077671573776) buildpacks: update heroku-buildpack-scala to v66 + - [`c9b0a4c`](https://github.com/deis/deis/commit/c9b0a4c95a394808d8c46d042d06d40c65fb46bd) buildpacks: update heroku-buildpack-nodejs to v88 + - [`df95748`](https://github.com/deis/deis/commit/df957488417543043cb41bcbc051c89d05b30730) buildpacks: update heroku-buildpack-go to v31 + - [`1d8f9cc`](https://github.com/deis/deis/commit/1d8f9cc854a673e6cab1c9b29d41fb26be85ef2a) (all): bump CoreOS to 835.13.0 + - [`c96aad6`](https://github.com/deis/deis/commit/c96aad6d59157d046bfc7172716245dc9f27dd75) requirements: update docker-py to 1.7.0 + - [`96796c6`](https://github.com/deis/deis/commit/96796c64c62e5011f5352af325b0e2fb2611840c) buildpacks: update all Heroku buildpacks + - [`3a619bc`](https://github.com/deis/deis/commit/3a619bcd974b6beb32be13643da9a47eb9d20057) (all): remove k8s scheduler code + - [`5c916c8`](https://github.com/deis/deis/commit/5c916c8bd9f75669234c851284c5505cef7691e6) Godeps: bump googleapi, remove unused packages + - 
[`7c37363`](https://github.com/deis/deis/commit/7c373633f1aaf05741cb5c8f745de587db17212c) (all): bump to CoreOS 835.11.0 + - [`4dac497`](https://github.com/deis/deis/commit/4dac49741901f1a780693d464eea1981ce29f68b) deisctl: update stateless warning message + - [`d88ec07`](https://github.com/deis/deis/commit/d88ec07c99f4ab903fc7d250025146b966aacee8) (all): bump to CoreOS 835.9.0 + - [`31ad174`](https://github.com/deis/deis/commit/31ad17416487d394a8d06f6b262d9ed4eb807020) controller: update docker-py to 1.6.0 + - [`aad62fe`](https://github.com/deis/deis/commit/aad62fedd20acc567d1477c152884de77397ffa1) (all): bump CoreOS to 835.8.0 + +### v1.12.2 -> v1.12.3 + +#### Maintenance + + - [`7356d26`](https://github.com/deis/deis/commit/7356d26adeb2f48d8df82a1d0baa090da7d4bb20) (all): bump CoreOS to 835.12.0 + ### v1.12.1 -> v1.12.2 #### Fixes diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 365750013a..0d7e58a1a1 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1,6 +1,6 @@ { "ImportPath": "github.com/deis/deis", - "GoVersion": "go1.5", + "GoVersion": "go1.7.3", "Packages": [ "./..." 
], @@ -248,11 +248,6 @@ "Comment": "v1.5.0", "Rev": "a8a31eff10544860d2188dddabdee4d727545796" }, - { - "ImportPath": "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar", - "Comment": "v1.5.0", - "Rev": "a8a31eff10544860d2188dddabdee4d727545796" - }, { "ImportPath": "github.com/docker/libcontainer/netlink", "Comment": "v1.2.0-160-g7294213", @@ -276,10 +271,6 @@ "Comment": "v1.0-56-g819aca5", "Rev": "819aca59fdc412028cc5348ff3d16be2715d33cf" }, - { - "ImportPath": "github.com/golang/glog", - "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" - }, { "ImportPath": "github.com/gorilla/context", "Rev": "14f550f51af52180c2eefed15e5fd18d63c0a64a" @@ -297,28 +288,11 @@ "ImportPath": "github.com/jonboulle/clockwork", "Rev": "b473f398c464f1988327f67c9e6aa7fba62f80d2" }, - { - "ImportPath": "github.com/kardianos/osext", - "Rev": "10a0e3c4f6267b0f903197acf80d194bab3eb8cb" - }, - { - "ImportPath": "github.com/leeor/etcd-sync", - "Rev": "1b8ac808160f955a5a2cdf21809e968698b07e8d" - }, { "ImportPath": "github.com/lib/pq", "Comment": "go1.0-cutoff-28-g72d960f", "Rev": "72d960fe96ac86dfd376b0de7ca81c0334838d0b" }, - { - "ImportPath": "github.com/progrium/go-basher", - "Rev": "ca3b97b3bd2a7eda303716f56916684771b79942" - }, - { - "ImportPath": "github.com/robfig/cron", - "Comment": "v1-2-g67823cd", - "Rev": "67823cd24dece1b04cced3a0a0b3ca2bc84d875e" - }, { "ImportPath": "github.com/tchap/go-patricia/patricia", "Comment": "v1.0.1", @@ -336,17 +310,13 @@ "ImportPath": "golang.org/x/crypto/ssh", "Rev": "f7445b17d61953e333441674c2d11e91ae4559d3" }, - { - "ImportPath": "golang.org/x/net/context", - "Rev": "d9558e5c97f85372afee28cf2b6059d7d3818919" - }, { "ImportPath": "golang.org/x/net/websocket", "Rev": "d9558e5c97f85372afee28cf2b6059d7d3818919" }, { "ImportPath": "google.golang.org/api/googleapi", - "Rev": "c83ee8e9b7e6c40a486c0992a963ea8b6911de67" + "Rev": "0caa37974a5f5ae67172acf68b4970f7864f994c" }, { "ImportPath": "gopkg.in/tomb.v1", diff --git 
a/Godeps/_workspace/src/github.com/docker/docker/graph/tags_unit_test.go b/Godeps/_workspace/src/github.com/docker/docker/graph/tags_unit_test.go index 58ad8ed878..f6a7231586 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/graph/tags_unit_test.go +++ b/Godeps/_workspace/src/github.com/docker/docker/graph/tags_unit_test.go @@ -7,11 +7,12 @@ import ( "path" "testing" + "archive/tar" + "github.com/docker/docker/daemon/graphdriver" _ "github.com/docker/docker/daemon/graphdriver/vfs" // import the vfs driver so it is used in the tests "github.com/docker/docker/image" "github.com/docker/docker/utils" - "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) const ( diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go index 68e5c1d300..b54bad308f 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go @@ -16,7 +16,7 @@ import ( "strings" "syscall" - "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "archive/tar" log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/fileutils" diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go index 6cd95d5ad5..dbe8d5ea79 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go @@ -14,7 +14,7 @@ import ( "testing" "time" - "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "archive/tar" ) func TestCmdStreamLargeStderr(t *testing.T) { diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go index 
c0e8aee93c..dd1b767fa8 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go @@ -6,7 +6,7 @@ import ( "errors" "syscall" - "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "archive/tar" ) func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) { diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go index 3cc2493f6f..6312df3c8d 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows.go @@ -3,7 +3,7 @@ package archive import ( - "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "archive/tar" ) func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) { diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go index 85217f6e08..bc67e9049a 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go @@ -10,7 +10,7 @@ import ( "syscall" "time" - "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "archive/tar" log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/pools" diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go index ca282071f5..1e9b968ca8 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go +++ 
b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go @@ -9,7 +9,7 @@ import ( "strings" "syscall" - "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "archive/tar" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff_test.go index 758c4115d5..1af10fe6a3 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff_test.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff_test.go @@ -3,7 +3,7 @@ package archive import ( "testing" - "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "archive/tar" ) func TestApplyLayerInvalidFilenames(t *testing.T) { diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go index 9048027203..539f694b8e 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go @@ -9,7 +9,7 @@ import ( "path/filepath" "time" - "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "archive/tar" ) var testUntarFns = map[string]func(string, io.Reader) error{ diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go index b8b60197a3..0abe320bee 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap.go @@ -2,7 +2,7 @@ package archive import ( "bytes" - "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "archive/tar" "io/ioutil" ) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum.go 
b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum.go index 88fcbe4a94..17ca379ba6 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum.go @@ -12,7 +12,7 @@ import ( "io" "strings" - "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "archive/tar" ) const ( diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_test.go index 26f12cc847..3ec7035cb3 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_test.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/tarsum_test.go @@ -15,7 +15,7 @@ import ( "os" "testing" - "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "archive/tar" ) type testLayer struct { diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning.go index 0ceb5298a3..243844e8c0 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/tarsum/versioning.go @@ -6,7 +6,7 @@ import ( "strconv" "strings" - "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "archive/tar" ) // versioning of the TarSum algorithm diff --git a/Godeps/_workspace/src/github.com/golang/glog/LICENSE b/Godeps/_workspace/src/github.com/golang/glog/LICENSE deleted file mode 100644 index 37ec93a14f..0000000000 --- a/Godeps/_workspace/src/github.com/golang/glog/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. 
- -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. 
- -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file 
distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
- -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/Godeps/_workspace/src/github.com/golang/glog/README b/Godeps/_workspace/src/github.com/golang/glog/README deleted file mode 100644 index 5f9c11485e..0000000000 --- a/Godeps/_workspace/src/github.com/golang/glog/README +++ /dev/null @@ -1,44 +0,0 @@ -glog -==== - -Leveled execution logs for Go. - -This is an efficient pure Go implementation of leveled logs in the -manner of the open source C++ package - http://code.google.com/p/google-glog - -By binding methods to booleans it is possible to use the log package -without paying the expense of evaluating the arguments to the log. -Through the -vmodule flag, the package also provides fine-grained -control over logging at the file level. 
- -The comment from glog.go introduces the ideas: - - Package glog implements logging analogous to the Google-internal - C++ INFO/ERROR/V setup. It provides functions Info, Warning, - Error, Fatal, plus formatting variants such as Infof. It - also provides V-style logging controlled by the -v and - -vmodule=file=2 flags. - - Basic examples: - - glog.Info("Prepare to repel boarders") - - glog.Fatalf("Initialization failed: %s", err) - - See the documentation for the V function for an explanation - of these examples: - - if glog.V(2) { - glog.Info("Starting transaction...") - } - - glog.V(2).Infoln("Processed", nItems, "elements") - - -The repository contains an open source version of the log package -used inside Google. The master copy of the source lives inside -Google, not here. The code in this repo is for export only and is not itself -under development. Feature requests will be ignored. - -Send bug reports to golang-nuts@googlegroups.com. diff --git a/Godeps/_workspace/src/github.com/golang/glog/glog.go b/Godeps/_workspace/src/github.com/golang/glog/glog.go deleted file mode 100644 index 3e63fffd5e..0000000000 --- a/Godeps/_workspace/src/github.com/golang/glog/glog.go +++ /dev/null @@ -1,1177 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. -// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as -// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. -// -// Basic examples: -// -// glog.Info("Prepare to repel boarders") -// -// glog.Fatalf("Initialization failed: %s", err) -// -// See the documentation for the V function for an explanation of these examples: -// -// if glog.V(2) { -// glog.Info("Starting transaction...") -// } -// -// glog.V(2).Infoln("Processed", nItems, "elements") -// -// Log output is buffered and written periodically using Flush. Programs -// should call Flush before exiting to guarantee all log output is written. -// -// By default, all log statements write to files in a temporary directory. -// This package provides several flags that modify this behavior. -// As a result, flag.Parse must be called before any logging is done. -// -// -logtostderr=false -// Logs are written to standard error instead of to files. -// -alsologtostderr=false -// Logs are written to standard error as well as to files. -// -stderrthreshold=ERROR -// Log events at or above this severity are logged to standard -// error as well as to files. -// -log_dir="" -// Log files will be written to this directory instead of the -// default temporary directory. -// -// Other flags provide aids to debugging. -// -// -log_backtrace_at="" -// When set to a file and line number holding a logging statement, -// such as -// -log_backtrace_at=gopherflakes.go:234 -// a stack trace will be written to the Info log whenever execution -// hits that statement. (Unlike with -vmodule, the ".go" must be -// present.) -// -v=0 -// Enable V-leveled logging at the specified level. -// -vmodule="" -// The syntax of the argument is a comma-separated list of pattern=N, -// where pattern is a literal file name (minus the ".go" suffix) or -// "glob" pattern and N is a V level. 
For instance, -// -vmodule=gopher*=3 -// sets the V level to 3 in all Go files whose names begin "gopher". -// -package glog - -import ( - "bufio" - "bytes" - "errors" - "flag" - "fmt" - "io" - stdLog "log" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" -) - -// severity identifies the sort of log: info, warning etc. It also implements -// the flag.Value interface. The -stderrthreshold flag is of type severity and -// should be modified only through the flag.Value interface. The values match -// the corresponding constants in C++. -type severity int32 // sync/atomic int32 - -// These constants identify the log levels in order of increasing severity. -// A message written to a high-severity log file is also written to each -// lower-severity log file. -const ( - infoLog severity = iota - warningLog - errorLog - fatalLog - numSeverity = 4 -) - -const severityChar = "IWEF" - -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// get returns the value of the severity. -func (s *severity) get() severity { - return severity(atomic.LoadInt32((*int32)(s))) -} - -// set sets the value of the severity. -func (s *severity) set(val severity) { - atomic.StoreInt32((*int32)(s), int32(val)) -} - -// String is part of the flag.Value interface. -func (s *severity) String() string { - return strconv.FormatInt(int64(*s), 10) -} - -// Get is part of the flag.Value interface. -func (s *severity) Get() interface{} { - return *s -} - -// Set is part of the flag.Value interface. -func (s *severity) Set(value string) error { - var threshold severity - // Is it a known name? 
- if v, ok := severityByName(value); ok { - threshold = v - } else { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - threshold = severity(v) - } - logging.stderrThreshold.set(threshold) - return nil -} - -func severityByName(s string) (severity, bool) { - s = strings.ToUpper(s) - for i, name := range severityName { - if name == s { - return severity(i), true - } - } - return 0, false -} - -// OutputStats tracks the number of output lines and bytes written. -type OutputStats struct { - lines int64 - bytes int64 -} - -// Lines returns the number of lines written. -func (s *OutputStats) Lines() int64 { - return atomic.LoadInt64(&s.lines) -} - -// Bytes returns the number of bytes written. -func (s *OutputStats) Bytes() int64 { - return atomic.LoadInt64(&s.bytes) -} - -// Stats tracks the number of lines of output and number of bytes -// per severity level. Values must be read with atomic.LoadInt64. -var Stats struct { - Info, Warning, Error OutputStats -} - -var severityStats = [numSeverity]*OutputStats{ - infoLog: &Stats.Info, - warningLog: &Stats.Warning, - errorLog: &Stats.Error, -} - -// Level is exported because it appears in the arguments to V and is -// the type of the v flag, which can be set programmatically. -// It's a distinct type because we want to discriminate it from logType. -// Variables of type level are only changed under logging.mu. -// The -v flag is read only with atomic ops, so the state of the logging -// module is consistent. - -// Level is treated as a sync/atomic int32. - -// Level specifies a level of verbosity for V logs. *Level implements -// flag.Value; the -v flag is of type Level and should be modified -// only through the flag.Value interface. -type Level int32 - -// get returns the value of the Level. -func (l *Level) get() Level { - return Level(atomic.LoadInt32((*int32)(l))) -} - -// set sets the value of the Level. 
-func (l *Level) set(val Level) { - atomic.StoreInt32((*int32)(l), int32(val)) -} - -// String is part of the flag.Value interface. -func (l *Level) String() string { - return strconv.FormatInt(int64(*l), 10) -} - -// Get is part of the flag.Value interface. -func (l *Level) Get() interface{} { - return *l -} - -// Set is part of the flag.Value interface. -func (l *Level) Set(value string) error { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(Level(v), logging.vmodule.filter, false) - return nil -} - -// moduleSpec represents the setting of the -vmodule flag. -type moduleSpec struct { - filter []modulePat -} - -// modulePat contains a filter for the -vmodule flag. -// It holds a verbosity level and a file pattern to match. -type modulePat struct { - pattern string - literal bool // The pattern is a literal string - level Level -} - -// match reports whether the file matches the pattern. It uses a string -// comparison if the pattern contains no metacharacters. -func (m *modulePat) match(file string) bool { - if m.literal { - return file == m.pattern - } - match, _ := filepath.Match(m.pattern, file) - return match -} - -func (m *moduleSpec) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - var b bytes.Buffer - for i, f := range m.filter { - if i > 0 { - b.WriteRune(',') - } - fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) - } - return b.String() -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported. 
-func (m *moduleSpec) Get() interface{} { - return nil -} - -var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") - -// Syntax: -vmodule=recordio=2,file=1,gfs*=3 -func (m *moduleSpec) Set(value string) error { - var filter []modulePat - for _, pat := range strings.Split(value, ",") { - if len(pat) == 0 { - // Empty strings such as from a trailing comma can be ignored. - continue - } - patLev := strings.Split(pat, "=") - if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { - return errVmoduleSyntax - } - pattern := patLev[0] - v, err := strconv.Atoi(patLev[1]) - if err != nil { - return errors.New("syntax error: expect comma-separated list of filename=N") - } - if v < 0 { - return errors.New("negative value for vmodule level") - } - if v == 0 { - continue // Ignore. It's harmless but no point in paying the overhead. - } - // TODO: check syntax of filter? - filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(logging.verbosity, filter, true) - return nil -} - -// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters -// that require filepath.Match to be called to match the pattern. -func isLiteral(pattern string) bool { - return !strings.ContainsAny(pattern, `\*?[]`) -} - -// traceLocation represents the setting of the -log_backtrace_at flag. -type traceLocation struct { - file string - line int -} - -// isSet reports whether the trace location has been specified. -// logging.mu is held. -func (t *traceLocation) isSet() bool { - return t.line > 0 -} - -// match reports whether the specified file and line matches the trace location. -// The argument file name is the full path, not the basename specified in the flag. -// logging.mu is held. 
-func (t *traceLocation) match(file string, line int) bool { - if t.line != line { - return false - } - if i := strings.LastIndex(file, "/"); i >= 0 { - file = file[i+1:] - } - return t.file == file -} - -func (t *traceLocation) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - return fmt.Sprintf("%s:%d", t.file, t.line) -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported -func (t *traceLocation) Get() interface{} { - return nil -} - -var errTraceSyntax = errors.New("syntax error: expect file.go:234") - -// Syntax: -log_backtrace_at=gopherflakes.go:234 -// Note that unlike vmodule the file extension is included here. -func (t *traceLocation) Set(value string) error { - if value == "" { - // Unset. - t.line = 0 - t.file = "" - } - fields := strings.Split(value, ":") - if len(fields) != 2 { - return errTraceSyntax - } - file, line := fields[0], fields[1] - if !strings.Contains(file, ".") { - return errTraceSyntax - } - v, err := strconv.Atoi(line) - if err != nil { - return errTraceSyntax - } - if v <= 0 { - return errors.New("negative or zero value for level") - } - logging.mu.Lock() - defer logging.mu.Unlock() - t.line = v - t.file = file - return nil -} - -// flushSyncWriter is the interface satisfied by logging destinations. 
-type flushSyncWriter interface { - Flush() error - Sync() error - io.Writer -} - -func init() { - flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") - flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") - flag.Var(&logging.verbosity, "v", "log level for V logs") - flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") - flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") - flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") - - // Default stderrThreshold is ERROR. - logging.stderrThreshold = errorLog - - logging.setVState(0, nil, false) - go logging.flushDaemon() -} - -// Flush flushes all pending log I/O. -func Flush() { - logging.lockAndFlushAll() -} - -// loggingT collects all the global state of the logging setup. -type loggingT struct { - // Boolean flags. Not handled atomically because the flag.Value interface - // does not let us avoid the =true, and that shorthand is necessary for - // compatibility. TODO: does this matter enough to fix? Seems unlikely. - toStderr bool // The -logtostderr flag. - alsoToStderr bool // The -alsologtostderr flag. - - // Level flag. Handled atomically. - stderrThreshold severity // The -stderrthreshold flag. - - // freeList is a list of byte buffers, maintained under freeListMu. - freeList *buffer - // freeListMu maintains the free list. It is separate from the main mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - freeListMu sync.Mutex - - // mu protects the remaining elements of this structure and is - // used to synchronize logging. - mu sync.Mutex - // file holds writer for each of the log types. 
- file [numSeverity]flushSyncWriter - // pcs is used in V to avoid an allocation when computing the caller's PC. - pcs [1]uintptr - // vmap is a cache of the V Level for each V() call site, identified by PC. - // It is wiped whenever the vmodule flag changes state. - vmap map[uintptr]Level - // filterLength stores the length of the vmodule filter chain. If greater - // than zero, it means vmodule is enabled. It may be read safely - // using sync.LoadInt32, but is only modified under mu. - filterLength int32 - // traceLocation is the state of the -log_backtrace_at flag. - traceLocation traceLocation - // These flags are modified only under lock, although verbosity may be fetched - // safely using atomic.LoadInt32. - vmodule moduleSpec // The state of the -vmodule flag. - verbosity Level // V logging level, the value of the -v flag/ -} - -// buffer holds a byte Buffer for reuse. The zero value is ready for use. -type buffer struct { - bytes.Buffer - tmp [64]byte // temporary byte array for creating headers. - next *buffer -} - -var logging loggingT - -// setVState sets a consistent state for V logging. -// l.mu is held. -func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { - // Turn verbosity off so V will not fire while we are in transition. - logging.verbosity.set(0) - // Ditto for filter length. - atomic.StoreInt32(&logging.filterLength, 0) - - // Set the new filters and wipe the pc->Level map if the filter has changed. - if setFilter { - logging.vmodule.filter = filter - logging.vmap = make(map[uintptr]Level) - } - - // Things are consistent now, so enable filtering and verbosity. - // They are enabled in order opposite to that in V. - atomic.StoreInt32(&logging.filterLength, int32(len(filter))) - logging.verbosity.set(verbosity) -} - -// getBuffer returns a new, ready-to-use buffer. 
-func (l *loggingT) getBuffer() *buffer { - l.freeListMu.Lock() - b := l.freeList - if b != nil { - l.freeList = b.next - } - l.freeListMu.Unlock() - if b == nil { - b = new(buffer) - } else { - b.next = nil - b.Reset() - } - return b -} - -// putBuffer returns a buffer to the free list. -func (l *loggingT) putBuffer(b *buffer) { - if b.Len() >= 256 { - // Let big buffers die a natural death. - return - } - l.freeListMu.Lock() - b.next = l.freeList - l.freeList = b - l.freeListMu.Unlock() -} - -var timeNow = time.Now // Stubbed out for testing. - -/* -header formats a log header as defined by the C++ implementation. -It returns a buffer containing the formatted header and the user's file and line number. -The depth specifies how many stack frames above lives the source line to be identified in the log message. - -Log lines have this form: - Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... -where the fields are defined as follows: - L A single character, representing the log level (eg 'I' for INFO) - mm The month (zero padded; ie May is '05') - dd The day (zero padded) - hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds - threadid The space-padded thread ID as returned by GetTID() - file The file name - line The line number - msg The user-supplied message -*/ -func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { - _, file, line, ok := runtime.Caller(3 + depth) - if !ok { - file = "???" - line = 1 - } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] - } - } - return l.formatHeader(s, file, line), file, line -} - -// formatHeader formats a log header using the provided file name and line number. -func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { - now := timeNow() - if line < 0 { - line = 0 // not a real line number, but acceptable to someDigits - } - if s > fatalLog { - s = infoLog // for safety. - } - buf := l.getBuffer() - - // Avoid Fprintf, for speed. 
The format is so simple that we can do it quickly by hand. - // It's worth about 3X. Fprintf is hard. - _, month, day := now.Date() - hour, minute, second := now.Clock() - // Lmmdd hh:mm:ss.uuuuuu threadid file:line] - buf.tmp[0] = severityChar[s] - buf.twoDigits(1, int(month)) - buf.twoDigits(3, day) - buf.tmp[5] = ' ' - buf.twoDigits(6, hour) - buf.tmp[8] = ':' - buf.twoDigits(9, minute) - buf.tmp[11] = ':' - buf.twoDigits(12, second) - buf.tmp[14] = '.' - buf.nDigits(6, 15, now.Nanosecond()/1000, '0') - buf.tmp[21] = ' ' - buf.nDigits(7, 22, pid, ' ') // TODO: should be TID - buf.tmp[29] = ' ' - buf.Write(buf.tmp[:30]) - buf.WriteString(file) - buf.tmp[0] = ':' - n := buf.someDigits(1, line) - buf.tmp[n+1] = ']' - buf.tmp[n+2] = ' ' - buf.Write(buf.tmp[:n+3]) - return buf -} - -// Some custom tiny helper functions to print the log header efficiently. - -const digits = "0123456789" - -// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. -func (buf *buffer) twoDigits(i, d int) { - buf.tmp[i+1] = digits[d%10] - d /= 10 - buf.tmp[i] = digits[d%10] -} - -// nDigits formats an n-digit integer at buf.tmp[i], -// padding with pad on the left. -// It assumes d >= 0. -func (buf *buffer) nDigits(n, i, d int, pad byte) { - j := n - 1 - for ; j >= 0 && d > 0; j-- { - buf.tmp[i+j] = digits[d%10] - d /= 10 - } - for ; j >= 0; j-- { - buf.tmp[i+j] = pad - } -} - -// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. -func (buf *buffer) someDigits(i, d int) int { - // Print into the top, then copy down. We know there's space for at least - // a 10-digit number. - j := len(buf.tmp) - for { - j-- - buf.tmp[j] = digits[d%10] - d /= 10 - if d == 0 { - break - } - } - return copy(buf.tmp[i:], buf.tmp[j:]) -} - -func (l *loggingT) println(s severity, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintln(buf, args...) 
- l.output(s, buf, file, line, false) -} - -func (l *loggingT) print(s severity, args ...interface{}) { - l.printDepth(s, 1, args...) -} - -func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { - buf, file, line := l.header(s, depth) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -func (l *loggingT) printf(s severity, format string, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintf(buf, format, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -// printWithFileLine behaves like print but uses the provided file and line number. If -// alsoLogToStderr is true, the log message always appears on standard error; it -// will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { - buf := l.formatHeader(s, file, line) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, alsoToStderr) -} - -// output writes the data to the log files and releases the buffer. -func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { - l.mu.Lock() - if l.traceLocation.isSet() { - if l.traceLocation.match(file, line) { - buf.Write(stacks(false)) - } - } - data := buf.Bytes() - if l.toStderr { - os.Stderr.Write(data) - } else { - if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { - os.Stderr.Write(data) - } - if l.file[s] == nil { - if err := l.createFiles(s); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. 
- l.exit(err) - } - } - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: - l.file[infoLog].Write(data) - } - } - if s == fatalLog { - // If we got here via Exit rather than Fatal, print no stacks. - if atomic.LoadUint32(&fatalNoStacks) > 0 { - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(1) - } - // Dump all goroutine stacks before exiting. - // First, make sure we see the trace for the current goroutine on standard error. - // If -logtostderr has been specified, the loop below will do that anyway - // as the first stack in the full dump. - if !l.toStderr { - os.Stderr.Write(stacks(false)) - } - // Write the stack trace for all goroutines to the files. - trace := stacks(true) - logExitFunc = func(error) {} // If we get a write error, we'll still exit below. - for log := fatalLog; log >= infoLog; log-- { - if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. - f.Write(trace) - } - } - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. - } - l.putBuffer(buf) - l.mu.Unlock() - if stats := severityStats[s]; stats != nil { - atomic.AddInt64(&stats.lines, 1) - atomic.AddInt64(&stats.bytes, int64(len(data))) - } -} - -// timeoutFlush calls Flush and returns when it completes or after timeout -// elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when glog.Fatal is called from a hook that holds -// a lock. 
-func timeoutFlush(timeout time.Duration) { - done := make(chan bool, 1) - go func() { - Flush() // calls logging.lockAndFlushAll() - done <- true - }() - select { - case <-done: - case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) - } -} - -// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. -func stacks(all bool) []byte { - // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. - n := 10000 - if all { - n = 100000 - } - var trace []byte - for i := 0; i < 5; i++ { - trace = make([]byte, n) - nbytes := runtime.Stack(trace, all) - if nbytes < len(trace) { - return trace[:nbytes] - } - n *= 2 - } - return trace -} - -// logExitFunc provides a simple mechanism to override the default behavior -// of exiting on error. Used in testing and to guarantee we reach a required exit -// for fatal logs. Instead, exit could be a function rather than a method but that -// would make its use clumsier. -var logExitFunc func(error) - -// exit is called if there is trouble creating or writing log files. -// It flushes the logs and exits the program; there's no point in hanging around. -// l.mu is held. -func (l *loggingT) exit(err error) { - fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) - // If logExitFunc is set, we do that instead of exiting. - if logExitFunc != nil { - logExitFunc(err) - return - } - l.flushAll() - os.Exit(2) -} - -// syncBuffer joins a bufio.Writer to its underlying file, providing access to the -// file's Sync method and providing a wrapper for the Write method that provides log -// file rotation. There are conflicting methods, so the file cannot be embedded. -// l.mu is held for all its methods. 
-type syncBuffer struct { - logger *loggingT - *bufio.Writer - file *os.File - sev severity - nbytes uint64 // The number of bytes written to this file -} - -func (sb *syncBuffer) Sync() error { - return sb.file.Sync() -} - -func (sb *syncBuffer) Write(p []byte) (n int, err error) { - if sb.nbytes+uint64(len(p)) >= MaxSize { - if err := sb.rotateFile(time.Now()); err != nil { - sb.logger.exit(err) - } - } - n, err = sb.Writer.Write(p) - sb.nbytes += uint64(n) - if err != nil { - sb.logger.exit(err) - } - return -} - -// rotateFile closes the syncBuffer's file and starts a new one. -func (sb *syncBuffer) rotateFile(now time.Time) error { - if sb.file != nil { - sb.Flush() - sb.file.Close() - } - var err error - sb.file, _, err = create(severityName[sb.sev], now) - sb.nbytes = 0 - if err != nil { - return err - } - - sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) - - // Write header. - var buf bytes.Buffer - fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) - fmt.Fprintf(&buf, "Running on machine: %s\n", host) - fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) - fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") - n, err := sb.file.Write(buf.Bytes()) - sb.nbytes += uint64(n) - return err -} - -// bufferSize sizes the buffer associated with each log file. It's large -// so that log records can accumulate without the logging thread blocking -// on disk I/O. The flushDaemon will block instead. -const bufferSize = 256 * 1024 - -// createFiles creates all the log files for severity from sev down to infoLog. -// l.mu is held. -func (l *loggingT) createFiles(sev severity) error { - now := time.Now() - // Files are created in decreasing severity order, so as soon as we find one - // has already been created, we can stop. 
- for s := sev; s >= infoLog && l.file[s] == nil; s-- { - sb := &syncBuffer{ - logger: l, - sev: s, - } - if err := sb.rotateFile(now); err != nil { - return err - } - l.file[s] = sb - } - return nil -} - -const flushInterval = 30 * time.Second - -// flushDaemon periodically flushes the log file buffers. -func (l *loggingT) flushDaemon() { - for _ = range time.NewTicker(flushInterval).C { - l.lockAndFlushAll() - } -} - -// lockAndFlushAll is like flushAll but locks l.mu first. -func (l *loggingT) lockAndFlushAll() { - l.mu.Lock() - l.flushAll() - l.mu.Unlock() -} - -// flushAll flushes all the logs and attempts to "sync" their data to disk. -// l.mu is held. -func (l *loggingT) flushAll() { - // Flush from fatal down, in case there's trouble flushing. - for s := fatalLog; s >= infoLog; s-- { - file := l.file[s] - if file != nil { - file.Flush() // ignore error - file.Sync() // ignore error - } - } -} - -// CopyStandardLogTo arranges for messages written to the Go "log" package's -// default logs to also appear in the Google logs for the named and lower -// severities. Subsequent changes to the standard log's default output location -// or format may break this behavior. -// -// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not -// recognized, CopyStandardLogTo panics. -func CopyStandardLogTo(name string) { - sev, ok := severityByName(name) - if !ok { - panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) - } - // Set a log format that captures the user's file and line: - // d.go:23: message - stdLog.SetFlags(stdLog.Lshortfile) - stdLog.SetOutput(logBridge(sev)) -} - -// logBridge provides the Write method that enables CopyStandardLogTo to connect -// Go's standard logs to the logs provided by this package. -type logBridge severity - -// Write parses the standard logging line and passes its components to the -// logger for severity(lb). 
-func (lb logBridge) Write(b []byte) (n int, err error) { - var ( - file = "???" - line = 1 - text string - ) - // Split "d.go:23: message" into "d.go", "23", and "message". - if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { - text = fmt.Sprintf("bad log format: %s", b) - } else { - file = string(parts[0]) - text = string(parts[2][1:]) // skip leading space - line, err = strconv.Atoi(string(parts[1])) - if err != nil { - text = fmt.Sprintf("bad line number: %s", b) - line = 1 - } - } - // printWithFileLine with alsoToStderr=true, so standard log messages - // always appear on standard error. - logging.printWithFileLine(severity(lb), file, line, true, text) - return len(b), nil -} - -// setV computes and remembers the V level for a given PC -// when vmodule is enabled. -// File pattern matching takes the basename of the file, stripped -// of its .go suffix, and uses filepath.Match, which is a little more -// general than the *? matching used in C++. -// l.mu is held. -func (l *loggingT) setV(pc uintptr) Level { - fn := runtime.FuncForPC(pc) - file, _ := fn.FileLine(pc) - // The file is something like /a/b/c/d.go. We want just the d. - if strings.HasSuffix(file, ".go") { - file = file[:len(file)-3] - } - if slash := strings.LastIndex(file, "/"); slash >= 0 { - file = file[slash+1:] - } - for _, filter := range l.vmodule.filter { - if filter.match(file) { - l.vmap[pc] = filter.level - return filter.level - } - } - l.vmap[pc] = 0 - return 0 -} - -// Verbose is a boolean type that implements Infof (like Printf) etc. -// See the documentation of V for more information. -type Verbose bool - -// V reports whether verbosity at the call site is at least the requested level. -// The returned value is a boolean of type Verbose, which implements Info, Infoln -// and Infof. These methods will write to the Info log if called. 
-// Thus, one may write either -// if glog.V(2) { glog.Info("log this") } -// or -// glog.V(2).Info("log this") -// The second form is shorter but the first is cheaper if logging is off because it does -// not evaluate its arguments. -// -// Whether an individual call to V generates a log record depends on the setting of -// the -v and --vmodule flags; both are off by default. If the level in the call to -// V is at least the value of -v, or of -vmodule for the source file containing the -// call, the V call will log. -func V(level Level) Verbose { - // This function tries hard to be cheap unless there's work to do. - // The fast path is two atomic loads and compares. - - // Here is a cheap but safe test to see if V logging is enabled globally. - if logging.verbosity.get() >= level { - return Verbose(true) - } - - // It's off globally but it vmodule may still be set. - // Here is another cheap but safe test to see if vmodule is enabled. - if atomic.LoadInt32(&logging.filterLength) > 0 { - // Now we need a proper lock to use the logging structure. The pcs field - // is shared so we must lock before accessing it. This is fairly expensive, - // but if V logging is enabled we're slow anyway. - logging.mu.Lock() - defer logging.mu.Unlock() - if runtime.Callers(2, logging.pcs[:]) == 0 { - return Verbose(false) - } - v, ok := logging.vmap[logging.pcs[0]] - if !ok { - v = logging.setV(logging.pcs[0]) - } - return Verbose(v >= level) - } - return Verbose(false) -} - -// Info is equivalent to the global Info function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Info(args ...interface{}) { - if v { - logging.print(infoLog, args...) - } -} - -// Infoln is equivalent to the global Infoln function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Infoln(args ...interface{}) { - if v { - logging.println(infoLog, args...) 
- } -} - -// Infof is equivalent to the global Infof function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Infof(format string, args ...interface{}) { - if v { - logging.printf(infoLog, format, args...) - } -} - -// Info logs to the INFO log. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Info(args ...interface{}) { - logging.print(infoLog, args...) -} - -// InfoDepth acts as Info but uses depth to determine which call frame to log. -// InfoDepth(0, "msg") is the same as Info("msg"). -func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, depth, args...) -} - -// Infoln logs to the INFO log. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Infoln(args ...interface{}) { - logging.println(infoLog, args...) -} - -// Infof logs to the INFO log. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Infof(format string, args ...interface{}) { - logging.printf(infoLog, format, args...) -} - -// Warning logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Warning(args ...interface{}) { - logging.print(warningLog, args...) -} - -// WarningDepth acts as Warning but uses depth to determine which call frame to log. -// WarningDepth(0, "msg") is the same as Warning("msg"). -func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, depth, args...) -} - -// Warningln logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Warningln(args ...interface{}) { - logging.println(warningLog, args...) -} - -// Warningf logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. 
-func Warningf(format string, args ...interface{}) { - logging.printf(warningLog, format, args...) -} - -// Error logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Error(args ...interface{}) { - logging.print(errorLog, args...) -} - -// ErrorDepth acts as Error but uses depth to determine which call frame to log. -// ErrorDepth(0, "msg") is the same as Error("msg"). -func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, depth, args...) -} - -// Errorln logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Errorln(args ...interface{}) { - logging.println(errorLog, args...) -} - -// Errorf logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Errorf(format string, args ...interface{}) { - logging.printf(errorLog, format, args...) -} - -// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Fatal(args ...interface{}) { - logging.print(fatalLog, args...) -} - -// FatalDepth acts as Fatal but uses depth to determine which call frame to log. -// FatalDepth(0, "msg") is the same as Fatal("msg"). -func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, depth, args...) -} - -// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Fatalln(args ...interface{}) { - logging.println(fatalLog, args...) 
-} - -// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Fatalf(format string, args ...interface{}) { - logging.printf(fatalLog, format, args...) -} - -// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. -// It allows Exit and relatives to use the Fatal logs. -var fatalNoStacks uint32 - -// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Exit(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, args...) -} - -// ExitDepth acts as Exit but uses depth to determine which call frame to log. -// ExitDepth(0, "msg") is the same as Exit("msg"). -func ExitDepth(depth int, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, depth, args...) -} - -// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -func Exitln(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, args...) -} - -// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Exitf(format string, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(fatalLog, format, args...) -} diff --git a/Godeps/_workspace/src/github.com/golang/glog/glog_file.go b/Godeps/_workspace/src/github.com/golang/glog/glog_file.go deleted file mode 100644 index 65075d2811..0000000000 --- a/Godeps/_workspace/src/github.com/golang/glog/glog_file.go +++ /dev/null @@ -1,124 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// File I/O for logs. - -package glog - -import ( - "errors" - "flag" - "fmt" - "os" - "os/user" - "path/filepath" - "strings" - "sync" - "time" -) - -// MaxSize is the maximum size of a log file in bytes. -var MaxSize uint64 = 1024 * 1024 * 1800 - -// logDirs lists the candidate directories for new log files. -var logDirs []string - -// If non-empty, overrides the choice of directory in which to write logs. -// See createLogDirs for the full list of possible destinations. -var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") - -func createLogDirs() { - if *logDir != "" { - logDirs = append(logDirs, *logDir) - } - logDirs = append(logDirs, os.TempDir()) -} - -var ( - pid = os.Getpid() - program = filepath.Base(os.Args[0]) - host = "unknownhost" - userName = "unknownuser" -) - -func init() { - h, err := os.Hostname() - if err == nil { - host = shortHostname(h) - } - - current, err := user.Current() - if err == nil { - userName = current.Username - } - - // Sanitize userName since it may contain filepath separators on Windows. - userName = strings.Replace(userName, `\`, "_", -1) -} - -// shortHostname returns its argument, truncating at the first period. -// For instance, given "www.google.com" it returns "www". 
-func shortHostname(hostname string) string { - if i := strings.Index(hostname, "."); i >= 0 { - return hostname[:i] - } - return hostname -} - -// logName returns a new log file name containing tag, with start time t, and -// the name for the symlink for tag. -func logName(tag string, t time.Time) (name, link string) { - name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", - program, - host, - userName, - tag, - t.Year(), - t.Month(), - t.Day(), - t.Hour(), - t.Minute(), - t.Second(), - pid) - return name, program + "." + tag -} - -var onceLogDirs sync.Once - -// create creates a new log file and returns the file and its filename, which -// contains tag ("INFO", "FATAL", etc.) and t. If the file is created -// successfully, create also attempts to update the symlink for that tag, ignoring -// errors. -func create(tag string, t time.Time) (f *os.File, filename string, err error) { - onceLogDirs.Do(createLogDirs) - if len(logDirs) == 0 { - return nil, "", errors.New("log: no log dirs") - } - name, link := logName(tag, t) - var lastErr error - for _, dir := range logDirs { - fname := filepath.Join(dir, name) - f, err := os.Create(fname) - if err == nil { - symlink := filepath.Join(dir, link) - os.Remove(symlink) // ignore err - os.Symlink(name, symlink) // ignore err - return f, fname, nil - } - lastErr = err - } - return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) -} diff --git a/Godeps/_workspace/src/github.com/golang/glog/glog_test.go b/Godeps/_workspace/src/github.com/golang/glog/glog_test.go deleted file mode 100644 index 0fb376e1fd..0000000000 --- a/Godeps/_workspace/src/github.com/golang/glog/glog_test.go +++ /dev/null @@ -1,415 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package glog - -import ( - "bytes" - "fmt" - stdLog "log" - "path/filepath" - "runtime" - "strconv" - "strings" - "testing" - "time" -) - -// Test that shortHostname works as advertised. -func TestShortHostname(t *testing.T) { - for hostname, expect := range map[string]string{ - "": "", - "host": "host", - "host.google.com": "host", - } { - if got := shortHostname(hostname); expect != got { - t.Errorf("shortHostname(%q): expected %q, got %q", hostname, expect, got) - } - } -} - -// flushBuffer wraps a bytes.Buffer to satisfy flushSyncWriter. -type flushBuffer struct { - bytes.Buffer -} - -func (f *flushBuffer) Flush() error { - return nil -} - -func (f *flushBuffer) Sync() error { - return nil -} - -// swap sets the log writers and returns the old array. -func (l *loggingT) swap(writers [numSeverity]flushSyncWriter) (old [numSeverity]flushSyncWriter) { - l.mu.Lock() - defer l.mu.Unlock() - old = l.file - for i, w := range writers { - logging.file[i] = w - } - return -} - -// newBuffers sets the log writers to all new byte buffers and returns the old array. -func (l *loggingT) newBuffers() [numSeverity]flushSyncWriter { - return l.swap([numSeverity]flushSyncWriter{new(flushBuffer), new(flushBuffer), new(flushBuffer), new(flushBuffer)}) -} - -// contents returns the specified log value as a string. -func contents(s severity) string { - return logging.file[s].(*flushBuffer).String() -} - -// contains reports whether the string is contained in the log. 
-func contains(s severity, str string, t *testing.T) bool { - return strings.Contains(contents(s), str) -} - -// setFlags configures the logging flags how the test expects them. -func setFlags() { - logging.toStderr = false -} - -// Test that Info works as advertised. -func TestInfo(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - Info("test") - if !contains(infoLog, "I", t) { - t.Errorf("Info has wrong character: %q", contents(infoLog)) - } - if !contains(infoLog, "test", t) { - t.Error("Info failed") - } -} - -func TestInfoDepth(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - - f := func() { InfoDepth(1, "depth-test1") } - - // The next three lines must stay together - _, _, wantLine, _ := runtime.Caller(0) - InfoDepth(0, "depth-test0") - f() - - msgs := strings.Split(strings.TrimSuffix(contents(infoLog), "\n"), "\n") - if len(msgs) != 2 { - t.Fatalf("Got %d lines, expected 2", len(msgs)) - } - - for i, m := range msgs { - if !strings.HasPrefix(m, "I") { - t.Errorf("InfoDepth[%d] has wrong character: %q", i, m) - } - w := fmt.Sprintf("depth-test%d", i) - if !strings.Contains(m, w) { - t.Errorf("InfoDepth[%d] missing %q: %q", i, w, m) - } - - // pull out the line number (between : and ]) - msg := m[strings.LastIndex(m, ":")+1:] - x := strings.Index(msg, "]") - if x < 0 { - t.Errorf("InfoDepth[%d]: missing ']': %q", i, m) - continue - } - line, err := strconv.Atoi(msg[:x]) - if err != nil { - t.Errorf("InfoDepth[%d]: bad line number: %q", i, m) - continue - } - wantLine++ - if wantLine != line { - t.Errorf("InfoDepth[%d]: got line %d, want %d", i, line, wantLine) - } - } -} - -func init() { - CopyStandardLogTo("INFO") -} - -// Test that CopyStandardLogTo panics on bad input. 
-func TestCopyStandardLogToPanic(t *testing.T) { - defer func() { - if s, ok := recover().(string); !ok || !strings.Contains(s, "LOG") { - t.Errorf(`CopyStandardLogTo("LOG") should have panicked: %v`, s) - } - }() - CopyStandardLogTo("LOG") -} - -// Test that using the standard log package logs to INFO. -func TestStandardLog(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - stdLog.Print("test") - if !contains(infoLog, "I", t) { - t.Errorf("Info has wrong character: %q", contents(infoLog)) - } - if !contains(infoLog, "test", t) { - t.Error("Info failed") - } -} - -// Test that the header has the correct format. -func TestHeader(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - defer func(previous func() time.Time) { timeNow = previous }(timeNow) - timeNow = func() time.Time { - return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local) - } - pid = 1234 - Info("test") - var line int - format := "I0102 15:04:05.067890 1234 glog_test.go:%d] test\n" - n, err := fmt.Sscanf(contents(infoLog), format, &line) - if n != 1 || err != nil { - t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(infoLog)) - } - // Scanf treats multiple spaces as equivalent to a single space, - // so check for correct space-padding also. - want := fmt.Sprintf(format, line) - if contents(infoLog) != want { - t.Errorf("log format error: got:\n\t%q\nwant:\t%q", contents(infoLog), want) - } -} - -// Test that an Error log goes to Warning and Info. -// Even in the Info log, the source character will be E, so the data should -// all be identical. 
-func TestError(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - Error("test") - if !contains(errorLog, "E", t) { - t.Errorf("Error has wrong character: %q", contents(errorLog)) - } - if !contains(errorLog, "test", t) { - t.Error("Error failed") - } - str := contents(errorLog) - if !contains(warningLog, str, t) { - t.Error("Warning failed") - } - if !contains(infoLog, str, t) { - t.Error("Info failed") - } -} - -// Test that a Warning log goes to Info. -// Even in the Info log, the source character will be W, so the data should -// all be identical. -func TestWarning(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - Warning("test") - if !contains(warningLog, "W", t) { - t.Errorf("Warning has wrong character: %q", contents(warningLog)) - } - if !contains(warningLog, "test", t) { - t.Error("Warning failed") - } - str := contents(warningLog) - if !contains(infoLog, str, t) { - t.Error("Info failed") - } -} - -// Test that a V log goes to Info. -func TestV(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - logging.verbosity.Set("2") - defer logging.verbosity.Set("0") - V(2).Info("test") - if !contains(infoLog, "I", t) { - t.Errorf("Info has wrong character: %q", contents(infoLog)) - } - if !contains(infoLog, "test", t) { - t.Error("Info failed") - } -} - -// Test that a vmodule enables a log in this file. -func TestVmoduleOn(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - logging.vmodule.Set("glog_test=2") - defer logging.vmodule.Set("") - if !V(1) { - t.Error("V not enabled for 1") - } - if !V(2) { - t.Error("V not enabled for 2") - } - if V(3) { - t.Error("V enabled for 3") - } - V(2).Info("test") - if !contains(infoLog, "I", t) { - t.Errorf("Info has wrong character: %q", contents(infoLog)) - } - if !contains(infoLog, "test", t) { - t.Error("Info failed") - } -} - -// Test that a vmodule of another file does not enable a log in this file. 
-func TestVmoduleOff(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - logging.vmodule.Set("notthisfile=2") - defer logging.vmodule.Set("") - for i := 1; i <= 3; i++ { - if V(Level(i)) { - t.Errorf("V enabled for %d", i) - } - } - V(2).Info("test") - if contents(infoLog) != "" { - t.Error("V logged incorrectly") - } -} - -// vGlobs are patterns that match/don't match this file at V=2. -var vGlobs = map[string]bool{ - // Easy to test the numeric match here. - "glog_test=1": false, // If -vmodule sets V to 1, V(2) will fail. - "glog_test=2": true, - "glog_test=3": true, // If -vmodule sets V to 1, V(3) will succeed. - // These all use 2 and check the patterns. All are true. - "*=2": true, - "?l*=2": true, - "????_*=2": true, - "??[mno]?_*t=2": true, - // These all use 2 and check the patterns. All are false. - "*x=2": false, - "m*=2": false, - "??_*=2": false, - "?[abc]?_*t=2": false, -} - -// Test that vmodule globbing works as advertised. -func testVmoduleGlob(pat string, match bool, t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - defer logging.vmodule.Set("") - logging.vmodule.Set(pat) - if V(2) != Verbose(match) { - t.Errorf("incorrect match for %q: got %t expected %t", pat, V(2), match) - } -} - -// Test that a vmodule globbing works as advertised. -func TestVmoduleGlob(t *testing.T) { - for glob, match := range vGlobs { - testVmoduleGlob(glob, match, t) - } -} - -func TestRollover(t *testing.T) { - setFlags() - var err error - defer func(previous func(error)) { logExitFunc = previous }(logExitFunc) - logExitFunc = func(e error) { - err = e - } - defer func(previous uint64) { MaxSize = previous }(MaxSize) - MaxSize = 512 - - Info("x") // Be sure we have a file. 
- info, ok := logging.file[infoLog].(*syncBuffer) - if !ok { - t.Fatal("info wasn't created") - } - if err != nil { - t.Fatalf("info has initial error: %v", err) - } - fname0 := info.file.Name() - Info(strings.Repeat("x", int(MaxSize))) // force a rollover - if err != nil { - t.Fatalf("info has error after big write: %v", err) - } - - // Make sure the next log file gets a file name with a different - // time stamp. - // - // TODO: determine whether we need to support subsecond log - // rotation. C++ does not appear to handle this case (nor does it - // handle Daylight Savings Time properly). - time.Sleep(1 * time.Second) - - Info("x") // create a new file - if err != nil { - t.Fatalf("error after rotation: %v", err) - } - fname1 := info.file.Name() - if fname0 == fname1 { - t.Errorf("info.f.Name did not change: %v", fname0) - } - if info.nbytes >= MaxSize { - t.Errorf("file size was not reset: %d", info.nbytes) - } -} - -func TestLogBacktraceAt(t *testing.T) { - setFlags() - defer logging.swap(logging.newBuffers()) - // The peculiar style of this code simplifies line counting and maintenance of the - // tracing block below. - var infoLine string - setTraceLocation := func(file string, line int, ok bool, delta int) { - if !ok { - t.Fatal("could not get file:line") - } - _, file = filepath.Split(file) - infoLine = fmt.Sprintf("%s:%d", file, line+delta) - err := logging.traceLocation.Set(infoLine) - if err != nil { - t.Fatal("error setting log_backtrace_at: ", err) - } - } - { - // Start of tracing block. These lines know about each other's relative position. - _, file, line, ok := runtime.Caller(0) - setTraceLocation(file, line, ok, +2) // Two lines between Caller and Info calls. 
- Info("we want a stack trace here") - } - numAppearances := strings.Count(contents(infoLog), infoLine) - if numAppearances < 2 { - // Need 2 appearances, one in the log header and one in the trace: - // log_test.go:281: I0511 16:36:06.952398 02238 log_test.go:280] we want a stack trace here - // ... - // github.com/glog/glog_test.go:280 (0x41ba91) - // ... - // We could be more precise but that would require knowing the details - // of the traceback format, which may not be dependable. - t.Fatal("got no trace back; log is ", contents(infoLog)) - } -} - -func BenchmarkHeader(b *testing.B) { - for i := 0; i < b.N; i++ { - buf, _, _ := logging.header(infoLog, 0) - logging.putBuffer(buf) - } -} diff --git a/Godeps/_workspace/src/github.com/kardianos/osext/README.md b/Godeps/_workspace/src/github.com/kardianos/osext/README.md deleted file mode 100644 index 820e1ecb54..0000000000 --- a/Godeps/_workspace/src/github.com/kardianos/osext/README.md +++ /dev/null @@ -1,14 +0,0 @@ -### Extensions to the "os" package. - -## Find the current Executable and ExecutableFolder. - -There is sometimes utility in finding the current executable file -that is running. This can be used for upgrading the current executable -or finding resources located relative to the executable file. - -Multi-platform and supports: - * Linux - * OS X - * Windows - * Plan 9 - * BSDs. diff --git a/Godeps/_workspace/src/github.com/kardianos/osext/osext.go b/Godeps/_workspace/src/github.com/kardianos/osext/osext.go deleted file mode 100644 index 7bef46f03e..0000000000 --- a/Godeps/_workspace/src/github.com/kardianos/osext/osext.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Extensions to the standard "os" package. -package osext - -import "path/filepath" - -// Executable returns an absolute path that can be used to -// re-invoke the current program. 
-// It may not be valid after the current program exits. -func Executable() (string, error) { - p, err := executable() - return filepath.Clean(p), err -} - -// Returns same path as Executable, returns just the folder -// path. Excludes the executable name and any trailing slash. -func ExecutableFolder() (string, error) { - p, err := Executable() - if err != nil { - return "", err - } - - return filepath.Dir(p), nil -} diff --git a/Godeps/_workspace/src/github.com/kardianos/osext/osext_plan9.go b/Godeps/_workspace/src/github.com/kardianos/osext/osext_plan9.go deleted file mode 100644 index 655750c542..0000000000 --- a/Godeps/_workspace/src/github.com/kardianos/osext/osext_plan9.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package osext - -import ( - "os" - "strconv" - "syscall" -) - -func executable() (string, error) { - f, err := os.Open("/proc/" + strconv.Itoa(os.Getpid()) + "/text") - if err != nil { - return "", err - } - defer f.Close() - return syscall.Fd2path(int(f.Fd())) -} diff --git a/Godeps/_workspace/src/github.com/kardianos/osext/osext_procfs.go b/Godeps/_workspace/src/github.com/kardianos/osext/osext_procfs.go deleted file mode 100644 index b2598bc77a..0000000000 --- a/Godeps/_workspace/src/github.com/kardianos/osext/osext_procfs.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build linux netbsd openbsd solaris dragonfly - -package osext - -import ( - "errors" - "fmt" - "os" - "runtime" - "strings" -) - -func executable() (string, error) { - switch runtime.GOOS { - case "linux": - const deletedTag = " (deleted)" - execpath, err := os.Readlink("/proc/self/exe") - if err != nil { - return execpath, err - } - execpath = strings.TrimSuffix(execpath, deletedTag) - execpath = strings.TrimPrefix(execpath, deletedTag) - return execpath, nil - case "netbsd": - return os.Readlink("/proc/curproc/exe") - case "openbsd", "dragonfly": - return os.Readlink("/proc/curproc/file") - case "solaris": - return os.Readlink(fmt.Sprintf("/proc/%d/path/a.out", os.Getpid())) - } - return "", errors.New("ExecPath not implemented for " + runtime.GOOS) -} diff --git a/Godeps/_workspace/src/github.com/kardianos/osext/osext_sysctl.go b/Godeps/_workspace/src/github.com/kardianos/osext/osext_sysctl.go deleted file mode 100644 index b66cac878c..0000000000 --- a/Godeps/_workspace/src/github.com/kardianos/osext/osext_sysctl.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin freebsd - -package osext - -import ( - "os" - "path/filepath" - "runtime" - "syscall" - "unsafe" -) - -var initCwd, initCwdErr = os.Getwd() - -func executable() (string, error) { - var mib [4]int32 - switch runtime.GOOS { - case "freebsd": - mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1} - case "darwin": - mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1} - } - - n := uintptr(0) - // Get length. - _, _, errNum := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0) - if errNum != 0 { - return "", errNum - } - if n == 0 { // This shouldn't happen. 
- return "", nil - } - buf := make([]byte, n) - _, _, errNum = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0) - if errNum != 0 { - return "", errNum - } - if n == 0 { // This shouldn't happen. - return "", nil - } - for i, v := range buf { - if v == 0 { - buf = buf[:i] - break - } - } - var err error - execPath := string(buf) - // execPath will not be empty due to above checks. - // Try to get the absolute path if the execPath is not rooted. - if execPath[0] != '/' { - execPath, err = getAbs(execPath) - if err != nil { - return execPath, err - } - } - // For darwin KERN_PROCARGS may return the path to a symlink rather than the - // actual executable. - if runtime.GOOS == "darwin" { - if execPath, err = filepath.EvalSymlinks(execPath); err != nil { - return execPath, err - } - } - return execPath, nil -} - -func getAbs(execPath string) (string, error) { - if initCwdErr != nil { - return execPath, initCwdErr - } - // The execPath may begin with a "../" or a "./" so clean it first. - // Join the two paths, trailing and starting slashes undetermined, so use - // the generic Join function. - return filepath.Join(initCwd, filepath.Clean(execPath)), nil -} diff --git a/Godeps/_workspace/src/github.com/kardianos/osext/osext_test.go b/Godeps/_workspace/src/github.com/kardianos/osext/osext_test.go deleted file mode 100644 index 77ccc28e9e..0000000000 --- a/Godeps/_workspace/src/github.com/kardianos/osext/osext_test.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin linux freebsd netbsd windows - -package osext - -import ( - "bytes" - "fmt" - "io" - "os" - "os/exec" - "path/filepath" - "runtime" - "testing" -) - -const ( - executableEnvVar = "OSTEST_OUTPUT_EXECUTABLE" - - executableEnvValueMatch = "match" - executableEnvValueDelete = "delete" -) - -func TestPrintExecutable(t *testing.T) { - ef, err := Executable() - if err != nil { - t.Fatalf("Executable failed: %v", err) - } - t.Log("Executable:", ef) -} -func TestPrintExecutableFolder(t *testing.T) { - ef, err := ExecutableFolder() - if err != nil { - t.Fatalf("ExecutableFolder failed: %v", err) - } - t.Log("Executable Folder:", ef) -} -func TestExecutableFolder(t *testing.T) { - ef, err := ExecutableFolder() - if err != nil { - t.Fatalf("ExecutableFolder failed: %v", err) - } - if ef[len(ef)-1] == filepath.Separator { - t.Fatal("ExecutableFolder ends with a trailing slash.") - } -} -func TestExecutableMatch(t *testing.T) { - ep, err := Executable() - if err != nil { - t.Fatalf("Executable failed: %v", err) - } - - // fullpath to be of the form "dir/prog". - dir := filepath.Dir(filepath.Dir(ep)) - fullpath, err := filepath.Rel(dir, ep) - if err != nil { - t.Fatalf("filepath.Rel: %v", err) - } - // Make child start with a relative program path. - // Alter argv[0] for child to verify getting real path without argv[0]. 
- cmd := &exec.Cmd{ - Dir: dir, - Path: fullpath, - Env: []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueMatch)}, - } - out, err := cmd.CombinedOutput() - if err != nil { - t.Fatalf("exec(self) failed: %v", err) - } - outs := string(out) - if !filepath.IsAbs(outs) { - t.Fatalf("Child returned %q, want an absolute path", out) - } - if !sameFile(outs, ep) { - t.Fatalf("Child returned %q, not the same file as %q", out, ep) - } -} - -func TestExecutableDelete(t *testing.T) { - if runtime.GOOS != "linux" { - t.Skip() - } - fpath, err := Executable() - if err != nil { - t.Fatalf("Executable failed: %v", err) - } - - r, w := io.Pipe() - stderrBuff := &bytes.Buffer{} - stdoutBuff := &bytes.Buffer{} - cmd := &exec.Cmd{ - Path: fpath, - Env: []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueDelete)}, - Stdin: r, - Stderr: stderrBuff, - Stdout: stdoutBuff, - } - err = cmd.Start() - if err != nil { - t.Fatalf("exec(self) start failed: %v", err) - } - - tempPath := fpath + "_copy" - _ = os.Remove(tempPath) - - err = copyFile(tempPath, fpath) - if err != nil { - t.Fatalf("copy file failed: %v", err) - } - err = os.Remove(fpath) - if err != nil { - t.Fatalf("remove running test file failed: %v", err) - } - err = os.Rename(tempPath, fpath) - if err != nil { - t.Fatalf("rename copy to previous name failed: %v", err) - } - - w.Write([]byte{0}) - w.Close() - - err = cmd.Wait() - if err != nil { - t.Fatalf("exec wait failed: %v", err) - } - - childPath := stderrBuff.String() - if !filepath.IsAbs(childPath) { - t.Fatalf("Child returned %q, want an absolute path", childPath) - } - if !sameFile(childPath, fpath) { - t.Fatalf("Child returned %q, not the same file as %q", childPath, fpath) - } -} - -func sameFile(fn1, fn2 string) bool { - fi1, err := os.Stat(fn1) - if err != nil { - return false - } - fi2, err := os.Stat(fn2) - if err != nil { - return false - } - return os.SameFile(fi1, fi2) -} -func copyFile(dest, src string) error { - df, err := 
os.Create(dest) - if err != nil { - return err - } - defer df.Close() - - sf, err := os.Open(src) - if err != nil { - return err - } - defer sf.Close() - - _, err = io.Copy(df, sf) - return err -} - -func TestMain(m *testing.M) { - env := os.Getenv(executableEnvVar) - switch env { - case "": - os.Exit(m.Run()) - case executableEnvValueMatch: - // First chdir to another path. - dir := "/" - if runtime.GOOS == "windows" { - dir = filepath.VolumeName(".") - } - os.Chdir(dir) - if ep, err := Executable(); err != nil { - fmt.Fprint(os.Stderr, "ERROR: ", err) - } else { - fmt.Fprint(os.Stderr, ep) - } - case executableEnvValueDelete: - bb := make([]byte, 1) - var err error - n, err := os.Stdin.Read(bb) - if err != nil { - fmt.Fprint(os.Stderr, "ERROR: ", err) - os.Exit(2) - } - if n != 1 { - fmt.Fprint(os.Stderr, "ERROR: n != 1, n == ", n) - os.Exit(2) - } - if ep, err := Executable(); err != nil { - fmt.Fprint(os.Stderr, "ERROR: ", err) - } else { - fmt.Fprint(os.Stderr, ep) - } - } - os.Exit(0) -} diff --git a/Godeps/_workspace/src/github.com/kardianos/osext/osext_windows.go b/Godeps/_workspace/src/github.com/kardianos/osext/osext_windows.go deleted file mode 100644 index 72d282cf8c..0000000000 --- a/Godeps/_workspace/src/github.com/kardianos/osext/osext_windows.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package osext - -import ( - "syscall" - "unicode/utf16" - "unsafe" -) - -var ( - kernel = syscall.MustLoadDLL("kernel32.dll") - getModuleFileNameProc = kernel.MustFindProc("GetModuleFileNameW") -) - -// GetModuleFileName() with hModule = NULL -func executable() (exePath string, err error) { - return getModuleFileName() -} - -func getModuleFileName() (string, error) { - var n uint32 - b := make([]uint16, syscall.MAX_PATH) - size := uint32(len(b)) - - r0, _, e1 := getModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(size)) - n = uint32(r0) - if n == 0 { - return "", e1 - } - return string(utf16.Decode(b[0:n])), nil -} diff --git a/Godeps/_workspace/src/github.com/leeor/etcd-sync/.gitignore b/Godeps/_workspace/src/github.com/leeor/etcd-sync/.gitignore deleted file mode 100644 index 0925a9d131..0000000000 --- a/Godeps/_workspace/src/github.com/leeor/etcd-sync/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -.*.swp -.*.swo -*_etcd_data diff --git a/Godeps/_workspace/src/github.com/leeor/etcd-sync/LICENSE b/Godeps/_workspace/src/github.com/leeor/etcd-sync/LICENSE deleted file mode 100644 index 82431a75db..0000000000 --- a/Godeps/_workspace/src/github.com/leeor/etcd-sync/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright (c) 2014, Leeor Aharon - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/Godeps/_workspace/src/github.com/leeor/etcd-sync/README.md b/Godeps/_workspace/src/github.com/leeor/etcd-sync/README.md deleted file mode 100644 index 5989584193..0000000000 --- a/Godeps/_workspace/src/github.com/leeor/etcd-sync/README.md +++ /dev/null @@ -1,25 +0,0 @@ -etcd-sync -========= - -An etcd-based sync module, aiming at implementing the Go sync pkg over etcd for cluster-wide synchronization. - -Installation -============ - -~~~shell -go get github.com/leeor/etcd-sync -~~~ - -Usage -===== - -At this time, only a simple mutex has been implemented. - -## EtcdMutex - -~~~go -mutex := NewMutexFromServers([]string{"http://127.0.0.1:4001"}, key, 0) -mutex.Lock() -// do some critical stuff -mutex.Unlock() -~~~ diff --git a/Godeps/_workspace/src/github.com/leeor/etcd-sync/mutex.go b/Godeps/_workspace/src/github.com/leeor/etcd-sync/mutex.go deleted file mode 100644 index 4214237e3c..0000000000 --- a/Godeps/_workspace/src/github.com/leeor/etcd-sync/mutex.go +++ /dev/null @@ -1,249 +0,0 @@ -package etcdsync - -import ( - "fmt" - "time" - - "github.com/coreos/go-etcd/etcd" - "github.com/golang/glog" -) - -/* - * etcd-based mutex - * - * Locking works using the following scheme: - * 1. Attempt CompareAndSwap() to grab the lock. If it works -> we have the - * lock. - * 2. If the key does not exist, try creating it with Create(). If that works -> - * we have the lock. If Create() fails, it might be due to a race condition - * with another node which was able to create the key before us. So, - * 3. Attempt to CompareAndSwap() again, expect to find that the key exists, and - * the lock taken by another node. If not, then we have the lock. - * 4. Watch the key, using the index returned by the previous call to - * CompareAndSwap(), and wait for the lock to be released or expire. - * 5. Goto #3. - * - * Once we have the lock, keep refreshing its ttl until we're signaled to - * release it. 
- */ - -type lockState uint - -const ( - unknown lockState = 0 - released lockState = 1 << iota - acquired lockState = 1 << iota -) - -type EtcdMutex struct { - key string - ttl uint64 - - client *etcd.Client - - state lockState - - quit chan bool - released chan bool - - debug bool -} - -func NewMutexFromClient(client *etcd.Client, key string, ttl uint64) *EtcdMutex { - - m := &EtcdMutex{client: client} - - if ttl == 0 { - - ttl = 3 - } - - m.key = key - m.ttl = ttl - - m.quit = make(chan bool) - m.released = make(chan bool) - - return m -} - -func NewMutexFromServers(servers []string, key string, ttl uint64) *EtcdMutex { - - client := etcd.NewClient(servers) - - return NewMutexFromClient(client, key, ttl) -} - -func (m *EtcdMutex) setDebug(on bool) { - - m.debug = on -} - -func (m *EtcdMutex) Lock() error { - - var ( - state lockState = unknown - index uint64 - ) - - glog.Infof("[%s] Lock called", m.key) - for state != acquired { - - res, err := m.client.CompareAndSwap(m.key, "locked", m.ttl, "released", 0) - if err == nil { - - glog.Infof("[%s] lock acquired (%d)", m.key, res.Node.ModifiedIndex) - state = acquired - index = res.Node.ModifiedIndex - } else { - - glog.Infof("[%s] failed to acquire lock: %#v", m.key, err) - - if etcderr, ok := err.(*etcd.EtcdError); ok { - switch etcderr.ErrorCode { - case 100: - // The key does not exist, let's try to create it - glog.Infof("[%s] lock key does not exist, will attempt to create it", m.key) - if res, err := m.client.Create(m.key, "locked", 1); err != nil { - // Someone has created and locked this key before us. 
- glog.Infof("[%s] could not create lock key, someone probably beat us to it (%#v)", m.key, err) - state = released - if etcderr, ok := err.(*etcd.EtcdError); ok { - - index = etcderr.Index - } - } else { - - glog.Infof("[%s] created key and locked mutex (%#v, %d)", m.key, res.Node, res.Node.ModifiedIndex) - state = acquired - index = res.Node.ModifiedIndex - } - - case 101: - // couldn't set the key, the prevValue we gave it differs from the - // one in the server. Someone else has this key. - state = released - - if etcderr.Index != 0 { - - index = etcderr.Index - } else if index == 0 { - // we can't start a watch... - glog.Infof("[%s] need to watch, but don't have an index to start with", m.key) - time.Sleep(500 * time.Millisecond) - continue - } - - glog.Infof("[%s] unable to acquire lock, watching key (%#v, %d)", m.key, etcderr, etcderr.Index) - receive := make(chan *etcd.Response) - stop := make(chan bool, 1) - go m.client.Watch(m.key, index, false, receive, stop) - - for res = range receive { - - if res.Node.Value == "released" || res.Action == "expire" { - - glog.Infof("[%s] mutex was either released or has expired (%d)", m.key, res.Node.ModifiedIndex) - stop <- true - } else { - - glog.Infof("[%s] received message (%d): %#v", m.key, res.Node.ModifiedIndex, res) - } - } - glog.Infof("[%s] watch ended", m.key) - - default: - glog.Infof("[%s] unexpected error: %#v", m.key, etcderr) - return fmt.Errorf("Unexpected error trying to acquire lock on key %s: %s", m.key, etcderr) - } - } - } - } - - // by now, state has to be acquired - if state != acquired { - - panic("etcd-sync: mutex not acquired") - } - - glog.Infof("[%s] starting refresh routine", m.key) - go func() { - - tick := time.Tick(time.Second) - - for { - select { - case <-m.quit: - glog.Infof("[%s] quit signaled, releasing lock", m.key) - _, err := m.client.CompareAndSwap(m.key, "released", m.ttl, "locked", index) - if err != nil { - - if etcderr, ok := err.(*etcd.EtcdError); ok { - switch 
etcderr.ErrorCode { - case 100: - // the key has expired or deleted by a third party, - // pretty bad, but the we were about to release it - // anyway. - glog.Infof("[%s] no such key error when trying to release lock", m.key) - - case 101: - // either the prevValue or prevIndex arguments - // failed to match the current data. Either someone - // else has the lock now or the key was tampered - // with and the mutex is now unusable. As long as - // the TTL was not set to 0, it will become usable - // again with time. - glog.Infof("[%s] CAS failed when trying to release lock (%s)", m.key, etcderr.Cause) - - default: - // as long as the stops getting refreshed, the mutex - // will get "unlocked" one the key expires. - glog.Infof("[%s] unexpected error: %#v", m.key, etcderr) - } - } - } - - index = 0 - m.state = released - m.released <- true - - return - - case <-tick: - glog.Infof("[%s] refreshing TTL", m.key) - res, err := m.client.Update(m.key, "locked", m.ttl) - if err != nil { - - glog.Infof("[%s] failed to refresh ttl (%#v)", m.key, err) - } else { - - glog.Infof("[%s] refreshed ttl (%d)", m.key, res.Node.ModifiedIndex) - index = res.Node.ModifiedIndex - } - } - } - }() - - m.state = state - glog.Infof("[%s] done", m.key) - return nil -} - -func (m *EtcdMutex) Unlock() { - - if m.state != acquired { - - panic("etcd-sync: unlock of unlocked mutex") - } - - if m.quit == nil { - - panic("etcd-sync: locked mutex missing its quit channel") - } - - glog.Infof("[%s] Unlock called, sending quit signal", m.key) - m.quit <- true - - <-m.released - glog.Infof("[%s] lock released", m.key) -} diff --git a/Godeps/_workspace/src/github.com/leeor/etcd-sync/mutex_test.go b/Godeps/_workspace/src/github.com/leeor/etcd-sync/mutex_test.go deleted file mode 100644 index ba2c82ae2b..0000000000 --- a/Godeps/_workspace/src/github.com/leeor/etcd-sync/mutex_test.go +++ /dev/null @@ -1,403 +0,0 @@ -package etcdsync - -import ( - "flag" - "log" - "testing" - "time" - - 
"github.com/coreos/go-etcd/etcd" -) - -var key string = "test/mutex" - -func init() { - flag.Parse() -} - -func TestTwoNoKey(t *testing.T) { - - //etcd.SetLogger(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) - - client := etcd.NewClient([]string{"http://127.0.0.1:4001"}) - client.Delete(key, true) - - quit1 := make(chan bool) - quit2 := make(chan bool) - errchan := make(chan bool) - - progress := make(chan bool) - - // first thread - go func() { - - mutex := NewMutexFromClient(client, key, 0) - err := mutex.Lock() - if err != nil { - - errchan <- true - } - - progress <- true - - // sleep for 5 seconds, ttl should be refreshed after 3 seconds - time.Sleep(5 * time.Second) - mutex.Unlock() - - quit1 <- true - }() - - select { - case <-progress: - case <-errchan: - t.Fatal("could not acquire lock, is etcd running?") - } - - // second thread - go func() { - - mutex := NewMutexFromClient(client, key, 0) - //mutex := NewMutexFromServers([]string{"http://127.0.0.1:4001/"}, key, 0) - // should take us 5 seconds to acquire the lock - now := time.Now() - - err := mutex.Lock() - if err != nil { - - t.Fatal("could not acquire lock, is etcd running?", err) - errchan <- true - } - - timeToLock := time.Since(now) - if timeToLock < 5*time.Second { - - t.Fatalf("mutex TTL was not refreshed, lock acquired after %v seconds", timeToLock) - } - - mutex.Unlock() - quit2 <- true - }() - - var ( - q1 bool - q2 bool - ) - - for !q1 || !q2 { - select { - case <-quit1: - q1 = true - case <-quit2: - q2 = true - case <-errchan: - t.Fatal("could not acquire lock, is etcd running?") - } - } -} - -func TestTwoExistingKey(t *testing.T) { - - //etcd.SetLogger(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) - - client := etcd.NewClient([]string{"http://127.0.0.1:4001"}) - client.Set(key, "released", 0) - - quit1 := make(chan bool) - quit2 := make(chan bool) - errchan := make(chan bool) - - progress := make(chan bool) - - // first thread - go func() { - - mutex := 
NewMutexFromServers([]string{"http://127.0.0.1:4001"}, key, 0) - err := mutex.Lock() - if err != nil { - - errchan <- true - } - - progress <- true - - // sleep for 5 seconds, ttl should be refreshed after 3 seconds - time.Sleep(5 * time.Second) - mutex.Unlock() - - quit1 <- true - }() - - select { - case <-progress: - case <-errchan: - t.Fatal("could not acquire lock, is etcd running?") - } - - // second thread - go func() { - - mutex := NewMutexFromClient(client, key, 0) - //mutex := NewMutexFromServers([]string{"http://127.0.0.1:4001/"}, key, 0) - // should take us 5 seconds to acquire the lock - now := time.Now() - - err := mutex.Lock() - if err != nil { - - errchan <- true - } - - timeToLock := time.Since(now) - if timeToLock < 5*time.Second { - - t.Fatalf("mutex TTL was not refreshed, lock acquired after %v seconds", timeToLock) - } - - mutex.Unlock() - quit2 <- true - }() - - var ( - q1 bool - q2 bool - ) - - for !q1 || !q2 { - select { - case <-quit1: - q1 = true - case <-quit2: - q2 = true - case <-errchan: - t.Fatal("could not acquire lock, is etcd running?") - } - } -} - -func TestUnlockReleased(t *testing.T) { - - //etcd.SetLogger(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) - - client := etcd.NewClient([]string{"http://127.0.0.1:4001"}) - client.Delete(key, true) - - mutex := NewMutexFromClient(client, key, 0) - - defer func() { - if msg := recover(); msg == nil { - - t.Fatalf("panic not initiated") - } - }() - mutex.Unlock() -} - -func TestUnlockNoKey(t *testing.T) { - - //etcd.SetLogger(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) - - client := etcd.NewClient([]string{"http://127.0.0.1:4001"}) - client.Delete(key, true) - - mutex := NewMutexFromClient(client, key, 0) - - err := mutex.Lock() - if err != nil { - - t.Fatal("could not acquire lock, is etcd running?", err) - } - - client.Delete(key, false) - time.Sleep(2 * time.Second) - mutex.Unlock() -} - -func _TestUnlockBadIndex(t *testing.T) { - - //etcd.SetLogger(log.New(os.Stderr, 
"", log.LstdFlags|log.Lshortfile)) - - client := etcd.NewClient([]string{"http://127.0.0.1:4001"}) - client.Delete(key, true) - - mutex := NewMutexFromClient(client, key, 0) - - err := mutex.Lock() - if err != nil { - - t.Fatal("could not acquire lock, is etcd running?", err) - } - - client.Update(key, "locked", 0) - mutex.Unlock() - - trigger := make(chan bool) - errchan := make(chan bool) - go func() { - - err := mutex.Lock() - if err != nil { - - errchan <- true - } - - trigger <- true - mutex.Unlock() - }() - - tick := time.Tick(time.Second) - - select { - case <-errchan: - t.Fatal("could not acquire lock, is etcd running?", err) - case <-trigger: - t.Fatalf("managed to get a lock on an out of sync mutex") - break - case <-tick: - // release the blocked goroutine - client.Delete(key, true) - } -} - -func HammerMutex(m *EtcdMutex, loops int, cdone chan bool, errchan chan error, t *testing.T) { - log.Printf("starting %d iterations", loops) - for i := 0; i < loops; i++ { - err := m.Lock() - if err != nil { - - errchan <- err - return - } - - m.Unlock() - } - log.Printf("completed all iterations") - cdone <- true -} - -func TestConcurrentSingleMutex(t *testing.T) { - client := etcd.NewClient([]string{"http://127.0.0.1:4001"}) - client.Delete(key, true) - - m := NewMutexFromClient(client, key, 0) - c := make(chan bool) - e := make(chan error) - for i := 0; i < 10; i++ { - go HammerMutex(m, 100, c, e, t) - } - for i := 0; i < 10; i++ { - select { - case <-c: - case err := <-e: - t.Fatal("could not acquire lock, is etcd running?", err) - } - } -} - -func TestConcurrentMultipleMutex(t *testing.T) { - client := etcd.NewClient([]string{"http://127.0.0.1:4001"}) - client.Delete(key, true) - - c := make(chan bool) - e := make(chan error) - for i := 0; i < 10; i++ { - m := NewMutexFromClient(client, key, 0) - go HammerMutex(m, 100, c, e, t) - } - for i := 0; i < 10; i++ { - select { - case <-c: - case err := <-e: - t.Fatal("could not acquire lock, is etcd running?", err) - 
} - } -} - -func TestMutexPanic(t *testing.T) { - defer func() { - if recover() == nil { - t.Fatalf("unlock of unlocked mutex did not panic") - } - }() - - client := etcd.NewClient([]string{"http://127.0.0.1:4001"}) - client.Delete(key, true) - - mu := NewMutexFromClient(client, key, 0) - err := mu.Lock() - if err != nil { - - t.Fatal("could not acquire lock, is etcd running?", err) - } - - mu.Unlock() - mu.Unlock() -} - -func BenchmarkMutexUncontended(b *testing.B) { - type PaddedMutex struct { - *EtcdMutex - pad [128]uint8 - } - - client := etcd.NewClient([]string{"http://127.0.0.1:4001"}) - client.Delete(key, true) - - b.RunParallel(func(pb *testing.PB) { - mu := PaddedMutex{EtcdMutex: NewMutexFromClient(client, key, 0)} - - for pb.Next() { - err := mu.Lock() - if err != nil { - - b.Fatal("could not acquire lock, is etcd running?", err) - } - - mu.Unlock() - } - }) -} - -func benchmarkMutex(b *testing.B, slack, work bool) { - client := etcd.NewClient([]string{"http://127.0.0.1:4001"}) - client.Delete(key, true) - - mu := NewMutexFromClient(client, key, 0) - if slack { - b.SetParallelism(10) - } - b.RunParallel(func(pb *testing.PB) { - foo := 0 - for pb.Next() { - err := mu.Lock() - if err != nil { - - b.Fatal("could not acquire lock, is etcd running?", err) - } - - mu.Unlock() - if work { - for i := 0; i < 100; i++ { - foo *= 2 - foo /= 2 - } - } - } - _ = foo - }) -} - -func BenchmarkMutex(b *testing.B) { - benchmarkMutex(b, false, false) -} - -func BenchmarkMutexSlack(b *testing.B) { - benchmarkMutex(b, true, false) -} - -func BenchmarkMutexWork(b *testing.B) { - benchmarkMutex(b, false, true) -} - -func BenchmarkMutexWorkSlack(b *testing.B) { - benchmarkMutex(b, true, true) -} diff --git a/Godeps/_workspace/src/github.com/progrium/go-basher/.gitignore b/Godeps/_workspace/src/github.com/progrium/go-basher/.gitignore deleted file mode 100644 index 528cd5b39b..0000000000 --- a/Godeps/_workspace/src/github.com/progrium/go-basher/.gitignore +++ /dev/null @@ -1 
+0,0 @@ -example/example diff --git a/Godeps/_workspace/src/github.com/progrium/go-basher/.travis.yml b/Godeps/_workspace/src/github.com/progrium/go-basher/.travis.yml deleted file mode 100644 index 178fb298fe..0000000000 --- a/Godeps/_workspace/src/github.com/progrium/go-basher/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.2 - - 1.3 - - release - - tip - -script: - - go test -v . \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/progrium/go-basher/LICENSE b/Godeps/_workspace/src/github.com/progrium/go-basher/LICENSE deleted file mode 100644 index d86f557ed9..0000000000 --- a/Godeps/_workspace/src/github.com/progrium/go-basher/LICENSE +++ /dev/null @@ -1,7 +0,0 @@ -Copyright (C) 2014 Jeff Lindsay - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/progrium/go-basher/README.md b/Godeps/_workspace/src/github.com/progrium/go-basher/README.md deleted file mode 100644 index 629fb321bc..0000000000 --- a/Godeps/_workspace/src/github.com/progrium/go-basher/README.md +++ /dev/null @@ -1,94 +0,0 @@ -# go-basher - -A Go library for creating Bash environments, exporting Go functions in them as Bash functions, and running commands in that Bash environment. Combined with a tool like [go-bindata](https://github.com/jteeuwen/go-bindata), you can write programs that are part written in Go and part written in Bash that can be distributed as standalone binaries. - -[![Build Status](https://travis-ci.org/progrium/go-basher.png)](https://travis-ci.org/progrium/go-basher) [![GoDoc](https://godoc.org/github.com/progrium/go-basher?status.svg)](http://godoc.org/github.com/progrium/go-basher) - -## Using go-basher - -Here we have a simple Go program that defines a `reverse` function, creates a Bash environment sourcing `main.bash` and then runs `main` in that environment. 
- -```Go -package main - -import ( - "os" - "io/ioutil" - "log" - "strings" - - "github.com/progrium/go-basher" -) - -func reverse(args []string) int { - bytes, err := ioutil.ReadAll(os.Stdin) - if err != nil { - log.Fatal(err) - } - runes := []rune(strings.Trim(string(bytes), "\n")) - for i, j := 0, len(runes)-1; i < j; i, j = i+1, j-1 { - runes[i], runes[j] = runes[j], runes[i] - } - println(string(runes)) - return 0 -} - -func main() { - bash, _ := basher.NewContext("/bin/bash", false) - bash.ExportFunc("reverse", reverse) - bash.HandleFuncs(os.Args) - - bash.Source("main.bash", nil) - status, err := bash.Run("main", os.Args[1:]) - if err != nil { - log.Fatal(err) - } - os.Exit(status) -} -``` - -Here is our `main.bash` file, the actual heart of the program: - -```bash -main() { - echo "Hello world" | reverse -} -``` - -## Using go-basher with go-bindata - -You can bundle your Bash scripts into your Go binary using [go-bindata](https://github.com/jteeuwen/go-bindata). First install go-bindata: - - $ go get github.com/jteeuwen/go-bindata/... - -Now put all your Bash scripts in a directory called `bash`. The above example program would mean you'd have a `bash/main.bash` file. Run `go-bindata` on the directory: - - $ go-bindata bash - -This will produce a `bindata.go` file that includes all of your Bash scripts. It includes a function called `Asset` that behaves like `ioutil.ReadFile` for files in your `bindata.go`. You can just pass this into the `Source` function when sourcing files. From the above example program, you'd use this line instead: - -```Go -bash.Source("bash/main.bash", Asset) -``` - -If you get ambitious you could also use go-bindata to pack a static version of the Bash binary into your Go binary. Crazy, but cool? - -## Motivation - -Go is a great compiled systems language, but it can still be faster to write and glue existing commands together in Bash. 
However, there are operations you wouldn't want to do in Bash that are straightforward in Go, for example, writing and reading structured data formats. By allowing them to work together, you can use each where they are strongest. - -Take a common task like making an HTTP request for JSON data. Parsing JSON is easy in Go, but without depending on a tool like `jq` it is not even worth trying in Bash. And some formats like YAML don't even have a good `jq` equivalent. Whereas making an HTTP request in Go in the *simplest* case is going to be 6+ lines, as opposed to Bash where you can use `curl` in one line. If we write our JSON parser in Go and fetch the HTTP doc with `curl`, we can express printing a field from a remote JSON object in one line: - - curl -s https://api.github.com/users/progrium | parse-user-field email - -In this case, the command `parse-user-field` is an app specific function defined in your Go program. - -Why would this ever be worth it? I can think of several basic cases: - - 1. you're writing a program in Bash that involves some complex functionality that should be in Go - 1. you're writing a CLI tool in Go but, to start, prototyping would be quicker in Bash - 1. 
you're writing a program in Bash and want it to be easier to distribute, like a Go binary - -## License - -BSD \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/progrium/go-basher/SPONSORS b/Godeps/_workspace/src/github.com/progrium/go-basher/SPONSORS deleted file mode 100644 index b93c4c3632..0000000000 --- a/Godeps/_workspace/src/github.com/progrium/go-basher/SPONSORS +++ /dev/null @@ -1 +0,0 @@ -Deis Project http://deis.io \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/progrium/go-basher/basher.go b/Godeps/_workspace/src/github.com/progrium/go-basher/basher.go deleted file mode 100644 index aa7f76b443..0000000000 --- a/Godeps/_workspace/src/github.com/progrium/go-basher/basher.go +++ /dev/null @@ -1,237 +0,0 @@ -// Package basher provides an API for running and integrating with Bash from Go -package basher - -import ( - "io" - "io/ioutil" - "log" - "os" - "os/exec" - "strings" - "sync" - "syscall" - - "github.com/kardianos/osext" -) - -func exitStatus(err error) (int, error) { - if err != nil { - if exiterr, ok := err.(*exec.ExitError); ok { - // There is no platform independent way to retrieve - // the exit code, but the following will work on Unix - if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { - return int(status.ExitStatus()), nil - } - } - return 0, err - } - return 0, nil -} - -// Application sets up a common entrypoint for a Bash application that -// uses exported Go functions. It uses the DEBUG environment variable -// to set debug on the Context, and SHELL for the Bash binary if it -// includes the string "bash". You can pass a loader function to use -// for the sourced files, and a boolean for whether or not the -// environment should be copied into the Context process. 
-func Application( - funcs map[string]func([]string), - scripts []string, - loader func(string) ([]byte, error), - copyEnv bool) { - - var bashPath string - bashPath, err := exec.LookPath("bash") - if err != nil { - if strings.Contains(os.Getenv("SHELL"), "bash") { - bashPath = os.Getenv("SHELL") - } else { - bashPath = "/bin/bash" - } - } - bash, err := NewContext(bashPath, os.Getenv("DEBUG") != "") - if err != nil { - log.Fatal(err) - } - for name, fn := range funcs { - bash.ExportFunc(name, fn) - } - if bash.HandleFuncs(os.Args) { - os.Exit(0) - } - - for _, script := range scripts { - bash.Source(script, loader) - } - if copyEnv { - bash.CopyEnv() - } - status, err := bash.Run("main", os.Args[1:]) - if err != nil { - log.Fatal(err) - } - os.Exit(status) -} - -// A Context is an instance of a Bash interpreter and environment, including -// sourced scripts, environment variables, and embedded Go functions -type Context struct { - sync.Mutex - - // Debug simply leaves the generated BASH_ENV file produced - // from each Run call of this Context for debugging. - Debug bool - - // BashPath is the path to the Bash executable to be used by Run - BashPath string - - // SelfPath is set by NewContext to be the current executable path. - // It's used to call back into the calling Go process to run exported - // functions. - SelfPath string - - // The io.Reader given to Bash for STDIN - Stdin io.Reader - - // The io.Writer given to Bash for STDOUT - Stdout io.Writer - - // The io.Writer given to Bash for STDERR - Stderr io.Writer - - vars []string - scripts [][]byte - funcs map[string]func([]string) -} - -// Creates and initializes a new Context that will use the given Bash executable. -// The debug mode will leave the produced temporary BASH_ENV file for inspection. 
-func NewContext(bashpath string, debug bool) (*Context, error) { - executable, err := osext.Executable() - if err != nil { - return nil, err - } - return &Context{ - Debug: debug, - BashPath: bashpath, - SelfPath: executable, - Stdin: os.Stdin, - Stdout: os.Stdout, - Stderr: os.Stderr, - scripts: make([][]byte, 0), - vars: make([]string, 0), - funcs: make(map[string]func([]string)), - }, nil -} - -// Copies the current environment variables into the Context -func (c *Context) CopyEnv() { - c.Lock() - defer c.Unlock() - c.vars = append(c.vars, os.Environ()...) -} - -// Adds a shell script to the Context environment. The loader argument can be nil -// which means it will use ioutil.Readfile and load from disk, but it exists so you -// can use the Asset function produced by go-bindata when including script files in -// your Go binary. Calls to Source adds files to the environment in order. -func (c *Context) Source(filepath string, loader func(string) ([]byte, error)) error { - if loader == nil { - loader = ioutil.ReadFile - } - data, err := loader(filepath) - if err != nil { - return err - } - c.Lock() - defer c.Unlock() - c.scripts = append(c.scripts, data) - return nil -} - -// Adds an environment variable to the Context -func (c *Context) Export(name string, value string) { - c.Lock() - defer c.Unlock() - c.vars = append(c.vars, name+"="+value) -} - -// Registers a function with the Context that will produce a Bash function in the environment -// that calls back into your executable triggering the function defined as fn. -func (c *Context) ExportFunc(name string, fn func([]string)) { - c.Lock() - defer c.Unlock() - c.funcs[name] = fn -} - -// Expects your os.Args to parse and handle any callbacks to Go functions registered with -// ExportFunc. You normally call this at the beginning of your program. If a registered -// function is found and handled, HandleFuncs will exit with the appropriate exit code for you. 
-func (c *Context) HandleFuncs(args []string) bool { - for i, arg := range args { - if arg == "::" && len(args) > i+1 { - c.Lock() - defer c.Unlock() - for cmd := range c.funcs { - if cmd == args[i+1] { - c.funcs[cmd](args[i+2:]) - return true - } - } - return false - } - } - return false -} - -func (c *Context) buildEnvfile() (string, error) { - file, err := ioutil.TempFile(os.TempDir(), "bashenv.") - if err != nil { - return "", err - } - defer file.Close() - // variables - file.Write([]byte("unset BASH_ENV\n")) // unset for future calls to bash - file.Write([]byte("export SELF=" + os.Args[0] + "\n")) - file.Write([]byte("export EXECUTABLE='" + c.SelfPath + "'\n")) - for _, kvp := range c.vars { - file.Write([]byte("export " + strings.Replace( - strings.Replace(kvp, "'", "\\'", -1), "=", "=$'", 1) + "'\n")) - } - // functions - for cmd := range c.funcs { - file.Write([]byte(cmd + "() { $EXECUTABLE :: " + cmd + " \"$@\"; }\n")) - } - // scripts - for _, data := range c.scripts { - file.Write(append(data, '\n')) - } - return file.Name(), nil -} - -// Runs a command in Bash from this Context. With each call, a temporary file -// is generated used as BASH_ENV when calling Bash that includes all variables, -// sourced scripts, and exported functions from the Context. Standard I/O by -// default is attached to the calling process I/O. You can change this by setting -// the Stdout, Stderr, Stdin variables of the Context. 
-func (c *Context) Run(command string, args []string) (int, error) { - c.Lock() - defer c.Unlock() - envfile, err := c.buildEnvfile() - if err != nil { - return 0, err - } - if !c.Debug { - defer os.Remove(envfile) - } - argstring := "" - for _, arg := range args { - argstring = argstring + " '" + arg + "'" - } - cmd := exec.Command(c.BashPath, "-c", command+argstring) - cmd.Env = []string{"BASH_ENV=" + envfile} - cmd.Stdin = c.Stdin - cmd.Stdout = c.Stdout - cmd.Stderr = c.Stderr - return exitStatus(cmd.Run()) -} diff --git a/Godeps/_workspace/src/github.com/progrium/go-basher/basher_test.go b/Godeps/_workspace/src/github.com/progrium/go-basher/basher_test.go deleted file mode 100644 index 812f71ff98..0000000000 --- a/Godeps/_workspace/src/github.com/progrium/go-basher/basher_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package basher - -import ( - "bytes" - "strings" - "testing" -) - -var bashpath = "/bin/bash" - -var testScripts = map[string]string{ - "hello.sh": `main() { echo "hello"; }`, - "cat.sh": `main() { cat; }`, - "foobar.sh": `main() { echo $FOOBAR; }`, -} - -func testLoader(name string) ([]byte, error) { - return []byte(testScripts[name]), nil -} - -func TestHelloStdout(t *testing.T) { - bash, _ := NewContext(bashpath, false) - bash.Source("hello.sh", testLoader) - - var stdout bytes.Buffer - bash.Stdout = &stdout - status, err := bash.Run("main", []string{}) - if err != nil { - t.Fatal(err) - } - if status != 0 { - t.Fatal("non-zero exit") - } - if stdout.String() != "hello\n" { - t.Fatal("unexpected stdout:", stdout.String()) - } -} - -func TestHelloStdin(t *testing.T) { - bash, _ := NewContext(bashpath, false) - bash.Source("cat.sh", testLoader) - bash.Stdin = bytes.NewBufferString("hello\n") - - var stdout bytes.Buffer - bash.Stdout = &stdout - status, err := bash.Run("main", []string{}) - if err != nil { - t.Fatal(err) - } - if status != 0 { - t.Fatal("non-zero exit") - } - if stdout.String() != "hello\n" { - t.Fatal("unexpected stdout:", 
stdout.String()) - } -} - -func TestEnvironment(t *testing.T) { - bash, _ := NewContext(bashpath, false) - complexString := "Andy's Laptop says, \"$X=1\"" - bash.Source("foobar.sh", testLoader) - bash.Export("FOOBAR", complexString) - - var stdout bytes.Buffer - bash.Stdout = &stdout - status, err := bash.Run("main", []string{}) - if err != nil { - t.Fatal(err) - } - if status != 0 { - t.Fatal("non-zero exit") - } - if strings.Trim(stdout.String(), "\n") != complexString { - t.Fatal("unexpected stdout:", stdout.String()) - } -} - -func TestFuncCallback(t *testing.T) { - bash, _ := NewContext(bashpath, false) - bash.ExportFunc("myfunc", func(args []string) { - return - }) - bash.SelfPath = "/bin/echo" - - var stdout bytes.Buffer - bash.Stdout = &stdout - status, err := bash.Run("myfunc", []string{"abc", "123"}) - if err != nil { - t.Fatal(err) - } - if status != 0 { - t.Fatal("non-zero exit") - } - if stdout.String() != ":: myfunc abc 123\n" { - t.Fatal("unexpected stdout:", stdout.String()) - } -} - -func TestFuncHandling(t *testing.T) { - exit := make(chan int, 1) - bash, _ := NewContext(bashpath, false) - bash.ExportFunc("test-success", func(args []string) { - exit <- 0 - }) - bash.ExportFunc("test-fail", func(args []string) { - exit <- 2 - }) - - bash.HandleFuncs([]string{"thisprogram", "::", "test-success"}) - status := <-exit - if status != 0 { - t.Fatal("non-zero exit") - } - - bash.HandleFuncs([]string{"thisprogram", "::", "test-fail"}) - status = <-exit - if status != 2 { - t.Fatal("unexpected exit status:", status) - } -} diff --git a/Godeps/_workspace/src/github.com/progrium/go-basher/example/Makefile b/Godeps/_workspace/src/github.com/progrium/go-basher/example/Makefile deleted file mode 100644 index 904f76c703..0000000000 --- a/Godeps/_workspace/src/github.com/progrium/go-basher/example/Makefile +++ /dev/null @@ -1,6 +0,0 @@ - -build: - go get github.com/jteeuwen/go-bindata/... 
- go-bindata bash - go get - go build \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/progrium/go-basher/example/bash/example.bash b/Godeps/_workspace/src/github.com/progrium/go-basher/example/bash/example.bash deleted file mode 100644 index 2d7ba8fc80..0000000000 --- a/Godeps/_workspace/src/github.com/progrium/go-basher/example/bash/example.bash +++ /dev/null @@ -1,11 +0,0 @@ -set -eo pipefail - -hello-bash() { - echo "Hello world from Bash" -} - -main() { - echo "Arguments:" "$@" - hello-bash | reverse - curl -s https://api.github.com/repos/progrium/go-basher | json-pointer /owner/login -} diff --git a/Godeps/_workspace/src/github.com/progrium/go-basher/example/bindata.go b/Godeps/_workspace/src/github.com/progrium/go-basher/example/bindata.go deleted file mode 100644 index ffac3774eb..0000000000 --- a/Godeps/_workspace/src/github.com/progrium/go-basher/example/bindata.go +++ /dev/null @@ -1,118 +0,0 @@ -package main - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "strings" -) - -func bindata_read(data []byte, name string) ([]byte, error) { - gz, err := gzip.NewReader(bytes.NewBuffer(data)) - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - - var buf bytes.Buffer - _, err = io.Copy(&buf, gz) - gz.Close() - - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - - return buf.Bytes(), nil -} - -func bash_example_bash() ([]byte, error) { - return bindata_read([]byte{ - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x00, 0xff, 0x4c, 0xce, - 0xb1, 0x4e, 0x45, 0x21, 0x0c, 0xc6, 0xf1, 0xf9, 0xf4, 0x29, 0x1a, 0xe2, - 0xa0, 0x03, 0x97, 0xfd, 0x4e, 0xea, 0xe4, 0x6b, 0x70, 0x8f, 0x3d, 0x50, - 0x03, 0x94, 0xb4, 0xe0, 0x19, 0xd4, 0x77, 0x17, 0xe3, 0xa0, 0x63, 0x9b, - 0x7f, 0x7e, 0xf9, 0x8c, 0x06, 0x7a, 0x12, 0xec, 0xdc, 0xe9, 0x88, 0x5c, - 0x00, 0x32, 0x95, 0x22, 0xfe, 0x16, 0x2d, 0xdf, 0x3f, 0xe0, 0x07, 0x6c, - 0xb4, 0x67, 0x41, 0xf7, 0xf2, 0xf3, 0xc5, 0x53, 0xb4, 0xbc, 0xe2, 0xa1, - 0x52, 0xf1, 
0x79, 0x05, 0x0e, 0xbe, 0x00, 0x6a, 0xe4, 0xf6, 0xbf, 0x7c, - 0xd2, 0x34, 0x2b, 0xb5, 0x61, 0x57, 0x87, 0xee, 0xee, 0xd1, 0xc1, 0xf6, - 0x27, 0xe2, 0x27, 0x2a, 0xbd, 0x93, 0x1a, 0xc1, 0xb6, 0x4f, 0x2d, 0xe8, - 0x0d, 0xf3, 0x18, 0xdd, 0xae, 0x21, 0xc4, 0xce, 0x97, 0xc4, 0x23, 0xcf, - 0xdb, 0x65, 0x97, 0x1a, 0x94, 0xba, 0x58, 0xe8, 0x2a, 0x49, 0x79, 0xd6, - 0x90, 0x7e, 0x01, 0xd2, 0x45, 0xbc, 0x99, 0x34, 0xdf, 0x85, 0xdb, 0x58, - 0x67, 0x90, 0xb3, 0x91, 0x86, 0x22, 0x89, 0xdb, 0x9a, 0xf3, 0x1d, 0x00, - 0x00, 0xff, 0xff, 0xca, 0x1c, 0xba, 0x45, 0xd0, 0x00, 0x00, 0x00, - }, - "bash/example.bash", - ) -} - -// Asset loads and returns the asset for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func Asset(name string) ([]byte, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - return f() - } - return nil, fmt.Errorf("Asset %s not found", name) -} - -// AssetNames returns the names of the assets. -func AssetNames() []string { - names := make([]string, 0, len(_bindata)) - for name := range _bindata { - names = append(names, name) - } - return names -} - -// _bindata is a table, holding each asset generator, mapped to its name. -var _bindata = map[string]func() ([]byte, error){ - "bash/example.bash": bash_example_bash, -} -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"} -// AssetDir("data/img") would return []string{"a.png", "b.png"} -// AssetDir("foo.txt") and AssetDir("notexist") would return an error -// AssetDir("") will return []string{"data"}. 
-func AssetDir(name string) ([]string, error) { - node := _bintree - if len(name) != 0 { - cannonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(cannonicalName, "/") - for _, p := range pathList { - node = node.Children[p] - if node == nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - } - } - if node.Func != nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - rv := make([]string, 0, len(node.Children)) - for name := range node.Children { - rv = append(rv, name) - } - return rv, nil -} - -type _bintree_t struct { - Func func() ([]byte, error) - Children map[string]*_bintree_t -} -var _bintree = &_bintree_t{nil, map[string]*_bintree_t{ - "bash": &_bintree_t{nil, map[string]*_bintree_t{ - "example.bash": &_bintree_t{bash_example_bash, map[string]*_bintree_t{ - }}, - }}, -}} diff --git a/Godeps/_workspace/src/github.com/progrium/go-basher/example/example.go b/Godeps/_workspace/src/github.com/progrium/go-basher/example/example.go deleted file mode 100644 index aa6780c36d..0000000000 --- a/Godeps/_workspace/src/github.com/progrium/go-basher/example/example.go +++ /dev/null @@ -1,53 +0,0 @@ -package main - -import ( - "encoding/json" - "io/ioutil" - "log" - "os" - "strings" - - "github.com/dustin/go-jsonpointer" - "github.com/progrium/go-basher" -) - -func assert(err error) { - if err != nil { - log.Fatal(err) - } -} - -func jsonPointer(args []string) int { - if len(args) == 0 { - return 3 - } - bytes, err := ioutil.ReadAll(os.Stdin) - assert(err) - var o map[string]interface{} - assert(json.Unmarshal(bytes, &o)) - println(jsonpointer.Get(o, args[0]).(string)) - return 0 -} - -func reverse(args []string) int { - bytes, err := ioutil.ReadAll(os.Stdin) - assert(err) - runes := []rune(strings.Trim(string(bytes), "\n")) - for i, j := 0, len(runes)-1; i < j; i, j = i+1, j-1 { - runes[i], runes[j] = runes[j], runes[i] - } - println(string(runes)) - return 0 -} - -func main() { - bash, _ := basher.NewContext("/bin/bash", 
false) - bash.ExportFunc("json-pointer", jsonPointer) - bash.ExportFunc("reverse", reverse) - bash.HandleFuncs(os.Args) - - bash.Source("bash/example.bash", Asset) - status, err := bash.Run("main", os.Args[1:]) - assert(err) - os.Exit(status) -} diff --git a/Godeps/_workspace/src/github.com/robfig/cron/.gitignore b/Godeps/_workspace/src/github.com/robfig/cron/.gitignore deleted file mode 100644 index 00268614f0..0000000000 --- a/Godeps/_workspace/src/github.com/robfig/cron/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/Godeps/_workspace/src/github.com/robfig/cron/.travis.yml b/Godeps/_workspace/src/github.com/robfig/cron/.travis.yml deleted file mode 100644 index 4f2ee4d973..0000000000 --- a/Godeps/_workspace/src/github.com/robfig/cron/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/Godeps/_workspace/src/github.com/robfig/cron/LICENSE b/Godeps/_workspace/src/github.com/robfig/cron/LICENSE deleted file mode 100644 index 3a0f627ffe..0000000000 --- a/Godeps/_workspace/src/github.com/robfig/cron/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -Copyright (C) 2012 Rob Figueiredo -All Rights Reserved. - -MIT LICENSE - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/robfig/cron/README.md b/Godeps/_workspace/src/github.com/robfig/cron/README.md deleted file mode 100644 index a9db98c35f..0000000000 --- a/Godeps/_workspace/src/github.com/robfig/cron/README.md +++ /dev/null @@ -1 +0,0 @@ -[![GoDoc](http://godoc.org/github.com/robfig/cron?status.png)](http://godoc.org/github.com/robfig/cron) diff --git a/Godeps/_workspace/src/github.com/robfig/cron/constantdelay.go b/Godeps/_workspace/src/github.com/robfig/cron/constantdelay.go deleted file mode 100644 index cd6e7b1be9..0000000000 --- a/Godeps/_workspace/src/github.com/robfig/cron/constantdelay.go +++ /dev/null @@ -1,27 +0,0 @@ -package cron - -import "time" - -// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes". -// It does not support jobs more frequent than once a second. -type ConstantDelaySchedule struct { - Delay time.Duration -} - -// Every returns a crontab Schedule that activates once every duration. -// Delays of less than a second are not supported (will round up to 1 second). -// Any fields less than a Second are truncated. -func Every(duration time.Duration) ConstantDelaySchedule { - if duration < time.Second { - duration = time.Second - } - return ConstantDelaySchedule{ - Delay: duration - time.Duration(duration.Nanoseconds())%time.Second, - } -} - -// Next returns the next time this should be run. -// This rounds so that the next activation time will be on the second. 
-func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time { - return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond) -} diff --git a/Godeps/_workspace/src/github.com/robfig/cron/constantdelay_test.go b/Godeps/_workspace/src/github.com/robfig/cron/constantdelay_test.go deleted file mode 100644 index f43a58ad26..0000000000 --- a/Godeps/_workspace/src/github.com/robfig/cron/constantdelay_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package cron - -import ( - "testing" - "time" -) - -func TestConstantDelayNext(t *testing.T) { - tests := []struct { - time string - delay time.Duration - expected string - }{ - // Simple cases - {"Mon Jul 9 14:45 2012", 15*time.Minute + 50*time.Nanosecond, "Mon Jul 9 15:00 2012"}, - {"Mon Jul 9 14:59 2012", 15 * time.Minute, "Mon Jul 9 15:14 2012"}, - {"Mon Jul 9 14:59:59 2012", 15 * time.Minute, "Mon Jul 9 15:14:59 2012"}, - - // Wrap around hours - {"Mon Jul 9 15:45 2012", 35 * time.Minute, "Mon Jul 9 16:20 2012"}, - - // Wrap around days - {"Mon Jul 9 23:46 2012", 14 * time.Minute, "Tue Jul 10 00:00 2012"}, - {"Mon Jul 9 23:45 2012", 35 * time.Minute, "Tue Jul 10 00:20 2012"}, - {"Mon Jul 9 23:35:51 2012", 44*time.Minute + 24*time.Second, "Tue Jul 10 00:20:15 2012"}, - {"Mon Jul 9 23:35:51 2012", 25*time.Hour + 44*time.Minute + 24*time.Second, "Thu Jul 11 01:20:15 2012"}, - - // Wrap around months - {"Mon Jul 9 23:35 2012", 91*24*time.Hour + 25*time.Minute, "Thu Oct 9 00:00 2012"}, - - // Wrap around minute, hour, day, month, and year - {"Mon Dec 31 23:59:45 2012", 15 * time.Second, "Tue Jan 1 00:00:00 2013"}, - - // Round to nearest second on the delay - {"Mon Jul 9 14:45 2012", 15*time.Minute + 50*time.Nanosecond, "Mon Jul 9 15:00 2012"}, - - // Round up to 1 second if the duration is less. - {"Mon Jul 9 14:45:00 2012", 15 * time.Millisecond, "Mon Jul 9 14:45:01 2012"}, - - // Round to nearest second when calculating the next time. 
- {"Mon Jul 9 14:45:00.005 2012", 15 * time.Minute, "Mon Jul 9 15:00 2012"}, - - // Round to nearest second for both. - {"Mon Jul 9 14:45:00.005 2012", 15*time.Minute + 50*time.Nanosecond, "Mon Jul 9 15:00 2012"}, - } - - for _, c := range tests { - actual := Every(c.delay).Next(getTime(c.time)) - expected := getTime(c.expected) - if actual != expected { - t.Errorf("%s, \"%s\": (expected) %v != %v (actual)", c.time, c.delay, expected, actual) - } - } -} diff --git a/Godeps/_workspace/src/github.com/robfig/cron/cron.go b/Godeps/_workspace/src/github.com/robfig/cron/cron.go deleted file mode 100644 index 54e08c6ca1..0000000000 --- a/Godeps/_workspace/src/github.com/robfig/cron/cron.go +++ /dev/null @@ -1,199 +0,0 @@ -// This library implements a cron spec parser and runner. See the README for -// more details. -package cron - -import ( - "sort" - "time" -) - -// Cron keeps track of any number of entries, invoking the associated func as -// specified by the schedule. It may be started, stopped, and the entries may -// be inspected while running. -type Cron struct { - entries []*Entry - stop chan struct{} - add chan *Entry - snapshot chan []*Entry - running bool -} - -// Job is an interface for submitted cron jobs. -type Job interface { - Run() -} - -// The Schedule describes a job's duty cycle. -type Schedule interface { - // Return the next activation time, later than the given time. - // Next is invoked initially, and then each time the job is run. - Next(time.Time) time.Time -} - -// Entry consists of a schedule and the func to execute on that schedule. -type Entry struct { - // The schedule on which this job should be run. - Schedule Schedule - - // The next time the job will run. This is the zero time if Cron has not been - // started or this entry's schedule is unsatisfiable - Next time.Time - - // The last time this job was run. This is the zero time if the job has never - // been run. - Prev time.Time - - // The Job to run. 
- Job Job -} - -// byTime is a wrapper for sorting the entry array by time -// (with zero time at the end). -type byTime []*Entry - -func (s byTime) Len() int { return len(s) } -func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s byTime) Less(i, j int) bool { - // Two zero times should return false. - // Otherwise, zero is "greater" than any other time. - // (To sort it at the end of the list.) - if s[i].Next.IsZero() { - return false - } - if s[j].Next.IsZero() { - return true - } - return s[i].Next.Before(s[j].Next) -} - -// New returns a new Cron job runner. -func New() *Cron { - return &Cron{ - entries: nil, - add: make(chan *Entry), - stop: make(chan struct{}), - snapshot: make(chan []*Entry), - running: false, - } -} - -// A wrapper that turns a func() into a cron.Job -type FuncJob func() - -func (f FuncJob) Run() { f() } - -// AddFunc adds a func to the Cron to be run on the given schedule. -func (c *Cron) AddFunc(spec string, cmd func()) error { - return c.AddJob(spec, FuncJob(cmd)) -} - -// AddFunc adds a Job to the Cron to be run on the given schedule. -func (c *Cron) AddJob(spec string, cmd Job) error { - schedule, err := Parse(spec) - if err != nil { - return err - } - c.Schedule(schedule, cmd) - return nil -} - -// Schedule adds a Job to the Cron to be run on the given schedule. -func (c *Cron) Schedule(schedule Schedule, cmd Job) { - entry := &Entry{ - Schedule: schedule, - Job: cmd, - } - if !c.running { - c.entries = append(c.entries, entry) - return - } - - c.add <- entry -} - -// Entries returns a snapshot of the cron entries. -func (c *Cron) Entries() []*Entry { - if c.running { - c.snapshot <- nil - x := <-c.snapshot - return x - } - return c.entrySnapshot() -} - -// Start the cron scheduler in its own go-routine. -func (c *Cron) Start() { - c.running = true - go c.run() -} - -// Run the scheduler.. this is private just due to the need to synchronize -// access to the 'running' state variable. 
-func (c *Cron) run() { - // Figure out the next activation times for each entry. - now := time.Now().Local() - for _, entry := range c.entries { - entry.Next = entry.Schedule.Next(now) - } - - for { - // Determine the next entry to run. - sort.Sort(byTime(c.entries)) - - var effective time.Time - if len(c.entries) == 0 || c.entries[0].Next.IsZero() { - // If there are no entries yet, just sleep - it still handles new entries - // and stop requests. - effective = now.AddDate(10, 0, 0) - } else { - effective = c.entries[0].Next - } - - select { - case now = <-time.After(effective.Sub(now)): - // Run every entry whose next time was this effective time. - for _, e := range c.entries { - if e.Next != effective { - break - } - go e.Job.Run() - e.Prev = e.Next - e.Next = e.Schedule.Next(effective) - } - continue - - case newEntry := <-c.add: - c.entries = append(c.entries, newEntry) - newEntry.Next = newEntry.Schedule.Next(now) - - case <-c.snapshot: - c.snapshot <- c.entrySnapshot() - - case <-c.stop: - return - } - - // 'now' should be updated after newEntry and snapshot cases. - now = time.Now().Local() - } -} - -// Stop the cron scheduler. -func (c *Cron) Stop() { - c.stop <- struct{}{} - c.running = false -} - -// entrySnapshot returns a copy of the current cron entry list. -func (c *Cron) entrySnapshot() []*Entry { - entries := []*Entry{} - for _, e := range c.entries { - entries = append(entries, &Entry{ - Schedule: e.Schedule, - Next: e.Next, - Prev: e.Prev, - Job: e.Job, - }) - } - return entries -} diff --git a/Godeps/_workspace/src/github.com/robfig/cron/cron_test.go b/Godeps/_workspace/src/github.com/robfig/cron/cron_test.go deleted file mode 100644 index eea340080b..0000000000 --- a/Godeps/_workspace/src/github.com/robfig/cron/cron_test.go +++ /dev/null @@ -1,255 +0,0 @@ -package cron - -import ( - "fmt" - "sync" - "testing" - "time" -) - -// Many tests schedule a job for every second, and then wait at most a second -// for it to run. 
This amount is just slightly larger than 1 second to -// compensate for a few milliseconds of runtime. -const ONE_SECOND = 1*time.Second + 10*time.Millisecond - -// Start and stop cron with no entries. -func TestNoEntries(t *testing.T) { - cron := New() - cron.Start() - - select { - case <-time.After(ONE_SECOND): - t.FailNow() - case <-stop(cron): - } -} - -// Start, stop, then add an entry. Verify entry doesn't run. -func TestStopCausesJobsToNotRun(t *testing.T) { - wg := &sync.WaitGroup{} - wg.Add(1) - - cron := New() - cron.Start() - cron.Stop() - cron.AddFunc("* * * * * ?", func() { wg.Done() }) - - select { - case <-time.After(ONE_SECOND): - // No job ran! - case <-wait(wg): - t.FailNow() - } -} - -// Add a job, start cron, expect it runs. -func TestAddBeforeRunning(t *testing.T) { - wg := &sync.WaitGroup{} - wg.Add(1) - - cron := New() - cron.AddFunc("* * * * * ?", func() { wg.Done() }) - cron.Start() - defer cron.Stop() - - // Give cron 2 seconds to run our job (which is always activated). - select { - case <-time.After(ONE_SECOND): - t.FailNow() - case <-wait(wg): - } -} - -// Start cron, add a job, expect it runs. -func TestAddWhileRunning(t *testing.T) { - wg := &sync.WaitGroup{} - wg.Add(1) - - cron := New() - cron.Start() - defer cron.Stop() - cron.AddFunc("* * * * * ?", func() { wg.Done() }) - - select { - case <-time.After(ONE_SECOND): - t.FailNow() - case <-wait(wg): - } -} - -// Test timing with Entries. -func TestSnapshotEntries(t *testing.T) { - wg := &sync.WaitGroup{} - wg.Add(1) - - cron := New() - cron.AddFunc("@every 2s", func() { wg.Done() }) - cron.Start() - defer cron.Stop() - - // Cron should fire in 2 seconds. After 1 second, call Entries. - select { - case <-time.After(ONE_SECOND): - cron.Entries() - } - - // Even though Entries was called, the cron should fire at the 2 second mark. - select { - case <-time.After(ONE_SECOND): - t.FailNow() - case <-wait(wg): - } - -} - -// Test that the entries are correctly sorted. 
-// Add a bunch of long-in-the-future entries, and an immediate entry, and ensure -// that the immediate entry runs immediately. -// Also: Test that multiple jobs run in the same instant. -func TestMultipleEntries(t *testing.T) { - wg := &sync.WaitGroup{} - wg.Add(2) - - cron := New() - cron.AddFunc("0 0 0 1 1 ?", func() {}) - cron.AddFunc("* * * * * ?", func() { wg.Done() }) - cron.AddFunc("0 0 0 31 12 ?", func() {}) - cron.AddFunc("* * * * * ?", func() { wg.Done() }) - - cron.Start() - defer cron.Stop() - - select { - case <-time.After(ONE_SECOND): - t.FailNow() - case <-wait(wg): - } -} - -// Test running the same job twice. -func TestRunningJobTwice(t *testing.T) { - wg := &sync.WaitGroup{} - wg.Add(2) - - cron := New() - cron.AddFunc("0 0 0 1 1 ?", func() {}) - cron.AddFunc("0 0 0 31 12 ?", func() {}) - cron.AddFunc("* * * * * ?", func() { wg.Done() }) - - cron.Start() - defer cron.Stop() - - select { - case <-time.After(2 * ONE_SECOND): - t.FailNow() - case <-wait(wg): - } -} - -func TestRunningMultipleSchedules(t *testing.T) { - wg := &sync.WaitGroup{} - wg.Add(2) - - cron := New() - cron.AddFunc("0 0 0 1 1 ?", func() {}) - cron.AddFunc("0 0 0 31 12 ?", func() {}) - cron.AddFunc("* * * * * ?", func() { wg.Done() }) - cron.Schedule(Every(time.Minute), FuncJob(func() {})) - cron.Schedule(Every(time.Second), FuncJob(func() { wg.Done() })) - cron.Schedule(Every(time.Hour), FuncJob(func() {})) - - cron.Start() - defer cron.Stop() - - select { - case <-time.After(2 * ONE_SECOND): - t.FailNow() - case <-wait(wg): - } -} - -// Test that the cron is run in the local time zone (as opposed to UTC). 
-func TestLocalTimezone(t *testing.T) { - wg := &sync.WaitGroup{} - wg.Add(1) - - now := time.Now().Local() - spec := fmt.Sprintf("%d %d %d %d %d ?", - now.Second()+1, now.Minute(), now.Hour(), now.Day(), now.Month()) - - cron := New() - cron.AddFunc(spec, func() { wg.Done() }) - cron.Start() - defer cron.Stop() - - select { - case <-time.After(ONE_SECOND): - t.FailNow() - case <-wait(wg): - } -} - -type testJob struct { - wg *sync.WaitGroup - name string -} - -func (t testJob) Run() { - t.wg.Done() -} - -// Simple test using Runnables. -func TestJob(t *testing.T) { - wg := &sync.WaitGroup{} - wg.Add(1) - - cron := New() - cron.AddJob("0 0 0 30 Feb ?", testJob{wg, "job0"}) - cron.AddJob("0 0 0 1 1 ?", testJob{wg, "job1"}) - cron.AddJob("* * * * * ?", testJob{wg, "job2"}) - cron.AddJob("1 0 0 1 1 ?", testJob{wg, "job3"}) - cron.Schedule(Every(5*time.Second+5*time.Nanosecond), testJob{wg, "job4"}) - cron.Schedule(Every(5*time.Minute), testJob{wg, "job5"}) - - cron.Start() - defer cron.Stop() - - select { - case <-time.After(ONE_SECOND): - t.FailNow() - case <-wait(wg): - } - - // Ensure the entries are in the right order. - expecteds := []string{"job2", "job4", "job5", "job1", "job3", "job0"} - - var actuals []string - for _, entry := range cron.Entries() { - actuals = append(actuals, entry.Job.(testJob).name) - } - - for i, expected := range expecteds { - if actuals[i] != expected { - t.Errorf("Jobs not in the right order. 
(expected) %s != %s (actual)", expecteds, actuals) - t.FailNow() - } - } -} - -func wait(wg *sync.WaitGroup) chan bool { - ch := make(chan bool) - go func() { - wg.Wait() - ch <- true - }() - return ch -} - -func stop(cron *Cron) chan bool { - ch := make(chan bool) - go func() { - cron.Stop() - ch <- true - }() - return ch -} diff --git a/Godeps/_workspace/src/github.com/robfig/cron/doc.go b/Godeps/_workspace/src/github.com/robfig/cron/doc.go deleted file mode 100644 index dbdf50127a..0000000000 --- a/Godeps/_workspace/src/github.com/robfig/cron/doc.go +++ /dev/null @@ -1,129 +0,0 @@ -/* -Package cron implements a cron spec parser and job runner. - -Usage - -Callers may register Funcs to be invoked on a given schedule. Cron will run -them in their own goroutines. - - c := cron.New() - c.AddFunc("0 30 * * * *", func() { fmt.Println("Every hour on the half hour") }) - c.AddFunc("@hourly", func() { fmt.Println("Every hour") }) - c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty") }) - c.Start() - .. - // Funcs are invoked in their own goroutine, asynchronously. - ... - // Funcs may also be added to a running Cron - c.AddFunc("@daily", func() { fmt.Println("Every day") }) - .. - // Inspect the cron job entries' next and previous run times. - inspect(c.Entries()) - .. - c.Stop() // Stop the scheduler (does not stop any jobs already running). - -CRON Expression Format - -A cron expression represents a set of times, using 6 space-separated fields. - - Field name | Mandatory? | Allowed values | Allowed special characters - ---------- | ---------- | -------------- | -------------------------- - Seconds | Yes | 0-59 | * / , - - Minutes | Yes | 0-59 | * / , - - Hours | Yes | 0-23 | * / , - - Day of month | Yes | 1-31 | * / , - ? - Month | Yes | 1-12 or JAN-DEC | * / , - - Day of week | Yes | 0-6 or SUN-SAT | * / , - ? - -Note: Month and Day-of-week field values are case insensitive. "SUN", "Sun", -and "sun" are equally accepted. 
- -Special Characters - -Asterisk ( * ) - -The asterisk indicates that the cron expression will match for all values of the -field; e.g., using an asterisk in the 5th field (month) would indicate every -month. - -Slash ( / ) - -Slashes are used to describe increments of ranges. For example 3-59/15 in the -1st field (minutes) would indicate the 3rd minute of the hour and every 15 -minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...", -that is, an increment over the largest possible range of the field. The form -"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the -increment until the end of that specific range. It does not wrap around. - -Comma ( , ) - -Commas are used to separate items of a list. For example, using "MON,WED,FRI" in -the 5th field (day of week) would mean Mondays, Wednesdays and Fridays. - -Hyphen ( - ) - -Hyphens are used to define ranges. For example, 9-17 would indicate every -hour between 9am and 5pm inclusive. - -Question mark ( ? ) - -Question mark may be used instead of '*' for leaving either day-of-month or -day-of-week blank. - -Predefined schedules - -You may use one of several pre-defined schedules in place of a cron expression. - - Entry | Description | Equivalent To - ----- | ----------- | ------------- - @yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 0 1 1 * - @monthly | Run once a month, midnight, first of month | 0 0 0 1 * * - @weekly | Run once a week, midnight on Sunday | 0 0 0 * * 0 - @daily (or @midnight) | Run once a day, midnight | 0 0 0 * * * - @hourly | Run once an hour, beginning of hour | 0 0 * * * * - -Intervals - -You may also schedule a job to execute at fixed intervals. This is supported by -formatting the cron spec like this: - - @every - -where "duration" is a string accepted by time.ParseDuration -(http://golang.org/pkg/time/#ParseDuration). 
- -For example, "@every 1h30m10s" would indicate a schedule that activates every -1 hour, 30 minutes, 10 seconds. - -Note: The interval does not take the job runtime into account. For example, -if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes, -it will have only 2 minutes of idle time between each run. - -Time zones - -All interpretation and scheduling is done in the machine's local time zone (as -provided by the Go time package (http://www.golang.org/pkg/time). - -Be aware that jobs scheduled during daylight-savings leap-ahead transitions will -not be run! - -Thread safety - -Since the Cron service runs concurrently with the calling code, some amount of -care must be taken to ensure proper synchronization. - -All cron methods are designed to be correctly synchronized as long as the caller -ensures that invocations have a clear happens-before ordering between them. - -Implementation - -Cron entries are stored in an array, sorted by their next activation time. Cron -sleeps until the next job is due to be run. - -Upon waking: - - it runs each entry that is active on that second - - it calculates the next run times for the jobs that were run - - it re-sorts the array of entries by next activation time. - - it goes to sleep until the soonest job. -*/ -package cron diff --git a/Godeps/_workspace/src/github.com/robfig/cron/parser.go b/Godeps/_workspace/src/github.com/robfig/cron/parser.go deleted file mode 100644 index 4224fa9308..0000000000 --- a/Godeps/_workspace/src/github.com/robfig/cron/parser.go +++ /dev/null @@ -1,231 +0,0 @@ -package cron - -import ( - "fmt" - "log" - "math" - "strconv" - "strings" - "time" -) - -// Parse returns a new crontab schedule representing the given spec. -// It returns a descriptive error if the spec is not valid. -// -// It accepts -// - Full crontab specs, e.g. "* * * * * ?" -// - Descriptors, e.g. 
"@midnight", "@every 1h30m" -func Parse(spec string) (_ Schedule, err error) { - // Convert panics into errors - defer func() { - if recovered := recover(); recovered != nil { - err = fmt.Errorf("%v", recovered) - } - }() - - if spec[0] == '@' { - return parseDescriptor(spec), nil - } - - // Split on whitespace. We require 5 or 6 fields. - // (second) (minute) (hour) (day of month) (month) (day of week, optional) - fields := strings.Fields(spec) - if len(fields) != 5 && len(fields) != 6 { - log.Panicf("Expected 5 or 6 fields, found %d: %s", len(fields), spec) - } - - // If a sixth field is not provided (DayOfWeek), then it is equivalent to star. - if len(fields) == 5 { - fields = append(fields, "*") - } - - schedule := &SpecSchedule{ - Second: getField(fields[0], seconds), - Minute: getField(fields[1], minutes), - Hour: getField(fields[2], hours), - Dom: getField(fields[3], dom), - Month: getField(fields[4], months), - Dow: getField(fields[5], dow), - } - - return schedule, nil -} - -// getField returns an Int with the bits set representing all of the times that -// the field represents. A "field" is a comma-separated list of "ranges". -func getField(field string, r bounds) uint64 { - // list = range {"," range} - var bits uint64 - ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' }) - for _, expr := range ranges { - bits |= getRange(expr, r) - } - return bits -} - -// getRange returns the bits indicated by the given expression: -// number | number "-" number [ "/" number ] -func getRange(expr string, r bounds) uint64 { - - var ( - start, end, step uint - rangeAndStep = strings.Split(expr, "/") - lowAndHigh = strings.Split(rangeAndStep[0], "-") - singleDigit = len(lowAndHigh) == 1 - ) - - var extra_star uint64 - if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" 
{ - start = r.min - end = r.max - extra_star = starBit - } else { - start = parseIntOrName(lowAndHigh[0], r.names) - switch len(lowAndHigh) { - case 1: - end = start - case 2: - end = parseIntOrName(lowAndHigh[1], r.names) - default: - log.Panicf("Too many hyphens: %s", expr) - } - } - - switch len(rangeAndStep) { - case 1: - step = 1 - case 2: - step = mustParseInt(rangeAndStep[1]) - - // Special handling: "N/step" means "N-max/step". - if singleDigit { - end = r.max - } - default: - log.Panicf("Too many slashes: %s", expr) - } - - if start < r.min { - log.Panicf("Beginning of range (%d) below minimum (%d): %s", start, r.min, expr) - } - if end > r.max { - log.Panicf("End of range (%d) above maximum (%d): %s", end, r.max, expr) - } - if start > end { - log.Panicf("Beginning of range (%d) beyond end of range (%d): %s", start, end, expr) - } - - return getBits(start, end, step) | extra_star -} - -// parseIntOrName returns the (possibly-named) integer contained in expr. -func parseIntOrName(expr string, names map[string]uint) uint { - if names != nil { - if namedInt, ok := names[strings.ToLower(expr)]; ok { - return namedInt - } - } - return mustParseInt(expr) -} - -// mustParseInt parses the given expression as an int or panics. -func mustParseInt(expr string) uint { - num, err := strconv.Atoi(expr) - if err != nil { - log.Panicf("Failed to parse int from %s: %s", expr, err) - } - if num < 0 { - log.Panicf("Negative number (%d) not allowed: %s", num, expr) - } - - return uint(num) -} - -// getBits sets all bits in the range [min, max], modulo the given step size. -func getBits(min, max, step uint) uint64 { - var bits uint64 - - // If step is 1, use shifts. - if step == 1 { - return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min) - } - - // Else, use a simple loop. - for i := min; i <= max; i += step { - bits |= 1 << i - } - return bits -} - -// all returns all bits within the given bounds. 
(plus the star bit) -func all(r bounds) uint64 { - return getBits(r.min, r.max, 1) | starBit -} - -// parseDescriptor returns a pre-defined schedule for the expression, or panics -// if none matches. -func parseDescriptor(spec string) Schedule { - switch spec { - case "@yearly", "@annually": - return &SpecSchedule{ - Second: 1 << seconds.min, - Minute: 1 << minutes.min, - Hour: 1 << hours.min, - Dom: 1 << dom.min, - Month: 1 << months.min, - Dow: all(dow), - } - - case "@monthly": - return &SpecSchedule{ - Second: 1 << seconds.min, - Minute: 1 << minutes.min, - Hour: 1 << hours.min, - Dom: 1 << dom.min, - Month: all(months), - Dow: all(dow), - } - - case "@weekly": - return &SpecSchedule{ - Second: 1 << seconds.min, - Minute: 1 << minutes.min, - Hour: 1 << hours.min, - Dom: all(dom), - Month: all(months), - Dow: 1 << dow.min, - } - - case "@daily", "@midnight": - return &SpecSchedule{ - Second: 1 << seconds.min, - Minute: 1 << minutes.min, - Hour: 1 << hours.min, - Dom: all(dom), - Month: all(months), - Dow: all(dow), - } - - case "@hourly": - return &SpecSchedule{ - Second: 1 << seconds.min, - Minute: 1 << minutes.min, - Hour: all(hours), - Dom: all(dom), - Month: all(months), - Dow: all(dow), - } - } - - const every = "@every " - if strings.HasPrefix(spec, every) { - duration, err := time.ParseDuration(spec[len(every):]) - if err != nil { - log.Panicf("Failed to parse duration %s: %s", spec, err) - } - return Every(duration) - } - - log.Panicf("Unrecognized descriptor: %s", spec) - return nil -} diff --git a/Godeps/_workspace/src/github.com/robfig/cron/parser_test.go b/Godeps/_workspace/src/github.com/robfig/cron/parser_test.go deleted file mode 100644 index 9050cf7869..0000000000 --- a/Godeps/_workspace/src/github.com/robfig/cron/parser_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package cron - -import ( - "reflect" - "testing" - "time" -) - -func TestRange(t *testing.T) { - ranges := []struct { - expr string - min, max uint - expected uint64 - }{ - {"5", 0, 7, 1 
<< 5}, - {"0", 0, 7, 1 << 0}, - {"7", 0, 7, 1 << 7}, - - {"5-5", 0, 7, 1 << 5}, - {"5-6", 0, 7, 1<<5 | 1<<6}, - {"5-7", 0, 7, 1<<5 | 1<<6 | 1<<7}, - - {"5-6/2", 0, 7, 1 << 5}, - {"5-7/2", 0, 7, 1<<5 | 1<<7}, - {"5-7/1", 0, 7, 1<<5 | 1<<6 | 1<<7}, - - {"*", 1, 3, 1<<1 | 1<<2 | 1<<3 | starBit}, - {"*/2", 1, 3, 1<<1 | 1<<3 | starBit}, - } - - for _, c := range ranges { - actual := getRange(c.expr, bounds{c.min, c.max, nil}) - if actual != c.expected { - t.Errorf("%s => (expected) %d != %d (actual)", c.expr, c.expected, actual) - } - } -} - -func TestField(t *testing.T) { - fields := []struct { - expr string - min, max uint - expected uint64 - }{ - {"5", 1, 7, 1 << 5}, - {"5,6", 1, 7, 1<<5 | 1<<6}, - {"5,6,7", 1, 7, 1<<5 | 1<<6 | 1<<7}, - {"1,5-7/2,3", 1, 7, 1<<1 | 1<<5 | 1<<7 | 1<<3}, - } - - for _, c := range fields { - actual := getField(c.expr, bounds{c.min, c.max, nil}) - if actual != c.expected { - t.Errorf("%s => (expected) %d != %d (actual)", c.expr, c.expected, actual) - } - } -} - -func TestBits(t *testing.T) { - allBits := []struct { - r bounds - expected uint64 - }{ - {minutes, 0xfffffffffffffff}, // 0-59: 60 ones - {hours, 0xffffff}, // 0-23: 24 ones - {dom, 0xfffffffe}, // 1-31: 31 ones, 1 zero - {months, 0x1ffe}, // 1-12: 12 ones, 1 zero - {dow, 0x7f}, // 0-6: 7 ones - } - - for _, c := range allBits { - actual := all(c.r) // all() adds the starBit, so compensate for that.. 
- if c.expected|starBit != actual { - t.Errorf("%d-%d/%d => (expected) %b != %b (actual)", - c.r.min, c.r.max, 1, c.expected|starBit, actual) - } - } - - bits := []struct { - min, max, step uint - expected uint64 - }{ - - {0, 0, 1, 0x1}, - {1, 1, 1, 0x2}, - {1, 5, 2, 0x2a}, // 101010 - {1, 4, 2, 0xa}, // 1010 - } - - for _, c := range bits { - actual := getBits(c.min, c.max, c.step) - if c.expected != actual { - t.Errorf("%d-%d/%d => (expected) %b != %b (actual)", - c.min, c.max, c.step, c.expected, actual) - } - } -} - -func TestSpecSchedule(t *testing.T) { - entries := []struct { - expr string - expected Schedule - }{ - {"* 5 * * * *", &SpecSchedule{all(seconds), 1 << 5, all(hours), all(dom), all(months), all(dow)}}, - {"@every 5m", ConstantDelaySchedule{time.Duration(5) * time.Minute}}, - } - - for _, c := range entries { - actual, err := Parse(c.expr) - if err != nil { - t.Error(err) - } - if !reflect.DeepEqual(actual, c.expected) { - t.Errorf("%s => (expected) %b != %b (actual)", c.expr, c.expected, actual) - } - } -} diff --git a/Godeps/_workspace/src/github.com/robfig/cron/spec.go b/Godeps/_workspace/src/github.com/robfig/cron/spec.go deleted file mode 100644 index afa5ac86cc..0000000000 --- a/Godeps/_workspace/src/github.com/robfig/cron/spec.go +++ /dev/null @@ -1,159 +0,0 @@ -package cron - -import "time" - -// SpecSchedule specifies a duty cycle (to the second granularity), based on a -// traditional crontab specification. It is computed initially and stored as bit sets. -type SpecSchedule struct { - Second, Minute, Hour, Dom, Month, Dow uint64 -} - -// bounds provides a range of acceptable values (plus a map of name to value). -type bounds struct { - min, max uint - names map[string]uint -} - -// The bounds for each field. 
-var ( - seconds = bounds{0, 59, nil} - minutes = bounds{0, 59, nil} - hours = bounds{0, 23, nil} - dom = bounds{1, 31, nil} - months = bounds{1, 12, map[string]uint{ - "jan": 1, - "feb": 2, - "mar": 3, - "apr": 4, - "may": 5, - "jun": 6, - "jul": 7, - "aug": 8, - "sep": 9, - "oct": 10, - "nov": 11, - "dec": 12, - }} - dow = bounds{0, 6, map[string]uint{ - "sun": 0, - "mon": 1, - "tue": 2, - "wed": 3, - "thu": 4, - "fri": 5, - "sat": 6, - }} -) - -const ( - // Set the top bit if a star was included in the expression. - starBit = 1 << 63 -) - -// Next returns the next time this schedule is activated, greater than the given -// time. If no time can be found to satisfy the schedule, return the zero time. -func (s *SpecSchedule) Next(t time.Time) time.Time { - // General approach: - // For Month, Day, Hour, Minute, Second: - // Check if the time value matches. If yes, continue to the next field. - // If the field doesn't match the schedule, then increment the field until it matches. - // While incrementing the field, a wrap-around brings it back to the beginning - // of the field list (since it is necessary to re-verify previous field - // values) - - // Start at the earliest possible time (the upcoming second). - t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond) - - // This flag indicates whether a field has been incremented. - added := false - - // If no time is found within five years, return zero. - yearLimit := t.Year() + 5 - -WRAP: - if t.Year() > yearLimit { - return time.Time{} - } - - // Find the first applicable month. - // If it's this month, then do nothing. 
- for 1< 0 - dowMatch bool = 1< 0 - ) - - if s.Dom&starBit > 0 || s.Dow&starBit > 0 { - return domMatch && dowMatch - } - return domMatch || dowMatch -} diff --git a/Godeps/_workspace/src/github.com/robfig/cron/spec_test.go b/Godeps/_workspace/src/github.com/robfig/cron/spec_test.go deleted file mode 100644 index 41540bca49..0000000000 --- a/Godeps/_workspace/src/github.com/robfig/cron/spec_test.go +++ /dev/null @@ -1,204 +0,0 @@ -package cron - -import ( - "testing" - "time" -) - -func TestActivation(t *testing.T) { - tests := []struct { - time, spec string - expected bool - }{ - // Every fifteen minutes. - {"Mon Jul 9 15:00 2012", "0 0/15 * * *", true}, - {"Mon Jul 9 15:45 2012", "0 0/15 * * *", true}, - {"Mon Jul 9 15:40 2012", "0 0/15 * * *", false}, - - // Every fifteen minutes, starting at 5 minutes. - {"Mon Jul 9 15:05 2012", "0 5/15 * * *", true}, - {"Mon Jul 9 15:20 2012", "0 5/15 * * *", true}, - {"Mon Jul 9 15:50 2012", "0 5/15 * * *", true}, - - // Named months - {"Sun Jul 15 15:00 2012", "0 0/15 * * Jul", true}, - {"Sun Jul 15 15:00 2012", "0 0/15 * * Jun", false}, - - // Everything set. - {"Sun Jul 15 08:30 2012", "0 30 08 ? Jul Sun", true}, - {"Sun Jul 15 08:30 2012", "0 30 08 15 Jul ?", true}, - {"Mon Jul 16 08:30 2012", "0 30 08 ? Jul Sun", false}, - {"Mon Jul 16 08:30 2012", "0 30 08 15 Jul ?", false}, - - // Predefined schedules - {"Mon Jul 9 15:00 2012", "@hourly", true}, - {"Mon Jul 9 15:04 2012", "@hourly", false}, - {"Mon Jul 9 15:00 2012", "@daily", false}, - {"Mon Jul 9 00:00 2012", "@daily", true}, - {"Mon Jul 9 00:00 2012", "@weekly", false}, - {"Sun Jul 8 00:00 2012", "@weekly", true}, - {"Sun Jul 8 01:00 2012", "@weekly", false}, - {"Sun Jul 8 00:00 2012", "@monthly", false}, - {"Sun Jul 1 00:00 2012", "@monthly", true}, - - // Test interaction of DOW and DOM. - // If both are specified, then only one needs to match. 
- {"Sun Jul 15 00:00 2012", "0 * * 1,15 * Sun", true}, - {"Fri Jun 15 00:00 2012", "0 * * 1,15 * Sun", true}, - {"Wed Aug 1 00:00 2012", "0 * * 1,15 * Sun", true}, - - // However, if one has a star, then both need to match. - {"Sun Jul 15 00:00 2012", "0 * * * * Mon", false}, - {"Sun Jul 15 00:00 2012", "0 * * */10 * Sun", false}, - {"Mon Jul 9 00:00 2012", "0 * * 1,15 * *", false}, - {"Sun Jul 15 00:00 2012", "0 * * 1,15 * *", true}, - {"Sun Jul 15 00:00 2012", "0 * * */2 * Sun", true}, - } - - for _, test := range tests { - sched, err := Parse(test.spec) - if err != nil { - t.Error(err) - continue - } - actual := sched.Next(getTime(test.time).Add(-1 * time.Second)) - expected := getTime(test.time) - if test.expected && expected != actual || !test.expected && expected == actual { - t.Errorf("Fail evaluating %s on %s: (expected) %s != %s (actual)", - test.spec, test.time, expected, actual) - } - } -} - -func TestNext(t *testing.T) { - runs := []struct { - time, spec string - expected string - }{ - // Simple cases - {"Mon Jul 9 14:45 2012", "0 0/15 * * *", "Mon Jul 9 15:00 2012"}, - {"Mon Jul 9 14:59 2012", "0 0/15 * * *", "Mon Jul 9 15:00 2012"}, - {"Mon Jul 9 14:59:59 2012", "0 0/15 * * *", "Mon Jul 9 15:00 2012"}, - - // Wrap around hours - {"Mon Jul 9 15:45 2012", "0 20-35/15 * * *", "Mon Jul 9 16:20 2012"}, - - // Wrap around days - {"Mon Jul 9 23:46 2012", "0 */15 * * *", "Tue Jul 10 00:00 2012"}, - {"Mon Jul 9 23:45 2012", "0 20-35/15 * * *", "Tue Jul 10 00:20 2012"}, - {"Mon Jul 9 23:35:51 2012", "15/35 20-35/15 * * *", "Tue Jul 10 00:20:15 2012"}, - {"Mon Jul 9 23:35:51 2012", "15/35 20-35/15 1/2 * *", "Tue Jul 10 01:20:15 2012"}, - {"Mon Jul 9 23:35:51 2012", "15/35 20-35/15 10-12 * *", "Tue Jul 10 10:20:15 2012"}, - - {"Mon Jul 9 23:35:51 2012", "15/35 20-35/15 1/2 */2 * *", "Thu Jul 11 01:20:15 2012"}, - {"Mon Jul 9 23:35:51 2012", "15/35 20-35/15 * 9-20 * *", "Wed Jul 10 00:20:15 2012"}, - {"Mon Jul 9 23:35:51 2012", "15/35 20-35/15 * 9-20 Jul *", "Wed 
Jul 10 00:20:15 2012"}, - - // Wrap around months - {"Mon Jul 9 23:35 2012", "0 0 0 9 Apr-Oct ?", "Thu Aug 9 00:00 2012"}, - {"Mon Jul 9 23:35 2012", "0 0 0 */5 Apr,Aug,Oct Mon", "Mon Aug 6 00:00 2012"}, - {"Mon Jul 9 23:35 2012", "0 0 0 */5 Oct Mon", "Mon Oct 1 00:00 2012"}, - - // Wrap around years - {"Mon Jul 9 23:35 2012", "0 0 0 * Feb Mon", "Mon Feb 4 00:00 2013"}, - {"Mon Jul 9 23:35 2012", "0 0 0 * Feb Mon/2", "Fri Feb 1 00:00 2013"}, - - // Wrap around minute, hour, day, month, and year - {"Mon Dec 31 23:59:45 2012", "0 * * * * *", "Tue Jan 1 00:00:00 2013"}, - - // Leap year - {"Mon Jul 9 23:35 2012", "0 0 0 29 Feb ?", "Mon Feb 29 00:00 2016"}, - - // Daylight savings time 2am EST (-5) -> 3am EDT (-4) - {"2012-03-11T00:00:00-0500", "0 30 2 11 Mar ?", "2013-03-11T02:30:00-0400"}, - - // hourly job - {"2012-03-11T00:00:00-0500", "0 0 * * * ?", "2012-03-11T01:00:00-0500"}, - {"2012-03-11T01:00:00-0500", "0 0 * * * ?", "2012-03-11T03:00:00-0400"}, - {"2012-03-11T03:00:00-0400", "0 0 * * * ?", "2012-03-11T04:00:00-0400"}, - {"2012-03-11T04:00:00-0400", "0 0 * * * ?", "2012-03-11T05:00:00-0400"}, - - // 1am nightly job - {"2012-03-11T00:00:00-0500", "0 0 1 * * ?", "2012-03-11T01:00:00-0500"}, - {"2012-03-11T01:00:00-0500", "0 0 1 * * ?", "2012-03-12T01:00:00-0400"}, - - // 2am nightly job (skipped) - {"2012-03-11T00:00:00-0500", "0 0 2 * * ?", "2012-03-12T02:00:00-0400"}, - - // Daylight savings time 2am EDT (-4) => 1am EST (-5) - {"2012-11-04T00:00:00-0400", "0 30 2 04 Nov ?", "2012-11-04T02:30:00-0500"}, - {"2012-11-04T01:45:00-0400", "0 30 1 04 Nov ?", "2012-11-04T01:30:00-0500"}, - - // hourly job - {"2012-11-04T00:00:00-0400", "0 0 * * * ?", "2012-11-04T01:00:00-0400"}, - {"2012-11-04T01:00:00-0400", "0 0 * * * ?", "2012-11-04T01:00:00-0500"}, - {"2012-11-04T01:00:00-0500", "0 0 * * * ?", "2012-11-04T02:00:00-0500"}, - - // 1am nightly job (runs twice) - {"2012-11-04T00:00:00-0400", "0 0 1 * * ?", "2012-11-04T01:00:00-0400"}, - {"2012-11-04T01:00:00-0400", 
"0 0 1 * * ?", "2012-11-04T01:00:00-0500"}, - {"2012-11-04T01:00:00-0500", "0 0 1 * * ?", "2012-11-05T01:00:00-0500"}, - - // 2am nightly job - {"2012-11-04T00:00:00-0400", "0 0 2 * * ?", "2012-11-04T02:00:00-0500"}, - {"2012-11-04T02:00:00-0500", "0 0 2 * * ?", "2012-11-05T02:00:00-0500"}, - - // 3am nightly job - {"2012-11-04T00:00:00-0400", "0 0 3 * * ?", "2012-11-04T03:00:00-0500"}, - {"2012-11-04T03:00:00-0500", "0 0 3 * * ?", "2012-11-05T03:00:00-0500"}, - - // Unsatisfiable - {"Mon Jul 9 23:35 2012", "0 0 0 30 Feb ?", ""}, - {"Mon Jul 9 23:35 2012", "0 0 0 31 Apr ?", ""}, - } - - for _, c := range runs { - sched, err := Parse(c.spec) - if err != nil { - t.Error(err) - continue - } - actual := sched.Next(getTime(c.time)) - expected := getTime(c.expected) - if !actual.Equal(expected) { - t.Errorf("%s, \"%s\": (expected) %v != %v (actual)", c.time, c.spec, expected, actual) - } - } -} - -func TestErrors(t *testing.T) { - invalidSpecs := []string{ - "xyz", - "60 0 * * *", - "0 60 * * *", - "0 0 * * XYZ", - } - for _, spec := range invalidSpecs { - _, err := Parse(spec) - if err == nil { - t.Error("expected an error parsing: ", spec) - } - } -} - -func getTime(value string) time.Time { - if value == "" { - return time.Time{} - } - t, err := time.Parse("Mon Jan 2 15:04 2006", value) - if err != nil { - t, err = time.Parse("Mon Jan 2 15:04:05 2006", value) - if err != nil { - t, err = time.Parse("2006-01-02T15:04:05-0700", value) - if err != nil { - panic(err) - } - // Daylight savings time tests require location - if ny, err := time.LoadLocation("America/New_York"); err == nil { - t = t.In(ny) - } - } - } - - return t -} diff --git a/Godeps/_workspace/src/golang.org/x/net/context/context.go b/Godeps/_workspace/src/golang.org/x/net/context/context.go deleted file mode 100644 index e7ee376c47..0000000000 --- a/Godeps/_workspace/src/golang.org/x/net/context/context.go +++ /dev/null @@ -1,447 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. -package context // import "golang.org/x/net/context" - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. 
Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out <-chan Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. 
Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. 
-type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it's is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. 
-func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, &c) - return &c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) cancelCtx { - return cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return &c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. 
-type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. 
- return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/Godeps/_workspace/src/golang.org/x/net/context/context_test.go b/Godeps/_workspace/src/golang.org/x/net/context/context_test.go deleted file mode 100644 index 05345fc5e5..0000000000 --- a/Godeps/_workspace/src/golang.org/x/net/context/context_test.go +++ /dev/null @@ -1,575 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package context - -import ( - "fmt" - "math/rand" - "runtime" - "strings" - "sync" - "testing" - "time" -) - -// otherContext is a Context that's not one of the types defined in context.go. -// This lets us test code paths that differ based on the underlying type of the -// Context. -type otherContext struct { - Context -} - -func TestBackground(t *testing.T) { - c := Background() - if c == nil { - t.Fatalf("Background returned nil") - } - select { - case x := <-c.Done(): - t.Errorf("<-c.Done() == %v want nothing (it should block)", x) - default: - } - if got, want := fmt.Sprint(c), "context.Background"; got != want { - t.Errorf("Background().String() = %q want %q", got, want) - } -} - -func TestTODO(t *testing.T) { - c := TODO() - if c == nil { - t.Fatalf("TODO returned nil") - } - select { - case x := <-c.Done(): - t.Errorf("<-c.Done() == %v want nothing (it should block)", x) - default: - } - if got, want := fmt.Sprint(c), "context.TODO"; got != want { - t.Errorf("TODO().String() = %q want %q", got, want) - } -} - -func TestWithCancel(t *testing.T) { - c1, cancel := WithCancel(Background()) - - if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want { - t.Errorf("c1.String() = %q want %q", got, want) - } - - o := otherContext{c1} - c2, _ := WithCancel(o) - contexts := []Context{c1, o, c2} - - for i, c := range contexts { - if d := c.Done(); d == nil { - t.Errorf("c[%d].Done() == %v want non-nil", i, d) - } - if e := c.Err(); e != nil { - t.Errorf("c[%d].Err() == %v want nil", i, e) - } - - select { - case x := <-c.Done(): - t.Errorf("<-c.Done() == %v want nothing (it should block)", x) - default: - } - } - - cancel() - time.Sleep(100 * time.Millisecond) // let cancelation propagate - - for i, c := range contexts { - select { - case <-c.Done(): - default: - t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i) - } - if e := c.Err(); e != Canceled { - t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled) - } - } -} - -func 
TestParentFinishesChild(t *testing.T) { - // Context tree: - // parent -> cancelChild - // parent -> valueChild -> timerChild - parent, cancel := WithCancel(Background()) - cancelChild, stop := WithCancel(parent) - defer stop() - valueChild := WithValue(parent, "key", "value") - timerChild, stop := WithTimeout(valueChild, 10000*time.Hour) - defer stop() - - select { - case x := <-parent.Done(): - t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) - case x := <-cancelChild.Done(): - t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x) - case x := <-timerChild.Done(): - t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x) - case x := <-valueChild.Done(): - t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x) - default: - } - - // The parent's children should contain the two cancelable children. - pc := parent.(*cancelCtx) - cc := cancelChild.(*cancelCtx) - tc := timerChild.(*timerCtx) - pc.mu.Lock() - if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] { - t.Errorf("bad linkage: pc.children = %v, want %v and %v", - pc.children, cc, tc) - } - pc.mu.Unlock() - - if p, ok := parentCancelCtx(cc.Context); !ok || p != pc { - t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc) - } - if p, ok := parentCancelCtx(tc.Context); !ok || p != pc { - t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc) - } - - cancel() - - pc.mu.Lock() - if len(pc.children) != 0 { - t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children) - } - pc.mu.Unlock() - - // parent and children should all be finished. 
- check := func(ctx Context, name string) { - select { - case <-ctx.Done(): - default: - t.Errorf("<-%s.Done() blocked, but shouldn't have", name) - } - if e := ctx.Err(); e != Canceled { - t.Errorf("%s.Err() == %v want %v", name, e, Canceled) - } - } - check(parent, "parent") - check(cancelChild, "cancelChild") - check(valueChild, "valueChild") - check(timerChild, "timerChild") - - // WithCancel should return a canceled context on a canceled parent. - precanceledChild := WithValue(parent, "key", "value") - select { - case <-precanceledChild.Done(): - default: - t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have") - } - if e := precanceledChild.Err(); e != Canceled { - t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled) - } -} - -func TestChildFinishesFirst(t *testing.T) { - cancelable, stop := WithCancel(Background()) - defer stop() - for _, parent := range []Context{Background(), cancelable} { - child, cancel := WithCancel(parent) - - select { - case x := <-parent.Done(): - t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) - case x := <-child.Done(): - t.Errorf("<-child.Done() == %v want nothing (it should block)", x) - default: - } - - cc := child.(*cancelCtx) - pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background() - if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) { - t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok) - } - - if pcok { - pc.mu.Lock() - if len(pc.children) != 1 || !pc.children[cc] { - t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc) - } - pc.mu.Unlock() - } - - cancel() - - if pcok { - pc.mu.Lock() - if len(pc.children) != 0 { - t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children) - } - pc.mu.Unlock() - } - - // child should be finished. 
- select { - case <-child.Done(): - default: - t.Errorf("<-child.Done() blocked, but shouldn't have") - } - if e := child.Err(); e != Canceled { - t.Errorf("child.Err() == %v want %v", e, Canceled) - } - - // parent should not be finished. - select { - case x := <-parent.Done(): - t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) - default: - } - if e := parent.Err(); e != nil { - t.Errorf("parent.Err() == %v want nil", e) - } - } -} - -func testDeadline(c Context, wait time.Duration, t *testing.T) { - select { - case <-time.After(wait): - t.Fatalf("context should have timed out") - case <-c.Done(): - } - if e := c.Err(); e != DeadlineExceeded { - t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded) - } -} - -func TestDeadline(t *testing.T) { - c, _ := WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) - if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { - t.Errorf("c.String() = %q want prefix %q", got, prefix) - } - testDeadline(c, 200*time.Millisecond, t) - - c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) - o := otherContext{c} - testDeadline(o, 200*time.Millisecond, t) - - c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) - o = otherContext{c} - c, _ = WithDeadline(o, time.Now().Add(300*time.Millisecond)) - testDeadline(c, 200*time.Millisecond, t) -} - -func TestTimeout(t *testing.T) { - c, _ := WithTimeout(Background(), 100*time.Millisecond) - if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { - t.Errorf("c.String() = %q want prefix %q", got, prefix) - } - testDeadline(c, 200*time.Millisecond, t) - - c, _ = WithTimeout(Background(), 100*time.Millisecond) - o := otherContext{c} - testDeadline(o, 200*time.Millisecond, t) - - c, _ = WithTimeout(Background(), 100*time.Millisecond) - o = otherContext{c} - c, _ = WithTimeout(o, 300*time.Millisecond) - testDeadline(c, 
200*time.Millisecond, t) -} - -func TestCanceledTimeout(t *testing.T) { - c, _ := WithTimeout(Background(), 200*time.Millisecond) - o := otherContext{c} - c, cancel := WithTimeout(o, 400*time.Millisecond) - cancel() - time.Sleep(100 * time.Millisecond) // let cancelation propagate - select { - case <-c.Done(): - default: - t.Errorf("<-c.Done() blocked, but shouldn't have") - } - if e := c.Err(); e != Canceled { - t.Errorf("c.Err() == %v want %v", e, Canceled) - } -} - -type key1 int -type key2 int - -var k1 = key1(1) -var k2 = key2(1) // same int as k1, different type -var k3 = key2(3) // same type as k2, different int - -func TestValues(t *testing.T) { - check := func(c Context, nm, v1, v2, v3 string) { - if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 { - t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0) - } - if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 { - t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0) - } - if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 { - t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0) - } - } - - c0 := Background() - check(c0, "c0", "", "", "") - - c1 := WithValue(Background(), k1, "c1k1") - check(c1, "c1", "c1k1", "", "") - - if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want { - t.Errorf("c.String() = %q want %q", got, want) - } - - c2 := WithValue(c1, k2, "c2k2") - check(c2, "c2", "c1k1", "c2k2", "") - - c3 := WithValue(c2, k3, "c3k3") - check(c3, "c2", "c1k1", "c2k2", "c3k3") - - c4 := WithValue(c3, k1, nil) - check(c4, "c4", "", "c2k2", "c3k3") - - o0 := otherContext{Background()} - check(o0, "o0", "", "", "") - - o1 := otherContext{WithValue(Background(), k1, "c1k1")} - check(o1, "o1", "c1k1", "", "") - - o2 := WithValue(o1, k2, "o2k2") - check(o2, "o2", "c1k1", "o2k2", "") - - o3 := otherContext{c4} - check(o3, "o3", "", "c2k2", "c3k3") 
- - o4 := WithValue(o3, k3, nil) - check(o4, "o4", "", "c2k2", "") -} - -func TestAllocs(t *testing.T) { - bg := Background() - for _, test := range []struct { - desc string - f func() - limit float64 - gccgoLimit float64 - }{ - { - desc: "Background()", - f: func() { Background() }, - limit: 0, - gccgoLimit: 0, - }, - { - desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1), - f: func() { - c := WithValue(bg, k1, nil) - c.Value(k1) - }, - limit: 3, - gccgoLimit: 3, - }, - { - desc: "WithTimeout(bg, 15*time.Millisecond)", - f: func() { - c, _ := WithTimeout(bg, 15*time.Millisecond) - <-c.Done() - }, - limit: 8, - gccgoLimit: 15, - }, - { - desc: "WithCancel(bg)", - f: func() { - c, cancel := WithCancel(bg) - cancel() - <-c.Done() - }, - limit: 5, - gccgoLimit: 8, - }, - { - desc: "WithTimeout(bg, 100*time.Millisecond)", - f: func() { - c, cancel := WithTimeout(bg, 100*time.Millisecond) - cancel() - <-c.Done() - }, - limit: 8, - gccgoLimit: 25, - }, - } { - limit := test.limit - if runtime.Compiler == "gccgo" { - // gccgo does not yet do escape analysis. - // TOOD(iant): Remove this when gccgo does do escape analysis. - limit = test.gccgoLimit - } - if n := testing.AllocsPerRun(100, test.f); n > limit { - t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit)) - } - } -} - -func TestSimultaneousCancels(t *testing.T) { - root, cancel := WithCancel(Background()) - m := map[Context]CancelFunc{root: cancel} - q := []Context{root} - // Create a tree of contexts. - for len(q) != 0 && len(m) < 100 { - parent := q[0] - q = q[1:] - for i := 0; i < 4; i++ { - ctx, cancel := WithCancel(parent) - m[ctx] = cancel - q = append(q, ctx) - } - } - // Start all the cancels in a random order. - var wg sync.WaitGroup - wg.Add(len(m)) - for _, cancel := range m { - go func(cancel CancelFunc) { - cancel() - wg.Done() - }(cancel) - } - // Wait on all the contexts in a random order. 
- for ctx := range m { - select { - case <-ctx.Done(): - case <-time.After(1 * time.Second): - buf := make([]byte, 10<<10) - n := runtime.Stack(buf, true) - t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n]) - } - } - // Wait for all the cancel functions to return. - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - select { - case <-done: - case <-time.After(1 * time.Second): - buf := make([]byte, 10<<10) - n := runtime.Stack(buf, true) - t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n]) - } -} - -func TestInterlockedCancels(t *testing.T) { - parent, cancelParent := WithCancel(Background()) - child, cancelChild := WithCancel(parent) - go func() { - parent.Done() - cancelChild() - }() - cancelParent() - select { - case <-child.Done(): - case <-time.After(1 * time.Second): - buf := make([]byte, 10<<10) - n := runtime.Stack(buf, true) - t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n]) - } -} - -func TestLayersCancel(t *testing.T) { - testLayers(t, time.Now().UnixNano(), false) -} - -func TestLayersTimeout(t *testing.T) { - testLayers(t, time.Now().UnixNano(), true) -} - -func testLayers(t *testing.T, seed int64, testTimeout bool) { - rand.Seed(seed) - errorf := func(format string, a ...interface{}) { - t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...) 
- } - const ( - timeout = 200 * time.Millisecond - minLayers = 30 - ) - type value int - var ( - vals []*value - cancels []CancelFunc - numTimers int - ctx = Background() - ) - for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ { - switch rand.Intn(3) { - case 0: - v := new(value) - ctx = WithValue(ctx, v, v) - vals = append(vals, v) - case 1: - var cancel CancelFunc - ctx, cancel = WithCancel(ctx) - cancels = append(cancels, cancel) - case 2: - var cancel CancelFunc - ctx, cancel = WithTimeout(ctx, timeout) - cancels = append(cancels, cancel) - numTimers++ - } - } - checkValues := func(when string) { - for _, key := range vals { - if val := ctx.Value(key).(*value); key != val { - errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key) - } - } - } - select { - case <-ctx.Done(): - errorf("ctx should not be canceled yet") - default: - } - if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) { - t.Errorf("ctx.String() = %q want prefix %q", s, prefix) - } - t.Log(ctx) - checkValues("before cancel") - if testTimeout { - select { - case <-ctx.Done(): - case <-time.After(timeout + 100*time.Millisecond): - errorf("ctx should have timed out") - } - checkValues("after timeout") - } else { - cancel := cancels[rand.Intn(len(cancels))] - cancel() - select { - case <-ctx.Done(): - default: - errorf("ctx should be canceled") - } - checkValues("after cancel") - } -} - -func TestCancelRemoves(t *testing.T) { - checkChildren := func(when string, ctx Context, want int) { - if got := len(ctx.(*cancelCtx).children); got != want { - t.Errorf("%s: context has %d children, want %d", when, got, want) - } - } - - ctx, _ := WithCancel(Background()) - checkChildren("after creation", ctx, 0) - _, cancel := WithCancel(ctx) - checkChildren("with WithCancel child ", ctx, 1) - cancel() - checkChildren("after cancelling WithCancel child", ctx, 0) - - ctx, _ = WithCancel(Background()) - checkChildren("after creation", ctx, 
0) - _, cancel = WithTimeout(ctx, 60*time.Minute) - checkChildren("with WithTimeout child ", ctx, 1) - cancel() - checkChildren("after cancelling WithTimeout child", ctx, 0) -} diff --git a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go deleted file mode 100644 index 48610e3627..0000000000 --- a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.5 - -package ctxhttp - -import "net/http" - -func canceler(client *http.Client, req *http.Request) func() { - ch := make(chan struct{}) - req.Cancel = ch - - return func() { - close(ch) - } -} diff --git a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go deleted file mode 100644 index 56bcbadb85..0000000000 --- a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !go1.5 - -package ctxhttp - -import "net/http" - -type requestCanceler interface { - CancelRequest(*http.Request) -} - -func canceler(client *http.Client, req *http.Request) func() { - rc, ok := client.Transport.(requestCanceler) - if !ok { - return func() {} - } - return func() { - rc.CancelRequest(req) - } -} diff --git a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go deleted file mode 100644 index 504dd63ed9..0000000000 --- a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ctxhttp provides helper functions for performing context-aware HTTP requests. -package ctxhttp // import "golang.org/x/net/context/ctxhttp" - -import ( - "io" - "net/http" - "net/url" - "strings" - - "golang.org/x/net/context" -) - -// Do sends an HTTP request with the provided http.Client and returns an HTTP response. -// If the client is nil, http.DefaultClient is used. -// If the context is canceled or times out, ctx.Err() will be returned. -func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - - // Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go. - cancel := canceler(client, req) - - type responseAndError struct { - resp *http.Response - err error - } - result := make(chan responseAndError, 1) - - go func() { - resp, err := client.Do(req) - result <- responseAndError{resp, err} - }() - - select { - case <-ctx.Done(): - cancel() - return nil, ctx.Err() - case r := <-result: - return r.resp, r.err - } -} - -// Get issues a GET request via the Do function. 
-func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Head issues a HEAD request via the Do function. -func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Post issues a POST request via the Do function. -func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { - req, err := http.NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return Do(ctx, client, req) -} - -// PostForm issues a POST request via the Do function. -func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { - return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} diff --git a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp_test.go b/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp_test.go deleted file mode 100644 index 47b53d7f18..0000000000 --- a/Godeps/_workspace/src/golang.org/x/net/context/ctxhttp/ctxhttp_test.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ctxhttp - -import ( - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - "time" - - "golang.org/x/net/context" -) - -const ( - requestDuration = 100 * time.Millisecond - requestBody = "ok" -) - -func TestNoTimeout(t *testing.T) { - ctx := context.Background() - resp, err := doRequest(ctx) - - if resp == nil || err != nil { - t.Fatalf("error received from client: %v %v", err, resp) - } -} -func TestCancel(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - go func() { - time.Sleep(requestDuration / 2) - cancel() - }() - - resp, err := doRequest(ctx) - - if resp != nil || err == nil { - t.Fatalf("expected error, didn't get one. resp: %v", resp) - } - if err != ctx.Err() { - t.Fatalf("expected error from context but got: %v", err) - } -} - -func TestCancelAfterRequest(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - - resp, err := doRequest(ctx) - - // Cancel before reading the body. - // Request.Body should still be readable after the context is canceled. - cancel() - - b, err := ioutil.ReadAll(resp.Body) - if err != nil || string(b) != requestBody { - t.Fatalf("could not read body: %q %v", b, err) - } -} - -func doRequest(ctx context.Context) (*http.Response, error) { - var okHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - time.Sleep(requestDuration) - w.Write([]byte(requestBody)) - }) - - serv := httptest.NewServer(okHandler) - defer serv.Close() - - return Get(ctx, nil, serv.URL) -} diff --git a/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go b/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go deleted file mode 100644 index a6754dc368..0000000000 --- a/Godeps/_workspace/src/golang.org/x/net/context/withtimeout_test.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package context_test - -import ( - "fmt" - "time" - - "golang.org/x/net/context" -) - -func ExampleWithTimeout() { - // Pass a context with a timeout to tell a blocking function that it - // should abandon its work after the timeout elapses. - ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) - select { - case <-time.After(200 * time.Millisecond): - fmt.Println("overslept") - case <-ctx.Done(): - fmt.Println(ctx.Err()) // prints "context deadline exceeded" - } - // Output: - // context deadline exceeded -} diff --git a/Godeps/_workspace/src/github.com/kardianos/osext/LICENSE b/Godeps/_workspace/src/google.golang.org/api/LICENSE similarity index 96% rename from Godeps/_workspace/src/github.com/kardianos/osext/LICENSE rename to Godeps/_workspace/src/google.golang.org/api/LICENSE index 7448756763..263aa7a0c1 100644 --- a/Godeps/_workspace/src/github.com/kardianos/osext/LICENSE +++ b/Godeps/_workspace/src/google.golang.org/api/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2011 Google Inc. All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/Godeps/_workspace/src/google.golang.org/api/googleapi/googleapi.go b/Godeps/_workspace/src/google.golang.org/api/googleapi/googleapi.go index bfd740f232..8796e3e097 100644 --- a/Godeps/_workspace/src/google.golang.org/api/googleapi/googleapi.go +++ b/Godeps/_workspace/src/google.golang.org/api/googleapi/googleapi.go @@ -9,22 +9,13 @@ package googleapi import ( "bytes" "encoding/json" - "errors" "fmt" "io" "io/ioutil" - "mime/multipart" "net/http" - "net/textproto" "net/url" - "regexp" - "strconv" "strings" - "sync" - "time" - "golang.org/x/net/context" - "golang.org/x/net/context/ctxhttp" "google.golang.org/api/googleapi/internal/uritemplates" ) @@ -56,14 +47,15 @@ type ServerResponse struct { const ( Version = "0.5" - // statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete. - statusResumeIncomplete = 308 - // UserAgent is the header string used to identify this package. UserAgent = "google-api-go-client/" + Version - // uploadPause determines the delay between failed upload attempts - uploadPause = 1 * time.Second + // The default chunk size to use for resumable uplods if not specified by the user. + DefaultUploadChunkSize = 8 * 1024 * 1024 + + // The minimum chunk size that can be used for resumable uploads. All + // user-specified chunk sizes must be multiple of this value. + MinUploadChunkSize = 256 * 1024 ) // Error contains an error response from the server. 
@@ -189,52 +181,6 @@ func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) { return buf, nil } -func getMediaType(media io.Reader) (io.Reader, string) { - if typer, ok := media.(ContentTyper); ok { - return media, typer.ContentType() - } - - pr, pw := io.Pipe() - typ := "application/octet-stream" - buf, err := ioutil.ReadAll(io.LimitReader(media, 512)) - if err != nil { - pw.CloseWithError(fmt.Errorf("error reading media: %v", err)) - return pr, typ - } - typ = http.DetectContentType(buf) - mr := io.MultiReader(bytes.NewReader(buf), media) - go func() { - _, err = io.Copy(pw, mr) - if err != nil { - pw.CloseWithError(fmt.Errorf("error reading media: %v", err)) - return - } - pw.Close() - }() - return pr, typ -} - -// DetectMediaType detects and returns the content type of the provided media. -// If the type can not be determined, "application/octet-stream" is returned. -func DetectMediaType(media io.ReaderAt) string { - if typer, ok := media.(ContentTyper); ok { - return typer.ContentType() - } - - typ := "application/octet-stream" - buf := make([]byte, 1024) - n, err := media.ReadAt(buf, 0) - buf = buf[:n] - if err == nil || err == io.EOF { - typ = http.DetectContentType(buf) - } - return typ -} - -type Lengther interface { - Len() int -} - // endingWithErrorReader from r until it returns an error. If the // final error from r is io.EOF and e is non-nil, e is used instead. type endingWithErrorReader struct { @@ -250,12 +196,6 @@ func (er endingWithErrorReader) Read(p []byte) (n int, err error) { return } -func typeHeader(contentType string) textproto.MIMEHeader { - h := make(textproto.MIMEHeader) - h.Set("Content-Type", contentType) - return h -} - // countingWriter counts the number of bytes it receives to write, but // discards them. type countingWriter struct { @@ -267,203 +207,59 @@ func (w countingWriter) Write(p []byte) (int, error) { return len(p), nil } -// ConditionallyIncludeMedia does nothing if media is nil. 
-// -// bodyp is an in/out parameter. It should initially point to the -// reader of the application/json (or whatever) payload to send in the -// API request. It's updated to point to the multipart body reader. -// -// ctypep is an in/out parameter. It should initially point to the -// content type of the bodyp, usually "application/json". It's updated -// to the "multipart/related" content type, with random boundary. -// -// The return value is the content-length of the entire multpart body. -func ConditionallyIncludeMedia(media io.Reader, bodyp *io.Reader, ctypep *string) (cancel func(), ok bool) { - if media == nil { - return - } - // Get the media type, which might return a different reader instance. - var mediaType string - media, mediaType = getMediaType(media) - - body, bodyType := *bodyp, *ctypep - - pr, pw := io.Pipe() - mpw := multipart.NewWriter(pw) - *bodyp = pr - *ctypep = "multipart/related; boundary=" + mpw.Boundary() - go func() { - w, err := mpw.CreatePart(typeHeader(bodyType)) - if err != nil { - mpw.Close() - pw.CloseWithError(fmt.Errorf("googleapi: body CreatePart failed: %v", err)) - return - } - _, err = io.Copy(w, body) - if err != nil { - mpw.Close() - pw.CloseWithError(fmt.Errorf("googleapi: body Copy failed: %v", err)) - return - } - - w, err = mpw.CreatePart(typeHeader(mediaType)) - if err != nil { - mpw.Close() - pw.CloseWithError(fmt.Errorf("googleapi: media CreatePart failed: %v", err)) - return - } - _, err = io.Copy(w, media) - if err != nil { - mpw.Close() - pw.CloseWithError(fmt.Errorf("googleapi: media Copy failed: %v", err)) - return - } - mpw.Close() - pw.Close() - }() - cancel = func() { pw.CloseWithError(errAborted) } - return cancel, true -} - -var errAborted = errors.New("googleapi: upload aborted") - // ProgressUpdater is a function that is called upon every progress update of a resumable upload. // This is the only part of a resumable upload (from googleapi) that is usable by the developer. 
// The remaining usable pieces of resumable uploads is exposed in each auto-generated API. type ProgressUpdater func(current, total int64) -// ResumableUpload is used by the generated APIs to provide resumable uploads. -// It is not used by developers directly. -type ResumableUpload struct { - Client *http.Client - // URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable". - URI string - UserAgent string // User-Agent for header of the request - // Media is the object being uploaded. - Media io.ReaderAt - // MediaType defines the media type, e.g. "image/jpeg". - MediaType string - // ContentLength is the full size of the object being uploaded. - ContentLength int64 - - mu sync.Mutex // guards progress - progress int64 // number of bytes uploaded so far - started bool // whether the upload has been started - - // Callback is an optional function that will be called upon every progress update. - Callback ProgressUpdater +type MediaOption interface { + setOptions(o *MediaOptions) } -var ( - // rangeRE matches the transfer status response from the server. $1 is the last byte index uploaded. - rangeRE = regexp.MustCompile(`^bytes=0\-(\d+)$`) - // chunkSize is the size of the chunks created during a resumable upload and should be a power of two. - // 1<<18 is the minimum size supported by the Google uploader, and there is no maximum. - chunkSize int64 = 1 << 18 -) +type contentTypeOption string -// Progress returns the number of bytes uploaded at this point. 
-func (rx *ResumableUpload) Progress() int64 { - rx.mu.Lock() - defer rx.mu.Unlock() - return rx.progress +func (ct contentTypeOption) setOptions(o *MediaOptions) { + o.ContentType = string(ct) } -func (rx *ResumableUpload) transferStatus(ctx context.Context) (int64, *http.Response, error) { - req, _ := http.NewRequest("POST", rx.URI, nil) - req.ContentLength = 0 - req.Header.Set("User-Agent", rx.UserAgent) - req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength)) - res, err := ctxhttp.Do(ctx, rx.Client, req) - if err != nil || res.StatusCode != statusResumeIncomplete { - return 0, res, err - } - var start int64 - if m := rangeRE.FindStringSubmatch(res.Header.Get("Range")); len(m) == 2 { - start, err = strconv.ParseInt(m[1], 10, 64) - if err != nil { - return 0, nil, fmt.Errorf("unable to parse range size %v", m[1]) - } - start += 1 // Start at the next byte +// ContentType returns a MediaOption which sets the content type of data to be uploaded. +func ContentType(ctype string) MediaOption { + return contentTypeOption(ctype) +} + +type chunkSizeOption int + +func (cs chunkSizeOption) setOptions(o *MediaOptions) { + size := int(cs) + if size%MinUploadChunkSize != 0 { + size += MinUploadChunkSize - (size % MinUploadChunkSize) } - return start, res, nil + o.ChunkSize = size } -type chunk struct { - body io.Reader - size int64 - err error +// ChunkSize returns a MediaOption which sets the chunk size for media uploads. +// size will be rounded up to the nearest multiple of 256K. +// Media which contains fewer than size bytes will be uploaded in a single request. +// Media which contains size bytes or more will be uploaded in separate chunks. +// If size is zero, media will be uploaded in a single request. 
+func ChunkSize(size int) MediaOption { + return chunkSizeOption(size) } -func (rx *ResumableUpload) transferChunks(ctx context.Context) (*http.Response, error) { - var start int64 - var err error - res := &http.Response{} - if rx.started { - start, res, err = rx.transferStatus(ctx) - if err != nil || res.StatusCode != statusResumeIncomplete { - return res, err - } - } - rx.started = true - - for { - select { // Check for cancellation - case <-ctx.Done(): - res.StatusCode = http.StatusRequestTimeout - return res, ctx.Err() - default: - } - reqSize := rx.ContentLength - start - if reqSize > chunkSize { - reqSize = chunkSize - } - r := io.NewSectionReader(rx.Media, start, reqSize) - req, _ := http.NewRequest("POST", rx.URI, r) - req.ContentLength = reqSize - req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength)) - req.Header.Set("Content-Type", rx.MediaType) - req.Header.Set("User-Agent", rx.UserAgent) - res, err = ctxhttp.Do(ctx, rx.Client, req) - start += reqSize - if err == nil && (res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK) { - rx.mu.Lock() - rx.progress = start // keep track of number of bytes sent so far - rx.mu.Unlock() - if rx.Callback != nil { - rx.Callback(start, rx.ContentLength) - } - } - if err != nil || res.StatusCode != statusResumeIncomplete { - break - } - } - return res, err +// MediaOptions stores options for customizing media upload. It is not used by developers directly. +type MediaOptions struct { + ContentType string + ChunkSize int } -var sleep = time.Sleep // override in unit tests - -// Upload starts the process of a resumable upload with a cancellable context. -// It retries indefinitely (with a pause of uploadPause between attempts) until cancelled. -// It is called from the auto-generated API code and is not visible to the user. -// rx is private to the auto-generated API code. 
-func (rx *ResumableUpload) Upload(ctx context.Context) (*http.Response, error) { - var res *http.Response - var err error - for { - res, err = rx.transferChunks(ctx) - if err != nil || res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK { - return res, err - } - select { // Check for cancellation - case <-ctx.Done(): - res.StatusCode = http.StatusRequestTimeout - return res, ctx.Err() - default: - } - sleep(uploadPause) +// ProcessMediaOptions stores options from opts in a MediaOptions. +// It is not used by developers directly. +func ProcessMediaOptions(opts []MediaOption) *MediaOptions { + mo := &MediaOptions{ChunkSize: DefaultUploadChunkSize} + for _, o := range opts { + o.setOptions(mo) } - return res, err + return mo } func ResolveRelative(basestr, relstr string) string { @@ -590,3 +386,33 @@ func CombineFields(s []Field) string { } return strings.Join(r, ",") } + +// A CallOption is an optional argument to an API call. +// It should be treated as an opaque value by users of Google APIs. +// +// A CallOption is something that configures an API call in a way that is +// not specific to that API; for instance, controlling the quota user for +// an API call is common across many APIs, and is thus a CallOption. +type CallOption interface { + Get() (key, value string) +} + +// QuotaUser returns a CallOption that will set the quota user for a call. +// The quota user can be used by server-side applications to control accounting. +// It can be an arbitrary string up to 40 characters, and will override UserIP +// if both are provided. +func QuotaUser(u string) CallOption { return quotaUser(u) } + +type quotaUser string + +func (q quotaUser) Get() (string, string) { return "quotaUser", string(q) } + +// UserIP returns a CallOption that will set the "userIp" parameter of a call. +// This should be the IP address of the originating request. 
+func UserIP(ip string) CallOption { return userIP(ip) } + +type userIP string + +func (i userIP) Get() (string, string) { return "userIp", string(i) } + +// TODO: Fields too diff --git a/Godeps/_workspace/src/google.golang.org/api/googleapi/googleapi_test.go b/Godeps/_workspace/src/google.golang.org/api/googleapi/googleapi_test.go deleted file mode 100644 index 601d67c864..0000000000 --- a/Godeps/_workspace/src/google.golang.org/api/googleapi/googleapi_test.go +++ /dev/null @@ -1,599 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package googleapi - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "os" - "reflect" - "regexp" - "strconv" - "strings" - "testing" - "time" - - "golang.org/x/net/context" -) - -type SetOpaqueTest struct { - in *url.URL - wantRequestURI string -} - -var setOpaqueTests = []SetOpaqueTest{ - // no path - { - &url.URL{ - Scheme: "http", - Host: "www.golang.org", - }, - "http://www.golang.org", - }, - // path - { - &url.URL{ - Scheme: "http", - Host: "www.golang.org", - Path: "/", - }, - "http://www.golang.org/", - }, - // file with hex escaping - { - &url.URL{ - Scheme: "https", - Host: "www.golang.org", - Path: "/file%20one&two", - }, - "https://www.golang.org/file%20one&two", - }, - // query - { - &url.URL{ - Scheme: "http", - Host: "www.golang.org", - Path: "/", - RawQuery: "q=go+language", - }, - "http://www.golang.org/?q=go+language", - }, - // file with hex escaping in path plus query - { - &url.URL{ - Scheme: "https", - Host: "www.golang.org", - Path: "/file%20one&two", - RawQuery: "q=go+language", - }, - "https://www.golang.org/file%20one&two?q=go+language", - }, - // query with hex escaping - { - &url.URL{ - Scheme: "http", - Host: "www.golang.org", - Path: "/", - RawQuery: "q=go%20language", - }, - "http://www.golang.org/?q=go%20language", - }, -} - -// prefixTmpl is a 
template for the expected prefix of the output of writing -// an HTTP request. -const prefixTmpl = "GET %v HTTP/1.1\r\nHost: %v\r\n" - -func TestSetOpaque(t *testing.T) { - for _, test := range setOpaqueTests { - u := *test.in - SetOpaque(&u) - - w := &bytes.Buffer{} - r := &http.Request{URL: &u} - if err := r.Write(w); err != nil { - t.Errorf("write request: %v", err) - continue - } - - prefix := fmt.Sprintf(prefixTmpl, test.wantRequestURI, test.in.Host) - if got := string(w.Bytes()); !strings.HasPrefix(got, prefix) { - t.Errorf("got %q expected prefix %q", got, prefix) - } - } -} - -type ExpandTest struct { - in string - expansions map[string]string - want string -} - -var expandTests = []ExpandTest{ - // no expansions - { - "http://www.golang.org/", - map[string]string{}, - "http://www.golang.org/", - }, - // one expansion, no escaping - { - "http://www.golang.org/{bucket}/delete", - map[string]string{ - "bucket": "red", - }, - "http://www.golang.org/red/delete", - }, - // one expansion, with hex escapes - { - "http://www.golang.org/{bucket}/delete", - map[string]string{ - "bucket": "red/blue", - }, - "http://www.golang.org/red%2Fblue/delete", - }, - // one expansion, with space - { - "http://www.golang.org/{bucket}/delete", - map[string]string{ - "bucket": "red or blue", - }, - "http://www.golang.org/red%20or%20blue/delete", - }, - // expansion not found - { - "http://www.golang.org/{object}/delete", - map[string]string{ - "bucket": "red or blue", - }, - "http://www.golang.org//delete", - }, - // multiple expansions - { - "http://www.golang.org/{one}/{two}/{three}/get", - map[string]string{ - "one": "ONE", - "two": "TWO", - "three": "THREE", - }, - "http://www.golang.org/ONE/TWO/THREE/get", - }, - // utf-8 characters - { - "http://www.golang.org/{bucket}/get", - map[string]string{ - "bucket": "£100", - }, - "http://www.golang.org/%C2%A3100/get", - }, - // punctuations - { - "http://www.golang.org/{bucket}/get", - map[string]string{ - "bucket": `/\@:,.`, - }, - 
"http://www.golang.org/%2F%5C%40%3A%2C./get", - }, - // mis-matched brackets - { - "http://www.golang.org/{bucket/get", - map[string]string{ - "bucket": "red", - }, - "http://www.golang.org/{bucket/get", - }, - // "+" prefix for suppressing escape - // See also: http://tools.ietf.org/html/rfc6570#section-3.2.3 - { - "http://www.golang.org/{+topic}", - map[string]string{ - "topic": "/topics/myproject/mytopic", - }, - // The double slashes here look weird, but it's intentional - "http://www.golang.org//topics/myproject/mytopic", - }, -} - -func TestExpand(t *testing.T) { - for i, test := range expandTests { - u := url.URL{ - Path: test.in, - } - Expand(&u, test.expansions) - got := u.Path - if got != test.want { - t.Errorf("got %q expected %q in test %d", got, test.want, i+1) - } - } -} - -type CheckResponseTest struct { - in *http.Response - bodyText string - want error - errText string -} - -var checkResponseTests = []CheckResponseTest{ - { - &http.Response{ - StatusCode: http.StatusOK, - }, - "", - nil, - "", - }, - { - &http.Response{ - StatusCode: http.StatusInternalServerError, - }, - `{"error":{}}`, - &Error{ - Code: http.StatusInternalServerError, - Body: `{"error":{}}`, - }, - `googleapi: got HTTP response code 500 with body: {"error":{}}`, - }, - { - &http.Response{ - StatusCode: http.StatusNotFound, - }, - `{"error":{"message":"Error message for StatusNotFound."}}`, - &Error{ - Code: http.StatusNotFound, - Message: "Error message for StatusNotFound.", - Body: `{"error":{"message":"Error message for StatusNotFound."}}`, - }, - "googleapi: Error 404: Error message for StatusNotFound.", - }, - { - &http.Response{ - StatusCode: http.StatusBadRequest, - }, - `{"error":"invalid_token","error_description":"Invalid Value"}`, - &Error{ - Code: http.StatusBadRequest, - Body: `{"error":"invalid_token","error_description":"Invalid Value"}`, - }, - `googleapi: got HTTP response code 400 with body: {"error":"invalid_token","error_description":"Invalid Value"}`, - }, - { 
- &http.Response{ - StatusCode: http.StatusBadRequest, - }, - `{"error":{"errors":[{"domain":"usageLimits","reason":"keyInvalid","message":"Bad Request"}],"code":400,"message":"Bad Request"}}`, - &Error{ - Code: http.StatusBadRequest, - Errors: []ErrorItem{ - { - Reason: "keyInvalid", - Message: "Bad Request", - }, - }, - Body: `{"error":{"errors":[{"domain":"usageLimits","reason":"keyInvalid","message":"Bad Request"}],"code":400,"message":"Bad Request"}}`, - Message: "Bad Request", - }, - "googleapi: Error 400: Bad Request, keyInvalid", - }, -} - -func TestCheckResponse(t *testing.T) { - for _, test := range checkResponseTests { - res := test.in - if test.bodyText != "" { - res.Body = ioutil.NopCloser(strings.NewReader(test.bodyText)) - } - g := CheckResponse(res) - if !reflect.DeepEqual(g, test.want) { - t.Errorf("CheckResponse: got %v, want %v", g, test.want) - gotJson, err := json.Marshal(g) - if err != nil { - t.Error(err) - } - wantJson, err := json.Marshal(test.want) - if err != nil { - t.Error(err) - } - t.Errorf("json(got): %q\njson(want): %q", string(gotJson), string(wantJson)) - } - if g != nil && g.Error() != test.errText { - t.Errorf("CheckResponse: unexpected error message.\nGot: %q\nwant: %q", g, test.errText) - } - } -} - -type VariantPoint struct { - Type string - Coordinates []float64 -} - -type VariantTest struct { - in map[string]interface{} - result bool - want VariantPoint -} - -var coords = []interface{}{1.0, 2.0} - -var variantTests = []VariantTest{ - { - in: map[string]interface{}{ - "type": "Point", - "coordinates": coords, - }, - result: true, - want: VariantPoint{ - Type: "Point", - Coordinates: []float64{1.0, 2.0}, - }, - }, - { - in: map[string]interface{}{ - "type": "Point", - "bogus": coords, - }, - result: true, - want: VariantPoint{ - Type: "Point", - }, - }, -} - -func TestVariantType(t *testing.T) { - for _, test := range variantTests { - if g := VariantType(test.in); g != test.want.Type { - t.Errorf("VariantType(%v): got %v, 
want %v", test.in, g, test.want.Type) - } - } -} - -func TestConvertVariant(t *testing.T) { - for _, test := range variantTests { - g := VariantPoint{} - r := ConvertVariant(test.in, &g) - if r != test.result { - t.Errorf("ConvertVariant(%v): got %v, want %v", test.in, r, test.result) - } - if !reflect.DeepEqual(g, test.want) { - t.Errorf("ConvertVariant(%v): got %v, want %v", test.in, g, test.want) - } - } -} - -type unexpectedReader struct{} - -func (unexpectedReader) Read([]byte) (int, error) { - return 0, fmt.Errorf("unexpected read in test.") -} - -var contentRangeRE = regexp.MustCompile(`^bytes (\d+)\-(\d+)/(\d+)$`) - -func (t *testTransport) RoundTrip(req *http.Request) (*http.Response, error) { - t.req = req - if rng := req.Header.Get("Content-Range"); rng != "" && !strings.HasPrefix(rng, "bytes */") { // Read the data - m := contentRangeRE.FindStringSubmatch(rng) - if len(m) != 4 { - return nil, fmt.Errorf("unable to parse content range: %v", rng) - } - start, err := strconv.ParseInt(m[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("unable to parse content range: %v", rng) - } - end, err := strconv.ParseInt(m[2], 10, 64) - if err != nil { - return nil, fmt.Errorf("unable to parse content range: %v", rng) - } - totalSize, err := strconv.ParseInt(m[3], 10, 64) - if err != nil { - return nil, fmt.Errorf("unable to parse content range: %v", rng) - } - partialSize := end - start + 1 - t.buf, err = ioutil.ReadAll(req.Body) - if err != nil || int64(len(t.buf)) != partialSize { - return nil, fmt.Errorf("unable to read %v bytes from request data, n=%v: %v", partialSize, len(t.buf), err) - } - if totalSize == end+1 { - t.statusCode = 200 // signify completion of transfer - } - } - f := ioutil.NopCloser(unexpectedReader{}) - res := &http.Response{ - Body: f, - StatusCode: t.statusCode, - Header: http.Header{}, - } - if t.rangeVal != "" { - res.Header.Set("Range", t.rangeVal) - } - return res, nil -} - -type testTransport struct { - req *http.Request - 
statusCode int - rangeVal string - want int64 - buf []byte -} - -var statusTests = []*testTransport{ - &testTransport{statusCode: 308, want: 0}, - &testTransport{statusCode: 308, rangeVal: "bytes=0-0", want: 1}, - &testTransport{statusCode: 308, rangeVal: "bytes=0-42", want: 43}, -} - -func TestTransferStatus(t *testing.T) { - ctx := context.Background() - for _, tr := range statusTests { - rx := &ResumableUpload{ - Client: &http.Client{Transport: tr}, - } - g, _, err := rx.transferStatus(ctx) - if err != nil { - t.Error(err) - } - if g != tr.want { - t.Errorf("transferStatus got %v, want %v", g, tr.want) - } - } -} - -func (t *interruptedTransport) RoundTrip(req *http.Request) (*http.Response, error) { - t.req = req - if rng := req.Header.Get("Content-Range"); rng != "" && !strings.HasPrefix(rng, "bytes */") { - t.interruptCount += 1 - if t.interruptCount%7 == 0 { // Respond with a "service unavailable" error - res := &http.Response{ - StatusCode: http.StatusServiceUnavailable, - Header: http.Header{}, - } - t.rangeVal = fmt.Sprintf("bytes=0-%v", len(t.buf)-1) // Set the response for next time - return res, nil - } - m := contentRangeRE.FindStringSubmatch(rng) - if len(m) != 4 { - return nil, fmt.Errorf("unable to parse content range: %v", rng) - } - start, err := strconv.ParseInt(m[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("unable to parse content range: %v", rng) - } - end, err := strconv.ParseInt(m[2], 10, 64) - if err != nil { - return nil, fmt.Errorf("unable to parse content range: %v", rng) - } - totalSize, err := strconv.ParseInt(m[3], 10, 64) - if err != nil { - return nil, fmt.Errorf("unable to parse content range: %v", rng) - } - partialSize := end - start + 1 - buf, err := ioutil.ReadAll(req.Body) - if err != nil || int64(len(buf)) != partialSize { - return nil, fmt.Errorf("unable to read %v bytes from request data, n=%v: %v", partialSize, len(buf), err) - } - t.buf = append(t.buf, buf...) 
- if totalSize == end+1 { - t.statusCode = 200 // signify completion of transfer - } - } - f := ioutil.NopCloser(unexpectedReader{}) - res := &http.Response{ - Body: f, - StatusCode: t.statusCode, - Header: http.Header{}, - } - if t.rangeVal != "" { - res.Header.Set("Range", t.rangeVal) - } - return res, nil -} - -type interruptedTransport struct { - req *http.Request - statusCode int - rangeVal string - interruptCount int - buf []byte - progressBuf string -} - -func (tr *interruptedTransport) ProgressUpdate(current, total int64) { - tr.progressBuf += fmt.Sprintf("%v, %v\n", current, total) -} - -func TestInterruptedTransferChunks(t *testing.T) { - f, err := os.Open("googleapi.go") - if err != nil { - t.Fatalf("unable to open googleapi.go: %v", err) - } - defer f.Close() - slurp, err := ioutil.ReadAll(f) - if err != nil { - t.Fatalf("unable to slurp file: %v", err) - } - st, err := f.Stat() - if err != nil { - t.Fatalf("unable to stat googleapi.go: %v", err) - } - tr := &interruptedTransport{ - statusCode: 308, - buf: make([]byte, 0, st.Size()), - } - oldChunkSize := chunkSize - defer func() { chunkSize = oldChunkSize }() - chunkSize = 100 // override to process small chunks for test. 
- - sleep = func(time.Duration) {} // override time.Sleep - rx := &ResumableUpload{ - Client: &http.Client{Transport: tr}, - Media: f, - MediaType: "text/plain", - ContentLength: st.Size(), - Callback: tr.ProgressUpdate, - } - res, err := rx.Upload(context.Background()) - if err != nil || res == nil || res.StatusCode != http.StatusOK { - if res == nil { - t.Errorf("transferChunks not successful, res=nil: %v", err) - } else { - t.Errorf("transferChunks not successful, statusCode=%v: %v", res.StatusCode, err) - } - } - if len(tr.buf) != len(slurp) || bytes.Compare(tr.buf, slurp) != 0 { - t.Errorf("transferred file corrupted:\ngot %s\nwant %s", tr.buf, slurp) - } - w := "" - for i := chunkSize; i <= st.Size(); i += chunkSize { - w += fmt.Sprintf("%v, %v\n", i, st.Size()) - } - if st.Size()%chunkSize != 0 { - w += fmt.Sprintf("%v, %v\n", st.Size(), st.Size()) - } - if tr.progressBuf != w { - t.Errorf("progress update error, got %v, want %v", tr.progressBuf, w) - } -} - -func TestCancelUpload(t *testing.T) { - f, err := os.Open("googleapi.go") - if err != nil { - t.Fatalf("unable to open googleapi.go: %v", err) - } - defer f.Close() - st, err := f.Stat() - if err != nil { - t.Fatalf("unable to stat googleapi.go: %v", err) - } - tr := &interruptedTransport{ - statusCode: 308, - buf: make([]byte, 0, st.Size()), - } - oldChunkSize := chunkSize - defer func() { chunkSize = oldChunkSize }() - chunkSize = 100 // override to process small chunks for test. 
- - sleep = func(time.Duration) {} // override time.Sleep - rx := &ResumableUpload{ - Client: &http.Client{Transport: tr}, - Media: f, - MediaType: "text/plain", - ContentLength: st.Size(), - Callback: tr.ProgressUpdate, - } - ctx, cancelFunc := context.WithCancel(context.Background()) - cancelFunc() // stop the upload that hasn't started yet - res, err := rx.Upload(ctx) - if err == nil || res == nil || res.StatusCode != http.StatusRequestTimeout { - if res == nil { - t.Errorf("transferChunks not successful, got res=nil, err=%v, want StatusRequestTimeout", err) - } else { - t.Errorf("transferChunks not successful, got statusCode=%v, err=%v, want StatusRequestTimeout", res.StatusCode, err) - } - } -} diff --git a/Godeps/_workspace/src/google.golang.org/api/googleapi/types_test.go b/Godeps/_workspace/src/google.golang.org/api/googleapi/types_test.go deleted file mode 100644 index a6b2045156..0000000000 --- a/Godeps/_workspace/src/google.golang.org/api/googleapi/types_test.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2013 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package googleapi - -import ( - "encoding/json" - "reflect" - "testing" -) - -func TestTypes(t *testing.T) { - type T struct { - I32 Int32s - I64 Int64s - U32 Uint32s - U64 Uint64s - F64 Float64s - } - v := &T{ - I32: Int32s{-1, 2, 3}, - I64: Int64s{-1, 2, 1 << 33}, - U32: Uint32s{1, 2}, - U64: Uint64s{1, 2, 1 << 33}, - F64: Float64s{1.5, 3.33}, - } - got, err := json.Marshal(v) - if err != nil { - t.Fatal(err) - } - want := `{"I32":["-1","2","3"],"I64":["-1","2","8589934592"],"U32":["1","2"],"U64":["1","2","8589934592"],"F64":["1.5","3.33"]}` - if string(got) != want { - t.Fatalf("Marshal mismatch.\n got: %s\nwant: %s\n", got, want) - } - - v2 := new(T) - if err := json.Unmarshal(got, v2); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if !reflect.DeepEqual(v, v2) { - t.Fatalf("Unmarshal didn't produce same results.\n got: %#v\nwant: %#v\n", v, v2) - } -} diff --git a/LICENSE b/LICENSE index 4537aa75ae..2efacf37b1 100644 --- a/LICENSE +++ b/LICENSE @@ -1,13 +1,21 @@ -Copyright 2013, 2014 Engine Yard, Inc. +The MIT License (MIT) -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at +Copyright (c) Microsoft Corporation. All rights reserved. - http://www.apache.org/licenses/LICENSE-2.0 +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/Makefile b/Makefile index a7dd6c23e7..c4488276a3 100644 --- a/Makefile +++ b/Makefile @@ -90,12 +90,10 @@ setup-gotools: go get -u -v github.com/tools/godep go get -u -v github.com/golang/lint/golint go get -u -v golang.org/x/tools/cmd/cover - go get -u -v golang.org/x/tools/cmd/vet setup-root-gotools: # "go vet" and "go cover" must be installed as root on some systems sudo GOPATH=/tmp/tmpGOPATH go get -u -v golang.org/x/tools/cmd/cover - sudo GOPATH=/tmp/tmpGOPATH go get -u -v golang.org/x/tools/cmd/vet sudo rm -rf /tmp/tmpGOPATH test: test-style test-unit test-functional push test-integration diff --git a/README.md b/README.md index 0233a283d2..e92e83b8c7 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,17 @@ -# Deis +|![](https://upload.wikimedia.org/wikipedia/commons/thumb/1/17/Warning.svg/156px-Warning.svg.png) | This repository (`deis/deis`) is [no longer developed or maintained](https://deis.com/blog/2017/deis-paas-v1-takes-a-bow/). The Deis v1 PaaS based on CoreOS Container Linux and Fleet has been replaced by [Deis Workflow](https://github.com/deis/workflow) which is based on Kubernetes. | +|---|---| + +# Deis v1 PaaS Deis (pronounced DAY-iss) is an open source PaaS that makes it easy to deploy and manage applications on your own servers. 
Deis builds upon [Docker](http://docker.io/) and [CoreOS](http://coreos.com) to provide a lightweight PaaS with a [Heroku-inspired](http://heroku.com) workflow. [![Build Status](https://ci.deis.io/buildStatus/icon?job=test-acceptance)](https://ci.deis.io/job/test-acceptance/) -[![Current Release](http://img.shields.io/badge/release-v1.12.2-1eb0fc.svg)](https://github.com/deis/deis/releases/tag/v1.12.2) +[![Current Release](http://img.shields.io/badge/release-v1.13.4-1eb0fc.svg)](https://github.com/deis/deis/releases/tag/v1.13.4) [![Latest Docs](http://img.shields.io/badge/docs-latest-fc1e5e.svg)](http://docs.deis.io/en/latest/) -![Deis Graphic](https://s3-us-west-2.amazonaws.com/deis-images/deis-graphic.png) - - New to Deis? Learn more about Deis [Concepts](http://docs.deis.io/en/latest/understanding_deis/concepts/), [Architecture](http://docs.deis.io/en/latest/understanding_deis/architecture/) and how to [Deploy an Application](http://docs.deis.io/en/latest/using_deis/deploy-application/). - -# Installing Deis +# Installing Deis v1 Deis is a set of Docker containers that can be deployed anywhere including public cloud, private cloud, bare metal or your workstation. Decide where you'd like to deploy Deis, then follow the [provider-specific documentation](http://docs.deis.io/en/latest/installing_deis/) for provisioning. diff --git a/Vagrantfile b/Vagrantfile index de4f18f970..9162d796d5 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -17,7 +17,7 @@ CONTRIB_UTILS_PATH = File.join(File.dirname(__FILE__), "contrib", "utils.sh") # Make variables from contrib/utils.sh accessible if File.exists?(CONTRIB_UTILS_PATH) cu_vars = Hash.new do |hash, key| - stdin, stdout, stderr = Open3.popen3("/usr/bin/env", "bash", "-c", "source #{CONTRIB_UTILS_PATH} && echo $#{key}") + stdin, stdout, stderr = Open3.popen3("/usr/bin/env", "bash", "-c", "source '#{CONTRIB_UTILS_PATH}' && echo $#{key}") value = stdout.gets.chomp hash[key] = value unless value.empty? 
end diff --git a/builder/Makefile b/builder/Makefile index 5717442c4b..946d59db74 100644 --- a/builder/Makefile +++ b/builder/Makefile @@ -53,7 +53,7 @@ run: install start dev-release: push set-image push: check-registry - docker tag -f $(IMAGE) $(DEV_IMAGE) + docker tag $(IMAGE) $(DEV_IMAGE) docker push $(DEV_IMAGE) set-image: check-deisctl diff --git a/builder/docker/docker.go b/builder/docker/docker.go index 793d7ebf7a..a0ee765f7e 100644 --- a/builder/docker/docker.go +++ b/builder/docker/docker.go @@ -72,6 +72,8 @@ func Start(c cookoo.Context, p *cookoo.Params) (interface{}, cookoo.Interrupt) { "192.168.0.0/16", "--insecure-registry", "100.64.0.0/10", + "--exec-opt", + "native.cgroupdriver=cgroupfs", } // For overlay-ish filesystems, force the overlay to kick in if it exists. diff --git a/builder/etcd/etcd.go b/builder/etcd/etcd.go index 05a3d3586f..2ff5f5bbbd 100644 --- a/builder/etcd/etcd.go +++ b/builder/etcd/etcd.go @@ -277,6 +277,7 @@ func keysToLocal(c cookoo.Context, client Getter, ciphers []string, etcdPath str // It only fails if it cannot copy ssh_host_key to sshHostKey. All other // abnormal conditions are logged, but not considered to be failures. 
func keysToEtcd(c cookoo.Context, client Setter, ciphers []string, etcdPath string) error { + firstpath := "" lpath := "/etc/ssh/ssh_host_%s_key" privkey := "%s/sshHost%sKey" for _, cipher := range ciphers { @@ -288,10 +289,19 @@ func keysToEtcd(c cookoo.Context, client Setter, ciphers []string, etcdPath stri } else if _, err := client.Set(key, string(content), 0); err != nil { log.Errf(c, "Could not store ssh key in etcd: %s", err) } + // Remember the first key's path in case the generic key is missing + if firstpath == "" { + firstpath = path + } } // Now we set the generic key: - if content, err := ioutil.ReadFile("/etc/ssh/ssh_host_key"); err != nil { - log.Errf(c, "Could not read the ssh_host_key file.") + keypath := "/etc/ssh/ssh_host_key" + if _, err := os.Stat(keypath); os.IsNotExist(err) && firstpath != "" { + // Use ssh_host_dsa_key if newer ssh-keygen didn't create ssh_host_key + keypath = firstpath + } + if content, err := ioutil.ReadFile(keypath); err != nil { + log.Errf(c, "Could not read the %s file.", keypath) return err } else if _, err := client.Set("sshHostKey", string(content), 0); err != nil { log.Errf(c, "Failed to set sshHostKey in etcd.") diff --git a/builder/git/git.go b/builder/git/git.go index 71aca17158..82b20711b5 100644 --- a/builder/git/git.go +++ b/builder/git/git.go @@ -30,6 +30,8 @@ strip_remote_prefix() { stdbuf -i0 -o0 -e0 sed "s/^/"$'\e[1G'"/" } +set -eo pipefail + while read oldrev newrev refname do LOCKFILE="/tmp/$RECEIVE_REPO.lock" diff --git a/builder/rootfs/Dockerfile b/builder/rootfs/Dockerfile index 5f5badca2e..1e99780752 100644 --- a/builder/rootfs/Dockerfile +++ b/builder/rootfs/Dockerfile @@ -1,7 +1,7 @@ -FROM alpine:3.2 +FROM docker:1.8-dind # install common packages -RUN apk add --update-cache curl bash sudo && rm -rf /var/cache/apk/* +RUN apk add --no-cache curl bash sudo # install etcdctl RUN curl -sSL -o /usr/local/bin/etcdctl https://s3-us-west-2.amazonaws.com/get-deis/etcdctl-v0.4.9 \ @@ -11,27 +11,19 @@ RUN 
curl -sSL -o /usr/local/bin/etcdctl https://s3-us-west-2.amazonaws.com/get-d RUN curl -sSL -o /usr/local/bin/confd https://github.com/kelseyhightower/confd/releases/download/v0.10.0/confd-0.10.0-linux-amd64 \ && chmod +x /usr/local/bin/confd -RUN apk add --update-cache \ +RUN apk add --no-cache \ coreutils \ device-mapper \ e2fsprogs \ git \ iptables \ - libudev \ + udev \ lxc \ openssh \ udev \ util-linux \ - xz \ - && rm -rf /var/cache/apk/* + xz -# the docker package in alpine disables aufs and devicemapper -ENV DOCKER_BUCKET get.docker.com -ENV DOCKER_VERSION 1.8.3 -ENV DOCKER_SHA256 f024bc65c45a3778cf07213d26016075e8172de8f6e4b5702bedde06c241650f -RUN curl -sSL "https://${DOCKER_BUCKET}/builds/Linux/x86_64/docker-$DOCKER_VERSION" -o /usr/bin/docker \ - && echo "${DOCKER_SHA256} /usr/bin/docker" | sha256sum -c - \ - && chmod +x /usr/bin/docker # configure ssh server RUN mkdir -p /var/run/sshd && rm -rf /etc/ssh/ssh_host* @@ -62,4 +54,4 @@ RUN passwd -u git COPY . / -ENV DEIS_RELEASE 1.13.0-dev +ENV DEIS_RELEASE 1.13.4 diff --git a/builder/rootfs/bin/boot b/builder/rootfs/bin/boot index 0a57033e5c..0fd9c78566 100755 --- a/builder/rootfs/bin/boot +++ b/builder/rootfs/bin/boot @@ -8,6 +8,8 @@ set -eo pipefail if [[ -f /etc/environment_proxy ]]; then source /etc/environment_proxy + export HTTP_PROXY HTTPS_PROXY ALL_PROXY NO_PROXY + export http_proxy https_proxy all_proxy no_proxy fi exec builder diff --git a/builder/rootfs/bin/entry b/builder/rootfs/bin/entry index 77793b7605..8ed73cfd60 100755 --- a/builder/rootfs/bin/entry +++ b/builder/rootfs/bin/entry @@ -3,123 +3,8 @@ set -eo pipefail if [[ -f /etc/environment_proxy ]]; then source /etc/environment_proxy + export HTTP_PROXY HTTPS_PROXY ALL_PROXY NO_PROXY + export http_proxy https_proxy all_proxy no_proxy fi -# START jpetazzo/dind wrapper - -# DinD: a wrapper script which allows docker to be run inside a docker container. 
-# Original version by Jerome Petazzoni -# See the blog post: https://blog.docker.com/2013/09/docker-can-now-run-within-docker/ -# -# This script should be executed inside a docker container in privilieged mode -# ('docker run --privileged', introduced in docker 0.6). - -# Usage: dind CMD [ARG...] - -# apparmor sucks and Docker needs to know that it's in a container (c) @tianon -export container=docker - -# as of docker 1.8, cgroups will be mounted in the container -if ! mountpoint -q /sys/fs/cgroup; then - - # First, make sure that cgroups are mounted correctly. - CGROUP=/cgroup - - mkdir -p "$CGROUP" - - if ! mountpoint -q "$CGROUP"; then - mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || { - echo >&2 'Could not make a tmpfs mount. Did you use --privileged?' - exit 1 - } - fi - - # Mount the cgroup hierarchies exactly as they are in the parent system. - for HIER in $(cut -d: -f2 /proc/1/cgroup); do - - # The following sections address a bug which manifests itself - # by a cryptic "lxc-start: no ns_cgroup option specified" when - # trying to start containers within a container. - # The bug seems to appear when the cgroup hierarchies are not - # mounted on the exact same directories in the host, and in the - # container. - - SUBSYSTEMS="${HIER%name=*}" - - # If cgroup hierarchy is named(mounted with "-o name=foo") we - # need to mount it in $CGROUP/foo to create exect same - # directoryes as on host. Else we need to mount it as is e.g. - # "subsys1,subsys2" if it has two subsystems - - # Named, control-less cgroups are mounted with "-o name=foo" - # (and appear as such under /proc//cgroup) but are usually - # mounted on a directory named "foo" (without the "name=" prefix). - # Systemd and OpenRC (and possibly others) both create such a - # cgroup. So just mount them on directory $CGROUP/foo. - - OHIER=$HIER - HIER="${HIER#*name=}" - - mkdir -p "$CGROUP/$HIER" - - if ! 
mountpoint -q "$CGROUP/$HIER"; then - mount -n -t cgroup -o "$OHIER" cgroup "$CGROUP/$HIER" - fi - - # Likewise, on at least one system, it has been reported that - # systemd would mount the CPU and CPU accounting controllers - # (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu" - # but on a directory called "cpu,cpuacct" (note the inversion - # in the order of the groups). This tries to work around it. - - if [ "$HIER" = 'cpuacct,cpu' ]; then - ln -s "$HIER" "$CGROUP/cpu,cpuacct" - fi - - # If hierarchy has multiple subsystems, in /proc//cgroup - # we will see ":subsys1,subsys2,subsys3,name=foo:" substring, - # we need to mount it to "$CGROUP/foo" and if there were no - # name to "$CGROUP/subsys1,subsys2,subsys3", so we must create - # symlinks for docker daemon to find these subsystems: - # ln -s $CGROUP/foo $CGROUP/subsys1 - # ln -s $CGROUP/subsys1,subsys2,subsys3 $CGROUP/subsys1 - - if [ "$SUBSYSTEMS" != "${SUBSYSTEMS//,/ }" ]; then - SUBSYSTEMS="${SUBSYSTEMS//,/ }" - for SUBSYS in $SUBSYSTEMS - do - ln -s "$CGROUP/$HIER" "$CGROUP/$SUBSYS" - done - fi - done -fi - -if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then - mount -t securityfs none /sys/kernel/security || { - echo >&2 'Could not mount /sys/kernel/security.' - echo >&2 'AppArmor detection and --privileged mode might break.' - } -fi - -# Note: as I write those lines, the LXC userland tools cannot setup -# a "sub-container" properly if the "devices" cgroup is not in its -# own hierarchy. Let's detect this and issue a warning. -if ! grep -q :devices: /proc/1/cgroup; then - echo >&2 'WARNING: the "devices" cgroup should be in its own hierarchy.' -fi -if ! grep -qw devices /proc/1/cgroup; then - echo >&2 'WARNING: it looks like the "devices" cgroup is not mounted.' -fi - -# Mount /tmp (conditionally) -if ! mountpoint -q /tmp; then - mount -t tmpfs none /tmp -fi - -if [ $# -gt 0 ]; then - exec "$@" -fi - -echo >&2 'ERROR: No command specified.' 
-echo >&2 'You probably want to run hack/make.sh, or maybe a shell?' -# END jpetazzo/dind wrapper +/usr/local/bin/dind $@ diff --git a/builder/rootfs/etc/confd/templates/builder b/builder/rootfs/etc/confd/templates/builder index b1c88dad5b..1a275f8d24 100755 --- a/builder/rootfs/etc/confd/templates/builder +++ b/builder/rootfs/etc/confd/templates/builder @@ -89,7 +89,7 @@ fi # and write out a Dockerfile to use that slug if [ ! -f Dockerfile ]; then BUILD_OPTS=() - BUILD_OPTS+='/usr/bin/docker' + BUILD_OPTS+='/usr/local/bin/docker' BUILD_OPTS+=' run -v /etc/environment_proxy:/etc/environment_proxy' # get application configuration BUILD_OPTS+=$(echo $RESPONSE | get-app-values) @@ -131,7 +131,6 @@ puts-step "Building Docker image" docker build -t $TMP_IMAGE . 2>&1 puts-step "Pushing image to private registry" docker push $TMP_IMAGE &>/dev/null -echo # use Procfile if provided, otherwise try default process types from ./release if [ -f Procfile ]; then @@ -173,6 +172,7 @@ echo # cleanup cd $REPO_DIR git gc &>/dev/null +rm -rf $TMP_DIR if [ -n "$JOB" ]; then docker rm -f $JOB &>/dev/null fi diff --git a/builder/rootfs/usr/local/src/slugbuilder/Dockerfile b/builder/rootfs/usr/local/src/slugbuilder/Dockerfile index 72cd4a4ccb..a5481e4744 100644 --- a/builder/rootfs/usr/local/src/slugbuilder/Dockerfile +++ b/builder/rootfs/usr/local/src/slugbuilder/Dockerfile @@ -11,4 +11,4 @@ ADD ./builder/ /tmp/builder RUN /tmp/builder/install-buildpacks ENTRYPOINT ["/tmp/builder/build.sh"] -ENV DEIS_RELEASE 1.13.0-dev +ENV DEIS_RELEASE 1.13.4 diff --git a/builder/rootfs/usr/local/src/slugbuilder/builder/install-buildpacks b/builder/rootfs/usr/local/src/slugbuilder/builder/install-buildpacks index 73715c3e5e..7f2c431b38 100755 --- a/builder/rootfs/usr/local/src/slugbuilder/builder/install-buildpacks +++ b/builder/rootfs/usr/local/src/slugbuilder/builder/install-buildpacks @@ -28,15 +28,15 @@ download_buildpack() { mkdir -p $BUILDPACK_INSTALL_PATH -download_buildpack 
https://github.com/heroku/heroku-buildpack-multi.git 26fa21a -download_buildpack https://github.com/heroku/heroku-buildpack-ruby.git v140 -download_buildpack https://github.com/heroku/heroku-buildpack-nodejs.git v86 -download_buildpack https://github.com/heroku/heroku-buildpack-java.git v40 -download_buildpack https://github.com/heroku/heroku-buildpack-gradle.git v12 -download_buildpack https://github.com/heroku/heroku-buildpack-grails.git v19 -download_buildpack https://github.com/heroku/heroku-buildpack-play.git v24 -download_buildpack https://github.com/heroku/heroku-buildpack-python.git v70 -download_buildpack https://github.com/heroku/heroku-buildpack-php.git v82 -download_buildpack https://github.com/heroku/heroku-buildpack-clojure.git v70 -download_buildpack https://github.com/heroku/heroku-buildpack-scala.git v63 -download_buildpack https://github.com/heroku/heroku-buildpack-go.git v18 +download_buildpack https://github.com/heroku/heroku-buildpack-multi.git v1.0.0 +download_buildpack https://github.com/heroku/heroku-buildpack-ruby.git v150 +download_buildpack https://github.com/heroku/heroku-buildpack-nodejs.git v93 +download_buildpack https://github.com/heroku/heroku-buildpack-java.git v44 +download_buildpack https://github.com/heroku/heroku-buildpack-gradle.git v17 +download_buildpack https://github.com/heroku/heroku-buildpack-grails.git v21 +download_buildpack https://github.com/heroku/heroku-buildpack-play.git v26 +download_buildpack https://github.com/heroku/heroku-buildpack-python.git v97 +download_buildpack https://github.com/heroku/heroku-buildpack-php.git v117 +download_buildpack https://github.com/heroku/heroku-buildpack-clojure.git v75 +download_buildpack https://github.com/heroku/heroku-buildpack-scala.git v72 +download_buildpack https://github.com/heroku/heroku-buildpack-go.git v54 diff --git a/builder/rootfs/usr/local/src/slugrunner/Dockerfile b/builder/rootfs/usr/local/src/slugrunner/Dockerfile index fd3638c4e3..4afab979f5 100644 --- 
a/builder/rootfs/usr/local/src/slugrunner/Dockerfile +++ b/builder/rootfs/usr/local/src/slugrunner/Dockerfile @@ -17,7 +17,7 @@ USER slug ENV HOME /app ENTRYPOINT ["/runner/init"] -ENV DEIS_RELEASE 1.13.0-dev +ENV DEIS_RELEASE 1.13.4 ONBUILD RUN mkdir -p /app ONBUILD WORKDIR /app diff --git a/builder/sshd/server.go b/builder/sshd/server.go index dc54cbfe34..bc4e5b7ee5 100644 --- a/builder/sshd/server.go +++ b/builder/sshd/server.go @@ -31,19 +31,6 @@ const ( ServerConfig string = "ssh.ServerConfig" ) -// PrereceiveHookTmpl is a pre-receive hook. -const PrereceiveHookTpl = `#!/bin/bash -strip_remote_prefix() { - stdbuf -i0 -o0 -e0 sed "s/^/"$'\e[1G'"/" -} - -echo "pre-receive hook START" -set -eo pipefail; while read oldrev newrev refname; do -[[ $refname = "refs/heads/master" ]] && git archive $newrev | {{.Receiver}} "$RECEIVE_REPO" "$newrev" | strip_remote_prefix -done -echo "pre-receive hook END" -` - // Serve starts a native SSH server. // // The general design of the server is that it acts as a main server for @@ -134,7 +121,7 @@ func (s *server) handleConn(conn net.Conn, conf *ssh.ServerConfig) { _, chans, reqs, err := ssh.NewServerConn(conn, conf) if err != nil { // Handshake failure. 
- log.Errf(s.c, "Failed handshake: %s (%v)", err, conn) + log.Debugf(s.c, "Failed handshake: %s", err) return } @@ -199,7 +186,7 @@ func (s *server) answer(channel ssh.Channel, requests <-chan *ssh.Request, sshCo case "env": o := &EnvVar{} ssh.Unmarshal(req.Payload, o) - fmt.Printf("Key='%s', Value='%s'\n", o.Name, o.Value) + log.Debugf(s.c, "Key='%s', Value='%s'\n", o.Name, o.Value) req.Reply(true, nil) case "exec": clean := cleanExec(req.Payload) diff --git a/client/cmd/apps.go b/client/cmd/apps.go index 435f102001..1d83df8ff2 100644 --- a/client/cmd/apps.go +++ b/client/cmd/apps.go @@ -2,8 +2,8 @@ package cmd import ( "fmt" - "net/url" "os" + "sort" "strings" "time" @@ -83,6 +83,7 @@ func AppsList(results int) error { fmt.Printf("=== Apps%s", limitCount(len(apps), count)) + sort.Sort(apps) for _, app := range apps { fmt.Println(app.ID) } @@ -142,15 +143,12 @@ func AppOpen(appID string) error { return err } - u, err := url.Parse(app.URL) - - if err != nil { - return err + u := app.URL + if !(strings.HasPrefix(u, "http://") || strings.HasPrefix(u, "https://")) { + u = "http://" + u } - u.Scheme = "http" - - return webbrowser.Webbrowser(u.String()) + return webbrowser.Webbrowser(u) } // AppLogs returns the logs from an app. @@ -172,7 +170,7 @@ func AppLogs(appID string, lines int) error { // printLogs prints each log line with a color matched to its category. 
func printLogs(logs string) error { - for _, log := range strings.Split(strings.Trim(logs, `\n`), `\n`) { + for _, log := range strings.Split(logs, "\n") { category := "unknown" parts := strings.Split(strings.Split(log, ": ")[0], " ") if len(parts) >= 2 { diff --git a/client/cmd/auth.go b/client/cmd/auth.go index 293b437594..a7e87e3ad3 100644 --- a/client/cmd/auth.go +++ b/client/cmd/auth.go @@ -5,6 +5,7 @@ import ( "fmt" "net/url" "os" + "reflect" "strings" "syscall" @@ -242,12 +243,16 @@ func Cancel(username string, password string, yes bool) error { err = auth.Delete(c, username) - if err != nil { + cleanup := fmt.Errorf("\n%s %s\n\n", "409", "Conflict") + if reflect.DeepEqual(err, cleanup) { + fmt.Printf("%s still has application associated with it. Transfer ownership or delete them first\n", username) + return nil + } else if err != nil { return err } // If user targets themselves, logout. - if username != "" || c.Username == username { + if username == "" || c.Username == username { if err := client.Delete(); err != nil { return err } diff --git a/client/cmd/domains.go b/client/cmd/domains.go index 7517328eda..166033b089 100644 --- a/client/cmd/domains.go +++ b/client/cmd/domains.go @@ -2,6 +2,7 @@ package cmd import ( "fmt" + "sort" "github.com/deis/deis/client/controller/models/domains" ) @@ -26,6 +27,8 @@ func DomainsList(appID string, results int) error { fmt.Printf("=== %s Domains%s", appID, limitCount(len(domains), count)) + sort.Sort(domains) + for _, domain := range domains { fmt.Println(domain.Domain) } diff --git a/client/cmd/keys.go b/client/cmd/keys.go index ddf89c19f8..88bfdacd9b 100644 --- a/client/cmd/keys.go +++ b/client/cmd/keys.go @@ -5,6 +5,7 @@ import ( "io/ioutil" "path" "regexp" + "sort" "strconv" "strings" @@ -33,6 +34,8 @@ func KeysList(results int) error { fmt.Printf("=== %s Keys%s", c.Username, limitCount(len(keys), count)) + sort.Sort(keys) + for _, key := range keys { fmt.Printf("%s %s...%s\n", key.ID, key.Public[:16], 
key.Public[len(key.Public)-10:]) } diff --git a/client/cmd/perms.go b/client/cmd/perms.go index 0fe414776d..4f54c609a8 100644 --- a/client/cmd/perms.go +++ b/client/cmd/perms.go @@ -2,6 +2,7 @@ package cmd import ( "fmt" + "sort" "github.com/deis/deis/client/controller/client" "github.com/deis/deis/client/controller/models/perms" @@ -38,6 +39,8 @@ func PermsList(appID string, admin bool, results int) error { fmt.Printf("=== %s's Users\n", appID) } + sort.Strings(users) + for _, user := range users { fmt.Println(user) } diff --git a/client/cmd/ps.go b/client/cmd/ps.go index 16ddff5729..72be000c46 100644 --- a/client/cmd/ps.go +++ b/client/cmd/ps.go @@ -3,6 +3,7 @@ package cmd import ( "fmt" "regexp" + "sort" "strconv" "strings" "time" @@ -133,15 +134,19 @@ func PsRestart(appID, target string) error { return nil } -func printProcesses(appID string, processes []api.Process, count int) { +func printProcesses(appID string, processes api.Processes, count int) { psMap := ps.ByType(processes) fmt.Printf("=== %s Processes%s", appID, limitCount(len(processes), count)) - for psType, procs := range psMap { - fmt.Printf("--- %s:\n", psType) + sort.Sort(psMap) - for _, proc := range procs { + for _, processType := range psMap { + fmt.Printf("--- %s:\n", processType.Type) + + sort.Sort(processType.Processes) + + for _, proc := range processType.Processes { fmt.Printf("%s.%d %s (%s)\n", proc.Type, proc.Num, proc.State, proc.Release) } } diff --git a/client/cmd/users.go b/client/cmd/users.go index 68ecab5b82..163b2a020a 100644 --- a/client/cmd/users.go +++ b/client/cmd/users.go @@ -2,6 +2,7 @@ package cmd import ( "fmt" + "sort" "github.com/deis/deis/client/controller/client" "github.com/deis/deis/client/controller/models/users" @@ -27,6 +28,8 @@ func UsersList(results int) error { fmt.Printf("=== Users%s", limitCount(len(users), count)) + sort.Sort(users) + for _, user := range users { fmt.Println(user.Username) } diff --git a/client/controller/api/apps.go 
b/client/controller/api/apps.go index 89770e5a35..dd5a60e9e8 100644 --- a/client/controller/api/apps.go +++ b/client/controller/api/apps.go @@ -10,6 +10,12 @@ type App struct { UUID string `json:"uuid"` } +type Apps []App + +func (a Apps) Len() int { return len(a) } +func (a Apps) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a Apps) Less(i, j int) bool { return a[i].ID < a[j].ID } + // AppCreateRequest is the definition of POST /v1/apps/. type AppCreateRequest struct { ID string `json:"id,omitempty"` diff --git a/client/controller/api/apps_test.go b/client/controller/api/apps_test.go new file mode 100644 index 0000000000..5f13107de4 --- /dev/null +++ b/client/controller/api/apps_test.go @@ -0,0 +1,24 @@ +package api + +import ( + "sort" + "testing" +) + +func TestAppsSorted(t *testing.T) { + apps := Apps{ + {"2014-01-01T00:00:00UTC", "Zulu", "John", "2016-01-02", "zulu.example.com", "d57be2ba-7ae2-4825-9ace-7c86cb893046"}, + {"2014-01-01T00:00:00UTC", "Alpha", "John", "2016-01-02", "alpha.example.com", "3d501190-1b8e-41ef-94c5-dd9a0bb707bb"}, + {"2014-01-01T00:00:00UTC", "Gamma", "John", "2016-01-02", "gamma.example.com", "41d95133-fd4d-4f4c-92a2-e454857371cc"}, + {"2014-01-01T00:00:00UTC", "Beta", "John", "2016-01-02", "beta.example.com", "222ed1aa-e985-4bec-9966-a88215300661"}, + } + + sort.Sort(apps) + expectedAppNames := []string{"Alpha", "Beta", "Gamma", "Zulu"} + + for i, app := range apps { + if expectedAppNames[i] != app.ID { + t.Errorf("Expected apps to be sorted %v, Got %v at index %v", expectedAppNames[i], app.ID, i) + } + } +} diff --git a/client/controller/api/domains.go b/client/controller/api/domains.go index 542e1699f6..6ae3889ed0 100644 --- a/client/controller/api/domains.go +++ b/client/controller/api/domains.go @@ -9,6 +9,12 @@ type Domain struct { Updated string `json:"updated"` } +type Domains []Domain + +func (d Domains) Len() int { return len(d) } +func (d Domains) Swap(i, j int) { d[i], d[j] = d[j], d[i] } +func (d Domains) Less(i, j 
int) bool { return d[i].Domain < d[j].Domain } + // DomainCreateRequest is the structure of POST /v1/app//domains/. type DomainCreateRequest struct { Domain string `json:"domain"` diff --git a/client/controller/api/domains_test.go b/client/controller/api/domains_test.go new file mode 100644 index 0000000000..6799d98adf --- /dev/null +++ b/client/controller/api/domains_test.go @@ -0,0 +1,24 @@ +package api + +import ( + "sort" + "testing" +) + +func TestDomainsSorted(t *testing.T) { + domains := Domains{ + {"Alpha", "", "gamma.example.com", "", ""}, + {"Alpha", "", "alpha1.example.com", "", ""}, + {"Alpha", "", "zulu.example.com", "", ""}, + {"Alpha", "", "delta.example.com", "", ""}, + } + + sort.Sort(domains) + expectedDomains := []string{"alpha1.example.com", "delta.example.com", "gamma.example.com", "zulu.example.com"} + + for i, domain := range domains { + if expectedDomains[i] != domain.Domain { + t.Errorf("Expected domains to be sorted %v, Got %v at index %v", expectedDomains[i], domain.Domain, i) + } + } +} diff --git a/client/controller/api/keys.go b/client/controller/api/keys.go index eb5b11c7d7..75eb8d5e7b 100644 --- a/client/controller/api/keys.go +++ b/client/controller/api/keys.go @@ -10,6 +10,12 @@ type Key struct { UUID string `json:"uuid"` } +type Keys []Key + +func (k Keys) Len() int { return len(k) } +func (k Keys) Swap(i, j int) { k[i], k[j] = k[j], k[i] } +func (k Keys) Less(i, j int) bool { return k[i].ID < k[j].ID } + // KeyCreateRequest is the definition of POST /v1/keys/. 
type KeyCreateRequest struct { ID string `json:"id"` diff --git a/client/controller/api/keys_test.go b/client/controller/api/keys_test.go new file mode 100644 index 0000000000..be331ed5dc --- /dev/null +++ b/client/controller/api/keys_test.go @@ -0,0 +1,24 @@ +package api + +import ( + "sort" + "testing" +) + +func TestKeysSorted(t *testing.T) { + keys := Keys{ + {"", "Delta", "", "", "", ""}, + {"", "Alpha", "", "", "", ""}, + {"", "Gamma", "", "", "", ""}, + {"", "Zeta", "", "", "", ""}, + } + + sort.Sort(keys) + expectedKeys := []string{"Alpha", "Delta", "Gamma", "Zeta"} + + for i, key := range keys { + if expectedKeys[i] != key.ID { + t.Errorf("Expected domains to be sorted %v, Got %v at index %v", expectedKeys[i], key.ID, i) + } + } +} diff --git a/client/controller/api/ps.go b/client/controller/api/ps.go index 56c7841aad..f8cbcd791a 100644 --- a/client/controller/api/ps.go +++ b/client/controller/api/ps.go @@ -12,3 +12,20 @@ type Process struct { Num int `json:"num"` State string `json:"state"` } + +type Processes []Process + +func (p Processes) Len() int { return len(p) } +func (p Processes) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p Processes) Less(i, j int) bool { return p[i].Num < p[j].Num } + +type ProcessType struct { + Type string + Processes Processes +} + +type ProcessTypes []ProcessType + +func (p ProcessTypes) Len() int { return len(p) } +func (p ProcessTypes) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p ProcessTypes) Less(i, j int) bool { return p[i].Type < p[j].Type } diff --git a/client/controller/api/ps_test.go b/client/controller/api/ps_test.go new file mode 100644 index 0000000000..fecaad2ca2 --- /dev/null +++ b/client/controller/api/ps_test.go @@ -0,0 +1,41 @@ +package api + +import ( + "sort" + "testing" +) + +func TestProcessesSorted(t *testing.T) { + processes := Processes{ + {"", "", "", "", "", "", "web", 4, "up"}, + {"", "", "", "", "", "", "web", 2, "up"}, + {"", "", "", "", "", "", "web", 3, "up"}, + {"", "", "", "", 
"", "", "web", 1, "up"}, + } + + // The API will return this sorted already, just to be sure + sort.Sort(processes) + + for i, process := range processes { + if i+1 != process.Num { + t.Errorf("Expected processes to be sorted %v, Got %v", i+1, process.Num) + } + } +} + +func TestProcessTypesSorted(t *testing.T) { + processTypes := ProcessTypes{ + {"worker", Processes{}}, + {"web", Processes{}}, + {"clock", Processes{}}, + } + + sort.Sort(processTypes) + expectedProcessTypes := []string{"clock", "web", "worker"} + + for i, processType := range processTypes { + if expectedProcessTypes[i] != processType.Type { + t.Errorf("Expected apps to be sorted %v, Got %v at index %v", expectedProcessTypes[i], processType.Type, i) + } + } +} diff --git a/client/controller/api/users.go b/client/controller/api/users.go index e83a63787d..ed64c196ad 100644 --- a/client/controller/api/users.go +++ b/client/controller/api/users.go @@ -13,3 +13,9 @@ type User struct { IsActive bool `json:"is_active"` DateJoined string `json:"date_joined"` } + +type Users []User + +func (u Users) Len() int { return len(u) } +func (u Users) Swap(i, j int) { u[i], u[j] = u[j], u[i] } +func (u Users) Less(i, j int) bool { return u[i].Username < u[j].Username } diff --git a/client/controller/api/users_test.go b/client/controller/api/users_test.go new file mode 100644 index 0000000000..c43675a458 --- /dev/null +++ b/client/controller/api/users_test.go @@ -0,0 +1,24 @@ +package api + +import ( + "sort" + "testing" +) + +func TestUsersSorted(t *testing.T) { + users := Users{ + {1, "", false, "Zulu", "", "", "", false, false, ""}, + {2, "", false, "Beta", "", "", "", false, false, ""}, + {3, "", false, "Gamma", "", "", "", false, false, ""}, + {4, "", false, "Alpha", "", "", "", false, false, ""}, + } + + sort.Sort(users) + expectedUsernames := []string{"Alpha", "Beta", "Gamma", "Zulu"} + + for i, user := range users { + if expectedUsernames[i] != user.Username { + t.Errorf("Expected users to be sorted %v, Got %v 
at index %v", expectedUsernames[i], user.Username, i) + } + } +} diff --git a/client/controller/client/client.go b/client/controller/client/client.go index d13d63bfe3..9247700cef 100644 --- a/client/controller/client/client.go +++ b/client/controller/client/client.go @@ -94,11 +94,11 @@ func (c Client) Save() error { return err } - if err = os.MkdirAll(path.Join(FindHome(), "/.deis/"), 0775); err != nil { + if err = os.MkdirAll(path.Join(FindHome(), "/.deis/"), 0700); err != nil { return err } - return ioutil.WriteFile(locateSettingsFile(), settingsContents, 0775) + return ioutil.WriteFile(locateSettingsFile(), settingsContents, 0600) } // Delete user's settings file. diff --git a/client/controller/client/http.go b/client/controller/client/http.go index c4ef10d156..4e04270e97 100644 --- a/client/controller/client/http.go +++ b/client/controller/client/http.go @@ -155,9 +155,8 @@ func checkForErrors(res *http.Response, body string) error { // CheckConnection checks that the user is connected to a network and the URL points to a valid controller. func CheckConnection(client *http.Client, controllerURL url.URL) error { errorMessage := `%s does not appear to be a valid Deis controller. 
-Make sure that the Controller URI is correct and the server is running.` - - baseURL := controllerURL.String() +Make sure that the Controller URI is correct, the server is running and +your client version is correct.` controllerURL.Path = "/v1/" @@ -171,13 +170,13 @@ Make sure that the Controller URI is correct and the server is running.` res, err := client.Do(req) if err != nil { - fmt.Printf(errorMessage+"\n", baseURL) + fmt.Printf(errorMessage+"\n", controllerURL.String()) return err } defer res.Body.Close() if res.StatusCode != 401 { - return fmt.Errorf(errorMessage, baseURL) + return fmt.Errorf(errorMessage, controllerURL.String()) } checkAPICompatibility(res.Header.Get("DEIS_API_VERSION")) diff --git a/client/controller/models/apps/apps.go b/client/controller/models/apps/apps.go index 35720c26d6..24d78abe59 100644 --- a/client/controller/models/apps/apps.go +++ b/client/controller/models/apps/apps.go @@ -11,7 +11,7 @@ import ( ) // List lists apps on a Deis controller. -func List(c *client.Client, results int) ([]api.App, int, error) { +func List(c *client.Client, results int) (api.Apps, int, error) { body, count, err := c.LimitedRequest("/v1/apps/", results) if err != nil { diff --git a/client/controller/models/apps/apps_test.go b/client/controller/models/apps/apps_test.go index 56f9553ea6..d2499f6808 100644 --- a/client/controller/models/apps/apps_test.go +++ b/client/controller/models/apps/apps_test.go @@ -288,7 +288,7 @@ func TestAppsRun(t *testing.T) { func TestAppsList(t *testing.T) { t.Parallel() - expected := []api.App{ + expected := api.Apps{ api.App{ ID: "example-go", Created: "2014-01-01T00:00:00UTC", diff --git a/client/controller/models/auth/auth_test.go b/client/controller/models/auth/auth_test.go index 40f9cd9e71..7bc2cb83de 100644 --- a/client/controller/models/auth/auth_test.go +++ b/client/controller/models/auth/auth_test.go @@ -6,6 +6,7 @@ import ( "net/http" "net/http/httptest" "net/url" + "reflect" "testing" 
"github.com/deis/deis/client/controller/client" @@ -18,6 +19,7 @@ const passwdExpected string = `{"username":"test","password":"old","new_password const regenAllExpected string = `{"all":true}` const regenUserExpected string = `{"username":"test"}` const cancelUserExpected string = `{"username":"foo"}` +const cancelAdminExpected string = `{"username":"admin"}` type fakeHTTPServer struct { regenBodyEmpty bool @@ -131,7 +133,12 @@ func (f *fakeHTTPServer) ServeHTTP(res http.ResponseWriter, req *http.Request) { res.Write(nil) } - if string(body) == cancelUserExpected && !f.cancelUsername { + if string(body) == cancelAdminExpected && !f.cancelUsername { + f.cancelUsername = true + res.WriteHeader(http.StatusConflict) + res.Write(nil) + return + } else if string(body) == cancelUserExpected && !f.cancelUsername { f.cancelUsername = true res.WriteHeader(http.StatusNoContent) res.Write(nil) @@ -251,6 +258,31 @@ func TestDelete(t *testing.T) { } } +func TestDeleteUserApp(t *testing.T) { + t.Parallel() + + handler := fakeHTTPServer{cancelUsername: false, cancelEmpty: false} + server := httptest.NewServer(&handler) + defer server.Close() + + u, err := url.Parse(server.URL) + + if err != nil { + t.Fatal(err) + } + + httpClient := client.CreateHTTPClient(false) + client := client.Client{HTTPClient: httpClient, ControllerURL: *u} + + err = Delete(&client, "admin") + // should be a 409 Conflict + + expected := fmt.Errorf("\n%s %s\n\n", "409", "Conflict") + if reflect.DeepEqual(err, expected) == false { + t.Errorf("got '%s' but expected '%s'", err, expected) + } +} + func TestRegenerate(t *testing.T) { t.Parallel() diff --git a/client/controller/models/domains/domains.go b/client/controller/models/domains/domains.go index abf64c90d1..705f76c83c 100644 --- a/client/controller/models/domains/domains.go +++ b/client/controller/models/domains/domains.go @@ -9,7 +9,7 @@ import ( ) // List domains registered with an app. 
-func List(c *client.Client, appID string, results int) ([]api.Domain, int, error) { +func List(c *client.Client, appID string, results int) (api.Domains, int, error) { u := fmt.Sprintf("/v1/apps/%s/domains/", appID) body, count, err := c.LimitedRequest(u, results) diff --git a/client/controller/models/domains/domains_test.go b/client/controller/models/domains/domains_test.go index 57b3459de0..e4cab5910c 100644 --- a/client/controller/models/domains/domains_test.go +++ b/client/controller/models/domains/domains_test.go @@ -86,7 +86,7 @@ func (fakeHTTPServer) ServeHTTP(res http.ResponseWriter, req *http.Request) { func TestDomainsList(t *testing.T) { t.Parallel() - expected := []api.Domain{ + expected := api.Domains{ api.Domain{ App: "example-go", Created: "2014-01-01T00:00:00UTC", diff --git a/client/controller/models/keys/keys.go b/client/controller/models/keys/keys.go index 45981fd920..2e3ade3f85 100644 --- a/client/controller/models/keys/keys.go +++ b/client/controller/models/keys/keys.go @@ -9,7 +9,7 @@ import ( ) // List keys on a controller. 
-func List(c *client.Client, results int) ([]api.Key, int, error) { +func List(c *client.Client, results int) (api.Keys, int, error) { body, count, err := c.LimitedRequest("/v1/keys/", results) if err != nil { diff --git a/client/controller/models/keys/keys_test.go b/client/controller/models/keys/keys_test.go index f435cd7e81..b44667b0a2 100644 --- a/client/controller/models/keys/keys_test.go +++ b/client/controller/models/keys/keys_test.go @@ -88,7 +88,7 @@ func (fakeHTTPServer) ServeHTTP(res http.ResponseWriter, req *http.Request) { func TestKeysList(t *testing.T) { t.Parallel() - expected := []api.Key{ + expected := api.Keys{ api.Key{ Created: "2014-01-01T00:00:00UTC", ID: "test@example.com", diff --git a/client/controller/models/ps/ps.go b/client/controller/models/ps/ps.go index 3abf57f464..8a28b787f4 100644 --- a/client/controller/models/ps/ps.go +++ b/client/controller/models/ps/ps.go @@ -10,7 +10,7 @@ import ( ) // List an app's processes. -func List(c *client.Client, appID string, results int) ([]api.Process, int, error) { +func List(c *client.Client, appID string, results int) (api.Processes, int, error) { u := fmt.Sprintf("/v1/apps/%s/containers/", appID) body, count, err := c.LimitedRequest(u, results) @@ -69,12 +69,17 @@ func Restart(c *client.Client, appID string, procType string, num int) ([]api.Pr } // ByType organizes processes of an app by process type. 
-func ByType(processes []api.Process) map[string][]api.Process { - psMap := make(map[string][]api.Process) +func ByType(processes api.Processes) api.ProcessTypes { + psMap := make(map[string]api.Processes) for _, ps := range processes { psMap[ps.Type] = append(psMap[ps.Type], ps) } - return psMap + processTypes := []api.ProcessType{} + for processType, processes := range psMap { + processTypes = append(processTypes, api.ProcessType{processType, processes}) + } + + return processTypes } diff --git a/client/controller/models/ps/ps_test.go b/client/controller/models/ps/ps_test.go index 0b85ad92de..e9bd38ee34 100644 --- a/client/controller/models/ps/ps_test.go +++ b/client/controller/models/ps/ps_test.go @@ -135,7 +135,7 @@ func (fakeHTTPServer) ServeHTTP(res http.ResponseWriter, req *http.Request) { func TestProcessesList(t *testing.T) { t.Parallel() - expected := []api.Process{ + expected := api.Processes{ api.Process{ Owner: "test", App: "example-go", diff --git a/client/controller/models/users/users.go b/client/controller/models/users/users.go index ea06bf8028..8113da68aa 100644 --- a/client/controller/models/users/users.go +++ b/client/controller/models/users/users.go @@ -8,7 +8,7 @@ import ( ) // List users registered with the controller. 
-func List(c *client.Client, results int) ([]api.User, int, error) { +func List(c *client.Client, results int) (api.Users, int, error) { body, count, err := c.LimitedRequest("/v1/users/", results) if err != nil { diff --git a/client/controller/models/users/users_test.go b/client/controller/models/users/users_test.go index f65072c85c..144089d350 100644 --- a/client/controller/models/users/users_test.go +++ b/client/controller/models/users/users_test.go @@ -54,7 +54,7 @@ func (fakeHTTPServer) ServeHTTP(res http.ResponseWriter, req *http.Request) { func TestUsersList(t *testing.T) { t.Parallel() - expected := []api.User{ + expected := api.Users{ api.User{ ID: 1, LastLogin: "2014-10-19T22:01:00.601Z", diff --git a/client/deis-version b/client/deis-version index a4ab692a5f..80138e7146 100644 --- a/client/deis-version +++ b/client/deis-version @@ -1 +1 @@ -1.13.0-dev +1.13.4 diff --git a/client/deis.go b/client/deis.go index ab9363d772..977875524d 100644 --- a/client/deis.go +++ b/client/deis.go @@ -8,10 +8,11 @@ import ( "syscall" "github.com/deis/deis/client/parser" - "github.com/deis/deis/version" docopt "github.com/docopt/docopt-go" ) +const extensionPrefix = "deis-" + // main exits with the return value of Command(os.Args[1:]), deferring all logic to // a func we can test. func main() { @@ -25,6 +26,11 @@ The Deis command-line client issues API calls to a Deis controller. Usage: deis [...] +Option flags:: + + -h --help display help information + -v --version display client version + Auth commands:: register register a new user with a controller @@ -47,6 +53,7 @@ Subcommands, use 'deis help [subcommand]' to learn more:: perms manage permissions for applications git manage git for applications users manage users + version display client version Shortcut commands, use 'deis shortcuts' to see all:: @@ -64,7 +71,7 @@ Use 'git push deis master' to deploy to an application. // Reorganize some command line flags and commands. 
command, argv := parseArgs(argv) // Give docopt an optional final false arg so it doesn't call os.Exit(). - _, err := docopt.Parse(usage, []string{command}, false, version.Version, true, false) + _, err := docopt.Parse(usage, []string{command}, false, "", true, false) if err != nil { fmt.Fprintln(os.Stderr, err) @@ -107,30 +114,21 @@ Use 'git push deis master' to deploy to an application. err = parser.Git(argv) case "users": err = parser.Users(argv) + case "version": + err = parser.Version(argv) case "help": fmt.Print(usage) return 0 - case "--version": - return 0 default: env := os.Environ() - extCmd := "deis-" + command - binary, err := exec.LookPath(extCmd) + binary, err := exec.LookPath(extensionPrefix + command) if err != nil { parser.PrintUsage() return 1 } - cmdArgv := []string{extCmd} - - cmdSplit := strings.Split(argv[0], command+":") - - if len(cmdSplit) > 1 { - argv[0] = cmdSplit[1] - } - - cmdArgv = append(cmdArgv, argv...) + cmdArgv := prepareCmdArgs(command, argv) err = syscall.Exec(binary, cmdArgv, env) if err != nil { @@ -149,9 +147,12 @@ Use 'git push deis master' to deploy to an application. // expands shortcuts and formats commands to be properly routed. func parseArgs(argv []string) (string, []string) { if len(argv) == 1 { - // rearrange "deis --help" as "deis help" if argv[0] == "--help" || argv[0] == "-h" { + // rearrange "deis --help" as "deis help" argv[0] = "help" + } else if argv[0] == "--version" || argv[0] == "-v" { + // rearrange "deis --version" as "deis version" + argv[0] = "version" } } @@ -178,6 +179,18 @@ func parseArgs(argv []string) (string, []string) { return "", argv } +// split original command and pass its first element in arguments +func prepareCmdArgs(command string, argv []string) []string { + cmdArgv := []string{extensionPrefix + command} + cmdSplit := strings.Split(argv[0], command+":") + + if len(cmdSplit) > 1 { + cmdArgv = append(cmdArgv, cmdSplit[1]) + } + + return append(cmdArgv, argv[1:]...) 
+} + func replaceShortcut(command string) string { shortcuts := map[string]string{ "create": "apps:create", diff --git a/client/deis_test.go b/client/deis_test.go index 1a2f927b36..2a8cef093e 100644 --- a/client/deis_test.go +++ b/client/deis_test.go @@ -61,6 +61,32 @@ func TestCommandSplitting(t *testing.T) { } } +func TestTopLevelCommandArgsPreparing(t *testing.T) { + t.Parallel() + + command := "ssh" + argv := []string{"ssh"} + expected := []string{"deis-ssh"} + actual := prepareCmdArgs(command, argv) + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected %v, Got %v", expected, actual) + } +} + +func TestCommandWithParameterArgsPreparing(t *testing.T) { + t.Parallel() + + command := "ssh --help" + argv := []string{"ssh --help"} + expected := []string{"deis-ssh --help"} + actual := prepareCmdArgs(command, argv) + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected %v, Got %v", expected, actual) + } +} + func TestReplaceShortcutRepalce(t *testing.T) { t.Parallel() diff --git a/client/parser/version.go b/client/parser/version.go new file mode 100644 index 0000000000..023267791f --- /dev/null +++ b/client/parser/version.go @@ -0,0 +1,25 @@ +package parser + +import ( + "fmt" + + "github.com/deis/deis/version" + docopt "github.com/docopt/docopt-go" +) + +// Version displays the client version +func Version(argv []string) error { + usage := ` +Displays the client version. + +Usage: deis version + +Use 'deis help [command]' to learn more. +` + if _, err := docopt.Parse(usage, argv, true, "", false, true); err != nil { + return err + } + + fmt.Println(version.Version) + return nil +} diff --git a/client/pkg/git/git.go b/client/pkg/git/git.go index 1dae99a057..7057c68d53 100644 --- a/client/pkg/git/git.go +++ b/client/pkg/git/git.go @@ -93,6 +93,9 @@ func findRemote(host string) (string, error) { cmd := string(out) + // Strip off any trailing :port number after the host name. 
+ host = strings.Split(host, ":")[0] + for _, line := range strings.Split(cmd, "\n") { for _, remote := range strings.Split(line, " ") { if strings.Contains(remote, host) { @@ -106,5 +109,7 @@ func findRemote(host string) (string, error) { // RemoteURL returns the git URL of app. func RemoteURL(host, appID string) string { + // Strip off any trailing :port number after the host name. + host = strings.Split(host, ":")[0] return fmt.Sprintf("ssh://git@%s:2222/%s.git", host, appID) } diff --git a/client/pkg/git/git_test.go b/client/pkg/git/git_test.go index 393efc137f..36b5ffda90 100644 --- a/client/pkg/git/git_test.go +++ b/client/pkg/git/git_test.go @@ -7,9 +7,15 @@ import ( func TestRemoteURL(t *testing.T) { t.Parallel() + actual := RemoteURL("example.com", "app") expected := "ssh://git@example.com:2222/app.git" - actual := RemoteURL("example.com", "app") + if actual != expected { + t.Errorf("Expected %s, Got %s", expected, actual) + } + + actual = RemoteURL("deis.10.245.1.3.xip.io:31350", "velcro-underdog") + expected = "ssh://git@deis.10.245.1.3.xip.io:2222/velcro-underdog.git" if actual != expected { t.Errorf("Expected %s, Got %s", expected, actual) diff --git a/contrib/README.md b/contrib/README.md index 3a101e0d97..5665ebca93 100644 --- a/contrib/README.md +++ b/contrib/README.md @@ -31,13 +31,15 @@ Please add to this list by opening a pull request! 
* [deis-netstat](https://github.com/lorieri/deis-netstat) by [@lorieri](https://github.com/lorieri) - A cluster-wide netstat tool for Deis * [deis-proxy](https://github.com/lorieri/deis-proxy) by [@lorieri](https://github.com/lorieri) - A transparent proxy for Deis * [deis-store-dashboard](https://github.com/aledbf/deis/tree/optional_store_dashboard) by [@aledbf](https://github.com/aledbf) - An implementation of [ceph-dash](https://github.com/Crapworks/ceph-dash) to view `deis-store` health +* [deis-phppgadmin](https://github.com/HeheCloud/deis-phppgadmin) by [hehecloud](https://github.com/HeheCloud) - An addon (database dashboard) for deis-database (phpPgAdmin) + ### CoreOS Unit Files * [CoreOS unit files](https://github.com/ianblenke/coreos-vagrant-kitchen-sink/tree/master/cloud-init) by [@ianblenke](https://github.com/ianblenke) - Unit files to launch various services on CoreOS hosts * [Docker S3 Cleaner](https://github.com/myriadmobile/docker-s3-cleaner) by [@croemmich](https://github.com/croemmich) - Unit file to remove orphaned image layers from S3 backed private docker registries * [New Relic unit for CoreOS](https://github.com/lorieri/coreos-newrelic) by [@lorieri](https://github.com/lorieri) - A global unit to launch New Relic sysmond -* [SPM Docker Agent for CoreOS](https://github.com/sematext/spm-agent-docker/blob/master/coreos/spm-agent.service) by [@sematext](https://github.com/sematext) - A global unit to launch the agent for [SPM Performance Monitoring, Anomaly Detection and Alerting](http://sematext.com/spm/integrations/docker-monitoring.html) -* [Forwarding systemd journal to Logsene](https://github.com/sematext/spm-agent-docker/blob/master/coreos/logsene.service) by [@sematext](https://github.com/sematext) - A global unit to forward systemd journal via SSL/TLS. Note: The IP address of the CoreOS host needs to be authorized in Logsene. 
[Logsene ­Log Management & Analytics](http://www.sematext.com/logsene/) +* [Sematext Docker Agent for CoreOS](https://github.com/sematext/sematext-agent-docker/blob/master/coreos/sematext-agent.service) by [@sematext](https://github.com/sematext) - A global unit to launch the agent for [SPM Performance Monitoring, Anomaly Detection and Alerting](http://sematext.com/spm/integrations/docker-monitoring.html) +* [Forwarding systemd journal to Logsene](https://github.com/sematext/sematext-agent-docker/blob/master/coreos/logsene.service) by [@sematext](https://github.com/sematext) - A global unit to forward systemd journal via SSL/TLS. Note: The IP address of the CoreOS host needs to be authorized in Logsene. [Logsene Log Management & Analytics](http://www.sematext.com/logsene/) ### Example Applications * [Melano](https://github.com/SuaveIO/Melano) - F# "Hello World" app using the Suave framework diff --git a/contrib/aws/provision-aws-cluster.sh b/contrib/aws/provision-aws-cluster.sh index daa0c6be52..765132193b 100755 --- a/contrib/aws/provision-aws-cluster.sh +++ b/contrib/aws/provision-aws-cluster.sh @@ -62,7 +62,7 @@ COUNTER=1 INSTANCE_IDS="" until [ $(wc -w <<< $INSTANCE_IDS) -eq $DEIS_NUM_INSTANCES -a "$STACK_STATUS" = "CREATE_COMPLETE" ]; do if [ $COUNTER -gt $ATTEMPTS ]; then - echo "Provisioning instances failed (timeout, $(wc -w <<< $INSTANCE_IDS) of $DEIS_NUM_INSTANCES provisioned after 10m)" + echo "Provisioning instances failed (timeout, $(wc -w <<< $INSTANCE_IDS) of $DEIS_NUM_INSTANCES provisioned after $(expr $ATTEMPTS * $SLEEPTIME)s)" echo "Destroying stack $STACK_NAME" bailout exit 1 @@ -85,7 +85,7 @@ until [ $(wc -w <<< $INSTANCE_IDS) -eq $DEIS_NUM_INSTANCES -a "$STACK_STATUS" = --output text \ $EXTRA_AWS_CLI_ARGS) - echo "Waiting for instances to be provisioned ($STACK_STATUS, $(expr 61 - $COUNTER)0s) ..." + echo "Waiting for instances to be provisioned ($STACK_STATUS, $(expr $ATTEMPTS + 1 - $COUNTER)0s) ..."
sleep $SLEEPTIME let COUNTER=COUNTER+1 @@ -96,14 +96,14 @@ COUNTER=1 INSTANCE_STATUSES="" until [ `wc -w <<< $INSTANCE_STATUSES` -eq $DEIS_NUM_INSTANCES ]; do if [ $COUNTER -gt $ATTEMPTS ]; - then echo "Health checks not passed after 10m, giving up" + then echo "Health checks not passed after $(expr $ATTEMPTS * $SLEEPTIME)s, giving up" echo "Destroying stack $STACK_NAME" bailout exit 1 fi if [ $COUNTER -ne 1 ]; then sleep $SLEEPTIME; fi - echo "Waiting for instances to pass initial health checks ($(expr 61 - $COUNTER)0s) ..." + echo "Waiting for instances to pass initial health checks ($(expr $ATTEMPTS + 1 - $COUNTER)0s) ..." INSTANCE_STATUSES=$(aws ec2 describe-instance-status \ --filters Name=instance-status.reachability,Values=passed \ --instance-ids $INSTANCE_IDS \ diff --git a/contrib/azure/arm-template.json b/contrib/azure/arm-template.json index d95940cc01..0ae84123ea 100644 --- a/contrib/azure/arm-template.json +++ b/contrib/azure/arm-template.json @@ -28,23 +28,57 @@ "Standard_A1", "Standard_A2", "Standard_A3", + "Standard_A5", "Standard_A4", + "Standard_A6", + "Standard_A7", + "Basic_A0", + "Basic_A1", + "Basic_A2", + "Basic_A3", + "Basic_A4", + "Standard_D1_v2", + "Standard_D2_v2", + "Standard_D3_v2", + "Standard_D4_v2", + "Standard_D5_v2", + "Standard_D11_v2", + "Standard_D12_v2", + "Standard_D13_v2", + "Standard_D14_v2", + "Standard_D15_v2", + "Standard_F1", + "Standard_F2", + "Standard_F4", + "Standard_F8", + "Standard_F16", "Standard_D1", - "Standard_DS1", "Standard_D2", - "Standard_DS2", "Standard_D3", - "Standard_DS3", "Standard_D4", - "Standard_DS4", "Standard_D11", - "Standard_DS11", "Standard_D12", - "Standard_DS12", "Standard_D13", - "Standard_DS13", "Standard_D14", - "Standard_DS14" + "Standard_DS1_v2", + "Standard_DS2_v2", + "Standard_DS3_v2", + "Standard_DS4_v2", + "Standard_DS5_v2", + "Standard_DS11_v2", + "Standard_DS12_v2", + "Standard_DS13_v2", + "Standard_DS14_v2", + "Standard_DS15_v2", + "Standard_F1s", + "Standard_F2s", + 
"Standard_F4s", + "Standard_F8s", + "Standard_F16s", + "Standard_A8", + "Standard_A9", + "Standard_A10", + "Standard_A11" ], "metadata": { "description": "Instance size for the VMs" @@ -73,14 +107,14 @@ "metadata": { "description": "Number of member nodes" }, - "defaultValue": "3" + "defaultValue": 3 }, "dockerVolumeSize": { "type": "int", "metadata": { "description": "Size in GB of the Docker volume" }, - "defaultValue": "100" + "defaultValue": 100 }, "coreosVersion": { "type": "string", @@ -134,7 +168,7 @@ { "type": "Microsoft.Compute/availabilitySets", "name": "[variables('availabilitySetName')]", - "apiVersion": "2015-05-01-preview", + "apiVersion": "2016-03-30", "location": "[resourceGroup().location]", "properties": {} }, @@ -142,13 +176,13 @@ "type": "Microsoft.Storage/storageAccounts", "name": "[parameters('newStorageAccountName')]", "location": "[resourceGroup().location]", - "apiVersion": "2015-05-01-preview", + "apiVersion": "2015-06-15", "properties": { "accountType": "[parameters('storageAccountType')]" } }, { - "apiVersion": "2015-05-01-preview", + "apiVersion": "2016-06-01", "type": "Microsoft.Network/publicIPAddresses", "name": "[variables('loadBalancerPublicIPName')]", "location": "[resourceGroup().location]", @@ -160,7 +194,7 @@ } }, { - "apiVersion": "2015-05-01-preview", + "apiVersion": "2016-06-01", "type": "Microsoft.Network/publicIPAddresses", "name": "[concat(parameters('dnsPrefixNameForPublicIP'),copyIndex())]", "location": "[resourceGroup().location]", @@ -179,7 +213,7 @@ } }, { - "apiVersion": "2015-05-01-preview", + "apiVersion": "2016-06-01", "type": "Microsoft.Network/loadBalancers", "name": "[variables('loadBalancerName')]", "location": "[resourceGroup().location]", @@ -226,10 +260,10 @@ "id": "[variables('lbPoolID')]" }, "protocol": "TCP", - "frontendPort": "80", - "backendPort": "80", + "frontendPort": 80, + "backendPort": 80, "enableFloatingIP": false, - "idleTimeoutInMinutes": "10", + "idleTimeoutInMinutes": 10, "probe": { "id": 
"[variables('apiProbeID')]" } @@ -237,9 +271,6 @@ }, { "name": "[variables('loadBalancerBuilderRuleName')]", - "dependsOn": [ - "[variables('lbIPConfig')]" - ], "properties": { "frontendIPConfiguration": { "id": "[variables('lbIPConfig')]" @@ -248,10 +279,10 @@ "id": "[variables('lbPoolID')]" }, "protocol": "TCP", - "frontendPort": "2222", - "backendPort": "2222", + "frontendPort": 2222, + "backendPort": 2222, "enableFloatingIP": false, - "idleTimeoutInMinutes": "10" + "idleTimeoutInMinutes": 10 } } ], @@ -260,9 +291,9 @@ "name": "[variables('apiProbeName')]", "properties": { "protocol": "HTTP", - "port": "80", - "intervalInSeconds": "5", - "numberOfProbes": "2", + "port": 80, + "intervalInSeconds": 5, + "numberOfProbes": 2, "requestPath": "/health-check" } } @@ -274,6 +305,9 @@ "name": "[variables('virtualNetworkName')]", "location": "[resourceGroup().location]", "apiVersion": "2015-05-01-preview", + "dependsOn": [ + "[variables('nsgID')]" + ], "properties": { "addressSpace": { "addressPrefixes": [ @@ -303,7 +337,7 @@ "[concat('Microsoft.Network/virtualNetworks/', variables('virtualNetworkName'))]", "[variables('lbID')]" ], - "apiVersion": "2015-05-01-preview", + "apiVersion": "2016-06-01", "properties": { "ipConfigurations": [ { @@ -339,7 +373,7 @@ "[concat('Microsoft.Network/networkInterfaces/', 'nic', copyindex())]", "[concat('Microsoft.Compute/availabilitySets/', variables('availabilitySetName'))]" ], - "apiVersion": "2015-05-01-preview", + "apiVersion": "2016-03-30", "properties": { "availabilitySet": { "id": "[resourceId('Microsoft.Compute/availabilitySets',variables('availabilitySetName'))]" diff --git a/contrib/azure/parameters.json b/contrib/azure/parameters.json index 493b06b9aa..5409ab219d 100644 --- a/contrib/azure/parameters.json +++ b/contrib/azure/parameters.json @@ -27,7 +27,7 @@ "value": 100 }, "coreosVersion": { - "value": "835.9.0" + "value": "1068.8.0" }, "storageAccountType": { "value": "Premium_LRS" diff --git 
a/contrib/coreos/user-data.example b/contrib/coreos/user-data.example index dec1ba0f2f..5b7427024c 100644 --- a/contrib/coreos/user-data.example +++ b/contrib/coreos/user-data.example @@ -43,6 +43,10 @@ coreos: enable: false - name: docker.service drop-ins: + - name: 00-reset-environment.conf + content: | + [Service] + Environment= - name: 10-require-flannel.conf content: | [Unit] @@ -52,6 +56,10 @@ coreos: content: | [Service] Environment="DOCKER_OPTS=--insecure-registry 10.0.0.0/8 --insecure-registry 172.16.0.0/12 --insecure-registry 192.168.0.0/16 --insecure-registry 100.64.0.0/10" + - name: 60-cgroup-driver.conf + content: | + [Service] + Environment="DOCKER_CGROUPS=--exec-opt native.cgroupdriver=cgroupfs" - name: flanneld.service command: start drop-ins: @@ -59,22 +67,41 @@ coreos: content: | [Service] ExecStartPre=-/usr/bin/etcdctl mk /coreos.com/network/config '{"Network": "10.244.0.0/16", "SubnetLen": 24, "SubnetMin": "10.244.0.0", "Backend": {"Type": "vxlan"}}' - - name: graceful-deis-shutdown.service + - name: graceful-ceph-shutdown.service content: | [Unit] - Description=Clean up + Description=Ceph node clean up for Deis DefaultDependencies=no - After=fleet.service etcd2.service docker.service docker.socket deis-store-admin.service deis-store-daemon.service deis-store-volume.service deis-store-monitor.service - Requires=fleet.service etcd2.service deis-store-admin.service deis-store-daemon.service deis-store-volume.service docker.service docker.socket deis-store-monitor.service + After=fleet.service etcd2.service docker.service docker.socket deis-store-admin.service deis-store-daemon.service deis-store-volume.service deis-store-monitor.service graceful-etcd-shutdown.service + Requires=fleet.service etcd2.service docker.service docker.socket deis-store-admin.service deis-store-daemon.service deis-store-volume.service deis-store-monitor.service graceful-etcd-shutdown.service + RefuseManualStop=true [Install] WantedBy=shutdown.target halt.target 
reboot.target [Service] - ExecStop=/opt/bin/graceful-shutdown.sh --really + ExecStart=/usr/bin/docker exec deis-store-admin ceph -s + ExecStop=/opt/bin/graceful-shutdown.sh --ceph Type=oneshot TimeoutSec=1200 RemainAfterExit=yes + - name: graceful-etcd-shutdown.service + content: | + [Unit] + Description=etcd clean up for Deis + DefaultDependencies=no + After=fleet.service etcd2.service docker.service docker.socket + Requires=fleet.service etcd2.service docker.service docker.socket + RefuseManualStop=true + + [Install] + WantedBy=shutdown.target halt.target reboot.target + + [Service] + ExecStop=/opt/bin/graceful-shutdown.sh --etcd + Type=oneshot + TimeoutSec=120 + RemainAfterExit=yes - name: install-deisctl.service command: start content: | @@ -84,7 +111,7 @@ coreos: [Service] Type=oneshot - ExecStart=/usr/bin/sh -c 'curl -sSL --retry 5 --retry-delay 2 http://deis.io/deisctl/install.sh | sh -s 1.12.2' + ExecStart=/usr/bin/sh -c 'curl -sSL --retry 5 --retry-delay 2 http://deis.io/deisctl/install.sh | sh -s 1.13.4' - name: increase-nf_conntrack-connections.service command: start content: | @@ -98,7 +125,7 @@ coreos: write_files: - path: /etc/deis-release content: | - DEIS_RELEASE=v1.12.2 + DEIS_RELEASE=v1.13.4 - path: /etc/motd content: " \e[31m* * \e[34m* \e[32m***** \e[39mddddd eeeeeee iiiiiii ssss\n\e[31m* * \e[34m* * \e[32m* * \e[39md d e e i s s\n \e[31m* * \e[34m***** \e[32m***** \e[39md d e i s\n\e[32m***** \e[31m* * \e[34m* \e[39md d e i s\n\e[32m* * \e[31m* * \e[34m* * \e[39md d eee i sss\n\e[32m***** \e[31m* * \e[34m***** \e[39md d e i s\n \e[34m* \e[32m***** \e[31m* * \e[39md d e i s\n \e[34m* * \e[32m* * \e[31m* * \e[39md d e e i s s\n\e[34m***** \e[32m***** \e[31m* * \e[39mddddd eeeeeee iiiiiii ssss\n\n\e[39mWelcome to Deis\t\t\tPowered by Core\e[38;5;45mO\e[38;5;206mS\e[39m\n" - path: /etc/profile.d/nse-function.sh @@ -187,58 +214,67 @@ write_files: permissions: '0755' content: | #!/usr/bin/bash - if [ "$1" != '--really' ]; then - echo "command must be 
run as: $0 --really" - exit 1 - fi - # procedure requires the store-admin - ADMIN_RUNNING=$(docker inspect --format="{{ .State.Running }}" deis-store-admin) - if [ $? -eq 1 ] || [ "$ADMIN_RUNNING" == "false" ]; then - echo "deis-store-admin container is required for graceful shutdown" - exit 2 - fi - set -e -x -o pipefail - # determine osd id - CURRENT_STATUS=$(docker exec deis-store-admin ceph health | awk '{print $1}') - OSD_HOSTS=($(etcdctl ls /deis/store/hosts/| awk -F'/' '{print $5}')) - for HOST in "${OSD_HOSTS[@]}" - do - PUBLIC_IP=$(fleetctl list-machines -fields="machine,ip" -full -no-legend| grep `cat /etc/machine-id` | awk '{print $2}') - if [ "$HOST" = "$PUBLIC_IP" ] ; then - OSD_ID=$(etcdctl get /deis/store/osds/$PUBLIC_IP) - break - fi - done - # if we own an osd and its healthy, try to gracefully remove it - if [ ! -z "$OSD_ID" ] && [[ "$CURRENT_STATUS" == *"HEALTH_OK"* ]] && [ ${#OSD_HOSTS[@]} -gt "3" ]; then - docker exec deis-store-admin ceph osd out $OSD_ID - sleep 30 - TIMEWAITED=0 - until [[ $(docker exec deis-store-admin ceph health) == *"HEALTH_OK"* ]] + ceph_shutdown () { + # determine osd id + OSD_HOSTS=($(etcdctl ls /deis/store/hosts/| awk -F'/' '{print $5}')) + for HOST in "${OSD_HOSTS[@]}" do - if [ $TIMEWAITED -gt "1200" ] - then - echo "ceph graceful removal timeout exceeded" + PUBLIC_IP=$(etcdctl member list|grep `cat /etc/machine-id`| awk '{print $3}'| grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}') + if [ "$HOST" = "$PUBLIC_IP" ] ; then + OSD_ID=$(etcdctl get /deis/store/osds/$PUBLIC_IP) break fi - echo "waiting" && sleep 5 - TIMEWAITED=$((TIMEWAITED+5)) done - docker stop deis-store-daemon - docker exec deis-store-admin ceph osd crush remove osd.$OSD_ID - docker exec deis-store-admin ceph auth del osd.$OSD_ID - docker exec deis-store-admin ceph osd rm $OSD_ID - etcdctl rm /deis/store/osds/$PUBLIC_IP - etcdctl rm /deis/store/hosts/$PUBLIC_IP && sleep 10 - # remove ceph mon - docker stop deis-store-monitor || true - docker 
exec deis-store-admin ceph mon remove `hostname -f` # fixme - docker stop deis-store-metadata || true - fi + # if we own an osd and its healthy, try to gracefully remove it + if [ ! -z "$OSD_ID" ] && [ ${#OSD_HOSTS[@]} -gt "3" ]; then + ADMIN_RUNNING=$(docker inspect --format="{{ .State.Running }}" deis-store-admin) + if [ $? -eq 1 ] || [ "$ADMIN_RUNNING" == "false" ]; then + echo "deis-store-admin container is required for graceful shutdown" + exit 2 + fi + set -e -x -o pipefail + CURRENT_STATUS=$(docker exec deis-store-admin ceph health | awk '{print $1}') + if [[ "$CURRENT_STATUS" != *"HEALTH_OK"* ]]; then + echo "Ceph cluster must be healthy to perform graceful removal" + exit 3 + fi - # removing the node from etcd - NODE=$(etcdctl member list | grep `cat /etc/machine-id` | cut -d ':' -f 1) - etcdctl member remove $NODE + docker exec deis-store-admin ceph osd out $OSD_ID + sleep 30 + TIMEWAITED=0 + until [[ $(docker exec deis-store-admin ceph health) == *"HEALTH_OK"* ]] + do + if [ $TIMEWAITED -gt "1200" ] + then + echo "ceph graceful removal timeout exceeded" + break + fi + echo "waiting" && sleep 5 + TIMEWAITED=$((TIMEWAITED+5)) + done + docker stop deis-store-daemon + docker exec deis-store-admin ceph osd crush remove osd.$OSD_ID + docker exec deis-store-admin ceph auth del osd.$OSD_ID + docker exec deis-store-admin ceph osd rm $OSD_ID + etcdctl rm /deis/store/osds/$PUBLIC_IP + etcdctl rm /deis/store/hosts/$PUBLIC_IP && sleep 10 + # remove ceph mon + docker stop deis-store-monitor || true + docker exec deis-store-admin ceph mon remove `hostname -f` # fixme + docker stop deis-store-metadata || true + fi + } + etcd_shutdown () { + set -e -x -o pipefail + # removing the node from etcd + NODE=$(etcdctl member list | grep `cat /etc/machine-id` | cut -d ':' -f 1) + etcdctl member remove $NODE + } + if [ "$1" == "--ceph" ]; then + ceph_shutdown + elif [ "$1" == "--etcd" ]; then + etcd_shutdown + fi - path: /opt/bin/wupiao permissions: '0755' content: | @@ -249,31 
+285,3 @@ write_files: sleep 1 && echo .; done; exit $? - - path: /opt/bin/download-k8s-binary - permissions: '0755' - content: | - #!/usr/bin/env bash - export K8S_VERSION="v1.0.1" - mkdir -p /opt/bin - FILE=$1 - if [ ! -f /opt/bin/$FILE ]; then - curl -sSL -o /opt/bin/$FILE https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/$FILE - chmod +x /opt/bin/$FILE - else - # we check the version of the binary - INSTALLED_VERSION=$(/opt/bin/$FILE --version) - MATCH=$(echo "${INSTALLED_VERSION}" | grep -c "${K8S_VERSION}") - if [ $MATCH -eq 0 ]; then - # the version is different - curl -sSL -o /opt/bin/$FILE https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/$FILE - chmod +x /opt/bin/$FILE - fi - fi - - path: /opt/bin/scheduler-policy.json - content: | - { - "kind": "Policy", - "apiVersion": "v1", - "predicates": [{"name": "PodFitsPorts"},{"name": "PodFitsResources"},{"name": "NoDiskConflict"},{"name": "MatchNodeSelector"},{"name": "HostName"}], - "priorities": [{"name": "LeastRequestedPriority","weight": 1},{"name": "BalancedResourceAllocation","weight": 1},{"name": "ServiceSpreadingPriority","weight": 2},{"name": "EqualPriority","weight": 1}] - } diff --git a/contrib/linode/apply-firewall.py b/contrib/linode/apply-firewall.py index f1c05ecc64..65309f2b31 100755 --- a/contrib/linode/apply-firewall.py +++ b/contrib/linode/apply-firewall.py @@ -4,60 +4,60 @@ Usage: apply-firewall.py """ -import os import re import string import argparse +import threading from threading import Thread import uuid -import colorama -from colorama import Fore, Style import paramiko import requests import sys import yaml +from linodeapi import LinodeApiCommand +import linodeutils -def get_nodes_from_args(args): - if args.discovery_url is not None: - return get_nodes_from_discovery_url(args.discovery_url) +class FirewallCommand(LinodeApiCommand): - return get_nodes_from_discovery_url(get_discovery_url_from_user_data()) + 
def get_nodes_from_args(self): + if not self.discovery_url: + self.discovery_url = self.get_discovery_url_from_user_data() + return self.get_nodes_from_discovery_url(self.discovery_url) + + def get_nodes_from_discovery_url(self, discovery_url): + try: + nodes = [] + json = requests.get(discovery_url).json() + discovery_nodes = json['node']['nodes'] + for node in discovery_nodes: + value = node['value'] + ip = re.search('([0-9]{1,3}\.){3}[0-9]{1,3}', value).group(0) + nodes.append(ip) + return nodes + except: + raise IOError('Could not load nodes from discovery url ' + discovery_url) -def get_nodes_from_discovery_url(discovery_url): - try: - nodes = [] - json = requests.get(discovery_url).json() - discovery_nodes = json['node']['nodes'] - for node in discovery_nodes: - value = node['value'] - ip = re.search('([0-9]{1,3}\.){3}[0-9]{1,3}', value).group(0) - nodes.append(ip) - return nodes - except: - raise IOError('Could not load nodes from discovery url ' + discovery_url) - - -def get_discovery_url_from_user_data(): - name = 'linode-user-data.yaml' - log_info('Loading discovery url from ' + name) - try: - current_dir = os.path.dirname(__file__) - user_data_file = file(os.path.abspath(os.path.join(current_dir, name)), 'r') - user_data_yaml = yaml.safe_load(user_data_file) - return user_data_yaml['coreos']['etcd2']['discovery'] - except: - raise IOError('Could not load discovery url from ' + name) + + def get_discovery_url_from_user_data(self): + name = 'linode-user-data.yaml' + linodeutils.log_info('Loading discovery url from ' + name) + try: + user_data_file = linodeutils.get_file(name) + user_data_yaml = yaml.safe_load(user_data_file) + return user_data_yaml['coreos']['etcd2']['discovery'] + except: + raise IOError('Could not load discovery url from ' + name) -def validate_ip_address(ip): - return True if re.match('([0-9]{1,3}\.){3}[0-9]{1,3}', ip) else False + def validate_ip_address(self, ip): + return True if re.match('([0-9]{1,3}\.){3}[0-9]{1,3}', ip) else False 
-def get_firewall_contents(node_ips, private=False): - rules_template_text = """*filter + def get_firewall_contents(self, node_ips): + rules_template_text = """*filter :INPUT DROP [0:0] :FORWARD DROP [0:0] :OUTPUT ACCEPT [0:0] @@ -78,137 +78,141 @@ def get_firewall_contents(node_ips, private=False): # Allow connections from docker container -A Firewall-INPUT -i docker0 -j ACCEPT # Accept ssh, http, https and git --A Firewall-INPUT -m conntrack --ctstate NEW -m multiport$multiport_private -p tcp --dports 22,2222,80,443 -j ACCEPT +-A Firewall-INPUT -m conntrack --ctstate NEW -m multiport$multiport_private -p tcp --dports 22,2222,80,443$add_new_nodes -j ACCEPT # Log and drop everything else -A Firewall-INPUT -j REJECT COMMIT """ - multiport_private = ' -s 192.168.0.0/16' if private else '' - - rules_template = string.Template(rules_template_text) - return rules_template.substitute(node_ips=string.join(node_ips, ','), multiport_private=multiport_private) - - -def apply_rules_to_all(host_ips, rules, private_key): - pkey = detect_and_create_private_key(private_key) - - threads = [] - for ip in host_ips: - t = Thread(target=apply_rules, args=(ip, rules, pkey)) - t.setDaemon(False) - t.start() - threads.append(t) - for thread in threads: - thread.join() - - -def detect_and_create_private_key(private_key): - private_key_text = private_key.read() - private_key.seek(0) - if '-----BEGIN RSA PRIVATE KEY-----' in private_key_text: - return paramiko.RSAKey.from_private_key(private_key) - elif '-----BEGIN DSA PRIVATE KEY-----' in private_key_text: - return paramiko.DSSKey.from_private_key(private_key) - else: - raise ValueError('Invalid private key file ' + private_key.name) - - -def apply_rules(host_ip, rules, private_key): - # connect to the server via ssh - ssh = paramiko.SSHClient() - ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - ssh.connect(host_ip, username='core', allow_agent=False, look_for_keys=False, pkey=private_key) - - # copy the rules to the temp 
directory - temp_file = '/tmp/' + str(uuid.uuid4()) - - ssh.open_sftp() - sftp = ssh.open_sftp() - sftp.open(temp_file, 'w').write(rules) - - # move the rules in to place and enable and run the iptables-restore.service - commands = [ - 'sudo mv ' + temp_file + ' /var/lib/iptables/rules-save', - 'sudo chown root:root /var/lib/iptables/rules-save', - 'sudo systemctl enable iptables-restore.service', - 'sudo systemctl start iptables-restore.service' - ] - - for command in commands: - stdin, stdout, stderr = ssh.exec_command(command) - stdout.channel.recv_exit_status() - - ssh.close() - - log_success('Applied rule to ' + host_ip) + multiport_private = ' -s 192.168.0.0/16' if self.private else '' + add_new_nodes = ',2379,2380' if self.adding_new_nodes else '' + + rules_template = string.Template(rules_template_text) + return rules_template.substitute(node_ips=string.join(node_ips, ','), multiport_private=multiport_private, add_new_nodes=add_new_nodes) + + + def apply_rules_to_all(self, host_ips, rules): + pkey = self.detect_and_create_private_key() + + threads = [] + for ip in host_ips: + t = Thread(target=self.apply_rules, args=(ip, rules, pkey)) + t.setDaemon(False) + t.start() + threads.append(t) + for thread in threads: + thread.join() + + + def detect_and_create_private_key(self): + private_key_text = self.private_key.read() + self.private_key.seek(0) + if '-----BEGIN RSA PRIVATE KEY-----' in private_key_text: + return paramiko.RSAKey.from_private_key(self.private_key) + elif '-----BEGIN DSA PRIVATE KEY-----' in private_key_text: + return paramiko.DSSKey.from_private_key(self.private_key) + else: + raise ValueError('Invalid private key file ' + self.private_key.name) + + + def apply_rules(self, host_ip, rules, private_key): + # connect to the server via ssh + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh.connect(host_ip, username='core', allow_agent=False, look_for_keys=False, pkey=private_key) + + # copy the rules to 
the temp directory + temp_file = '/tmp/' + str(uuid.uuid4()) + + ssh.open_sftp() + sftp = ssh.open_sftp() + sftp.open(temp_file, 'w').write(rules) + + # move the rules in to place and enable and run the iptables-restore.service + commands = [ + 'sudo mv ' + temp_file + ' /var/lib/iptables/rules-save', + 'sudo chown root:root /var/lib/iptables/rules-save', + 'sudo systemctl enable iptables-restore.service', + 'sudo systemctl start iptables-restore.service' + ] + + for command in commands: + stdin, stdout, stderr = ssh.exec_command(command) + stdout.channel.recv_exit_status() + + ssh.close() + + linodeutils.log_success('Applied rule to ' + host_ip) + + def acquire_linode_ips(self): + linodeutils.log_info('Getting info for Linodes from display group: ' + self.node_display_group) + deis_grp = self.request('linode.list') + deis_linodeids = [l.get('LINODEID','') for l in deis_grp if l.get('LPM_DISPLAYGROUP', '') == self.node_display_group] + deis_grp_ips = self.request('linode.ip.list') + self.deis_privateips = [ip.get('IPADDRESS','') for ip in deis_grp_ips if (ip.get('LINODEID','') in deis_linodeids) and (ip.get('ISPUBLIC', 1) == 0)] + self.deis_publicips = [ip.get('IPADDRESS','') for ip in deis_grp_ips if (ip.get('LINODEID','') in deis_linodeids) and (ip.get('ISPUBLIC', 0) == 1)] + + def run(self): + #NOTE: defaults to using display group, then manual input (via nodes/hosts), then discovery_url + if self.node_display_group: + self.acquire_linode_ips() + nodes = self.deis_privateips + hosts = self.deis_publicips + else: + nodes = self.nodes if self.nodes is not None else self.get_nodes_from_args() + hosts = self.hosts if self.hosts is not None else nodes + + node_ips = [] + for ip in nodes: + if self.validate_ip_address(ip): + node_ips.append(ip) + else: + linodeutils.log_warning('Invalid IP will not be added to security group: ' + ip) + + if not len(node_ips) > 0: + raise ValueError('No valid IP addresses in security group.') + + host_ips = [] + for ip in hosts: + if 
self.validate_ip_address(ip): + host_ips.append(ip) + else: + linodeutils.log_warning('Host has invalid IP address: ' + ip) + + if not len(host_ips) > 0: + raise ValueError('No valid host addresses.') + + linodeutils.log_info('Generating iptables rules...') + rules = self.get_firewall_contents(node_ips) + linodeutils.log_success('Generated rules:') + linodeutils.log_debug(rules) + + linodeutils.log_info('Applying rules...') + self.apply_rules_to_all(host_ips, rules) + linodeutils.log_success('Done!') def main(): - colorama.init() + linodeutils.init() parser = argparse.ArgumentParser(description='Apply a "Security Group" to a Deis cluster') + parser.add_argument('--api-key', dest='linode_api_key', help='Linode API Key') parser.add_argument('--private-key', required=True, type=file, dest='private_key', help='Cluster SSH Private Key') parser.add_argument('--private', action='store_true', dest='private', help='Only allow access to the cluster from the private network') + parser.add_argument('--adding-new-nodes', action='store_true', dest='adding_new_nodes', help='When adding new nodes to existing cluster, allows access to etcd') parser.add_argument('--discovery-url', dest='discovery_url', help='Etcd discovery url') - parser.add_argument('--hosts', nargs='+', dest='hosts', help='The IP addresses of the hosts to apply rules to') + parser.add_argument('--display-group', required=False, dest='node_display_group', help='Display group (used for Linode IP discovery).') + parser.add_argument('--hosts', nargs='+', dest='hosts', help='The public IP addresses of the hosts to apply rules to (for ssh)') + parser.add_argument('--nodes', nargs='+', dest='nodes', help='The private IP addresses of the hosts (for iptable setup)') + parser.set_defaults(cmd=FirewallCommand) + args = parser.parse_args() - - nodes = get_nodes_from_args(args) - hosts = args.hosts if args.hosts is not None else nodes - - node_ips = [] - for ip in nodes: - if validate_ip_address(ip): - node_ips.append(ip) - 
else: - log_warning('Invalid IP will not be added to security group: ' + ip) - - if not len(node_ips) > 0: - raise ValueError('No valid IP addresses in security group.') - - host_ips = [] - for ip in hosts: - if validate_ip_address(ip): - host_ips.append(ip) - else: - log_warning('Host has invalid IP address: ' + ip) - - if not len(host_ips) > 0: - raise ValueError('No valid host addresses.') - - log_info('Generating iptables rules...') - rules = get_firewall_contents(node_ips, args.private) - log_success('Generated rules:') - log_debug(rules) - - log_info('Applying rules...') - apply_rules_to_all(host_ips, rules, args.private_key) - log_success('Done!') - - -def log_debug(message): - print(Style.DIM + Fore.MAGENTA + message + Fore.RESET + Style.RESET_ALL) - - -def log_info(message): - print(Fore.CYAN + message + Fore.RESET) - - -def log_warning(message): - print(Fore.YELLOW + message + Fore.RESET) - - -def log_success(message): - print(Style.BRIGHT + Fore.GREEN + message + Fore.RESET + Style.RESET_ALL) - - -def log_error(message): - print(Style.BRIGHT + Fore.RED + message + Fore.RESET + Style.RESET_ALL) - + cmd = args.cmd(args) + args.cmd(args).run() + if __name__ == "__main__": try: main() except Exception as e: - log_error(e.message) + linodeutils.log_error(e.message) sys.exit(1) diff --git a/contrib/linode/create-linode-user-data.py b/contrib/linode/create-linode-user-data.py index 51760ed5a6..23e6a911ad 100755 --- a/contrib/linode/create-linode-user-data.py +++ b/contrib/linode/create-linode-user-data.py @@ -6,39 +6,40 @@ """ import base64 import sys -import os -import collections import argparse import yaml -import colorama -from colorama import Fore, Style import requests +import linodeutils -def combine_dicts(orig_dict, new_dict): - for key, val in new_dict.iteritems(): - if isinstance(val, collections.Mapping): - tmp = combine_dicts(orig_dict.get(key, {}), val) - orig_dict[key] = tmp - elif isinstance(val, list): - orig_dict[key] = (orig_dict.get(key, []) 
+ val) - else: - orig_dict[key] = new_dict[key] - return orig_dict +def validate_public_key(key): + try: + type, key_string, comment = key.split() + data = base64.decodestring(key_string) + return data[4:11] == type + except: + return False -def get_file(name, mode="r", abspath=False): - current_dir = os.path.dirname(__file__) +def generate_etcd_token(): + linodeutils.log_info('Generating new Etcd token...') + data = requests.get('https://discovery.etcd.io/new').text + token = data.replace('https://discovery.etcd.io/', '') + linodeutils.log_success('Generated new token: ' + token) + return token - if abspath: - return file(os.path.abspath(os.path.join(current_dir, name)), mode) - else: - return file(os.path.join(current_dir, name), mode) + +def validate_etcd_token(token): + try: + int(token, 16) + return True + except: + return False def main(): - colorama.init() + linodeutils.init() parser = argparse.ArgumentParser(description='Create Linode User Data') parser.add_argument('--public-key', action='append', required=True, type=file, dest='public_key_files', help='Authorized SSH Keys') @@ -58,14 +59,14 @@ def main(): if validate_public_key(public_key): public_keys.append(public_key) else: - log_warning('Invalid public key: ' + public_key_file.name) + linodeutils.log_warning('Invalid public key: ' + public_key_file.name) if not len(public_keys) > 0: raise ValueError('Must supply at least one valid public key') - linode_user_data = get_file("linode-user-data.yaml", "w", True) - linode_template = get_file("linode-user-data-template.yaml") - coreos_template = get_file("../coreos/user-data.example") + linode_user_data = linodeutils.get_file("linode-user-data.yaml", "w", True) + linode_template = linodeutils.get_file("linode-user-data-template.yaml") + coreos_template = linodeutils.get_file("../coreos/user-data.example") coreos_template_string = coreos_template.read() coreos_template_string = coreos_template_string.replace('#DISCOVERY_URL', 'https://discovery.etcd.io/' + 
str(etcd_token)) @@ -73,60 +74,18 @@ def main(): configuration_linode_template = yaml.safe_load(linode_template) configuration_coreos_template = yaml.safe_load(coreos_template_string) - configuration = combine_dicts(configuration_coreos_template, configuration_linode_template) + configuration = linodeutils.combine_dicts(configuration_coreos_template, configuration_linode_template) configuration['ssh_authorized_keys'] = public_keys dump = yaml.dump(configuration, default_flow_style=False, default_style='|') with linode_user_data as outfile: outfile.write("#cloud-config\n\n" + dump) - log_success('Wrote Linode user data to ' + linode_user_data.name) - - -def validate_public_key(key): - try: - type, key_string, comment = key.split() - data = base64.decodestring(key_string) - return data[4:11] == type - except: - return False - - -def generate_etcd_token(): - log_info('Generating new Etcd token...') - data = requests.get('https://discovery.etcd.io/new').text - token = data.replace('https://discovery.etcd.io/', '') - log_success('Generated new token: ' + token) - return token - - -def validate_etcd_token(token): - try: - int(token, 16) - return True - except: - return False - - -def log_info(message): - print(Fore.CYAN + message + Fore.RESET) - - -def log_warning(message): - print(Fore.YELLOW + message + Fore.RESET) - - -def log_success(message): - print(Style.BRIGHT + Fore.GREEN + message + Fore.RESET + Style.RESET_ALL) - - -def log_error(message): - print(Style.BRIGHT + Fore.RED + message + Fore.RESET + Style.RESET_ALL) - + linodeutils.log_success('Wrote Linode user data to ' + linode_user_data.name) if __name__ == "__main__": try: main() except Exception as e: - log_error(e.message) + linodeutils.log_error(e.message) sys.exit(1) diff --git a/contrib/linode/linode-user-data-template.yaml b/contrib/linode/linode-user-data-template.yaml index d4bc052706..83aa54181f 100644 --- a/contrib/linode/linode-user-data-template.yaml +++ 
b/contrib/linode/linode-user-data-template.yaml @@ -2,7 +2,7 @@ hostname: $hostname coreos: fleet: - metadata: name=%H + metadata: name=%H,controlPlane=true,dataPlane=true,routerMesh=true units: - name: 00-eth0.network runtime: true diff --git a/contrib/linode/linodeapi.py b/contrib/linode/linodeapi.py new file mode 100644 index 0000000000..054348eba2 --- /dev/null +++ b/contrib/linode/linodeapi.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +""" +Provides a class for Linode API commands + +Usage: used by other files as a base class +""" +import requests +import threading +from colorama import Fore, Style + + +class LinodeApiCommand: + def __init__(self, arguments): + self._arguments = vars(arguments) + self._linode_api_key = arguments.linode_api_key if arguments.linode_api_key is not None else '' + + def __getattr__(self, name): + return self._arguments.get(name) + + def request(self, action, **kwargs): + data = '' + if self._linode_api_key: + kwargs['params'] = dict({'api_key': self._linode_api_key, 'api_action': action}.items() + kwargs.get('params', {}).items()) + response = requests.request('get', 'https://api.linode.com/api/', **kwargs) + + json = response.json() + errors = json.get('ERRORARRAY', []) + data = json.get('DATA') + + if len(errors) > 0: + raise IOError(str(errors)) + else: + self.info('Linode api key not provided. 
Please provide at the start of script to perform this function.') + + return data + + + def run(self): + raise NotImplementedError + + def info(self, message): + print(Fore.MAGENTA + threading.current_thread().name + ': ' + Fore.CYAN + message + Fore.RESET) + + def success(self, message): + print(Fore.MAGENTA + threading.current_thread().name + ': ' + Fore.GREEN + message + Fore.RESET) diff --git a/contrib/linode/linodeutils.py b/contrib/linode/linodeutils.py new file mode 100644 index 0000000000..6804899aa6 --- /dev/null +++ b/contrib/linode/linodeutils.py @@ -0,0 +1,79 @@ +from colorama import Fore, Style +import colorama +import collections +import os + + +def log_debug(message): + print(Style.DIM + Fore.MAGENTA + message + Fore.RESET + Style.RESET_ALL) + + +def log_info(message): + print(Fore.CYAN + message + Fore.RESET) + + +def log_warning(message): + print(Fore.YELLOW + message + Fore.RESET) + + +def log_success(message): + print(Style.BRIGHT + Fore.GREEN + message + Fore.RESET + Style.RESET_ALL) + +def log_minor_success(message): + print(Fore.GREEN + message + Fore.RESET + Style.RESET_ALL) + +def log_error(message): + print(Style.BRIGHT + Fore.RED + message + Fore.RESET + Style.RESET_ALL) + + +''' See provision-cluster.py in method ProvisionCommand._report_created for + an example of use. Each row should be a string tuple. rows is a list of + tuples. 
''' +def log_table(rows, header_msg, footer_msg): + + # set up the report constants + divider = Style.BRIGHT + Fore.MAGENTA + ('=' * 109) + Fore.RESET + Style.RESET_ALL + column_format = " {:<20} {:<20} {:<20} {:<20} {:<12} {:>8}" + formatted_header = column_format.format(*('HOSTNAME', 'PUBLIC IP', 'PRIVATE IP', 'GATEWAY', 'DC', 'PLAN')) + + # display the report + print('') + print(divider) + print(divider) + print('') + print(Style.BRIGHT + Fore.LIGHTGREEN_EX + header_msg + Fore.RESET + Style.RESET_ALL) + print('') + print(Style.BRIGHT + Fore.CYAN + formatted_header + Fore.RESET + Style.RESET_ALL) + for row in rows: + print(Fore.CYAN + column_format.format(*row) + Fore.RESET) + print('') + print('') + print(Fore.LIGHTYELLOW_EX + footer_msg + Fore.RESET) + print(divider) + print(divider) + print('') + + +def combine_dicts(orig_dict, new_dict): + for key, val in new_dict.iteritems(): + if isinstance(val, collections.Mapping): + tmp = combine_dicts(orig_dict.get(key, {}), val) + orig_dict[key] = tmp + elif isinstance(val, list): + orig_dict[key] = (orig_dict.get(key, []) + val) + else: + orig_dict[key] = new_dict[key] + return orig_dict + + +def get_file(name, mode="r", abspath=False): + current_dir = os.path.dirname(__file__) + + if abspath: + return file(os.path.abspath(os.path.join(current_dir, name)), mode) + else: + return file(os.path.join(current_dir, name), mode) + + +def init(): + colorama.init() diff --git a/contrib/linode/provision-linode-cluster.py b/contrib/linode/provision-linode-cluster.py index a89b5edf5b..732e809cbb 100755 --- a/contrib/linode/provision-linode-cluster.py +++ b/contrib/linode/provision-linode-cluster.py @@ -12,41 +12,8 @@ import sys import paramiko -import requests -import colorama -from colorama import Fore, Style - - -class LinodeApiCommand: - def __init__(self, arguments): - self._arguments = vars(arguments) - self._linode_api_key = arguments.linode_api_key - - def __getattr__(self, name): - return self._arguments.get(name) - - def 
request(self, action, **kwargs): - kwargs['params'] = dict({'api_key': self._linode_api_key, 'api_action': action}.items() + kwargs.get('params', {}).items()) - response = requests.request('get', 'https://api.linode.com/api/', **kwargs) - - json = response.json() - errors = json.get('ERRORARRAY', []) - data = json.get('DATA') - - if len(errors) > 0: - raise IOError(str(errors)) - - return data - - def run(self): - raise NotImplementedError - - def info(self, message): - print(Fore.MAGENTA + threading.current_thread().name + ': ' + Fore.CYAN + message + Fore.RESET) - - def success(self, message): - print(Fore.MAGENTA + threading.current_thread().name + ': ' + Fore.GREEN + message + Fore.RESET) - +from linodeapi import LinodeApiCommand +import linodeutils class ProvisionCommand(LinodeApiCommand): _created_linodes = [] @@ -81,31 +48,9 @@ def _report_created(self): ips.append(linode['public']) firewall_command = './apply-firewall.py --private-key /path/to/key/deis --hosts ' + string.join(ips, ' ') - - # set up the report constants - divider = Style.BRIGHT + Fore.MAGENTA + ('=' * 109) + Fore.RESET + Style.RESET_ALL - column_format = " {:<20} {:<20} {:<20} {:<20} {:<12} {:>8}" - formatted_header = column_format.format(*('HOSTNAME', 'PUBLIC IP', 'PRIVATE IP', 'GATEWAY', 'DC', 'PLAN')) - - # display the report - print('') - print(divider) - print(divider) - print('') - print(Style.BRIGHT + Fore.LIGHTGREEN_EX + ' Successfully provisioned ' + str(self.num_nodes) + ' nodes!' 
+ Fore.RESET + Style.RESET_ALL) - print('') - print(Style.BRIGHT + Fore.CYAN + formatted_header + Fore.RESET + Style.RESET_ALL) - for row in rows: - print(Fore.CYAN + column_format.format(*row) + Fore.RESET) - print('') - print('') - print(Fore.LIGHTYELLOW_EX + ' Finish up your installation by securing your cluster with the following command:' + Fore.RESET) - print('') - print(' ' + firewall_command) - print('') - print(divider) - print(divider) - print('') + header_msg = ' Successfully provisioned ' + str(self.num_nodes) + ' nodes!' + footer_msg = ' Finish up your installation by securing your cluster with the following command:\n ' + firewall_command + '\n' + linodeutils.log_table(rows, header_msg, footer_msg) def _get_plan(self): if self._plan is None: @@ -140,8 +85,8 @@ def _check_num_nodes(self): if self.num_nodes < 1: raise ValueError('Must provision at least one node.') elif self.num_nodes < 3: - print(Fore.YELLOW + 'A Deis cluster must have 3 or more nodes, only continue if you adding to a current cluster.' + Fore.RESET) - print(Fore.YELLOW + 'Continue? (y/n)' + Fore.RESET) + linodeutils.log_warning('A Deis cluster must have 3 or more nodes, only continue if you adding to a current cluster.') + linodeutils.log_warning('Continue? 
(y/n)') accept = None while True: if accept == 'y': @@ -371,21 +316,20 @@ class ListDataCentersCommand(LinodeApiCommand): def run(self): data = self.request('avail.datacenters') column_format = "{:<4} {:}" - print(Style.BRIGHT + Fore.GREEN + column_format.format(*('ID', 'LOCATION')) + Fore.RESET + Style.RESET_ALL) + linodeutils.log_success(column_format.format(*('ID', 'LOCATION'))) for data_center in data: row = ( data_center.get('DATACENTERID'), data_center.get('LOCATION') ) - print(Fore.GREEN + column_format.format(*row) + Fore.RESET) + linodeutils.log_minor_success(column_format.format(*row)) class ListPlansCommand(LinodeApiCommand): def run(self): data = self.request('avail.linodeplans') column_format = "{:<4} {:<16} {:<8} {:<12} {:}" - print(Style.BRIGHT + Fore.GREEN + column_format.format( - *('ID', 'LABEL', 'CORES', 'RAM', 'PRICE')) + Fore.RESET + Style.RESET_ALL) + linodeutils.log_success(column_format.format(*('ID', 'LABEL', 'CORES', 'RAM', 'PRICE'))) for plan in data: row = ( plan.get('PLANID'), @@ -394,11 +338,11 @@ def run(self): str(plan.get('RAM')) + 'MB', '$' + str(plan.get('PRICE')) ) - print(Fore.GREEN + column_format.format(*row) + Fore.RESET) + linodeutils.log_minor_success(column_format.format(*row)) -if __name__ == '__main__': - colorama.init() +def main(): + linodeutils.init() parser = argparse.ArgumentParser(description='Provision Linode Deis Cluster') parser.add_argument('--api-key', required=True, dest='linode_api_key', help='Linode API Key') @@ -413,7 +357,7 @@ def run(self): help='Node data center id. 
Use list-data-centers to find the id.') provision_parser.add_argument('--cloud-config', required=False, default='linode-user-data.yaml', type=file, dest='cloud_config', help='CoreOS cloud config user-data file') - provision_parser.add_argument('--coreos-version', required=False, default='835.9.0', dest='coreos_version', + provision_parser.add_argument('--coreos-version', required=False, default='1068.8.0', dest='coreos_version', help='CoreOS version number to install') provision_parser.add_argument('--coreos-channel', required=False, default='stable', dest='coreos_channel', help='CoreOS channel to install from') @@ -426,10 +370,12 @@ def run(self): list_plans_parser.set_defaults(cmd=ListPlansCommand) args = parser.parse_args() - cmd = args.cmd(args) + args.cmd(args).run() + +if __name__ == "__main__": try: - cmd.run() + main() except Exception as e: - print(Style.BRIGHT + Fore.RED + e.message + Fore.RESET + Style.RESET_ALL) + linodeutils.log_error(e.message) sys.exit(1) diff --git a/contrib/util/reset-ps-all-apps.sh b/contrib/util/reset-ps-all-apps.sh new file mode 100644 index 0000000000..3bd05c4b0c --- /dev/null +++ b/contrib/util/reset-ps-all-apps.sh @@ -0,0 +1,12 @@ +apps=$(curl -H "Authorization: token $DEIS_TOKEN" http://$DEIS_SERVER/v1/apps | jq -r '.results | map(.id) | join(" ")') + +for app in $apps; do + echo "Resetting containers of $app" + + originalscale=$(curl -H "Authorization: token $DEIS_TOKEN" http://$DEIS_SERVER/v1/apps/$app/containers/ 2>/dev/null | jq -r '(.results) | [group_by(.type)[] | max_by(.num)] | [map(.type), map(.num)] | transpose | map([.[0], .[1] | tostring] | join("=")) | join(" ")') + zeroscale=$(curl -H "Authorization: token $DEIS_TOKEN" http://$DEIS_SERVER/v1/apps/$app/containers/ 2>/dev/null | jq -r '(.results) | unique_by(.type) | map([.type, "0"] | join("=")) | join(" ")') + + deis ps:scale $zeroscale -a $app + deis ps:scale $originalscale -a $app + echo +done diff --git a/contrib/utils.sh b/contrib/utils.sh index 
dc0465729d..44f02f45f5 100644 --- a/contrib/utils.sh +++ b/contrib/utils.sh @@ -13,5 +13,5 @@ function echo_green { } export COREOS_CHANNEL=${COREOS_CHANNEL:-stable} -export COREOS_VERSION=${COREOS_VERSION:-835.9.0} -export DEIS_RELEASE=1.13.0-dev +export COREOS_VERSION=${COREOS_VERSION:-1068.8.0} +export DEIS_RELEASE=1.13.4 diff --git a/controller/Dockerfile b/controller/Dockerfile index d4a1f40aed..dfa3e6ff80 100644 --- a/controller/Dockerfile +++ b/controller/Dockerfile @@ -1,7 +1,7 @@ -FROM alpine:3.2 +FROM alpine:3.4 # install common packages -RUN apk add --update-cache curl bash sudo && rm -rf /var/cache/apk/* +RUN apk add --no-cache curl bash sudo # install etcdctl RUN curl -sSL -o /usr/local/bin/etcdctl https://s3-us-west-2.amazonaws.com/get-deis/etcdctl-v0.4.9 \ @@ -29,4 +29,4 @@ ADD . /app # Create static resources RUN /app/manage.py collectstatic --settings=deis.settings --noinput -ENV DEIS_RELEASE 1.13.0-dev +ENV DEIS_RELEASE 1.13.4 diff --git a/controller/Makefile b/controller/Makefile index cc631abef6..c46c971553 100644 --- a/controller/Makefile +++ b/controller/Makefile @@ -38,7 +38,7 @@ run: install start dev-release: push set-image push: check-registry - docker tag -f $(IMAGE) $(DEV_IMAGE) + docker tag $(IMAGE) $(DEV_IMAGE) docker push $(DEV_IMAGE) set-image: check-deisctl diff --git a/controller/api/management/commands/load_db_state_to_etcd.py b/controller/api/management/commands/load_db_state_to_etcd.py index e240a07983..427133d53c 100644 --- a/controller/api/management/commands/load_db_state_to_etcd.py +++ b/controller/api/management/commands/load_db_state_to_etcd.py @@ -1,6 +1,8 @@ +from __future__ import print_function + from django.core.management.base import BaseCommand -from api.models import Key, App, Domain, Certificate, Config +from api.models import Key, App, Domain, Certificate class Command(BaseCommand): @@ -9,8 +11,11 @@ class Command(BaseCommand): """ def handle(self, *args, **options): """Publishes Deis platform state from the 
database to etcd.""" - print "Publishing DB state to etcd..." - for model in (Key, App, Domain, Certificate, Config): + print("Publishing DB state to etcd...") + for app in App.objects.all(): + app.save() + app.config_set.latest().save() + for model in (Key, Domain, Certificate): for obj in model.objects.all(): obj.save() - print "Done Publishing DB state to etcd." + print("Done Publishing DB state to etcd.") diff --git a/controller/api/models.py b/controller/api/models.py index 05984fd44c..821f51bc6e 100644 --- a/controller/api/models.py +++ b/controller/api/models.py @@ -116,6 +116,11 @@ def validate_certificate(value): raise ValidationError('Could not load certificate: {}'.format(e)) +def validate_common_name(value): + if '*' in value: + raise ValidationError('Wildcard certificates are not supported') + + def get_etcd_client(): if not hasattr(get_etcd_client, "client"): # wire up etcd publishing if we can connect @@ -445,14 +450,32 @@ def _destroy_containers(self, to_destroy): destroy_threads = [Thread(target=c.destroy) for c in to_destroy] [t.start() for t in destroy_threads] [t.join() for t in destroy_threads] - [c.delete() for c in to_destroy if c.state == 'destroyed'] + pks = [c.pk for c in to_destroy if c.state == 'destroyed'] + Container.objects.filter(pk__in=pks).delete() if any(c.state != 'destroyed' for c in to_destroy): err = 'aborting, failed to destroy some containers' log_event(self, err, logging.ERROR) raise RuntimeError(err) + def _prune_containers(self): + try: + containers = self.container_set.exclude(type='run') + # find the unique type+num values of containers + vals = set((i[0], i[1]) for i in containers.values_list('type', 'num')) + for typ, num in vals: + # delete all but the latest of each type+num + group = containers.filter(type=typ, num=num) + if group.count() > 1: + latest = group.latest() + group.exclude(uuid=latest.uuid).delete() + except Exception as e: + # just log the error, don't raise it + err = '(_prune_containers): 
{}'.format(e) + log_event(self, err, logging.ERROR) + def deploy(self, user, release): """Deploy a new release to this application""" + self._prune_containers() existing = self.container_set.exclude(type='run') new = [] scale_types = set() @@ -603,7 +626,7 @@ def __str__(self): return self.short_name() class Meta: - get_latest_by = '-created' + get_latest_by = 'created' ordering = ['created'] @property @@ -1011,7 +1034,7 @@ class Certificate(AuditedModel): certificate = models.TextField(validators=[validate_certificate]) key = models.TextField() # X.509 certificates allow any string of information as the common name. - common_name = models.TextField(unique=True) + common_name = models.TextField(unique=True, validators=[validate_common_name]) expires = models.DateTimeField() def __str__(self): diff --git a/controller/api/serializers.py b/controller/api/serializers.py index 29e30f09fd..fdb013945d 100644 --- a/controller/api/serializers.py +++ b/controller/api/serializers.py @@ -19,7 +19,7 @@ PROCTYPE_MATCH = re.compile(r'^(?P[a-z]+)') MEMLIMIT_MATCH = re.compile(r'^(?P[0-9]+(MB|KB|GB|[BKMG]))$', re.IGNORECASE) CPUSHARE_MATCH = re.compile(r'^(?P[0-9]+)$') -TAGKEY_MATCH = re.compile(r'^[a-z]+$') +TAGKEY_MATCH = re.compile(r'^[A-Za-z]+$') TAGVAL_MATCH = re.compile(r'^\w+$') CONFIGKEY_MATCH = re.compile(r'^[a-z_]+[a-z0-9_]*$', re.IGNORECASE) diff --git a/controller/api/tests/__init__.py b/controller/api/tests/__init__.py index 76566aacfd..114cbabba9 100644 --- a/controller/api/tests/__init__.py +++ b/controller/api/tests/__init__.py @@ -57,6 +57,7 @@ def mock_status_ok(*args, **kwargs): from .test_config import * # noqa from .test_container import * # noqa from .test_domain import * # noqa +from .test_healthcheck import * # noqa from .test_hooks import * # noqa from .test_key import * # noqa from .test_limits import * # noqa diff --git a/controller/api/tests/test_app.py b/controller/api/tests/test_app.py index 3d45926b39..ea3eda6cda 100644 --- 
a/controller/api/tests/test_app.py +++ b/controller/api/tests/test_app.py @@ -105,32 +105,30 @@ def test_app_actions(self, mock_get): url = "/v1/apps/{app_id}/logs".format(**locals()) response = self.client.get(url, HTTP_AUTHORIZATION="token {}".format(self.token)) self.assertEqual(response.status_code, 204) - self.assertEqual(response.data, "No logs for {}".format(app_id)) # test logs - 404 from deis-logger mock_response.status_code = 404 response = self.client.get(url, HTTP_AUTHORIZATION="token {}".format(self.token)) self.assertEqual(response.status_code, 204) - self.assertEqual(response.data, "No logs for {}".format(app_id)) # test logs - unanticipated status code from deis-logger mock_response.status_code = 400 response = self.client.get(url, HTTP_AUTHORIZATION="token {}".format(self.token)) self.assertEqual(response.status_code, 500) - self.assertEqual(response.data, "Error accessing logs for {}".format(app_id)) + self.assertEqual(response.content, "Error accessing logs for {}".format(app_id)) # test logs - success accessing deis-logger mock_response.status_code = 200 mock_response.content = FAKE_LOG_DATA response = self.client.get(url, HTTP_AUTHORIZATION="token {}".format(self.token)) self.assertEqual(response.status_code, 200) - self.assertEqual(response.data, FAKE_LOG_DATA) + self.assertEqual(response.content, FAKE_LOG_DATA) # test logs - HTTP request error while accessing deis-logger mock_get.side_effect = requests.exceptions.RequestException('Boom!') response = self.client.get(url, HTTP_AUTHORIZATION="token {}".format(self.token)) self.assertEqual(response.status_code, 500) - self.assertEqual(response.data, "Error accessing logs for {}".format(app_id)) + self.assertEqual(response.content, "Error accessing logs for {}".format(app_id)) # TODO: test run needs an initial build @@ -138,14 +136,23 @@ def test_app_actions(self, mock_get): def test_app_release_notes_in_logs(self, mock_logger): """Verifies that an app's release summary is dumped into the logs.""" 
url = '/v1/apps' - body = {'id': 'autotest'} + app_name = 'autotest' + body = {'id': app_name} + response = self.client.post(url, json.dumps(body), content_type='application/json', HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 201) + app = App.objects.get(id=app_name) # check app logs - exp_msg = "autotest created initial release" - exp_log_call = mock.call(logging.INFO, exp_msg) - mock_logger.log.has_calls(exp_log_call) + exp_msg = "[{app_name}]: {self.user.username} created initial release".format(**locals()) + mock_logger.log.assert_called_with(logging.INFO, exp_msg) + app.log('hello world') + exp_msg = "[{app_name}]: hello world".format(**locals()) + mock_logger.log.assert_called_with(logging.INFO, exp_msg) + app.log('goodbye world', logging.WARNING) + # assert logging with a different log level + exp_msg = "[{app_name}]: goodbye world".format(**locals()) + mock_logger.log.assert_called_with(logging.WARNING, exp_msg) def test_app_errors(self): app_id = 'autotest-errors' diff --git a/controller/api/tests/test_auth.py b/controller/api/tests/test_auth.py index 9f2033cdfd..0ab0a84e80 100644 --- a/controller/api/tests/test_auth.py +++ b/controller/api/tests/test_auth.py @@ -221,6 +221,19 @@ def test_cancel(self): content_type='application/json', HTTP_AUTHORIZATION='token {}'.format(self.admin_token)) self.assertEqual(response.status_code, 204) + # user can not be deleted if it has an app attached to it + response = self.client.post( + '/v1/apps', + HTTP_AUTHORIZATION='token {}'.format(self.admin_token) + ) + self.assertEqual(response.status_code, 201) + app_id = response.data['id'] # noqa + self.assertIn('id', response.data) + + response = self.client.delete(url, json.dumps({'username': str(self.admin)}), + content_type='application/json', + HTTP_AUTHORIZATION='token {}'.format(self.admin_token)) + self.assertEqual(response.status_code, 409) def test_passwd(self): """Test that a registered user can change the password.""" 
diff --git a/controller/api/tests/test_certificate.py b/controller/api/tests/test_certificate.py index c82b08cae9..2e84b8c043 100644 --- a/controller/api/tests/test_certificate.py +++ b/controller/api/tests/test_certificate.py @@ -80,6 +80,17 @@ def test_create_certificate_with_domain(self): HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 201) + def test_create_wildcard_certificate(self): + """Tests creating a wildcard certificate, which should be disabled.""" + body = {'certificate': self.autotest_example_com_cert, + 'key': self.key, + 'common_name': '*.example.com'} + response = self.client.post(self.url, json.dumps(body), content_type='application/json', + HTTP_AUTHORIZATION='token {}'.format(self.token)) + self.assertEqual(response.status_code, 400) + self.assertEqual(json.loads(response.content), + {'common_name': ['Wildcard certificates are not supported']}) + def test_create_certificate_with_different_common_name(self): """ In some cases such as with SAN certificates, the certificate can cover more diff --git a/controller/api/tests/test_config.py b/controller/api/tests/test_config.py index 8626c8c258..4045662719 100644 --- a/controller/api/tests/test_config.py +++ b/controller/api/tests/test_config.py @@ -9,13 +9,16 @@ import json import logging +import os.path import requests +import tempfile from django.contrib.auth.models import User from django.test import TransactionTestCase import etcd import mock from rest_framework.authtoken.models import Token +from simpleflock import SimpleFlock import api.exceptions from api.models import App, Config @@ -133,6 +136,42 @@ def test_config(self): self.assertEqual(response.status_code, 405) return config5 + @mock.patch('requests.post', mock_status_ok) + def test_overlapping_config(self): + """ + Test that config won't be created if a similar operation + is in progress for that app. 
+ """ + url = '/v1/apps' + response = self.client.post(url, HTTP_AUTHORIZATION='token {}'.format(self.token)) + self.assertEqual(response.status_code, 201) + app_id = response.data['id'] + # check to see that an initial/empty config was created + url = "/v1/apps/{app_id}/config".format(**locals()) + response = self.client.get(url, + HTTP_AUTHORIZATION='token {}'.format(self.token)) + self.assertEqual(response.status_code, 200) + self.assertIn('values', response.data) + self.assertEqual(response.data['values'], {}) + config1 = response.data + # create the lockfile as though a "deis config:set" were in progress + lockfile = os.path.join(tempfile.gettempdir(), app_id + "-config") + with SimpleFlock(lockfile): + # set an initial config value + body = {'values': json.dumps({'NEW_URL1': 'http://localhost:8080/'})} + response = self.client.post(url, json.dumps(body), content_type='application/json', + HTTP_AUTHORIZATION='token {}'.format(self.token)) + self.assertEqual(response.status_code, 409) + self.assertNotIn('values', response.data) + self.assertNotIn('uuid', response.data) + # read the config + response = self.client.get(url, + HTTP_AUTHORIZATION='token {}'.format(self.token)) + self.assertEqual(response.status_code, 200) + config2 = response.data + self.assertEqual(config1, config2) + self.assertNotIn('NEW_URL1', response.data['values']) + @mock.patch('requests.post', mock_status_ok) def test_response_data(self): """Test that the serialized response contains only relevant data.""" diff --git a/controller/api/tests/test_container.py b/controller/api/tests/test_container.py index d941d48f6f..8cbffdfe98 100644 --- a/controller/api/tests/test_container.py +++ b/controller/api/tests/test_container.py @@ -667,3 +667,39 @@ def test_restart_containers(self): HTTP_AUTHORIZATION='token {}'.format(self.token)) self.assertEqual(response.status_code, 200) self.assertEqual(len(response.data), container_set.filter(type='web', num=1).count()) + + def 
test_prune_old_containers(self): + url = '/v1/apps' + response = self.client.post(url, HTTP_AUTHORIZATION='token {}'.format(self.token)) + self.assertEqual(response.status_code, 201) + app_id = response.data['id'] + # post a new build + build_url = "/v1/apps/{app_id}/builds".format(**locals()) + body = {'image': 'autotest/example', 'sha': 'a'*40, + 'procfile': json.dumps({'web': 'node server.js', 'worker': 'node worker.js'})} + response = self.client.post(build_url, json.dumps(body), content_type='application/json', + HTTP_AUTHORIZATION='token {}'.format(self.token)) + url = "/v1/apps/{app_id}/scale".format(**locals()) + body = {'web': 4, 'worker': 8} + response = self.client.post(url, json.dumps(body), content_type='application/json', + HTTP_AUTHORIZATION='token {}'.format(self.token)) + self.assertEqual(response.status_code, 204) + containers = App.objects.get(id=app_id).container_set.all() + self.assertEqual(containers.count(), 12) + # save a representation of this container set to compare against later + repr_map = map(repr, containers) + # create some duplicate containers + for i in range(5): + for c in containers: + n = c.clone(c.release) + n.save() + # recreate the queryset since iterating caused it to be cached + containers = App.objects.get(id=app_id).container_set.all() + # test that duplicate containers actually exist at this point + self.assertEqual(containers.count(), 72) + # call the purge_containers method + app = App.objects.get(id=app_id) + app._prune_containers() + # verify that earlier duplicate containers went away + self.assertEqual(containers.count(), 12) + self.assertQuerysetEqual(containers, repr_map) diff --git a/controller/api/tests/test_healthcheck.py b/controller/api/tests/test_healthcheck.py new file mode 100644 index 0000000000..e62ce5f4d0 --- /dev/null +++ b/controller/api/tests/test_healthcheck.py @@ -0,0 +1,24 @@ +from __future__ import unicode_literals + +from django.test import TestCase + + +class HealthCheckTest(TestCase): + + 
def setUp(self): + self.url = '/healthz' + + def test_healthcheck(self): + # GET and HEAD (no auth required) + resp = self.client.get(self.url) + self.assertEqual(resp.status_code, 200) + self.assertEqual(resp.content, "OK") + + resp = self.client.head(self.url) + self.assertEqual(resp.status_code, 200) + + def test_healthcheck_invalid(self): + for method in ('put', 'post', 'patch', 'delete'): + resp = getattr(self.client, method)(self.url) + # method not allowed + self.assertEqual(resp.status_code, 405) diff --git a/controller/api/tests/test_users.py b/controller/api/tests/test_users.py index 55c3963689..0ea59568e4 100644 --- a/controller/api/tests/test_users.py +++ b/controller/api/tests/test_users.py @@ -12,7 +12,7 @@ class TestUsers(TestCase): fixtures = ['tests.json'] def test_super_user_can_list(self): - url = '/v1/users/' + url = '/v1/users' user = User.objects.get(username='autotest') token = Token.objects.get(user=user) @@ -24,7 +24,7 @@ def test_super_user_can_list(self): self.assertEqual(len(response.data['results']), 3) def test_non_super_user_cannot_list(self): - url = '/v1/users/' + url = '/v1/users' user = User.objects.get(username='autotest2') token = Token.objects.get(user=user) diff --git a/controller/api/urls.py b/controller/api/urls.py index 99eada5fb9..6b3ab21429 100644 --- a/controller/api/urls.py +++ b/controller/api/urls.py @@ -99,5 +99,5 @@ url(r'^certs/?', views.CertificateViewSet.as_view({'get': 'list', 'post': 'create'})), # list users - url(r'^users/', views.UserView.as_view({'get': 'list'})), + url(r'^users/?', views.UserView.as_view({'get': 'list'})), ) diff --git a/controller/api/views.py b/controller/api/views.py index e159cf4ab2..55c8035f53 100644 --- a/controller/api/views.py +++ b/controller/api/views.py @@ -1,24 +1,41 @@ """ RESTful view classes for presenting Deis API objects. 
""" + +import os.path +import tempfile + from django.conf import settings from django.core.exceptions import ValidationError from django.contrib.auth.models import User +from django.http import HttpResponse from django.shortcuts import get_object_or_404 from guardian.shortcuts import assign_perm, get_objects_for_user, \ get_users_with_perms, remove_perm +from django.views.generic import View from rest_framework import mixins, renderers, status from rest_framework.exceptions import PermissionDenied from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response from rest_framework.viewsets import GenericViewSet from rest_framework.authtoken.models import Token +from simpleflock import SimpleFlock from api import authentication, models, permissions, serializers, viewsets import requests +class HealthCheckView(View): + """Simple health check view to determine if the server + is responding to HTTP requests. + """ + + def get(self, request): + return HttpResponse("OK") + head = get + + class UserRegistrationViewSet(GenericViewSet, mixins.CreateModelMixin): """ViewSet to handle registering new users. The logic is in the serializer.""" @@ -47,6 +64,11 @@ def destroy(self, request, **kwargs): else: raise PermissionDenied() + # A user can not be removed without apps changing ownership first + if len(models.App.objects.filter(owner=target_obj)) > 0: + msg = '{} still has applications assigned. 
Delete or transfer ownership'.format(str(target_obj)) # noqa + return Response({'detail': msg}, status=status.HTTP_409_CONFLICT) + target_obj.delete() return Response(status=status.HTTP_204_NO_CONTENT) @@ -207,22 +229,20 @@ def scale(self, request, **kwargs): def logs(self, request, **kwargs): app = self.get_object() try: - return Response(app.logs(request.query_params.get('log_lines', - str(settings.LOG_LINES))), - status=status.HTTP_200_OK, content_type='text/plain') + return HttpResponse(app.logs(request.query_params.get('log_lines', + str(settings.LOG_LINES))), + status=status.HTTP_200_OK, content_type='text/plain') except requests.exceptions.RequestException: - return Response("Error accessing logs for {}".format(app.id), - status=status.HTTP_500_INTERNAL_SERVER_ERROR, - content_type='text/plain') - except EnvironmentError as e: - if e.message == 'Error accessing deis-logger': - return Response("Error accessing logs for {}".format(app.id), + return HttpResponse("Error accessing logs for {}".format(app.id), status=status.HTTP_500_INTERNAL_SERVER_ERROR, content_type='text/plain') + except EnvironmentError as e: + if e.message == 'Error accessing deis-logger': + return HttpResponse("Error accessing logs for {}".format(app.id), + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + content_type='text/plain') else: - return Response("No logs for {}".format(app.id), - status=status.HTTP_204_NO_CONTENT, - content_type='text/plain') + return HttpResponse(status=status.HTTP_204_NO_CONTENT) def run(self, request, **kwargs): app = self.get_object() @@ -262,6 +282,18 @@ class ConfigViewSet(ReleasableViewSet): model = models.Config serializer_class = serializers.ConfigSerializer + def create(self, request, **kwargs): + # Guard against overlapping config changes, using a filesystem lock so that + # multiple controller processes can be coordinated. + # Use a tempfile such as "/tmp/violet-valkyrie-config". 
+ lockfile = os.path.join(tempfile.gettempdir(), kwargs['id'] + '-config') + try: + with SimpleFlock(lockfile, timeout=5): + return super(ConfigViewSet, self).create(request, **kwargs) + except IOError as err: + msg = "Config changes already in progress.\n{}".format(err) + return Response(status=status.HTTP_409_CONFLICT, data={'error': msg}) + def post_save(self, config): release = config.app.release_set.latest() self.release = release.new(self.request.user, config=config, build=release.build) diff --git a/controller/bin/boot b/controller/bin/boot index 76437c9205..21c7ff53c3 100755 --- a/controller/bin/boot +++ b/controller/bin/boot @@ -86,6 +86,11 @@ if addgroup -g "$(stat -c "%g" /var/run/docker.sock)" docker; then addgroup deis docker fi +# allow deis user permission to fleet +if addgroup -g "$(stat -c "%g" /var/run/fleet.sock)" fleet; then + addgroup deis fleet +fi + # run an idempotent database migration sudo -E -u deis ./manage.py syncdb --migrate --noinput @@ -113,15 +118,15 @@ if [[ ! -z $EXTERNAL_PORT ]]; then # configure service discovery PORT=${PORT:-8000} - PROTO=${PROTO:-tcp} + PROTO=${PROTO:-http} set +e # wait for the service to become available on PORT - sleep 1 && while [[ -z $(netstat -lnt | awk "\$6 == \"LISTEN\" && \$4 ~ \".$PORT\" && \$1 ~ \"$PROTO.?\"") ]] ; do sleep 1; done + sleep 1 && while ! curl -sf "${PROTO}://localhost:${PORT}/healthz" > /dev/null ; do sleep 1; done # while the port is listening, publish to etcd - while [[ ! 
-z $(netstat -lnt | awk "\$6 == \"LISTEN\" && \$4 ~ \".$PORT\" && \$1 ~ \"$PROTO.?\"") ]] ; do + while curl -sf "${PROTO}://localhost:${PORT}/healthz" > /dev/null ; do etcdctl --no-sync -C "$ETCD" set "$ETCD_PATH/host" "$HOST" --ttl "$ETCD_TTL" >/dev/null etcdctl --no-sync -C "$ETCD" set "$ETCD_PATH/port" "$EXTERNAL_PORT" --ttl "$ETCD_TTL" >/dev/null sleep $((ETCD_TTL/2)) # sleep for half the TTL diff --git a/controller/build.sh b/controller/build.sh index 80e50fdf03..0b2fefa4ad 100755 --- a/controller/build.sh +++ b/controller/build.sh @@ -11,20 +11,19 @@ if [[ -z $DOCKER_BUILD ]]; then fi # install required system packages -# HACK: install git so we can install bacongobbler's fork of django-fsm -apk add --update-cache \ +apk add --no-cache \ build-base \ - git \ libffi-dev \ libpq \ openldap \ openldap-dev \ + openssl \ postgresql-dev \ python \ python-dev # install pip -curl -sSL https://raw.githubusercontent.com/pypa/pip/7.0.3/contrib/get-pip.py | python - +curl -sSL https://bootstrap.pypa.io/get-pip.py | python - pip==8.1.1 # add a deis user adduser deis -D -h /app -s /bin/bash @@ -39,11 +38,9 @@ mkdir -p /templates && chown -R deis:deis /templates pip install --disable-pip-version-check --no-cache-dir -r /app/requirements.txt # cleanup. 
-apk del --purge \ +apk del --no-cache \ build-base \ - git \ libffi-dev \ openldap-dev \ postgresql-dev \ python-dev -rm -rf /var/cache/apk/* diff --git a/controller/deis/__init__.py b/controller/deis/__init__.py index 6afea93ae6..4b468d2723 100644 --- a/controller/deis/__init__.py +++ b/controller/deis/__init__.py @@ -6,4 +6,4 @@ from __future__ import absolute_import -__version__ = '1.13.0-dev' +__version__ = '1.13.4' diff --git a/controller/deis/urls.py b/controller/deis/urls.py index e7df3d405a..e864fe5612 100644 --- a/controller/deis/urls.py +++ b/controller/deis/urls.py @@ -10,6 +10,7 @@ from django.conf import settings from django.conf.urls import patterns, include, url from django.contrib import admin +from api.views import HealthCheckView admin.autodiscover() @@ -17,6 +18,7 @@ urlpatterns = patterns( '', + url(r'^healthz$', HealthCheckView.as_view()), url(r'^v1/', include('api.urls')), ) diff --git a/controller/registry/dockerclient.py b/controller/registry/dockerclient.py index 0bc2b2d5ba..c2ecae1f99 100644 --- a/controller/registry/dockerclient.py +++ b/controller/registry/dockerclient.py @@ -55,8 +55,8 @@ def publish_release(self, source, config, target, deis_registry): def build(self, source, config, repo, tag): """Add a "last-mile" layer of environment config to a Docker image for deis-registry.""" check_blacklist(repo) - env = ' '.join("{}='{}'".format( - k, v.encode('unicode-escape').replace("'", "\\'")) for k, v in config.viewitems()) + env = ' '.join('{}="{}"'.format( + k, v.encode('unicode-escape').replace('"', '\\"')) for k, v in config.viewitems()) dockerfile = "FROM {}\nENV {}".format(source, env) f = io.BytesIO(dockerfile.encode('utf-8')) target_repo = "{}/{}:{}".format(self.registry, repo, tag) @@ -92,8 +92,7 @@ def check_blacklist(repo): blacklisted = [ # NOTE: keep this list up to date! 
'builder', 'cache', 'controller', 'database', 'logger', 'logspout', 'publisher', 'registry', 'router', 'store-admin', 'store-daemon', - 'store-gateway', 'store-metadata', 'store-monitor', 'swarm', 'mesos-master', - 'mesos-marathon', 'mesos-slave', 'zookeeper', + 'store-gateway', 'store-metadata', 'store-monitor', ] if any("deis/{}".format(c) in repo for c in blacklisted): raise PermissionDenied("Repository name {} is not allowed".format(repo)) diff --git a/controller/registry/tests.py b/controller/registry/tests.py index ae905ee44d..76c3b1c297 100644 --- a/controller/registry/tests.py +++ b/controller/registry/tests.py @@ -56,7 +56,7 @@ def test_build(self, mock_client): self.assertDictContainsSubset(args, kwargs) # test that the fileobj arg to "docker build" contains a correct Dockerfile f = kwargs['fileobj'] - self.assertEqual(f.read(), "FROM ozzy/embryo:git-f3a8020\nENV POWERED_BY='Deis'") + self.assertEqual(f.read(), "FROM ozzy/embryo:git-f3a8020\nENV POWERED_BY=\"Deis\"") # Test that blacklisted image names can't be built with self.assertRaises(PermissionDenied): self.client.build('deis/controller:v1.11.1', {}, 'deis/controller', 'v1.11.1') diff --git a/controller/requirements.txt b/controller/requirements.txt index 38bfab2030..76c5cf493f 100644 --- a/controller/requirements.txt +++ b/controller/requirements.txt @@ -8,15 +8,14 @@ django-guardian==1.2.5 django-json-field==0.5.7 django-auth-ldap==1.2.5 djangorestframework==3.0.5 -docker-py==1.6.0 +docker-py==1.7.2 gunicorn==19.3.0 -marathon==0.6.15 paramiko==1.15.2 psycopg2==2.6.1 python-etcd==0.3.2 python-ldap==2.4.19 PyYAML==3.11 semantic_version==2.4.2 -simpleflock==0.0.2 +simpleflock==0.0.3 South==1.0.2 static==1.1.1 diff --git a/controller/scheduler/fleet.py b/controller/scheduler/fleet.py index 21726ac121..6d257924fe 100644 --- a/controller/scheduler/fleet.py +++ b/controller/scheduler/fleet.py @@ -152,7 +152,7 @@ def _create_container(self, name, image, command, unit, **kwargs): tags = kwargs.get('tags', 
{}) unit_tags = tags.viewitems() if settings.ENABLE_PLACEMENT_OPTIONS in ['true', 'True', 'TRUE', '1']: - unit_tags['dataPlane'] = 'true' + tags['dataPlane'] = 'true' if unit_tags: tagset = ' '.join(['"{}={}"'.format(k, v) for k, v in unit_tags]) unit.append({"section": "X-Fleet", "name": "MachineMetadata", diff --git a/controller/scheduler/k8s.py b/controller/scheduler/k8s.py deleted file mode 100644 index b0878deb11..0000000000 --- a/controller/scheduler/k8s.py +++ /dev/null @@ -1,686 +0,0 @@ -import copy -import httplib -import json -import random -import re -import string -import time - -from django.conf import settings -from docker import Client -from .states import JobState -from . import AbstractSchedulerClient - - -POD_TEMPLATE = '''{ - "kind": "Pod", - "apiVersion": "$version", - "metadata": { - "name": "$id" - }, - "spec": { - "containers": [ - { - "name": "$id", - "image": "$image" - } - ], - "restartPolicy":"Never" - } -}''' - -RC_TEMPLATE = '''{ - "kind":"ReplicationController", - "apiVersion":"$version", - "metadata":{ - "name":"$name", - "labels":{ - "name":"$id" - } - }, - "spec":{ - "replicas":$num, - "selector":{ - "name":"$id", - "version":"$appversion", - "type":"$type" - }, - "template":{ - "metadata":{ - "labels":{ - "name":"$id", - "version":"$appversion", - "type":"$type" - } - }, - "spec":{ - "containers":[ - { - "name":"$containername", - "image":"$image" - } - ] - } - } - } -}''' - -SERVICE_TEMPLATE = '''{ - "kind":"Service", - "apiVersion":"$version", - "metadata":{ - "name":"$name", - "labels":{ - "name":"$label" - } - }, - "spec":{ - "ports": [ - { - "port":80, - "targetPort":$port, - "protocol":"TCP" - } - ], - "selector":{ - "name":"$label", - "type":"$type" - } - } -}''' - -POD_DELETE = '''{ -}''' - - -RETRIES = 3 -MATCH = re.compile( - r'(?P[a-z0-9-]+)_?(?Pv[0-9]+)?\.?(?P[a-z-_]+)') - - -class KubeHTTPClient(AbstractSchedulerClient): - - def __init__(self, target, auth, options, pkey): - super(KubeHTTPClient, self).__init__(target, 
auth, options, pkey) - self.target = settings.K8S_MASTER - self.port = "8080" - self.registry = settings.REGISTRY_HOST+":"+settings.REGISTRY_PORT - self.apiversion = "v1" - self.conn = httplib.HTTPConnection(self.target+":"+self.port) - - def _get_old_rc(self, name, app_type): - con_app = httplib.HTTPConnection(self.target+":"+self.port) - con_app.request('GET', '/api/'+self.apiversion + - '/namespaces/'+name+'/replicationcontrollers') - resp = con_app.getresponse() - data = resp.read() - reason = resp.reason - status = resp.status - con_app.close() - if not 200 <= status <= 299: - errmsg = "Failed to get Replication Controllers: {} {} - {}".format( - status, reason, data) - raise RuntimeError(errmsg) - parsed_json = json.loads(data) - exists = False - prev_rc = [] - for rc in parsed_json['items']: - if('name' in rc['metadata']['labels'] and name == rc['metadata']['labels']['name'] and - 'type' in rc['spec']['selector'] and app_type == rc['spec']['selector']['type']): - exists = True - prev_rc = rc - break - if exists: - return prev_rc - else: - return 0 - - def _get_rc_status(self, name, namespace): - conn_rc = httplib.HTTPConnection(self.target+":"+self.port) - conn_rc.request('GET', '/api/'+self.apiversion+'/' + - 'namespaces/'+namespace+'/replicationcontrollers/'+name) - resp = conn_rc.getresponse() - status = resp.status - conn_rc.close() - return status - - def _get_rc_(self, name, namespace): - conn_rc_resver = httplib.HTTPConnection(self.target+":"+self.port) - conn_rc_resver.request('GET', '/api/'+self.apiversion+'/' + - 'namespaces/'+namespace+'/replicationcontrollers/'+name) - resp = conn_rc_resver.getresponse() - data = resp.read() - reason = resp.reason - status = resp.status - conn_rc_resver.close() - if not 200 <= status <= 299: - errmsg = "Failed to get Replication Controller:{} {} {} - {}".format( - name, status, reason, data) - raise RuntimeError(errmsg) - parsed_json = json.loads(data) - return parsed_json - - def deploy(self, name, image, 
command, **kwargs): - app_name = kwargs.get('aname', {}) - app_type = name.split(".")[1] - old_rc = self._get_old_rc(app_name, app_type) - new_rc = self._create_rc(name, image, command, **kwargs) - desired = int(old_rc["spec"]["replicas"]) - old_rc_name = old_rc["metadata"]["name"] - new_rc_name = new_rc["metadata"]["name"] - try: - count = 1 - while desired >= count: - new_rc = self._scale_app(new_rc_name, count, app_name) - old_rc = self._scale_app(old_rc_name, desired-count, app_name) - count += 1 - except Exception as e: - self._scale_app(new_rc["metadata"]["name"], 0, app_name) - self._delete_rc(new_rc["metadata"]["name"], app_name) - self._scale_app(old_rc["metadata"]["name"], desired, app_name) - err = '{} (deploy): {}'.format(name, e) - raise RuntimeError(err) - self._delete_rc(old_rc_name, app_name) - - def _get_events(self, namespace): - con_get = httplib.HTTPConnection(self.target+":"+self.port) - con_get.request('GET', '/api/'+self.apiversion+'/namespaces/'+namespace+'/events') - resp = con_get.getresponse() - reason = resp.reason - status = resp.status - data = resp.read() - con_get.close() - if not 200 <= status <= 299: - errmsg = "Failed to get events: {} {} - {}".format( - status, reason, data) - raise RuntimeError(errmsg) - return (status, data, reason) - - def _get_schedule_status(self, name, num, namespace): - pods = [] - for _ in xrange(120): - count = 0 - pods = [] - status, data, reason = self._get_pods(namespace) - parsed_json = json.loads(data) - for pod in parsed_json['items']: - if pod['metadata']['generateName'] == name+'-': - count += 1 - pods.append(pod['metadata']['name']) - if count == num: - break - time.sleep(1) - for _ in xrange(120): - count = 0 - status, data, reason = self._get_events(namespace) - parsed_json = json.loads(data) - for event in parsed_json['items']: - if(event['involvedObject']['name'] in pods and - event['source']['component'] == 'scheduler'): - if event['reason'] == 'scheduled': - count += 1 - else: - raise 
RuntimeError(event['message']) - if count == num: - break - time.sleep(1) - - def _scale_rc(self, rc, namespace): - name = rc['metadata']['name'] - num = rc["spec"]["replicas"] - headers = {'Content-Type': 'application/json'} - conn_scalepod = httplib.HTTPConnection(self.target+":"+self.port) - conn_scalepod.request('PUT', '/api/'+self.apiversion+'/namespaces/'+namespace+'/' + - 'replicationcontrollers/'+name, headers=headers, body=json.dumps(rc)) - resp = conn_scalepod.getresponse() - data = resp.read() - reason = resp.reason - status = resp.status - conn_scalepod.close() - if not 200 <= status <= 299: - errmsg = "Failed to scale Replication Controller:{} {} {} - {}".format( - name, status, reason, data) - raise RuntimeError(errmsg) - resource_ver = rc['metadata']['resourceVersion'] - for _ in xrange(30): - js_template = self._get_rc_(name, namespace) - if js_template["metadata"]["resourceVersion"] != resource_ver: - break - time.sleep(1) - self._get_schedule_status(name, num, namespace) - for _ in xrange(120): - count = 0 - status, data, reason = self._get_pods(namespace) - parsed_json = json.loads(data) - for pod in parsed_json['items']: - if(pod['metadata']['generateName'] == name+'-' and - pod['status']['phase'] == 'Running'): - count += 1 - if count == num: - break - time.sleep(1) - - def _scale_app(self, name, num, namespace): - js_template = self._get_rc_(name, namespace) - js_template["spec"]["replicas"] = num - self._scale_rc(js_template, namespace) - - def scale(self, name, image, command, **kwargs): - app_name = kwargs.get('aname', {}) - rc_name = name.replace(".", "-") - rc_name = rc_name.replace("_", "-") - if not 200 <= self._get_rc_status(rc_name, app_name) <= 299: - self.create(name, image, command, **kwargs) - return - name = name.replace(".", "-") - name = name.replace("_", "-") - num = kwargs.get('num', {}) - js_template = self._get_rc_(name, app_name) - old_replicas = js_template["spec"]["replicas"] - try: - self._scale_app(name, num, app_name) 
- except Exception as e: - self._scale_app(name, old_replicas, app_name) - err = '{} (Scale): {}'.format(name, e) - raise RuntimeError(err) - - def _create_rc(self, name, image, command, **kwargs): - container_fullname = name - app_name = kwargs.get('aname', {}) - app_type = name.split(".")[1] - container_name = app_name+"-"+app_type - name = name.replace(".", "-") - name = name.replace("_", "-") - args = command.split() - - num = kwargs.get('num', {}) - l = {} - l["name"] = name - l["id"] = app_name - l["appversion"] = kwargs.get('version', {}) - l["version"] = self.apiversion - l["image"] = self.registry+"/"+image - l['num'] = num - l['containername'] = container_name - l['type'] = app_type - template = string.Template(RC_TEMPLATE).substitute(l) - js_template = json.loads(template) - containers = js_template["spec"]["template"]["spec"]["containers"] - containers[0]['args'] = args - loc = locals().copy() - loc.update(re.match(MATCH, container_fullname).groupdict()) - mem = kwargs.get('memory', {}).get(loc['c_type']) - cpu = kwargs.get('cpu', {}).get(loc['c_type']) - if mem or cpu: - containers[0]["resources"] = {"limits": {}} - if mem: - if mem[-2:-1].isalpha() and mem[-1].isalpha(): - mem = mem[:-1] - mem = mem+"i" - containers[0]["resources"]["limits"]["memory"] = mem - if cpu: - cpu = float(cpu)/1024 - containers[0]["resources"]["limits"]["cpu"] = cpu - headers = {'Content-Type': 'application/json'} - conn_rc = httplib.HTTPConnection(self.target+":"+self.port) - conn_rc.request('POST', '/api/'+self.apiversion+'/namespaces/'+app_name+'/' + - 'replicationcontrollers', headers=headers, body=json.dumps(js_template)) - resp = conn_rc.getresponse() - data = resp.read() - reason = resp.reason - status = resp.status - conn_rc.close() - if not 200 <= status <= 299: - errmsg = "Failed to create Replication Controller:{} {} {} - {}".format( - name, status, reason, data) - raise RuntimeError(errmsg) - create = False - for _ in xrange(30): - if not create and 
self._get_rc_status(name, app_name) == 404: - time.sleep(1) - continue - create = True - rc = self._get_rc_(name, app_name) - if ("observedGeneration" in rc["status"] - and rc["metadata"]["generation"] == rc["status"]["observedGeneration"]): - break - time.sleep(1) - return json.loads(data) - - def create(self, name, image, command, **kwargs): - """Create a container.""" - self._create_rc(name, image, command, **kwargs) - app_type = name.split(".")[1] - name = name.replace(".", "-") - name = name.replace("_", "-") - app_name = kwargs.get('aname', {}) - try: - self._create_service(name, app_name, app_type) - except Exception as e: - self._scale_app(name, 0, app_name) - self._delete_rc(name, app_name) - err = '{} (create): {}'.format(name, e) - raise RuntimeError(err) - - def _get_service(self, name, namespace): - con_get = httplib.HTTPConnection(self.target+":"+self.port) - con_get.request('GET', '/api/'+self.apiversion+'/namespaces/'+namespace+'/services/'+name) - resp = con_get.getresponse() - reason = resp.reason - status = resp.status - data = resp.read() - con_get.close() - if not 200 <= status <= 299: - errmsg = "Failed to get Service: {} {} - {}".format( - status, reason, data) - raise RuntimeError(errmsg) - return (status, data, reason) - - def _create_service(self, name, app_name, app_type): - random.seed(app_name) - app_id = random.randint(1, 100000) - appname = "app-"+str(app_id) - actual_pod = {} - for _ in xrange(300): - status, data, reason = self._get_pods(app_name) - parsed_json = json.loads(data) - for pod in parsed_json['items']: - if('generateName' in pod['metadata'] and - pod['metadata']['generateName'] == name+'-'): - actual_pod = pod - break - if actual_pod and actual_pod['status']['phase'] == 'Running': - break - time.sleep(1) - container_id = actual_pod['status']['containerStatuses'][0]['containerID'].split("//")[1] - ip = actual_pod['status']['hostIP'] - docker_cli = Client("tcp://{}:2375".format(ip), timeout=1200, version='1.17') - 
container = docker_cli.inspect_container(container_id) - port = int(container['Config']['ExposedPorts'].keys()[0].split("/")[0]) - l = {} - l["version"] = self.apiversion - l["label"] = app_name - l["port"] = port - l['type'] = app_type - l["name"] = appname - template = string.Template(SERVICE_TEMPLATE).substitute(l) - headers = {'Content-Type': 'application/json'} - conn_serv = httplib.HTTPConnection(self.target+":"+self.port) - conn_serv.request('POST', '/api/'+self.apiversion+'/namespaces/'+app_name+'/services', - headers=headers, body=copy.deepcopy(template)) - resp = conn_serv.getresponse() - data = resp.read() - reason = resp.reason - status = resp.status - conn_serv.close() - if status == 409: - status, data, reason = self._get_service(appname, app_name) - srv = json.loads(data) - if srv['spec']['selector']['type'] == 'web': - return - srv['spec']['selector']['type'] = app_type - srv['spec']['ports'][0]['targetPort'] = port - headers = {'Content-Type': 'application/json'} - conn_scalepod = httplib.HTTPConnection(self.target+":"+self.port) - conn_scalepod.request('PUT', '/api/'+self.apiversion+'/namespaces/'+app_name+'/' + - 'services/'+appname, headers=headers, body=json.dumps(srv)) - resp = conn_scalepod.getresponse() - data = resp.read() - reason = resp.reason - status = resp.status - conn_scalepod.close() - if not 200 <= status <= 299: - errmsg = "Failed to update the Service:{} {} {} - {}".format( - name, status, reason, data) - raise RuntimeError(errmsg) - elif not 200 <= status <= 299: - errmsg = "Failed to create Service:{} {} {} - {}".format( - name, status, reason, data) - raise RuntimeError(errmsg) - - def start(self, name): - """Start a container.""" - pass - - def stop(self, name): - """Stop a container.""" - pass - - def _delete_rc(self, name, namespace): - headers = {'Content-Type': 'application/json'} - con_dest = httplib.HTTPConnection(self.target+":"+self.port) - con_dest.request('DELETE', 
'/api/'+self.apiversion+'/namespaces/'+namespace+'/' + - 'replicationcontrollers/'+name, headers=headers, body=POD_DELETE) - resp = con_dest.getresponse() - reason = resp.reason - status = resp.status - data = resp.read() - con_dest.close() - if not 200 <= status <= 299: - errmsg = "Failed to delete Replication Controller:{} {} {} - {}".format( - name, status, reason, data) - raise RuntimeError(errmsg) - - def destroy(self, name): - """Destroy a container.""" - appname = name.split("_")[0] - name = name.split(".") - name = name[0]+'-'+name[1] - name = name.replace("_", "-") - - headers = {'Content-Type': 'application/json'} - con_dest = httplib.HTTPConnection(self.target+":"+self.port) - con_dest.request('DELETE', '/api/'+self.apiversion+'/namespaces/'+appname+'/' + - 'replicationcontrollers/'+name, headers=headers, body=POD_DELETE) - resp = con_dest.getresponse() - reason = resp.reason - status = resp.status - data = resp.read() - con_dest.close() - if status == 404: - return - if not 200 <= status <= 299: - errmsg = "Failed to delete Replication Controller:{} {} {} - {}".format( - name, status, reason, data) - raise RuntimeError(errmsg) - - random.seed(appname) - app_id = random.randint(1, 100000) - app_name = "app-"+str(app_id) - con_serv = httplib.HTTPConnection(self.target+":"+self.port) - con_serv.request('DELETE', '/api/'+self.apiversion + - '/namespaces/'+appname+'/services/'+app_name) - resp = con_serv.getresponse() - reason = resp.reason - status = resp.status - data = resp.read() - con_serv.close() - if status != 404 and not 200 <= status <= 299: - errmsg = "Failed to delete service:{} {} {} - {}".format( - name, status, reason, data) - raise RuntimeError(errmsg) - - status, data, reason = self._get_pods(appname) - parsed_json = json.loads(data) - for pod in parsed_json['items']: - if 'generateName' in pod['metadata'] and pod['metadata']['generateName'] == name+'-': - self._delete_pod(pod['metadata']['name'], appname) - con_ns = 
httplib.HTTPConnection(self.target+":"+self.port) - con_ns.request('DELETE', '/api/'+self.apiversion+'/namespaces/'+appname) - resp = con_ns.getresponse() - reason = resp.reason - status = resp.status - data = resp.read() - con_ns.close() - if not 200 <= status <= 299: - errmsg = "Failed to delete namespace:{} {} {} - {}".format( - appname, status, reason, data) - raise RuntimeError(errmsg) - - def _get_pod(self, name, namespace): - conn_pod = httplib.HTTPConnection(self.target+":"+self.port) - conn_pod.request('GET', '/api/'+self.apiversion+'/namespaces/'+namespace+'/pods/'+name) - resp = conn_pod.getresponse() - status = resp.status - data = resp.read() - reason = resp.reason - conn_pod.close() - return (status, data, reason) - - def _get_pods(self, namespace): - con_get = httplib.HTTPConnection(self.target+":"+self.port) - con_get.request('GET', '/api/'+self.apiversion+'/namespaces/'+namespace+'/pods') - resp = con_get.getresponse() - reason = resp.reason - status = resp.status - data = resp.read() - con_get.close() - if not 200 <= status <= 299: - errmsg = "Failed to get Pods: {} {} - {}".format( - status, reason, data) - raise RuntimeError(errmsg) - return (status, data, reason) - - def _delete_pod(self, name, namespace): - headers = {'Content-Type': 'application/json'} - con_dest_pod = httplib.HTTPConnection(self.target+":"+self.port) - con_dest_pod.request('DELETE', '/api/'+self.apiversion+'/namespaces/' + - namespace+'/pods/'+name, headers=headers, body=POD_DELETE) - resp = con_dest_pod.getresponse() - reason = resp.reason - status = resp.status - data = resp.read() - con_dest_pod.close() - if not 200 <= status <= 299: - errmsg = "Failed to delete Pod: {} {} - {}".format( - status, reason, data) - raise RuntimeError(errmsg) - for _ in xrange(5): - status, data, reason = self._get_pod(name, namespace) - if status != 404: - time.sleep(1) - continue - break - if status != 404: - errmsg = "Failed to delete Pod: {} {} - {}".format( - status, reason, data) - 
raise RuntimeError(errmsg) - - def _pod_log(self, name, namespace): - conn_log = httplib.HTTPConnection(self.target+":"+self.port) - conn_log.request('GET', '/api/'+self.apiversion+'/namespaces/' + - namespace+'/pods/'+name+'/log') - resp = conn_log.getresponse() - status = resp.status - data = resp.read() - reason = resp.reason - conn_log.close() - if not 200 <= status <= 299: - errmsg = "Failed to get the log: {} {} - {}".format( - status, reason, data) - raise RuntimeError(errmsg) - return (status, data, reason) - - def logs(self, name): - appname = name.split("_")[0] - name = name.replace(".", "-") - name = name.replace("_", "-") - status, data, reason = self._get_pods(appname) - parsed_json = json.loads(data) - log_data = '' - for pod in parsed_json['items']: - if name in pod['metadata']['generateName'] and pod['status']['phase'] == 'Running': - status, data, reason = self._pod_log(pod['metadata']['name'], appname) - log_data += data - return log_data - - def run(self, name, image, entrypoint, command): - """Run a one-off command.""" - appname = name.split("_")[0] - name = name.replace(".", "-") - name = name.replace("_", "-") - l = {} - l["id"] = name - l["version"] = self.apiversion - l["image"] = self.registry+"/"+image - template = string.Template(POD_TEMPLATE).substitute(l) - if command.startswith("-c "): - args = command.split(' ', 1) - args[1] = args[1][1:-1] - else: - args = [command[1:-1]] - js_template = json.loads(template) - js_template['spec']['containers'][0]['command'] = [entrypoint] - js_template['spec']['containers'][0]['args'] = args - - con_dest = httplib.HTTPConnection(self.target+":"+self.port) - headers = {'Content-Type': 'application/json'} - con_dest.request('POST', '/api/'+self.apiversion+'/namespaces/'+appname+'/pods', - headers=headers, body=json.dumps(js_template)) - resp = con_dest.getresponse() - data = resp.read() - status = resp.status - reason = resp.reason - con_dest.close() - if not 200 <= status <= 299: - errmsg = "Failed to 
create a Pod: {} {} - {}".format( - status, reason, data) - raise RuntimeError(errmsg) - while(1): - parsed_json = {} - status = 404 - reason = '' - data = '' - for _ in xrange(5): - status, data, reason = self._get_pod(name, appname) - if not 200 <= status <= 299: - time.sleep(1) - continue - parsed_json = json.loads(data) - break - if not 200 <= status <= 299: - errmsg = "Failed to create a Pod: {} {} - {}".format( - status, reason, data) - raise RuntimeError(errmsg) - if parsed_json['status']['phase'] == 'Succeeded': - status, data, reason = self._pod_log(name, appname) - self._delete_pod(name, appname) - return 0, data - elif parsed_json['status']['phase'] == 'Failed': - pod_state = parsed_json['status']['containerStatuses'][0]['state'] - err_code = pod_state['terminated']['exitCode'] - self._delete_pod(name, appname) - return err_code, data - time.sleep(1) - return 0, data - - def _get_pod_state(self, name): - try: - appname = name.split("_")[0] - name = name.split(".") - name = name[0]+'-'+name[1] - name = name.replace("_", "-") - for _ in xrange(120): - status, data, reason = self._get_pods(appname) - parsed_json = json.loads(data) - for pod in parsed_json['items']: - if pod['metadata']['generateName'] == name+'-': - actual_pod = pod - break - if actual_pod and actual_pod['status']['phase'] == 'Running': - return JobState.up - time.sleep(1) - return JobState.destroyed - except: - return JobState.destroyed - - def state(self, name): - """Display the given job's running state.""" - try: - return self._get_pod_state(name) - except KeyError: - return JobState.error - except RuntimeError: - return JobState.destroyed - -SchedulerClient = KubeHTTPClient diff --git a/controller/scheduler/mesos_marathon.py b/controller/scheduler/mesos_marathon.py deleted file mode 100644 index 3338c9c503..0000000000 --- a/controller/scheduler/mesos_marathon.py +++ /dev/null @@ -1,115 +0,0 @@ -import re -import time - -from django.conf import settings -from docker import Client -from 
marathon import MarathonClient -from marathon.models import MarathonApp - -from . import AbstractSchedulerClient -from .fleet import FleetHTTPClient -from .states import JobState - -# turn down standard marathon logging - -MATCH = re.compile( - '(?P[a-z0-9-]+)_?(?Pv[0-9]+)?\.?(?P[a-z-_]+)?.(?P[0-9]+)') -RETRIES = 3 -POLL_ATTEMPTS = 30 -POLL_WAIT = 100 - - -class MarathonHTTPClient(AbstractSchedulerClient): - - def __init__(self, target, auth, options, pkey): - super(MarathonHTTPClient, self).__init__(target, auth, options, pkey) - self.target = settings.MARATHON_HOST - self.registry = settings.REGISTRY_HOST + ':' + settings.REGISTRY_PORT - self.client = MarathonClient('http://'+self.target+':8180') - self.fleet = FleetHTTPClient('/var/run/fleet.sock', auth, options, pkey) - - # helpers - def _app_id(self, name): - return name.replace('_', '.') - - # container api - def create(self, name, image, command='', **kwargs): - """Create a new container""" - app_id = self._app_id(name) - l = locals().copy() - l.update(re.match(MATCH, name).groupdict()) - image = self.registry + '/' + image - mems = kwargs.get('memory', {}).get(l['c_type']) - m = 0 - if mems: - mems = mems.lower() - if mems[-2:-1].isalpha() and mems[-1].isalpha(): - mems = mems[:-1] - m = int(mems[:-1]) - c = 0.5 - cpu = kwargs.get('cpu', {}).get(l['c_type']) - if cpu: - c = cpu - cmd = "docker run --name {name} -P {image} {command}".format(**locals()) - self.client.create_app(app_id, MarathonApp(cmd=cmd, mem=m, cpus=c, instances=0)) - for _ in xrange(POLL_ATTEMPTS): - if self.client.get_app(self._app_id(name)).tasks_running == 0: - return - time.sleep(1) - - def start(self, name): - """Start a container.""" - self.client.scale_app(self._app_id(name), 1, force=True) - for _ in xrange(POLL_ATTEMPTS): - if self.client.get_app(self._app_id(name)).tasks_running == 1: - break - time.sleep(1) - host = self.client.get_app(self._app_id(name)).tasks[0].host - self._waitforcontainer(host, name) - - def destroy(self, 
name): - """Destroy a container.""" - try: - host = self.client.get_app(self._app_id(name)).tasks[0].host - self.client.delete_app(self._app_id(name), force=True) - self._delete_container(host, name) - except: - self.client.delete_app(self._app_id(name), force=True) - - def _get_container_state(self, host, name): - docker_cli = Client("tcp://{}:2375".format(host), timeout=1200, version='1.17') - try: - if docker_cli.inspect_container(name)['State']['Running']: - return JobState.up - except: - return JobState.destroyed - - def _waitforcontainer(self, host, name): - for _ in xrange(POLL_WAIT): - if self._get_container_state(host, name) == JobState.up: - return - time.sleep(1) - raise RuntimeError("App container Not Started") - - def _delete_container(self, host, name): - docker_cli = Client("tcp://{}:2375".format(host), timeout=1200, version='1.17') - if docker_cli.inspect_container(name)['State']: - docker_cli.remove_container(name, force=True) - - def run(self, name, image, entrypoint, command): # noqa - """Run a one-off command.""" - return self.fleet.run(name, image, entrypoint, command) - - def state(self, name): - """Display the given job's running state.""" - try: - for _ in xrange(POLL_ATTEMPTS): - if self.client.get_app(self._app_id(name)).tasks_running == 1: - return JobState.up - elif self.client.get_app(self._app_id(name)).tasks_running == 0: - return JobState.created - time.sleep(1) - except: - return JobState.destroyed - -SchedulerClient = MarathonHTTPClient diff --git a/controller/scheduler/swarm.py b/controller/scheduler/swarm.py deleted file mode 100644 index 6d4142f4d5..0000000000 --- a/controller/scheduler/swarm.py +++ /dev/null @@ -1,124 +0,0 @@ -import re -import time - -from django.conf import settings -from docker import Client - -from . 
import AbstractSchedulerClient -from .states import JobState - - -MATCH = re.compile( - r'(?P[a-z0-9-]+)_?(?Pv[0-9]+)?\.?(?P[a-z-_]+)?.(?P[0-9]+)') - - -class SwarmClient(AbstractSchedulerClient): - - def __init__(self, target, auth, options, pkey): - super(SchedulerClient, self).__init__(target, auth, options, pkey) - self.target = settings.SWARM_HOST - # single global connection - self.registry = settings.REGISTRY_HOST + ':' + settings.REGISTRY_PORT - self.docker_cli = Client("tcp://{}:2395".format(self.target), - timeout=1200, version='1.17') - - def create(self, name, image, command='', template=None, **kwargs): - """Create a new container.""" - cimage = self.registry + '/' + image - affinity = "affinity:container!=~/{}*/".format(re.split(r'_v\d.', name)[0]) - l = locals().copy() - l.update(re.match(MATCH, name).groupdict()) - mem = kwargs.get('memory', {}).get(l['c_type']) - if mem: - mem = mem.lower() - if mem[-2:-1].isalpha() and mem[-1].isalpha(): - mem = mem[:-1] - cpu = kwargs.get('cpu', {}).get(l['c_type']) - self.docker_cli.create_container(image=cimage, name=name, - command=command.encode('utf-8'), - mem_limit=mem, - cpu_shares=cpu, - environment=[affinity], - host_config={'PublishAllPorts': True}) - - def start(self, name): - """Start a container.""" - self.docker_cli.start(name) - - def stop(self, name): - """Stop a container.""" - self.docker_cli.stop(name) - - def destroy(self, name): - """Destroy a container.""" - self.stop(name) - self.docker_cli.remove_container(name) - - def run(self, name, image, entrypoint, command): - """Run a one-off command.""" - cimage = self.registry + '/' + image - # use affinity for nodes that already have the image - affinity = "affinity:image==~{}".format(cimage) - self.docker_cli.create_container(image=cimage, name=name, - command=command.encode('utf-8'), - environment=[affinity], - entrypoint=[entrypoint]) - time.sleep(2) - self.start(name) - rc = 0 - while (True): - if self._get_container_state(name) == 
JobState.created: - break - time.sleep(1) - try: - output = self.docker_cli.logs(name) - return rc, output - except: - rc = 1 - return rc, output - - def _get_container_state(self, name): - try: - if self.docker_cli.inspect_container(name)['State']['Running']: - return JobState.up - else: - return JobState.created - except: - return JobState.destroyed - - def state(self, name): - """Display the given job's running state.""" - try: - for _ in xrange(30): - return self._get_container_state(name) - time.sleep(1) - # FIXME (smothiki): should be able to send JobState.crashed - except KeyError: - return JobState.error - except RuntimeError: - return JobState.destroyed - - def _get_hostname(self, application_name): - hostname = settings.UNIT_HOSTNAME - if hostname == 'default': - return '' - elif hostname == 'application': - # replace underscore with dots, since underscore is not valid in DNS hostnames - dns_name = application_name.replace('_', '.') - return dns_name - elif hostname == 'server': - raise NotImplementedError - else: - raise RuntimeError('Unsupported hostname: ' + hostname) - - def _get_portbindings(self, image): - dictports = self.docker_cli.inspect_image(image)['ContainerConfig']['ExposedPorts'] - for port in dictports: - dictports[port] = None - return dictports - - def _get_ports(self, image): - dictports = self.docker_cli.inspect_image(image)['ContainerConfig']['ExposedPorts'] - return [int(port.split('/')[0]) for port in dictports] - -SchedulerClient = SwarmClient diff --git a/controller/templates/confd_settings.py b/controller/templates/confd_settings.py index fc2e29fb4a..0593d9da79 100644 --- a/controller/templates/confd_settings.py +++ b/controller/templates/confd_settings.py @@ -3,21 +3,13 @@ BUILDER_KEY = '{{ getv "/deis/controller/builderKey" }}' # scheduler settings -SCHEDULER_MODULE = 'scheduler.{{ if exists "/deis/controller/schedulerModule" }}{{ getv "/deis/controller/schedulerModule" }}{{ else }}fleet{{ end }}' -SCHEDULER_TARGET = '{{ if 
exists "/deis/controller/schedulerTarget" }}{{ getv "/deis/controller/schedulerTarget" }}{{ else }}/var/run/fleet.sock{{ end }}' +SCHEDULER_MODULE = 'scheduler.fleet' +SCHEDULER_TARGET = '/var/run/fleet.sock' try: SCHEDULER_OPTIONS = dict('{{ if exists "/deis/controller/schedulerOptions" }}{{ getv "/deis/controller/schedulerOptions" }}{{ else }}{}{{ end }}') except: SCHEDULER_OPTIONS = {} -# scheduler swarm manager host - -SWARM_HOST = '{{ if exists "/deis/scheduler/swarm/host" }}{{ getv "/deis/scheduler/swarm/host" }}{{ else }}127.0.0.1{{ end }}' - -MARATHON_HOST = '{{ if exists "/deis/scheduler/mesos/marathon" }}{{ getv "/deis/scheduler/mesos/marathon" }}{{ else }}127.0.0.1{{ end }}' - -K8S_MASTER = '{{ if exists "/deis/scheduler/k8s/master" }}{{ getv "/deis/scheduler/k8s/master" }}{{ else }}127.0.0.1{{ end }}' - # base64-encoded SSH private key to facilitate current version of "deis run" SSH_PRIVATE_KEY = """{{ if exists "/deis/platform/sshPrivateKey" }}{{ getv "/deis/platform/sshPrivateKey" }}{{ else }}""{{end}}""" diff --git a/controller/web/static/css/main.css b/controller/web/static/css/main.css index 2cf7c52cb2..48fce68ea3 100644 --- a/controller/web/static/css/main.css +++ b/controller/web/static/css/main.css @@ -514,6 +514,24 @@ pre, code pre { color: #000!important; background-color: #f5f5f5!important; } +.doc-content .version-warning { + padding: 10px; + margin-top 10px; + border: solid 4px #ff135a; +} +.doc-content .version-warning .version-warning-title { + font-size: 18px; + font-weight: bold; + line-height: 26px; + text-indent: 50px; +} +.doc-content .important .version-warning-title { + background: url(../img/glyphs.png) no-repeat 0 -81px; +} +.doc-content .version-warning a, .doc-content .version-warning a em, .doc-content .version-warning em { + font-style: normal!imporant; + color: #ff135a!important; +} .doc-content .admonition { padding: 10px; margin-bottom: 10px; diff --git a/controller/web/static/img/404.png 
b/controller/web/static/img/404.png index ca2492b2b3..8a89f36a04 100644 Binary files a/controller/web/static/img/404.png and b/controller/web/static/img/404.png differ diff --git a/controller/web/static/img/aws-ec2.png b/controller/web/static/img/aws-ec2.png index 1d27b83962..ae22f28f88 100644 Binary files a/controller/web/static/img/aws-ec2.png and b/controller/web/static/img/aws-ec2.png differ diff --git a/controller/web/static/img/blog-social.png b/controller/web/static/img/blog-social.png index a379db88a9..c0e970ebf7 100644 Binary files a/controller/web/static/img/blog-social.png and b/controller/web/static/img/blog-social.png differ diff --git a/controller/web/static/img/cover-image.png b/controller/web/static/img/cover-image.png index 83b7a0a64e..a16aabc156 100644 Binary files a/controller/web/static/img/cover-image.png and b/controller/web/static/img/cover-image.png differ diff --git a/controller/web/static/img/deis-graphic.png b/controller/web/static/img/deis-graphic.png index 7a42f13aa9..ba28118d2d 100644 Binary files a/controller/web/static/img/deis-graphic.png and b/controller/web/static/img/deis-graphic.png differ diff --git a/controller/web/static/img/deis_builder.png b/controller/web/static/img/deis_builder.png index d25c059af9..99961abdfe 100644 Binary files a/controller/web/static/img/deis_builder.png and b/controller/web/static/img/deis_builder.png differ diff --git a/controller/web/static/img/deis_logo.png b/controller/web/static/img/deis_logo.png index 8059bc9bdc..109f86148a 100644 Binary files a/controller/web/static/img/deis_logo.png and b/controller/web/static/img/deis_logo.png differ diff --git a/controller/web/static/img/favicon.png b/controller/web/static/img/favicon.png index 9d33fc3da1..77db90e908 100644 Binary files a/controller/web/static/img/favicon.png and b/controller/web/static/img/favicon.png differ diff --git a/controller/web/static/img/fork.png b/controller/web/static/img/fork.png index 427caa6ff9..a1d23de9ed 100644 Binary files 
a/controller/web/static/img/fork.png and b/controller/web/static/img/fork.png differ diff --git a/controller/web/static/img/glyphs.png b/controller/web/static/img/glyphs.png index 5f046512ee..e1e4424f22 100644 Binary files a/controller/web/static/img/glyphs.png and b/controller/web/static/img/glyphs.png differ diff --git a/controller/web/static/img/large-social.png b/controller/web/static/img/large-social.png index e954284192..636667c75d 100644 Binary files a/controller/web/static/img/large-social.png and b/controller/web/static/img/large-social.png differ diff --git a/controller/web/static/img/mag_glass.png b/controller/web/static/img/mag_glass.png index 511274ef00..3793ebd1af 100644 Binary files a/controller/web/static/img/mag_glass.png and b/controller/web/static/img/mag_glass.png differ diff --git a/controller/web/static/img/menu-logo.png b/controller/web/static/img/menu-logo.png index cd3b7786bd..4565589c03 100644 Binary files a/controller/web/static/img/menu-logo.png and b/controller/web/static/img/menu-logo.png differ diff --git a/controller/web/static/img/social.png b/controller/web/static/img/social.png index c25e100145..83f7ba08f6 100644 Binary files a/controller/web/static/img/social.png and b/controller/web/static/img/social.png differ diff --git a/database/Dockerfile b/database/Dockerfile index 412b4ef576..5d99c6222c 100644 --- a/database/Dockerfile +++ b/database/Dockerfile @@ -20,4 +20,4 @@ CMD ["/app/bin/boot"] EXPOSE 5432 ADD . 
/app -ENV DEIS_RELEASE 1.13.0-dev +ENV DEIS_RELEASE 1.13.4 diff --git a/database/Makefile b/database/Makefile index a609e38961..808c032895 100644 --- a/database/Makefile +++ b/database/Makefile @@ -34,7 +34,7 @@ run: install start dev-release: push set-image push: check-registry - docker tag -f $(IMAGE) $(DEV_IMAGE) + docker tag $(IMAGE) $(DEV_IMAGE) docker push $(DEV_IMAGE) set-image: check-deisctl diff --git a/database/bin/boot b/database/bin/boot index d66ba147eb..01d36b722b 100755 --- a/database/bin/boot +++ b/database/bin/boot @@ -75,7 +75,8 @@ if [[ "$(cat $PG_DATA_DIR/initialized 2> /dev/null)" != "$INIT_ID" ]]; then echo "database: no existing database found or it is outdated." # check if there are any backups -- if so, let's restore # we could probably do better than just testing number of lines -- one line is just a heading, meaning no backups - if [[ $(envdir /etc/wal-e.d/env wal-e --terse backup-list | wc -l) -gt "1" ]]; then + BACKUP_LIST=$(envdir /etc/wal-e.d/env wal-e --terse backup-list) # if this fails, this script will exit + if [[ $(echo "$BACKUP_LIST" | wc -l) -gt "1" ]]; then echo "database: restoring from backup..." rm -rf $PG_DATA_DIR sudo -u postgres envdir /etc/wal-e.d/env wal-e backup-fetch $PG_DATA_DIR LATEST diff --git a/database/build.sh b/database/build.sh index c58700a79e..084131cc04 100755 --- a/database/build.sh +++ b/database/build.sh @@ -25,19 +25,19 @@ apk add --update-cache \ python-dev # pv port. 
-curl http://dl-3.alpinelinux.org/alpine/edge/testing/x86_64/pv-1.6.0-r0.apk -o /tmp/pv-1.6.0-r0.apk -apk add /tmp/pv-1.6.0-r0.apk +curl https://s3-us-west-2.amazonaws.com/get-deis/pv-1.6.0-r1.apk -o /tmp/pv-1.6.0-r1.apk +apk add /tmp/pv-1.6.0-r1.apk /etc/init.d/postgresql stop || true # install pip -curl -sSL https://raw.githubusercontent.com/pypa/pip/7.0.3/contrib/get-pip.py | python - +curl -sSL https://bootstrap.pypa.io/get-pip.py | python - pip==8.1.1 # install wal-e pip install --disable-pip-version-check --no-cache-dir wal-e==0.8.1 oslo.config>=1.12.0 # python port of daemontools -pip install --disable-pip-version-check --no-cache-dir envdir +pip install --disable-pip-version-check --no-cache-dir envdir==0.7 mkdir -p /etc/wal-e.d/env /etc/postgresql/main /var/lib/postgresql diff --git a/deisctl/backend/fleet/fleet_test.go b/deisctl/backend/fleet/fleet_test.go index 7d3fa77ba8..6eef4fb1df 100644 --- a/deisctl/backend/fleet/fleet_test.go +++ b/deisctl/backend/fleet/fleet_test.go @@ -174,8 +174,6 @@ func (s *syncBuffer) Bytes() []byte { } func TestNewClient(t *testing.T) { - t.Parallel() - // set required flags Flags.Endpoint = "http://127.0.0.1:4001" diff --git a/deisctl/client/client.go b/deisctl/client/client.go index b70d62c71d..b5c391fd96 100644 --- a/deisctl/client/client.go +++ b/deisctl/client/client.go @@ -273,7 +273,7 @@ Usage: Options: -p --path= where to save unit files [default: $HOME/.deis/units] -t --tag= git tag, branch, or SHA to use when downloading unit files - [default: master] + [default: v1.13.4] ` // parse command-line arguments args, err := docopt.Parse(usage, argv, true, "", false) diff --git a/deisctl/cmd/cmd.go b/deisctl/cmd/cmd.go index ce5ed7d624..834845e8ef 100644 --- a/deisctl/cmd/cmd.go +++ b/deisctl/cmd/cmd.go @@ -23,11 +23,8 @@ const ( PlatformCommand string = "platform" // StatelessPlatformCommand is shorthand for the components except store-* and database. 
StatelessPlatformCommand string = "stateless-platform" - swarm string = "swarm" - mesos string = "mesos" // DefaultRouterMeshSize defines the default number of routers to be loaded when installing the platform. - DefaultRouterMeshSize uint8 = 3 - k8s string = "k8s" + DefaultRouterMeshSize uint8 = 3 ) // ListUnits prints a list of installed units. @@ -84,12 +81,6 @@ func Start(targets []string, b backend.Backend) error { return StartPlatform(b, false) case StatelessPlatformCommand: return StartPlatform(b, true) - case mesos: - return StartMesos(b) - case swarm: - return StartSwarm(b) - case k8s: - return StartK8s(b) } } var wg sync.WaitGroup @@ -195,12 +186,6 @@ func Stop(targets []string, b backend.Backend) error { return StopPlatform(b, false) case StatelessPlatformCommand: return StopPlatform(b, true) - case mesos: - return StopMesos(b) - case swarm: - return StopSwarm(b) - case k8s: - return StopK8s(b) } } @@ -296,12 +281,6 @@ func Install(targets []string, b backend.Backend, cb config.Backend, checkKeys f return InstallPlatform(b, cb, checkKeys, false) case StatelessPlatformCommand: return InstallPlatform(b, cb, checkKeys, true) - case mesos: - return InstallMesos(b) - case swarm: - return InstallSwarm(b) - case k8s: - return InstallK8s(b) } } var wg sync.WaitGroup @@ -360,12 +339,6 @@ func Uninstall(targets []string, b backend.Backend) error { return UninstallPlatform(b, false) case StatelessPlatformCommand: return UninstallPlatform(b, true) - case mesos: - return UninstallMesos(b) - case swarm: - return UnInstallSwarm(b) - case k8s: - return UnInstallK8s(b) } } diff --git a/deisctl/cmd/cmd_test.go b/deisctl/cmd/cmd_test.go index e558fc4341..c84d035a4f 100644 --- a/deisctl/cmd/cmd_test.go +++ b/deisctl/cmd/cmd_test.go @@ -277,19 +277,6 @@ func TestStartStatelessPlatform(t *testing.T) { } } -func TestStartSwarm(t *testing.T) { - t.Parallel() - - b := backendStub{} - expected := []string{"swarm-manager", "swarm-node"} - - Start([]string{"swarm"}, &b) - - if 
!reflect.DeepEqual(b.startedUnits, expected) { - t.Error(fmt.Errorf("Expected %v, Got %v", expected, b.startedUnits)) - } -} - func TestRollingRestart(t *testing.T) { t.Parallel() @@ -416,18 +403,6 @@ func TestStopStatelessPlatform(t *testing.T) { } } -func TestStopSwarm(t *testing.T) { - t.Parallel() - - b := backendStub{} - expected := []string{"swarm-node", "swarm-manager"} - Stop([]string{"swarm"}, &b) - - if !reflect.DeepEqual(b.stoppedUnits, expected) { - t.Error(fmt.Errorf("Expected %v, Got %v", expected, b.stoppedUnits)) - } -} - func TestRestart(t *testing.T) { t.Parallel() @@ -589,21 +564,6 @@ func TestInstallStatelessPlatform(t *testing.T) { } } -func TestInstallSwarm(t *testing.T) { - t.Parallel() - - b := backendStub{} - cb := mock.ConfigBackend{} - - expected := []string{"swarm-manager", "swarm-node"} - - Install([]string{"swarm"}, &b, &cb, fakeCheckKeys) - - if !reflect.DeepEqual(b.installedUnits, expected) { - t.Error(fmt.Errorf("Expected %v, Got %v", expected, b.installedUnits)) - } -} - func TestUninstall(t *testing.T) { t.Parallel() @@ -645,16 +605,3 @@ func TestUninstallStatelessPlatform(t *testing.T) { t.Error(fmt.Errorf("Expected %v, Got %v", expected, b.uninstalledUnits)) } } - -func TestUninstallSwarm(t *testing.T) { - t.Parallel() - - b := backendStub{} - expected := []string{"swarm-node", "swarm-manager"} - - Uninstall([]string{"swarm"}, &b) - - if !reflect.DeepEqual(b.uninstalledUnits, expected) { - t.Error(fmt.Errorf("Expected %v, Got %v", expected, b.uninstalledUnits)) - } -} diff --git a/deisctl/cmd/k8s.go b/deisctl/cmd/k8s.go deleted file mode 100644 index 42ae633766..0000000000 --- a/deisctl/cmd/k8s.go +++ /dev/null @@ -1,88 +0,0 @@ -package cmd - -import ( - "fmt" - "io" - "sync" - - "github.com/deis/deis/deisctl/backend" - "github.com/deis/deis/pkg/prettyprint" -) - -//InstallK8s Installs K8s -func InstallK8s(b backend.Backend) error { - var wg sync.WaitGroup - io.WriteString(Stdout, prettyprint.DeisIfy("Installing K8s...")) - 
fmt.Fprintln(Stdout, "K8s control plane...") - b.Create([]string{"kube-apiserver"}, &wg, Stdout, Stderr) - wg.Wait() - b.Create([]string{"kube-controller-manager", "kube-scheduler"}, &wg, Stdout, Stderr) - wg.Wait() - fmt.Fprintln(Stdout, "K8s data plane...") - b.Create([]string{"kube-kubelet"}, &wg, Stdout, Stderr) - wg.Wait() - fmt.Fprintln(Stdout, "K8s router mesh...") - b.Create([]string{"kube-proxy"}, &wg, Stdout, Stderr) - wg.Wait() - fmt.Fprintln(Stdout, "Done.\n ") - fmt.Fprintln(Stdout, "Please run `deisctl start k8s` to start K8s.") - return nil -} - -//StartK8s starts K8s Schduler -func StartK8s(b backend.Backend) error { - var wg sync.WaitGroup - io.WriteString(Stdout, prettyprint.DeisIfy("Starting K8s...")) - fmt.Fprintln(Stdout, "K8s control plane...") - b.Start([]string{"kube-apiserver"}, &wg, Stdout, Stderr) - wg.Wait() - b.Start([]string{"kube-controller-manager", "kube-scheduler"}, &wg, Stdout, Stderr) - wg.Wait() - fmt.Fprintln(Stdout, "K8s data plane...") - b.Start([]string{"kube-kubelet"}, &wg, Stdout, Stderr) - wg.Wait() - fmt.Fprintln(Stdout, "K8s router mesh...") - b.Start([]string{"kube-proxy"}, &wg, Stdout, Stderr) - wg.Wait() - fmt.Fprintln(Stdout, "Done.\n ") - fmt.Fprintln(Stdout, "Please run `deisctl config controller set schedulerModule=k8s` to use the K8s scheduler.") - return nil -} - -//StopK8s stops K8s -func StopK8s(b backend.Backend) error { - var wg sync.WaitGroup - io.WriteString(Stdout, prettyprint.DeisIfy("Stopping K8s...")) - fmt.Fprintln(Stdout, "K8s router mesh...") - b.Stop([]string{"kube-proxy"}, &wg, Stdout, Stderr) - wg.Wait() - fmt.Fprintln(Stdout, "K8s data plane...") - b.Stop([]string{"kube-kubelet"}, &wg, Stdout, Stderr) - wg.Wait() - fmt.Fprintln(Stdout, "K8s control plane...") - b.Stop([]string{"kube-controller-manager", "kube-scheduler"}, &wg, Stdout, Stderr) - wg.Wait() - b.Stop([]string{"kube-apiserver"}, &wg, Stdout, Stderr) - wg.Wait() - fmt.Fprintln(Stdout, "Done.\n ") - return nil -} - -//UnInstallK8s 
uninstall K8s -func UnInstallK8s(b backend.Backend) error { - var wg sync.WaitGroup - io.WriteString(Stdout, prettyprint.DeisIfy("Uninstalling K8s...")) - fmt.Fprintln(Stdout, "K8s router mesh...") - b.Destroy([]string{"kube-proxy"}, &wg, Stdout, Stderr) - wg.Wait() - fmt.Fprintln(Stdout, "K8s data plane...") - b.Destroy([]string{"kube-kubelet"}, &wg, Stdout, Stderr) - wg.Wait() - fmt.Fprintln(Stdout, "K8s control plane...") - b.Destroy([]string{"kube-controller-manager", "kube-scheduler"}, &wg, Stdout, Stderr) - wg.Wait() - b.Destroy([]string{"kube-apiserver"}, &wg, Stdout, Stderr) - wg.Wait() - fmt.Fprintln(Stdout, "Done.\n ") - return nil -} diff --git a/deisctl/cmd/mesos.go b/deisctl/cmd/mesos.go deleted file mode 100644 index 9590c5dc8e..0000000000 --- a/deisctl/cmd/mesos.go +++ /dev/null @@ -1,131 +0,0 @@ -package cmd - -import ( - "fmt" - "io" - "sync" - - "github.com/deis/deis/deisctl/backend" - "github.com/deis/deis/pkg/prettyprint" -) - -// InstallMesos loads all Mesos units for StartMesos -func InstallMesos(b backend.Backend) error { - - var wg sync.WaitGroup - - io.WriteString(Stdout, prettyprint.DeisIfy("Installing Mesos/Marathon...")) - - installMesosServices(b, &wg, Stdout, Stderr) - - wg.Wait() - - fmt.Fprintln(Stdout, "Done.\n ") - fmt.Fprintln(Stdout, "Please run `deisctl start mesos` to boot up Mesos.") - return nil -} - -func installMesosServices(b backend.Backend, wg *sync.WaitGroup, out, err io.Writer) { - - fmt.Fprintln(out, "Mesos/Marathon control plane...") - b.Create([]string{"zookeeper", "mesos-master"}, wg, out, err) - wg.Wait() - b.Create([]string{"mesos-marathon"}, wg, out, err) - wg.Wait() - - fmt.Fprintln(out, "Mesos/Marathon data plane...") - b.Create([]string{"mesos-slave"}, wg, out, err) - wg.Wait() -} - -// UninstallMesos unloads and uninstalls all Mesos component definitions -func UninstallMesos(b backend.Backend) error { - - var wg sync.WaitGroup - - io.WriteString(Stdout, prettyprint.DeisIfy("Uninstalling Mesos/Marathon...")) 
- - uninstallMesosServices(b, &wg, Stdout, Stderr) - - wg.Wait() - - fmt.Fprintln(Stdout, "Done.\n ") - return nil -} - -func uninstallMesosServices(b backend.Backend, wg *sync.WaitGroup, out, err io.Writer) error { - - fmt.Fprintln(out, "Mesos/Marathon data plane...") - b.Destroy([]string{"mesos-slave"}, wg, out, err) - wg.Wait() - - fmt.Fprintln(out, "Mesos/Marathon control plane...") - b.Destroy([]string{"mesos-marathon", "mesos-master", "zookeeper"}, wg, out, err) - wg.Wait() - - return nil -} - -// StartMesos activates all Mesos components. -func StartMesos(b backend.Backend) error { - - var wg sync.WaitGroup - - io.WriteString(Stdout, prettyprint.DeisIfy("Starting Mesos/Marathon...")) - - startMesosServices(b, &wg, Stdout, Stderr) - - wg.Wait() - - fmt.Fprintln(Stdout, "Done.\n ") - fmt.Fprintln(Stdout, "Please use `deisctl config controller set schedulerModule=mesos_marathon`") - return nil -} - -func startMesosServices(b backend.Backend, wg *sync.WaitGroup, out, err io.Writer) { - - fmt.Fprintln(out, "Mesos/Marathon control plane...") - b.Start([]string{"zookeeper"}, wg, out, err) - wg.Wait() - b.Start([]string{"mesos-master"}, wg, out, err) - wg.Wait() - b.Start([]string{"mesos-marathon"}, wg, out, err) - wg.Wait() - - fmt.Fprintln(out, "Mesos/Marathon data plane...") - b.Start([]string{"mesos-slave"}, wg, out, err) - wg.Wait() - - wg.Wait() -} - -// StopMesos deactivates all Mesos components. 
-func StopMesos(b backend.Backend) error { - - var wg sync.WaitGroup - - io.WriteString(Stdout, prettyprint.DeisIfy("Stopping Mesos/Marathon...")) - - stopMesosServices(b, &wg, Stdout, Stderr) - - wg.Wait() - - fmt.Fprintln(Stdout, "Done.\n ") - fmt.Fprintln(Stdout, "Please run `deisctl start mesos` to restart Mesos.") - return nil -} - -func stopMesosServices(b backend.Backend, wg *sync.WaitGroup, out, err io.Writer) { - - fmt.Fprintln(out, "Mesos/Marathon data plane...") - b.Stop([]string{"mesos-slave"}, wg, out, err) - wg.Wait() - - fmt.Fprintln(out, "Mesos/Marathon control plane...") - b.Stop([]string{"mesos-marathon"}, wg, out, err) - wg.Wait() - b.Stop([]string{"mesos-master"}, wg, out, err) - wg.Wait() - b.Stop([]string{"zookeeper"}, wg, out, err) - wg.Wait() -} diff --git a/deisctl/cmd/swarm.go b/deisctl/cmd/swarm.go deleted file mode 100644 index a1fc314c74..0000000000 --- a/deisctl/cmd/swarm.go +++ /dev/null @@ -1,70 +0,0 @@ -package cmd - -import ( - "fmt" - "io" - "sync" - - "github.com/deis/deis/deisctl/backend" - "github.com/deis/deis/pkg/prettyprint" -) - -//InstallSwarm Installs swarm -func InstallSwarm(b backend.Backend) error { - var wg sync.WaitGroup - io.WriteString(Stdout, prettyprint.DeisIfy("Installing Swarm...")) - fmt.Fprintln(Stdout, "Swarm control plane...") - b.Create([]string{"swarm-manager"}, &wg, Stdout, Stderr) - wg.Wait() - fmt.Fprintln(Stdout, "Swarm data plane...") - b.Create([]string{"swarm-node"}, &wg, Stdout, Stderr) - wg.Wait() - fmt.Fprintln(Stdout, "Done.\n ") - fmt.Fprintln(Stdout, "Please run `deisctl start swarm` to start swarm.") - return nil -} - -//StartSwarm starts Swarm Schduler -func StartSwarm(b backend.Backend) error { - var wg sync.WaitGroup - io.WriteString(Stdout, prettyprint.DeisIfy("Starting Swarm...")) - fmt.Fprintln(Stdout, "Swarm control plane...") - b.Start([]string{"swarm-manager"}, &wg, Stdout, Stderr) - wg.Wait() - fmt.Fprintln(Stdout, "Swarm data plane...") - b.Start([]string{"swarm-node"}, &wg, 
Stdout, Stderr) - wg.Wait() - fmt.Fprintln(Stdout, "Done.\n ") - fmt.Fprintln(Stdout, "Please run `deisctl config controller set schedulerModule=swarm` to use the swarm scheduler.") - return nil -} - -//StopSwarm stops swarm -func StopSwarm(b backend.Backend) error { - - var wg sync.WaitGroup - io.WriteString(Stdout, prettyprint.DeisIfy("Stopping Swarm...")) - fmt.Fprintln(Stdout, "Swarm data plane...") - b.Stop([]string{"swarm-node"}, &wg, Stdout, Stderr) - wg.Wait() - fmt.Fprintln(Stdout, "Swarm control plane...") - b.Stop([]string{"swarm-manager"}, &wg, Stdout, Stderr) - wg.Wait() - - fmt.Fprintln(Stdout, "Done.\n ") - return nil -} - -//UnInstallSwarm uninstall Swarm -func UnInstallSwarm(b backend.Backend) error { - var wg sync.WaitGroup - io.WriteString(Stdout, prettyprint.DeisIfy("Uninstalling Swarm...")) - fmt.Fprintln(Stdout, "Swarm data plane...") - b.Destroy([]string{"swarm-node"}, &wg, Stdout, Stderr) - wg.Wait() - fmt.Fprintln(Stdout, "Swarm control plane...") - b.Destroy([]string{"swarm-manager"}, &wg, Stdout, Stderr) - wg.Wait() - fmt.Fprintln(Stdout, "Done.\n ") - return nil -} diff --git a/deisctl/deis-version b/deisctl/deis-version index a4ab692a5f..80138e7146 100644 --- a/deisctl/deis-version +++ b/deisctl/deis-version @@ -1 +1 @@ -1.13.0-dev +1.13.4 diff --git a/deisctl/units/decorators/deis-kube-apiserver.service.decorator b/deisctl/units/decorators/deis-kube-apiserver.service.decorator deleted file mode 100644 index 1f5e20ea85..0000000000 --- a/deisctl/units/decorators/deis-kube-apiserver.service.decorator +++ /dev/null @@ -1,3 +0,0 @@ - -[X-Fleet] -MachineMetadata="controlPlane=true" diff --git a/deisctl/units/decorators/deis-kube-controller-manager.service.decorator b/deisctl/units/decorators/deis-kube-controller-manager.service.decorator deleted file mode 100644 index 1f5e20ea85..0000000000 --- a/deisctl/units/decorators/deis-kube-controller-manager.service.decorator +++ /dev/null @@ -1,3 +0,0 @@ - -[X-Fleet] 
-MachineMetadata="controlPlane=true" diff --git a/deisctl/units/decorators/deis-kube-kubelet.service.decorator b/deisctl/units/decorators/deis-kube-kubelet.service.decorator deleted file mode 100644 index 9ba31b5294..0000000000 --- a/deisctl/units/decorators/deis-kube-kubelet.service.decorator +++ /dev/null @@ -1 +0,0 @@ -MachineMetadata="dataPlane=true" diff --git a/deisctl/units/decorators/deis-kube-proxy.service.decorator b/deisctl/units/decorators/deis-kube-proxy.service.decorator deleted file mode 100644 index b19daa48a1..0000000000 --- a/deisctl/units/decorators/deis-kube-proxy.service.decorator +++ /dev/null @@ -1 +0,0 @@ -MachineMetadata="routerMesh=true" diff --git a/deisctl/units/decorators/deis-kube-scheduler.service.decorator b/deisctl/units/decorators/deis-kube-scheduler.service.decorator deleted file mode 100644 index 1f5e20ea85..0000000000 --- a/deisctl/units/decorators/deis-kube-scheduler.service.decorator +++ /dev/null @@ -1,3 +0,0 @@ - -[X-Fleet] -MachineMetadata="controlPlane=true" diff --git a/deisctl/units/decorators/deis-mesos-marathon.service.decorator b/deisctl/units/decorators/deis-mesos-marathon.service.decorator deleted file mode 100644 index 8e4d049edb..0000000000 --- a/deisctl/units/decorators/deis-mesos-marathon.service.decorator +++ /dev/null @@ -1,2 +0,0 @@ - -MachineMetadata="controlPlane=true" diff --git a/deisctl/units/decorators/deis-mesos-master.service.decorator b/deisctl/units/decorators/deis-mesos-master.service.decorator deleted file mode 100644 index 1f5e20ea85..0000000000 --- a/deisctl/units/decorators/deis-mesos-master.service.decorator +++ /dev/null @@ -1,3 +0,0 @@ - -[X-Fleet] -MachineMetadata="controlPlane=true" diff --git a/deisctl/units/decorators/deis-mesos-slave.service.decorator b/deisctl/units/decorators/deis-mesos-slave.service.decorator deleted file mode 100644 index 59b20a4e57..0000000000 --- a/deisctl/units/decorators/deis-mesos-slave.service.decorator +++ /dev/null @@ -1,2 +0,0 @@ - 
-MachineMetadata="dataPlane=true" diff --git a/deisctl/units/decorators/deis-swarm-manager.service.decorator b/deisctl/units/decorators/deis-swarm-manager.service.decorator deleted file mode 100644 index 1f5e20ea85..0000000000 --- a/deisctl/units/decorators/deis-swarm-manager.service.decorator +++ /dev/null @@ -1,3 +0,0 @@ - -[X-Fleet] -MachineMetadata="controlPlane=true" diff --git a/deisctl/units/decorators/deis-swarm-node.service.decorator b/deisctl/units/decorators/deis-swarm-node.service.decorator deleted file mode 100644 index 9ba31b5294..0000000000 --- a/deisctl/units/decorators/deis-swarm-node.service.decorator +++ /dev/null @@ -1 +0,0 @@ -MachineMetadata="dataPlane=true" diff --git a/deisctl/units/decorators/deis-zookeeper.service.decorator b/deisctl/units/decorators/deis-zookeeper.service.decorator deleted file mode 100644 index 8e4d049edb..0000000000 --- a/deisctl/units/decorators/deis-zookeeper.service.decorator +++ /dev/null @@ -1,2 +0,0 @@ - -MachineMetadata="controlPlane=true" diff --git a/deisctl/units/deis-builder.service b/deisctl/units/deis-builder.service index 99b442ad8c..2da889b24c 100644 --- a/deisctl/units/deis-builder.service +++ b/deisctl/units/deis-builder.service @@ -9,7 +9,7 @@ ExecStartPre=/bin/sh -c "IMAGE=alpine:3.2 && docker inspect deis-builder-data >/ ExecStartPre=/bin/sh -c "IMAGE=`/run/deis/bin/get_image /deis/builder` && docker history $IMAGE >/dev/null 2>&1 || flock -w 1200 /var/run/lock/alpine-pull docker pull $IMAGE" ExecStartPre=/bin/sh -c "docker inspect deis-builder >/dev/null 2>&1 && docker rm -f deis-builder || true" ExecStartPre=-/bin/sh -c "/sbin/losetup -f" -ExecStart=/bin/sh -c "IMAGE=`/run/deis/bin/get_image /deis/builder` && docker run --name deis-builder --rm -p 2223:2223 --volumes-from=deis-builder-data -c 800 -e EXTERNAL_PORT=2223 -e HOST=$COREOS_PRIVATE_IPV4 --privileged -v /etc/environment_proxy:/etc/environment_proxy $IMAGE" +ExecStart=/bin/sh -c "IMAGE=`/run/deis/bin/get_image /deis/builder` && docker run 
--name deis-builder --rm -p 2223:2223 --volumes-from=deis-builder-data --cpu-shares 800 -e EXTERNAL_PORT=2223 -e HOST=$COREOS_PRIVATE_IPV4 --privileged -v /etc/environment_proxy:/etc/environment_proxy $IMAGE" ExecStartPost=/bin/sh -c "echo 'Waiting for builder on 2223/tcp...' && until ncat $COREOS_PRIVATE_IPV4 2223 --exec '/usr/bin/echo dummy-value' >/dev/null 2>&1; do sleep 1; done" ExecStop=-/usr/bin/docker stop deis-builder Restart=on-failure diff --git a/deisctl/units/deis-kube-apiserver.service b/deisctl/units/deis-kube-apiserver.service deleted file mode 100644 index 85353bbf05..0000000000 --- a/deisctl/units/deis-kube-apiserver.service +++ /dev/null @@ -1,33 +0,0 @@ -[Unit] -Description=Kubernetes API Server -Documentation=https://github.com/GoogleCloudPlatform/kubernetes -Requires=fleet.service docker.service flanneld.service -After=fleet.service docker.service flanneld.service - -[Service] -EnvironmentFile=/etc/environment -ExecStartPre=-/bin/sh -c "etcdctl get /deis/scheduler/k8s/master >/dev/null 2>&1 || etcdctl mk /deis/scheduler/k8s/master" -ExecStartPre=/bin/sh -c "etcdctl set /deis/scheduler/k8s/master $COREOS_PRIVATE_IPV4" -ExecStartPre=/bin/bash -c "/opt/bin/download-k8s-binary kube-apiserver" -ExecStartPre=/bin/bash -c "if etcdctl get /kube-serviceaccount >/dev/null 2>&1; then etcdctl get /kube-serviceaccount > /opt/bin/kube-serviceaccount.key; else /bin/openssl genrsa -out /opt/bin/kube-serviceaccount.key 2048 2>/dev/null; etcdctl set /kube-serviceaccount < /opt/bin/kube-serviceaccount.key; fi" -ExecStart=/bin/bash -c "/opt/bin/kube-apiserver \ - --service_account_key_file=/opt/bin/kube-serviceaccount.key \ - --service_account_lookup=false \ - --admission_control=NamespaceLifecycle,NamespaceAutoProvision,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota \ - --allow_privileged=true \ - --insecure_bind_address=0.0.0.0 \ - --insecure_port=8080 \ - --kubelet_https=true \ - --secure_port=6443 \ - --service-cluster-ip-range=10.100.0.0/16 \ - 
--etcd_servers=$(curl -s 127.0.0.1:4001/v2/machines | sed 's/ //g') \ - --public_address_override=${COREOS_PRIVATE_IPV4} \ - --logtostderr=true \ - --runtime_config=api/v1" -ExecStartPost=/bin/bash -c "fleetctl stop deis-kube-scheduler deis-kube-controller-manager deis-kube-kubelet deis-kube-proxy; sleep 2; fleetctl start deis-kube-scheduler deis-kube-controller-manager deis-kube-kubelet deis-kube-proxy" -Restart=always -RestartSec=10 -SuccessExitStatus=2 - -[Install] -WantedBy=multi-user.target diff --git a/deisctl/units/deis-kube-controller-manager.service b/deisctl/units/deis-kube-controller-manager.service deleted file mode 100644 index aae3f6d2bc..0000000000 --- a/deisctl/units/deis-kube-controller-manager.service +++ /dev/null @@ -1,21 +0,0 @@ -[Unit] -Description=Kubernetes Controller Manager -Documentation=https://github.com/GoogleCloudPlatform/kubernetes -Requires=deis-kube-apiserver.service -After=deis-kube-apiserver.service - -[Service] -EnvironmentFile=/etc/environment -ExecStartPre=/bin/bash -c "/opt/bin/download-k8s-binary kube-controller-manager" -ExecStartPre=/bin/bash -c "/opt/bin/wupiao $(/usr/bin/etcdctl get /deis/scheduler/k8s/master):8080" -ExecStart=/opt/bin/kube-controller-manager \ - --service_account_private_key_file=/opt/bin/kube-serviceaccount.key \ - --master=127.0.0.1:8080 \ - --pod_eviction_timeout=30s \ - --logtostderr=true -Restart=always -RestartSec=10 -SuccessExitStatus=2 - -[X-Fleet] -MachineOf=deis-kube-apiserver.service diff --git a/deisctl/units/deis-kube-kubelet.service b/deisctl/units/deis-kube-kubelet.service deleted file mode 100644 index f594833ec3..0000000000 --- a/deisctl/units/deis-kube-kubelet.service +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description=Kubernetes Kubelet -Documentation=https://github.com/GoogleCloudPlatform/kubernetes - -[Service] -EnvironmentFile=/etc/environment -ExecStartPre=/bin/bash -c "/opt/bin/download-k8s-binary kubelet" -ExecStartPre=/bin/bash -c "/opt/bin/wupiao $(/usr/bin/etcdctl get 
/deis/scheduler/k8s/master):8080" -ExecStartPre=/usr/bin/mkdir -p /opt/kubernetes/manifests/ -ExecStart=/bin/bash -c '/opt/bin/kubelet --address=0.0.0.0 --port=10250 --hostname_override=$COREOS_PRIVATE_IPV4 --api_servers=`/usr/bin/etcdctl get /deis/scheduler/k8s/master`:8080 --allow_privileged=true --cluster_dns=10.100.0.10 --cluster_domain=cluster.local --logtostderr=true --config=/opt/kubernetes/manifests/ --healthz_bind_address=0.0.0.0 --healthz_port=10248' -Restart=always -RestartSec=10 -WorkingDirectory=/root/ -SuccessExitStatus=2 - -[X-Fleet] -Global=true diff --git a/deisctl/units/deis-kube-proxy.service b/deisctl/units/deis-kube-proxy.service deleted file mode 100644 index 1f138de425..0000000000 --- a/deisctl/units/deis-kube-proxy.service +++ /dev/null @@ -1,15 +0,0 @@ -[Unit] -Description=Kubernetes Proxy -Documentation=https://github.com/GoogleCloudPlatform/kubernetes - -[Service] -EnvironmentFile=/etc/environment -ExecStartPre=/bin/bash -c "/opt/bin/download-k8s-binary kube-proxy" -ExecStartPre=/bin/bash -c "/opt/bin/wupiao $(/usr/bin/etcdctl get /deis/scheduler/k8s/master):8080" -ExecStart=/bin/bash -c '/opt/bin/kube-proxy --master=`/usr/bin/etcdctl get /deis/scheduler/k8s/master`:8080 --logtostderr=true --healthz-bind-address=0.0.0.0' -Restart=always -RestartSec=10 -SuccessExitStatus=2 - -[X-Fleet] -Global=true diff --git a/deisctl/units/deis-kube-scheduler.service b/deisctl/units/deis-kube-scheduler.service deleted file mode 100644 index d8f1f46dfb..0000000000 --- a/deisctl/units/deis-kube-scheduler.service +++ /dev/null @@ -1,19 +0,0 @@ -[Unit] -Description=Kubernetes Scheduler -Documentation=https://github.com/GoogleCloudPlatform/kubernetes -Requires=deis-kube-apiserver.service -After=deis-kube-apiserver.service - -[Service] -EnvironmentFile=/etc/environment -ExecStartPre=/bin/bash -c "/opt/bin/download-k8s-binary kube-scheduler" -ExecStartPre=/bin/bash -c "/opt/bin/wupiao $(/usr/bin/etcdctl get /deis/scheduler/k8s/master):8080" 
-ExecStart=/opt/bin/kube-scheduler \ - --master=127.0.0.1:8080 \ - --policy-config-file=/opt/bin/scheduler-policy.json -Restart=always -RestartSec=10 -SuccessExitStatus=2 - -[X-Fleet] -MachineOf=deis-kube-apiserver.service diff --git a/deisctl/units/deis-mesos-marathon.service b/deisctl/units/deis-mesos-marathon.service deleted file mode 100644 index 55de393857..0000000000 --- a/deisctl/units/deis-mesos-marathon.service +++ /dev/null @@ -1,23 +0,0 @@ -[Unit] -Description=Mesosphere Marathon -After=docker.service -Requires=docker.service - -[Service] -EnvironmentFile=/etc/environment -Restart=on-failure -RestartSec=20 -TimeoutStartSec=0 -ExecStartPre=-/bin/sh -c "etcdctl get /deis/scheduler/mesos/marathon >/dev/null 2>&1 || etcdctl mk /deis/scheduler/mesos/marathon" -ExecStartPre=/bin/sh -c "etcdctl set /deis/scheduler/mesos/marathon $COREOS_PRIVATE_IPV4" -ExecStartPre=/bin/sh -c "IMAGE=`/run/deis/bin/get_image /deis/mesos-marathon` && docker history $IMAGE >/dev/null 2>&1 || docker pull $IMAGE" -ExecStartPre=-/usr/bin/docker kill deis-mesos-marathon -ExecStartPre=-/usr/bin/docker rm deis-mesos-marathon -ExecStart=/usr/bin/sh -c "IMAGE=`/run/deis/bin/get_image /deis/mesos-marathon` && docker run --name=deis-mesos-marathon --net=host -e HOST=$COREOS_PRIVATE_IPV4 $IMAGE" -ExecStop=-/usr/bin/docker stop deis-mesos-marathon - -[Install] -WantedBy=multi-user.target - -[X-Fleet] -MachineOf=deis-mesos-master.service diff --git a/deisctl/units/deis-mesos-master.service b/deisctl/units/deis-mesos-master.service deleted file mode 100644 index 4c837c8cfb..0000000000 --- a/deisctl/units/deis-mesos-master.service +++ /dev/null @@ -1,19 +0,0 @@ -[Unit] -Description=Mesos master -After=docker.service -Requires=docker.service - -[Service] -EnvironmentFile=/etc/environment -Restart=on-failure -RestartSec=20 -TimeoutStartSec=0 -ExecStartPre=-/usr/bin/docker kill deis-mesos-master -ExecStartPre=-/usr/bin/docker rm deis-mesos-master -ExecStartPre=/bin/sh -c "docker inspect 
deis-mesos-master-data >/dev/null 2>&1 || docker run --name deis-mesos-master-data -v /tmp/mesos-master alpine:3.2 /bin/true" -ExecStartPre=/bin/sh -c "IMAGE=`/run/deis/bin/get_image /deis/mesos-master` && docker history $IMAGE >/dev/null 2>&1 || docker pull $IMAGE" -ExecStart=/usr/bin/sh -c "IMAGE=`/run/deis/bin/get_image /deis/mesos-master` && docker run --volumes-from=deis-mesos-master-data --name=deis-mesos-master --privileged --net=host -e HOST=$COREOS_PRIVATE_IPV4 $IMAGE" -ExecStop=-/usr/bin/docker stop deis-mesos-master - -[Install] -WantedBy=multi-user.target diff --git a/deisctl/units/deis-mesos-slave.service b/deisctl/units/deis-mesos-slave.service deleted file mode 100644 index a7203d1301..0000000000 --- a/deisctl/units/deis-mesos-slave.service +++ /dev/null @@ -1,21 +0,0 @@ -[Unit] -Description=Mesos slave -After=docker.service -Requires=docker.service - -[Service] -EnvironmentFile=/etc/environment -Restart=on-failure -RestartSec=20 -TimeoutStartSec=0 -ExecStartPre=-/usr/bin/docker kill deis-mesos-slave -ExecStartPre=-/usr/bin/docker rm deis-mesos-slave -ExecStartPre=/bin/sh -c "IMAGE=`/run/deis/bin/get_image /deis/mesos-slave` && docker history $IMAGE >/dev/null 2>&1 || docker pull $IMAGE" -ExecStart=/usr/bin/sh -c "IMAGE=`/run/deis/bin/get_image /deis/mesos-slave` && docker run --name=deis-mesos-slave --net=host --privileged -e HOST=$COREOS_PRIVATE_IPV4 -v /sys:/sys -v /usr/bin/docker:/usr/bin/docker:ro -v /var/run/docker.sock:/var/run/docker.sock -v /lib64/libdevmapper.so.1.02:/lib/libdevmapper.so.1.02:ro $IMAGE" -ExecStop=-/usr/bin/docker stop deis-mesos-slave - -[Install] -WantedBy=multi-user.target - -[X-Fleet] -Global=true diff --git a/deisctl/units/deis-router.service b/deisctl/units/deis-router.service index 325188159c..e67f4dd8c2 100644 --- a/deisctl/units/deis-router.service +++ b/deisctl/units/deis-router.service @@ -4,7 +4,6 @@ Description=deis-router [Service] EnvironmentFile=/etc/environment TimeoutStartSec=20m 
-ExecStartPre=-/usr/bin/etcdctl mkdir /registry/services/ >/dev/null 2>&1 ExecStartPre=/bin/sh -c "IMAGE=`/run/deis/bin/get_image /deis/router` && docker history $IMAGE >/dev/null 2>&1 || flock -w 1200 /var/run/lock/alpine-pull docker pull $IMAGE" ExecStartPre=/bin/sh -c "docker inspect deis-router >/dev/null 2>&1 && docker rm -f deis-router || true" ExecStart=/bin/sh -c "IMAGE=`/run/deis/bin/get_image /deis/router` && docker run --name deis-router --rm -p 80:80 -p 2222:2222 -p 443:443 -p 9090:9090 -e EXTERNAL_PORT=80 -e HOST=$COREOS_PRIVATE_IPV4 $IMAGE" diff --git a/deisctl/units/deis-store-volume.service b/deisctl/units/deis-store-volume.service index f4b40a3b71..e58d5b0214 100644 --- a/deisctl/units/deis-store-volume.service +++ b/deisctl/units/deis-store-volume.service @@ -5,7 +5,7 @@ Description=deis-store-volume EnvironmentFile=/etc/environment ExecStartPre=/usr/bin/mkdir -p /var/lib/deis/store ExecStartPre=/bin/sh -c "echo waiting for store-monitor... && until etcdctl get /deis/store/monSetupComplete >/dev/null 2>&1; do sleep 2; done" -ExecStartPre=/bin/bash -c "HOSTS=`etcdctl ls /deis/store/hosts | cut -d/ -f5 | awk '{if(NR == 1) {printf $0} else {printf \",\"$0}}'` && cat /proc/mounts |grep '/var/lib/deis/store' || mount -t ceph $HOSTS:/ /var/lib/deis/store -o name=admin,secret=`etcdctl get /deis/store/adminKeyring | grep 'key =' | cut -d' ' -f3`" +ExecStartPre=/bin/bash -c "HOSTS=`etcdctl ls /deis/store/hosts | cut -d/ -f5 | awk '{if(NR == 1) {printf $0} else {printf \",\"$0}}'` && cat /proc/mounts |grep '/var/lib/deis/store' || mount -t ceph $HOSTS:/ /var/lib/deis/store -o name=admin,secret=`etcdctl get /deis/store/adminKeyring | grep 'key =' | cut -d' ' -f3`,context=system_u:object_r:tmp_t:s0" ExecStart=/usr/bin/tail -f /dev/null ExecStartPost=/bin/sh -c "test -d /var/lib/deis/store/logs || mkdir -p /var/lib/deis/store/logs" ExecStopPost=-/usr/bin/umount /var/lib/deis/store diff --git a/deisctl/units/deis-swarm-manager.service 
b/deisctl/units/deis-swarm-manager.service deleted file mode 100644 index fc8705aa4f..0000000000 --- a/deisctl/units/deis-swarm-manager.service +++ /dev/null @@ -1,15 +0,0 @@ -[Unit] -Description=deis-swarm-manager - -[Service] -EnvironmentFile=/etc/environment -TimeoutStartSec=20m -ExecStartPre=/bin/sh -c "IMAGE=`/run/deis/bin/get_image /deis/swarm` && docker history $IMAGE >/dev/null 2>&1 || docker pull $IMAGE" -ExecStartPre=/bin/sh -c "docker inspect deis-swarm-manager >/dev/null 2>&1 && docker rm -f deis-swarm-manager >/dev/null 2>&1 || true" -ExecStart=/bin/sh -c "IMAGE=`/run/deis/bin/get_image /deis/swarm` && docker run --name deis-swarm-manager --rm -p 2395:2375 -e EXTERNAL_PORT=2395 -e HOST=$COREOS_PRIVATE_IPV4 -v /etc/environment_proxy:/etc/environment_proxy $IMAGE manage" -ExecStop=-/usr/bin/docker stop deis-swarm-manager -Restart=on-failure -RestartSec=5 - -[Install] -WantedBy=multi-user.target diff --git a/deisctl/units/deis-swarm-node.service b/deisctl/units/deis-swarm-node.service deleted file mode 100644 index 7745a7a03e..0000000000 --- a/deisctl/units/deis-swarm-node.service +++ /dev/null @@ -1,18 +0,0 @@ -[Unit] -Description=deis-swarm-node - -[Service] -EnvironmentFile=/etc/environment -TimeoutStartSec=20m -ExecStartPre=/bin/sh -c "IMAGE=`/run/deis/bin/get_image /deis/swarm` && docker history $IMAGE >/dev/null 2>&1 || docker pull $IMAGE" -ExecStartPre=/bin/sh -c "docker inspect deis-swarm-node >/dev/null 2>&1 && docker rm -f deis-swarm-node >/dev/null 2>&1 || true" -ExecStart=/bin/sh -c "IMAGE=`/run/deis/bin/get_image /deis/swarm` && docker run --name deis-swarm-node --rm -e HOST=$COREOS_PRIVATE_IPV4 -v /etc/environment_proxy:/etc/environment_proxy $IMAGE join" -ExecStop=-/usr/bin/docker stop deis-swarm-node -Restart=on-failure -RestartSec=5 - -[Install] -WantedBy=multi-user.target - -[X-Fleet] -Global=true diff --git a/deisctl/units/deis-zookeeper.service b/deisctl/units/deis-zookeeper.service deleted file mode 100644 index a0f050b9a3..0000000000 
--- a/deisctl/units/deis-zookeeper.service +++ /dev/null @@ -1,22 +0,0 @@ -[Unit] -Description=Zookeeper -After=docker.service -Requires=docker.service - -[Service] -EnvironmentFile=/etc/environment -Restart=on-failure -RestartSec=20 -TimeoutStartSec=0 -ExecStartPre=/bin/sh -c "docker inspect zookeeper-data >/dev/null 2>&1 || docker run --name zookeeper-data -v /opt/zookeeper-data alpine:3.2 /bin/true" -ExecStartPre=-/usr/bin/docker kill deis-zookeeper -ExecStartPre=-/usr/bin/docker rm deis-zookeeper -ExecStartPre=/bin/sh -c "IMAGE=`/run/deis/bin/get_image /deis/zookeeper` && docker history $IMAGE >/dev/null 2>&1 || docker pull $IMAGE" -ExecStart=/bin/sh -c "IMAGE=`/run/deis/bin/get_image /deis/zookeeper` && docker run -e EXTERNAL_PORT=2181 -e HOST=$COREOS_PRIVATE_IPV4 -e LOG_LEVEL=debug --net=host --rm --name deis-zookeeper --volumes-from=zookeeper-data $IMAGE" -ExecStop=-/usr/bin/docker stop deis-zookeeper - -[Install] -WantedBy=multi-user.target - -[X-Fleet] -Global=true diff --git a/deisctl/units/units.go b/deisctl/units/units.go index df0c1ab30a..05f67412de 100644 --- a/deisctl/units/units.go +++ b/deisctl/units/units.go @@ -16,17 +16,6 @@ var Names = []string{ "deis-store-metadata", "deis-store-monitor", "deis-store-volume", - "deis-swarm-manager", - "deis-swarm-node", - "deis-mesos-marathon", - "deis-mesos-master", - "deis-mesos-slave", - "deis-zookeeper", - "deis-kube-apiserver", - "deis-kube-controller-manager", - "deis-kube-kubelet", - "deis-kube-proxy", - "deis-kube-scheduler", } // URL is the GitHub url where these units can be refreshed from diff --git a/docs/_includes/_get-the-source.rst b/docs/_includes/_get-the-source.rst index 94bac92d8b..220a15f6e7 100644 --- a/docs/_includes/_get-the-source.rst +++ b/docs/_includes/_get-the-source.rst @@ -6,7 +6,7 @@ this documentation. 
Download an archive file from the `releases page`_, or use $ git clone https://github.com/deis/deis.git $ cd deis - $ git checkout v1.12.2 + $ git checkout v1.13.4 Check out the latest Deis release, rather than using the default (master). diff --git a/docs/customizing_deis/choosing-a-scheduler.rst b/docs/customizing_deis/choosing-a-scheduler.rst deleted file mode 100644 index 20757626ba..0000000000 --- a/docs/customizing_deis/choosing-a-scheduler.rst +++ /dev/null @@ -1,248 +0,0 @@ -:title: Choosing a Scheduler -:description: How to choose a scheduler backend for Deis. - - -.. _choosing_a_scheduler: - -Choosing a Scheduler -==================== - -The :ref:`scheduler` creates, starts, stops, and destroys each :ref:`container` -of your app. For example, a command such as ``deis scale web=3`` tells the -scheduler to run three containers from the Docker image for your app. - -Deis defaults to using the :ref:`fleet_scheduler`. Tech previews of schedulers based on -:ref:`Kubernetes `, :ref:`Mesos with Marathon `, and -:ref:`Swarm ` are available for testing. - -.. note:: - - If you are using a scheduler other than fleet, app containers will not be rescheduled if - deis-registry is unavailable. For more information, see `deis-registry issue 3619`_. - -Settings set by scheduler -------------------------- - -The following etcd keys are set by the scheduler module of the controller component. - -Some keys will exist only if a particular ``schedulerModule`` backend is enabled. 
- -=================================== ========================================================== -setting description -=================================== ========================================================== -/deis/scheduler/swarm/host the swarm manager's host IP address -/deis/scheduler/swarm/node used to identify other nodes in the cluster -/deis/scheduler/mesos/marathon used to identify Marathon framework's host IP address -/deis/scheduler/k8s/master used to identify host IP address of kubernetes ApiService -=================================== ========================================================== - - -Settings used by scheduler --------------------------- - -The following etcd keys are used by the scheduler module of the controller component. - -==================================== =============================================== -setting description -==================================== =============================================== -/deis/controller/schedulerModule scheduler backend, either "fleet" or "swarm" or - "mesos_marathon" or "k8s" (default: "fleet") -==================================== =============================================== - -.. _fleet_scheduler: - -Fleet Scheduler ---------------- - -`fleet`_ is a scheduling backend included with CoreOS: - - fleet ties together systemd and etcd into a distributed init system. Think of - it as an extension of systemd that operates at the cluster level instead of the - machine level. This project is very low level and is designed as a foundation - for higher order orchestration. - -``fleetd`` is already running on the machines provisioned for Deis: no additional -configuration is needed. Commands such as ``deis ps:restart web.1`` or -``deis scale cmd=10`` will use `fleet`_ by default to manage app containers. - -To use the Fleet Scheduler backend explicitly, set the controller's -``schedulerModule`` to "fleet": - -.. 
code-block:: console - - $ deisctl config controller set schedulerModule=fleet - -.. _k8s_scheduler: - -Kubernetes Scheduler --------------------- - -.. important:: - - The Kubernetes Scheduler is a technology preview and is not recommended for production use. - Since it requires overlay networking, Kubernetes can only be enabled on a new cluster. - -`Kubernetes`_ is an orchestration system for Docker containers: - - Kubernetes (k8s) provides APIs to manage, deploy and scale Docker containers. Kubernetes - deploys containers as `pods`_, providing a unique entity across a cluster, but allowing - containers within the pod to share a namespace. - -Kubernetes requires the `flannel`_ overlay network so each pod receives a unique IP address within -the cluster. Existing Deis clusters cannot simply turn on overlay networking. Instead, provision -a new cluster to enable `flannel`_. - -To test the Kubernetes Scheduler, first install and start the Kubernetes components: - -.. code-block:: console - - $deisctl install k8s && deisctl start k8s - -Then set the controller's ``schedulerModule`` to "k8s": - -.. code-block:: console - - $ deisctl config controller set schedulerModule=k8s - -The Kubernetes scheduler is now active. Commands such as ``deis destroy`` or ``deis scale web=9`` -will use the Kubernetes ApiServer to manage app pods. - -Deis creates a `replication controller`_ to manage pods and a `service`_ which proxies traffic to -the pods for your app. Unlike other Deis schedulers, new app releases on Kubernetes do a rolling -deploy: pods with the new release replace old pods one at a time, until all are replaced (or until -an error forces a rollback to the previous release). - -.. note:: - - **Known Issues** - - - The flannel overlay network is not backward-compatible with earlier Deis clusters, since it - changes Docker networking and requires new units from Deis' user-data file. - - The Kubernetes ApiServer is not HA. 
If the ApiServer is rescheduled, it will reschedule all - Kubernetes units. - - Kubernetes implements resource-based scheduling. Specifying limits will create a reservation - of that resource on the node. - -.. _mesos_scheduler: - -Mesos with Marathon framework ------------------------------ - -.. important:: - - The Mesos with Marathon framework Scheduler is a technology preview and is not recommended for - production use. - -`Mesos`_ is a distributed system kernel: - - Mesos provides APIs for resource management and scheduling. A framework interacts with Mesos master - and schedules and task. A Zookeeper cluster elects Mesos master node. Mesos slaves are installed on - each node and they communicate to master with available resources. - -`Marathon`_ is a Mesos_ framework for long running applications: - - Marathon provides a Paas like feel for long running applications and features like high-availablilty, host constraints, - service discovery, load balancing and REST API to control your Apps. - -Deis uses the Marathon framework to schedule containers. Since Marathon is a framework for long-running -jobs, Deis uses the :ref:`fleet_scheduler` to run batch processing jobs. ``deisctl`` installs a standalone Mesos -cluster. To install an HA Mesos cluster, follow the directions at `aledbf-mesos`_, and set the etcd key -``/deis/scheduler/mesos/marathon`` to any Marathon node IP address. If a request is received by a regular -Marathon node, it is proxied to the master Marathon node. - -To test the Marathon Scheduler backend, first install and start the mesos components: - -.. code-block:: console - - $ deisctl install mesos && deisctl start mesos - -Then set the controller's ``schedulerModule`` to "mesos_marathon": - -.. code-block:: console - - $ deisctl config controller set schedulerModule=mesos_marathon - -The Marathon framework is now active. Commands such as ``deis destroy`` or -``deis scale web=9`` will use `Marathon`_ to manage app containers. 
- -Deis starts Marathon on port 8180. You can manage apps through the Marathon UI, which is accessible at http://:8180 - -.. note:: - - **Known Issues** - - - deisctl installs a standalone mesos cluster as fleet doesn't support runtime change to metadata. - You can specify this in cloud-init during the deployment of the node. keep watching `dynamic metadata fleet PR 1077`_. - - If you want to access Marathon UI, you'll have to expose port 8180 in the security group settings. - This is blocked off by default for security purposes. - - Deis does not yet use Marathon's docker container API to create containers. - - CPU shares are integers representing the number of CPUs. Memory limits should be specified in MB. - -.. _swarm_scheduler: - -Swarm Scheduler ---------------- - -.. important:: - - The Swarm Scheduler is a technology preview and is not recommended for - production use. - -`swarm`_ is a scheduling backend for Docker: - - Docker Swarm is native clustering for Docker. It turns a pool of Docker hosts - into a single, virtual host. - -.. - - Swarm serves the standard Docker API, so any tool which already communicates - with a Docker daemon can use Swarm to transparently scale to multiple hosts... - -Deis includes an enhanced version of swarm v0.2.0 with node failover and optimized -locking on container creation. The Swarm Scheduler uses a `soft affinity`_ filter -to spread app containers out among available machines. - -Swarm requires the Docker Remote API to be available at TCP port 2375. If you are -upgrading an earlier installation of Deis, please refer to the CoreOS documentation -to `enable the remote API`_. - -.. note:: - - **Known Issues** - - - It is not yet possible to change the default affinity filter. - -To test the Swarm Scheduler backend, first install and start the swarm components: - -.. code-block:: console - - $ deisctl install swarm && deisctl start swarm - -Then set the controller's ``schedulerModule`` to "swarm": - -.. 
code-block:: console - - $ deisctl config controller set schedulerModule=swarm - -The Swarm Scheduler is now active. Commands such as ``deis destroy`` or -``deis scale web=9`` will use `swarm`_ to manage app containers. - -To monitor Swarm Scheduler operations, watch the logs of the swarm-manager -component, or spy on Docker events directly on the swarm-manager machine: - - -.. _Kubernetes: http://kubernetes.io/ -.. _Mesos: http://mesos.apache.org -.. _Marathon: https://github.com/mesosphere/marathon -.. _pods: https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/user-guide/pods.md -.. _replication controller: https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/user-guide/replication-controller.md -.. _service: https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/user-guide/services.md -.. _flannel: https://github.com/coreos/flannel -.. _fleet: https://github.com/coreos/fleet#fleet---a-distributed-init-system -.. _swarm: https://github.com/docker/swarm#swarm-a-docker-native-clustering-system -.. _`soft affinity`: https://docs.docker.com/swarm/scheduler/filter/#soft-affinitiesconstraints -.. _`enable the remote API`: https://coreos.com/docs/launching-containers/building/customizing-docker/ -.. _`deis-kubernetes issue 3850`: https://github.com/deis/deis/issues/3850 -.. _`dynamic metadata fleet PR 1077`: https://github.com/coreos/fleet/pull/1077 -.. _`aledbf-mesos`: https://github.com/aledbf/coreos-mesos-zookeeper -.. _`deis-registry issue 3619`: https://github.com/deis/deis/issues/3619 diff --git a/docs/customizing_deis/controller_settings.rst b/docs/customizing_deis/controller_settings.rst index 48f66b63bb..9b34f47cdb 100644 --- a/docs/customizing_deis/controller_settings.rst +++ b/docs/customizing_deis/controller_settings.rst @@ -41,7 +41,6 @@ The following etcd keys are used by the controller component. 
setting description ==================================== ====================================================== /deis/controller/registrationMode set registration to "enabled", "disabled", or "admin_only" (default: "enabled") -/deis/controller/schedulerModule scheduler backend (default: "fleet") /deis/controller/subdomain subdomain used by the router for API requests (default: "deis") /deis/controller/webEnabled enable controller web UI (default: 0) /deis/controller/workers number of web worker processes (default: CPU cores * 2 + 1) diff --git a/docs/customizing_deis/index.rst b/docs/customizing_deis/index.rst index 37051d3bfa..98dfbef8d6 100644 --- a/docs/customizing_deis/index.rst +++ b/docs/customizing_deis/index.rst @@ -11,7 +11,6 @@ Customizing Deis .. toctree:: - choosing-a-scheduler cli-plugins builder_settings controller_settings diff --git a/docs/customizing_deis/router_settings.rst b/docs/customizing_deis/router_settings.rst index cb9c5cccdf..551771db19 100644 --- a/docs/customizing_deis/router_settings.rst +++ b/docs/customizing_deis/router_settings.rst @@ -47,6 +47,7 @@ setting description /deis/router/controller/timeout/read proxy_read_timeout for deis-controller (default: 20m) /deis/router/controller/timeout/send proxy_send_timeout for deis-controller (default: 20m) /deis/router/controller/whitelist comma separated list of IPs (or CIDR) allowed to connect to the controller (default: not set) Example: "0.0.0.0:some_optional_label,10.0.0.0/8" +/deis/router/enableNginxStatus enable vhost traffic status page /deis/router/enforceHTTPS redirect all HTTP traffic to HTTPS (default: false) /deis/router/enforceWhitelist deny all connections unless specifically whitelisted (default: false) /deis/router/firewall/enabled nginx naxsi firewall enabled (default: false) @@ -69,6 +70,7 @@ setting description /deis/router/maxWorkerConnections maximum number of simultaneous connections that can be opened by a worker process (default: 768) 
/deis/router/serverNameHashMaxSize nginx server_names_hash_max_size setting (default: 512) /deis/router/serverNameHashBucketSize nginx server_names_hash_bucket_size (default: 64) +/deis/router/serverTokens nginx server_tokens setting (default: not set) /deis/router/sslCert cluster-wide SSL certificate /deis/router/sslCiphers cluster-wide enabled SSL ciphers /deis/router/sslKey cluster-wide SSL private key @@ -78,6 +80,7 @@ setting description /deis/router/sslSessionTickets nginx ssl_session_tickets setting (default: on) /deis/router/sslSessionTimeout nginx ssl_session_timeout setting (default: 10m) /deis/router/sslBufferSize nginx ssl_buffer_size setting (default: 4k) +/deis/router/trafficStatusZoneSize nginx vhost_traffic_status_zone size setting (default: 1m) /deis/router/workerProcesses nginx number of worker processes to start (default: auto i.e. available CPU cores) /deis/router/proxyProtocol nginx PROXY protocol enabled /deis/router/proxyRealIpCidr nginx IP with CIDR used by the load balancer in front of deis-router (default: 10.0.0.0/8) diff --git a/docs/docs_requirements.txt b/docs/docs_requirements.txt index 05243785c9..a2ccf7b91c 100644 --- a/docs/docs_requirements.txt +++ b/docs/docs_requirements.txt @@ -11,7 +11,7 @@ django-fsm==2.2.0 django-guardian==1.2.5 django-json-field==0.5.7 djangorestframework==3.0.5 -docker-py==1.6.0 +docker-py==1.7.0 gunicorn==19.3.0 paramiko==1.15.2 python-etcd==0.3.2 diff --git a/docs/installing_deis/aws.rst b/docs/installing_deis/aws.rst index 5c2f8fec5b..ea6b039d31 100644 --- a/docs/installing_deis/aws.rst +++ b/docs/installing_deis/aws.rst @@ -234,6 +234,25 @@ Install Deis Platform Now that you've finished provisioning a cluster, please refer to :ref:`install_deis_platform` to start installing the platform. +In case of failures +------------------- + +Though it is uncommon, provisioning may fail for a few reasons. In these cases, +``provision-aws-cluster.sh`` will automatically attempt to rollback its changes. 
+ +If it fails to do so, you can clean up the AWS resources manually. Do this by logging into the AWS +console, and under CloudFormation, simply delete the ``deis`` stack it created. + +If you wish to retry, you'll need to take note of a few caveats: + +- The ``deis`` CloudFormation stack may be in the process of being deleted. In this case, you can't + provision another CloudFormation stack with the same name. You can simply wait for the stack to + clean itself up, or provision a stack under a different name by doing ``./provision-aws-cluster.sh + ``. + +- In most cases, it's not a good idea to re-use the same discovery URL of a failed provisioning. + Generate a new discovery URL before attempting to provision a new stack. + CloudFormation Updates ---------------------- diff --git a/docs/installing_deis/azure.rst b/docs/installing_deis/azure.rst index b6bccd37ad..bae91d8df4 100644 --- a/docs/installing_deis/azure.rst +++ b/docs/installing_deis/azure.rst @@ -20,11 +20,19 @@ This script uses PyYAML, a Python library, to do its work. If you haven't already, install these on your development machine: +For OSX users: + .. code-block:: console $ brew install python $ sudo pip install pyyaml +For Ubuntu users: + +.. code-block:: console + + $ sudo apt-get install -y python-yaml + Additionally, we'll also need to install the `Azure CLI`_ from Microsoft. Create CoreOS Cluster @@ -62,7 +70,8 @@ Generate a new discovery URL for the deployment so the hosts can find each other $ ./create-azure-user-data $(curl -s https://discovery.etcd.io/new) Next, edit ``parameters.json`` to configure the parameters required for the -cluster. For ``sshKeyData``, use the public key material for the SSH key you'd like +cluster. For ``publicDomainName``, specify the prefix of domain name (like ``deisNode``). +For ``sshKeyData``, use the public key material for the SSH key you'd like to use to log into the hosts. 
For ``customData``, you'll need to supply the base64-encoded version of ``azure-user-data``. This can be generated using ``base64``: diff --git a/docs/installing_deis/baremetal.rst b/docs/installing_deis/baremetal.rst index e2ed94545f..1931dc179e 100644 --- a/docs/installing_deis/baremetal.rst +++ b/docs/installing_deis/baremetal.rst @@ -94,7 +94,7 @@ Start the installation .. code-block:: console - coreos-install -C stable -c /tmp/config -d /dev/sda -V 835.9.0 + coreos-install -C stable -c /tmp/config -d /dev/sda -V 899.17.0 This will install the latest `CoreOS`_ stable release that has been known to work diff --git a/docs/installing_deis/gce.rst b/docs/installing_deis/gce.rst index 8882fe3a26..1d48c5a875 100644 --- a/docs/installing_deis/gce.rst +++ b/docs/installing_deis/gce.rst @@ -118,7 +118,7 @@ Launch 3 instances. You can choose another starting CoreOS image from the listin --metadata-from-file user-data=gce-user-data,sshKeys=$HOME/.ssh/deis.pub \ --disk name=cored${num},device-name=coredocker \ --tags deis \ - --image coreos-stable-835-9-0-v20151208 \ + --image coreos-stable-1068-8-0-v20160718 \ --image-project coreos-cloud; done @@ -191,7 +191,7 @@ Now edit the zone to add the Deis endpoint and wildcard DNS: .. code-block:: console - $ gcloud dns record-sets --zone deisdemoio transaction start + $ gcloud dns record-sets transaction start --zone deisdemoio This exports a `transaction.yaml` file. @@ -232,7 +232,7 @@ You will want to add two records as YAML objects. Here is an example edit for th ttl: 21600 type: A - kind: dns#resourceRecordSet - name: *.dev.deisdemo.io. + name: "*.dev.deisdemo.io." rrdatas: - 23.251.153.6 ttl: 21600 @@ -249,7 +249,7 @@ And finally execute the transaction. .. 
code-block:: console - $ gcloud dns record-sets --zone deisdemoio transaction execute + $ gcloud dns record-sets transaction execute --zone deisdemoio Install Deis Platform diff --git a/docs/installing_deis/install-deisctl.rst b/docs/installing_deis/install-deisctl.rst index 6cd46dfa18..b04e02b853 100644 --- a/docs/installing_deis/install-deisctl.rst +++ b/docs/installing_deis/install-deisctl.rst @@ -18,11 +18,11 @@ and run the latest installer: .. code-block:: console $ cd ~/bin - $ curl -sSL http://deis.io/deisctl/install.sh | sh -s 1.12.2 + $ curl -sSL http://deis.io/deisctl/install.sh | sh -s 1.13.4 $ # on CoreOS, add "sudo" to install to /opt/bin/deisctl - $ curl -sSL http://deis.io/deisctl/install.sh | sudo sh -s 1.12.2 + $ curl -sSL http://deis.io/deisctl/install.sh | sudo sh -s 1.13.4 -This installs ``deisctl`` version 1.12.2 to the current directory, and downloads the matching +This installs ``deisctl`` version 1.13.4 to the current directory, and downloads the matching Deis systemd unit files used to schedule the components. Link ``deisctl`` into /usr/local/bin, so it will be in your ``$PATH``: @@ -33,10 +33,10 @@ it will be in your ``$PATH``: To change installation options, save the installer directly: .. image:: download-linux-brightgreen.svg - :target: https://s3-us-west-2.amazonaws.com/get-deis/deisctl-1.12.2-linux-amd64.run + :target: https://s3-us-west-2.amazonaws.com/get-deis/deisctl-1.13.4-linux-amd64.run .. image:: download-osx-brightgreen.svg - :target: https://s3-us-west-2.amazonaws.com/get-deis/deisctl-1.12.2-darwin-amd64.run + :target: https://s3-us-west-2.amazonaws.com/get-deis/deisctl-1.13.4-darwin-amd64.run Then run the downloaded file as a shell script. Append ``--help`` to see what options are available. 
diff --git a/docs/installing_deis/install-platform.rst b/docs/installing_deis/install-platform.rst index f445b8ee25..f528f60a64 100644 --- a/docs/installing_deis/install-platform.rst +++ b/docs/installing_deis/install-platform.rst @@ -14,7 +14,7 @@ First check that you have ``deisctl`` installed and the version is correct. .. code-block:: console $ deisctl --version - 1.12.2 + 1.13.4 If not, follow instructions to :ref:`install_deisctl`. diff --git a/docs/installing_deis/linode.rst b/docs/installing_deis/linode.rst index 4059820578..e169df1826 100644 --- a/docs/installing_deis/linode.rst +++ b/docs/installing_deis/linode.rst @@ -180,16 +180,29 @@ each host. To do so, run: $ ./apply-firewall.py --private-key /path/to/key/deis --hosts 1.2.3.4 11.22.33.44 111.222.33.44 + +Or, you can provide the display group (NOTE: the default display group is ``deis``) to search for the +nodes using the Linode API, by running: -The script will use the etcd discovery url in the generated ``linode-user-data.yaml`` file or the value of the -discovery-url argument, if provided, to find all of the nodes in your cluster and create iptables rules to allow -connections between nodes while blocking outside connections automatically. Full command usage: +.. code-block:: console + + $ ./apply-firewall.py --private-key /path/to/key/deis --api-key YOUR_LINODE_API_KEY --display-group YOUR_DISPLAY_GROUP + + +The script will use either the Linode API or the etcd discovery url to find all of the nodes in your +cluster and create iptables rules to allow connections between nodes while blocking outside connections +automatically. Note that when discovering node ips, the ``--display-group`` parameter has highest priority, +then manual specification via ``--nodes`` and ``--hosts`` (i.e. public and private ips), then the etcd +discovery url via parameter ``--discovery-url`` or the ``linode-user-data.yaml`` file. Full command usage: .. 
code-block:: console usage: apply-firewall.py [-h] --private-key PRIVATE_KEY [--private] + [--adding-new-nodes] [--discovery-url DISCOVERY_URL] + [--display-group DISPLAY_GROUP] [--hosts HOSTS [HOSTS ...]] + [--nodes HOSTS [HOSTS ...]] Apply a "Security Group" to a Deis cluster @@ -199,10 +212,15 @@ connections between nodes while blocking outside connections automatically. Full Cluster SSH Private Key --private Only allow access to the cluster from the private network + --adding-new-nodes When adding new nodes to existing cluster, allows access to etcd + --display-group DISPLAY_GROUP + Linode display group for nodes --discovery-url DISCOVERY_URL Etcd discovery url --hosts HOSTS [HOSTS ...] - The IP addresses of the hosts to apply rules to + The public IP addresses of the hosts + --nodes HOSTS [HOSTS ...] + The private IP addresses of the hosts Install Deis Platform @@ -212,7 +230,28 @@ Now that you've finished provisioning a cluster, please refer to :ref:`install_d start installing the platform. +Adding Nodes to an Existing Cluster +----------------------------------- + +When adding one or more nodes to an existing CoreOS setup, ``etcd`` will be `added as a proxy to +the existing cluster`_. The setup of a proxy requires access to ports 2379 and 2380 of the existing +nodes in the cluster. + +In order to open up these ports, before cluster provisioning, run: + +.. code-block:: console + + $ ./apply-firewall.py --private-key /path/to/key/deis --hosts 1.2.3.4 11.22.33.44 111.222.33.44 + --adding-new-nodes + + +Then provision the cluster as described above and afterwards reapply the firewall using +``./apply-firewall.py`` without the ``--adding-new-nodes`` parameter. + + +.. _`added as a proxy to the existing cluster`: https://coreos.com/etcd/docs/latest/clustering.html#public-etcd-discovery-service .. _`contrib/linode`: https://github.com/deis/deis/tree/master/contrib/linode .. _`Linode Account Settings`: https://manager.linode.com/account/settings .. 
_`Linode API Keys`: https://manager.linode.com/profile/api .. _`pip`: https://pip.pypa.io/en/stable/ + diff --git a/docs/installing_deis/system-requirements.rst b/docs/installing_deis/system-requirements.rst index 71e391d812..0752a5411e 100644 --- a/docs/installing_deis/system-requirements.rst +++ b/docs/installing_deis/system-requirements.rst @@ -33,10 +33,9 @@ in component failures, issues with etcd/fleet, and other problems. Cluster size ------------ -For :ref:`scheduling ` and the :ref:`deis-store ` components -to work properly, clusters must have at least three nodes. The ``etcd`` service must always -be able to obtain a quorum, and the Ceph data store must maintain at least three replicas -of persistent data. +For the :ref:`deis-store ` component to work properly, clusters must have at least +three nodes. The ``etcd`` service must always be able to obtain a quorum, and the Ceph +data store must maintain at least three replicas of persistent data. See `optimal etcd cluster size`_ and `etcd disaster recovery`_ for further information. diff --git a/docs/managing_deis/DeisLoadBalancerDiagram.png b/docs/managing_deis/DeisLoadBalancerDiagram.png index d4c7111328..fd43b0aa62 100644 Binary files a/docs/managing_deis/DeisLoadBalancerDiagram.png and b/docs/managing_deis/DeisLoadBalancerDiagram.png differ diff --git a/docs/managing_deis/add_remove_host.rst b/docs/managing_deis/add_remove_host.rst index 9e5c996c3d..a690eb678a 100644 --- a/docs/managing_deis/add_remove_host.rst +++ b/docs/managing_deis/add_remove_host.rst @@ -288,24 +288,42 @@ This can be achieved by making a request to the etcd API. See `remove machines`_ Automatic Host Removal ====================== -The ``contrib/user-data.example`` provides a unit which provides some experimental logic to clean-up a Deis -node's Ceph and etcd membership before reboot, shutdown or halt events. Currently, the procedure will -also be triggered if it is manually stopped via systemctl. 
+The ``contrib/coreos/user-data.example`` provides 2 units, ``graceful-etcd-shutdown.service`` and +``graceful-ceph-shutdown.service``, that contain some experimental logic to clean-up a Deis node's +cluster membership before reboot, shutdown or halt events. The units can be used independently or +together. -The unit requires that the optional ``deis-store-admin`` component is installed. +The ``graceful-etcd-shutdown`` unit is useful for any Deis node running its own etcd. To be used, it +must be enabled and started. + +.. code-block:: console + + root@deis-1:/# systemctl enable graceful-etcd-shutdown + root@deis-1:/# systemctl start graceful-etcd-shutdown + +The ``graceful-ceph-shutdown`` script is only useful for nodes running deis-store components. To be used, +the unit requires that the optional ``deis-store-admin`` component is installed. .. code-block:: console root@deis-1:/# deisctl install store-admin root@deis-1:/# deisctl start store-admin -To enable the feature you must enable and start the unit: +Then the unit should be enabled and started. .. code-block:: console - root@deis-1:/# systemctl enable graceful-deis-shutdown - root@deis-1:/# systemctl start graceful-deis-shutdown + root@deis-1:/# systemctl enable graceful-ceph-shutdown + root@deis-1:/# systemctl start graceful-ceph-shutdown + +At this point your node is ready to be gracefully removed whenever a halt, shutdown or reboot event occurs. +The graceful shutdown units insert themselves ahead of the etcd and Ceph units in the shutdown order. This +allows them to perform preemptive actions on etcd and Ceph while they are still healthy and in the cluster. + +The units make use of the script ``/opt/bin/graceful-shutdown.sh`` to remove the node from the cluster. For +Ceph, this means determining if the Ceph cluster is healthy and has enough nodes to return to health - if it +does, it will remove its OSD and wait for the Ceph cluster to return to health. 
Once it is healthy, it will
remove its monitor and continue to shut down Ceph components. The end result should be a Ceph cluster that
returns its status as ``health_ok``. -The unit is now active and will now be stopped ahead of the store components, Docker and etcd.
-The script ``/opt/bin/graceful-shutdown.sh`` will determine if the Ceph cluster is healthy, and attempt
-to remove this node from the Deis store components if there are greater than 3 Ceph nodes in the cluster. +For etcd, the script removes its etcd member and deletes itself from the CoreOS discovery url. diff --git a/docs/managing_deis/isolating-planes.rst b/docs/managing_deis/isolating-planes.rst index 827c924bc0..de61dc39b6 100644 --- a/docs/managing_deis/isolating-planes.rst +++ b/docs/managing_deis/isolating-planes.rst @@ -105,15 +105,5 @@ the metadata that describes where each unit may be hosted. $ deisctl config platform set enablePlacementOptions=true -Alternate schedulers -------------------- - -Recent versions of Deis ship with -:ref:`technology previews ` that permit the use of -alternate schedulers such as Swarm or Mesos with Marathon. - -If opting into both isolated planes and an alternate scheduler, units for the -alternate scheduler's agents (a Mesos slave process, for instance) will be -decorated appropriately to isolate them to the Data Plane. .. _`More details on Fleet metadata`: https://coreos.com/fleet/docs/latest/unit-files-and-scheduling.html#fleet-specific-options diff --git a/docs/managing_deis/platform_monitoring.rst b/docs/managing_deis/platform_monitoring.rst index 6f6c9d4b20..f928855e07 100644 --- a/docs/managing_deis/platform_monitoring.rst +++ b/docs/managing_deis/platform_monitoring.rst @@ -132,7 +132,7 @@ SPM Docker Agent can be run using Docker as follows (assuming the SPM_TOKEN is s .. 
code-block:: console - docker run -d --name spm-agent -e SPM_TOKEN=`etcdctl get SPM_TOKEN` -e HOSTNAME=$HOSTNAME -v /var/run/docker.sock:/var/run/docker.sock sematext/spm-agent-docker + docker run -d --name sematext-agent -e SPM_TOKEN=`etcdctl get SPM_TOKEN` -e HOSTNAME=$HOSTNAME -v /var/run/docker.sock:/var/run/docker.sock sematext/sematext-agent-docker To activate SPM Docker Agent for the entire cluster submit this unit file to fleet @@ -149,11 +149,11 @@ To activate SPM Docker Agent for the entire cluster submit this unit file to fle EnvironmentFile=/etc/environment Restart=always RestartSec=30s - ExecStartPre=-/usr/bin/docker kill spm-agent - ExecStartPre=-/usr/bin/docker rm spm-agent - ExecStartPre=/usr/bin/docker pull sematext/spm-agent-docker:latest - ExecStart=/bin/sh -c 'set -ex; /usr/bin/docker run --name spm-agent -e SPM_TOKEN=$(etcdctl get SPM_TOKEN) -e HOSTNAME=$HOSTNAME -v /var/run/docker.sock:/var/run/docker.sock sematext/spm-agent-docker' - ExecStop=/usr/bin/docker stop spm-agent + ExecStartPre=-/usr/bin/docker kill sematext-agent + ExecStartPre=-/usr/bin/docker rm sematext-agent + ExecStartPre=/usr/bin/docker pull sematext/sematext-agent-docker:latest + ExecStart=/bin/sh -c 'set -ex; /usr/bin/docker run --name sematext-agent -e SPM_TOKEN=$(etcdctl get SPM_TOKEN) -e HOSTNAME=$HOSTNAME -v /var/run/docker.sock:/var/run/docker.sock sematext/sematext-agent-docker' + ExecStop=/usr/bin/docker stop sematext-agent [Install] WantedBy=multi-user.target @@ -161,22 +161,22 @@ To activate SPM Docker Agent for the entire cluster submit this unit file to fle [X-Fleet] Global=true -Save the file as ``spm-agent.service``. +Save the file as ``sematext-agent.service``. .. code-block:: console - wget https://raw.githubusercontent.com/sematext/spm-agent-docker/master/coreos/spm-agent.service + wget https://raw.githubusercontent.com/sematext/sematext-agent-docker/master/coreos/sematext-agent.service Load and start the service with .. 
code-block:: console - fleetctl load spm-agent.service && fleetctl start spm-agent.service + fleetctl load sematext-agent.service && fleetctl start sematext-agent.service After one minute, you should see metrics in SPM. Documentation, source code and support information is available here: -`https://github.com/sematext/spm-agent-docker`_. +`https://github.com/sematext/sematext-agent-docker`_. @@ -189,4 +189,4 @@ Documentation, source code and support information is available here: .. _`newrelic-sysmond`: https://github.com/johanneswuerbach/newrelic-sysmond-service .. _`SPM for Docker`: http://sematext.com/spm/integrations/docker-monitoring.html .. _`Create a new SPM App`: https://apps.sematext.com/spm-reports/registerApplication.do -.. _`https://github.com/sematext/spm-agent-docker`: https://github.com/sematext/spm-agent-docker +.. _`https://github.com/sematext/sematext-agent-docker`: https://github.com/sematext/sematext-agent-docker diff --git a/docs/managing_deis/running-deis-without-ceph.rst b/docs/managing_deis/running-deis-without-ceph.rst index febdc31692..91aa4897d9 100644 --- a/docs/managing_deis/running-deis-without-ceph.rst +++ b/docs/managing_deis/running-deis-without-ceph.rst @@ -41,7 +41,7 @@ ships these to the :ref:`logger` component. By default, the logger writes the logs to a distributed Ceph filesystem. These logs can then be fetched by the :ref:`controller` component via HTTP. -In a Ceph-less clutser, the Logger component should be configured, instead, to +In a Ceph-less cluster, the Logger component should be configured, instead, to use in-memory log storage. Optionally, a drain may also be configured to forward logs to an external log service (such as Papertrail) for longer-term archival. @@ -57,6 +57,9 @@ previous state. We will not be using the database component in the Ceph-less cluster, and will instead rely on an external database. +When provisioning the database, it is strongly recommended to use an `m3.medium` +instance or greater. 
+ Registry ^^^^^^^^ @@ -120,6 +123,13 @@ S3 store configuration sample: s3encrypt=false \ s3secure=false +Due to `issue 4568`_, you'll also need to run the following to ensure confd will template out the +registry's configuration: + +.. code-block:: console + + $ deisctl config store set gateway=' ' + By default, the registry will try to authenticate to S3 using the instance role. If your cluster is not running on EC2, you can supply hard coded API access and secret key: @@ -218,5 +228,6 @@ When following the :ref:`upgrading-deis` documentation, be sure to use .. _`Amazon RDS`: http://aws.amazon.com/rds/ .. _`Amazon S3`: http://aws.amazon.com/s3/ .. _`Arne-Christian Blystad`: https://github.com/blystad +.. _`issue 4568`: https://github.com/deis/deis/issues/4568 .. _`Papertrail`: https://papertrailapp.com/ .. _`Swift3`: https://github.com/openstack/swift3 diff --git a/docs/managing_deis/upgrading-deis.rst b/docs/managing_deis/upgrading-deis.rst index 407a68cc2c..7d1f306ed7 100644 --- a/docs/managing_deis/upgrading-deis.rst +++ b/docs/managing_deis/upgrading-deis.rst @@ -41,10 +41,10 @@ Finally, update ``deisctl`` to the new version and reinstall: .. code-block:: console - $ curl -sSL http://deis.io/deisctl/install.sh | sh -s 1.12.2 + $ curl -sSL http://deis.io/deisctl/install.sh | sh -s 1.13.4 $ deisctl --version # should match the desired platform - 1.12.2 - $ deisctl config platform set version=v1.12.2 + 1.13.4 + $ deisctl config platform set version=v1.13.4 $ deisctl install platform $ deisctl start platform @@ -58,10 +58,16 @@ Finally, update ``deisctl`` to the new version and reinstall: When upgrading an AWS cluster older than Deis v1.6, a :ref:`migration_upgrade` is preferable. - On AWS, Deis enables the :ref:`PROXY protocol ` by default. 
- If an in-place upgrade is required, run ``deisctl config router set proxyProtocol=1``, - enable PROXY protocol for ports 80 and 443 on the ELB, add a ``TCP 443:443`` listener, and - change existing targets and health checks from HTTP to TCP. + On AWS, Deis v1.6 and above enables the :ref:`PROXY protocol ` by default. + If an in-place upgrade is required on a cluster running a version older than v1.6, + run ``deisctl config router set proxyProtocol=1``, enable PROXY protocol for ports 80 and + 443 on the ELB, add a ``TCP 443:443`` listener. + + Elastic Load Balancer is set to perform health checks to make sure your instances are alive. + When you take your cluster down, there will be a brief period that your instances will be + marked as ``OutOfService``. If deis-cli can't connect to your cluster, check your EC2 Load + Balancer's health check status in the AWS web console. Wait for the instances to return to + ``InService`` status. Upgrade Deis clients ^^^^^^^^^^^^^^^^^^^^ @@ -99,11 +105,11 @@ to. Care should be taken not to overwrite the existing ``deisctl`` version. .. code-block:: console $ mkdir /tmp/upgrade - $ curl -sSL http://deis.io/deisctl/install.sh | sh -s 1.12.2 /tmp/upgrade + $ curl -sSL http://deis.io/deisctl/install.sh | sh -s 1.13.4 /tmp/upgrade $ /tmp/upgrade/deisctl --version # should match the desired platform - 1.12.2 + 1.13.4 $ /tmp/upgrade/deisctl refresh-units - $ /tmp/upgrade/deisctl config platform set version=v1.12.2 + $ /tmp/upgrade/deisctl config platform set version=v1.13.4 Now it is possible to prepare the cluster for the upgrade using the old ``deisctl`` binary. This command will shutdown and uninstall all components of the cluster except the router and publisher. 
This means your services should still be @@ -128,7 +134,7 @@ If the process were to fail, the old version can be restored manually by reinsta $ /tmp/upgrade/deisctl stop platform $ /tmp/upgrade/deisctl uninstall platform - $ /tmp/upgrade/deisctl config platform set version=v1.12.2 + $ /tmp/upgrade/deisctl config platform set version=v1.13.4 $ /opt/bin/deisctl refresh-units $ /opt/bin/deisctl install platform $ /opt/bin/deisctl start platform diff --git a/docs/reference/api-v1.7.rst b/docs/reference/api-v1.7.rst index e52916be2d..fced552073 100644 --- a/docs/reference/api-v1.7.rst +++ b/docs/reference/api-v1.7.rst @@ -51,7 +51,7 @@ Example Response: HTTP/1.1 201 CREATED DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json { @@ -89,7 +89,7 @@ Example Response: HTTP/1.1 200 OK DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json {"token": "abc123"} @@ -112,7 +112,7 @@ Example Response: HTTP/1.1 204 NO CONTENT DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Regenerate Token ```````````````` @@ -144,7 +144,7 @@ Example Response: HTTP/1.1 200 OK DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json {"token": "abc123"} @@ -182,7 +182,7 @@ Example Response: HTTP/1.1 200 OK DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Applications @@ -206,7 +206,7 @@ Example Response: HTTP/1.1 200 OK DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json { @@ -252,7 +252,7 @@ Example Response: HTTP/1.1 201 CREATED DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json { @@ -283,7 +283,7 @@ Example Response: HTTP/1.1 204 NO CONTENT DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + 
DEIS_PLATFORM_VERSION: 1.13.4 List Application Details @@ -303,7 +303,7 @@ Example Response: HTTP/1.1 200 OK DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json { @@ -367,10 +367,11 @@ Example Response: HTTP/1.1 200 OK DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: text/plain - "16:51:14 deis[api]: test created initial release\n" + 16:51:14 deis[api]: test created initial release + 16:51:15 deis[api]: test added POWERED_BY Run one-off Commands @@ -391,7 +392,7 @@ Example Response: HTTP/1.1 200 OK DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json [0, "hi\n"] @@ -418,7 +419,7 @@ Example Response: HTTP/1.1 200 OK DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json { @@ -451,7 +452,7 @@ Example Response: HTTP/1.1 200 OK DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json { @@ -496,7 +497,7 @@ Example Response: HTTP/1.1 201 CREATED DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json { @@ -526,7 +527,7 @@ Example Response: HTTP/1.1 204 NO CONTENT DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Containers @@ -550,7 +551,7 @@ Example Response: HTTP/1.1 200 OK DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json { @@ -590,7 +591,7 @@ Example Response: HTTP/1.1 200 OK DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json { @@ -630,7 +631,7 @@ Example Response: HTTP/1.1 200 OK DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json [ @@ -665,7 +666,7 @@ Example Response: HTTP/1.1 
200 OK DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json [ @@ -700,7 +701,7 @@ Example Response: HTTP/1.1 200 OK DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json [ @@ -738,7 +739,7 @@ Example Response: HTTP/1.1 204 NO CONTENT DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Configuration @@ -762,7 +763,7 @@ Example Response: HTTP/1.1 200 OK DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json { @@ -800,7 +801,7 @@ Example Response: HTTP/1.1 201 CREATED DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json X-Deis-Release: 3 @@ -843,7 +844,7 @@ Example Response: HTTP/1.1 201 CREATED DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json X-Deis-Release: 4 @@ -885,7 +886,7 @@ Example Response: HTTP/1.1 200 OK DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json { @@ -923,7 +924,7 @@ Example Response: HTTP/1.1 201 CREATED DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json { @@ -953,7 +954,7 @@ Example Response: HTTP/1.1 204 NO CONTENT DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Builds @@ -977,7 +978,7 @@ Example Response: HTTP/1.1 200 OK DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json { @@ -1032,7 +1033,7 @@ Example Response: HTTP/1.1 201 CREATED DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json X-Deis-Release: 4 @@ -1070,7 +1071,7 @@ Example Response: HTTP/1.1 200 OK DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 
1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json { @@ -1132,7 +1133,7 @@ Example Response: HTTP/1.1 200 OK DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json { @@ -1168,7 +1169,7 @@ Example Response: HTTP/1.1 201 CREATED DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json {"version": 5} @@ -1195,7 +1196,7 @@ Example Response: HTTP/1.1 201 CREATED DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json { @@ -1237,7 +1238,7 @@ Example Response: HTTP/1.1 201 CREATED DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json { @@ -1267,7 +1268,7 @@ Example Response: HTTP/1.1 204 NO CONTENT DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Permissions @@ -1295,7 +1296,7 @@ Example Response: HTTP/1.1 200 OK DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json { @@ -1325,7 +1326,7 @@ Example Response: HTTP/1.1 201 CREATED DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Remove Application Permission @@ -1345,7 +1346,7 @@ Example Response: HTTP/1.1 204 NO CONTENT DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 List Administrators ``````````````````` @@ -1364,7 +1365,7 @@ Example Response: HTTP/1.1 200 OK DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json { @@ -1407,7 +1408,7 @@ Example Response: HTTP/1.1 201 CREATED DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Remove User's Administrative Privileges ``````````````````````````````````````` @@ -1430,7 +1431,7 @@ Example Response: HTTP/1.1 204 NO CONTENT DEIS_API_VERSION: 1.7 - 
DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Users ----- @@ -1456,7 +1457,7 @@ Example Response: HTTP/1.1 200 OK DEIS_API_VERSION: 1.7 - DEIS_PLATFORM_VERSION: 1.12.2 + DEIS_PLATFORM_VERSION: 1.13.4 Content-Type: application/json { diff --git a/docs/reference/server/scheduler.rst b/docs/reference/server/scheduler.rst index dbb03f1bfe..172395be65 100644 --- a/docs/reference/server/scheduler.rst +++ b/docs/reference/server/scheduler.rst @@ -26,9 +26,3 @@ scheduler.states .. contents:: :local: .. automodule:: scheduler.states - -scheduler.swarm ---------------- -.. contents:: - :local: -.. automodule:: scheduler.swarm diff --git a/docs/reference/terms/build.rst b/docs/reference/terms/build.rst index db10e205a6..6328b44ea1 100644 --- a/docs/reference/terms/build.rst +++ b/docs/reference/terms/build.rst @@ -9,6 +9,3 @@ Deis builds are created automatically on the controller when a developer uses ``git push deis master``. When a new build is created, a new :ref:`release` is created automatically. - -.. note:: - Deis only supports ``git push`` to the **master** branch. diff --git a/docs/reference/terms/scheduler.rst b/docs/reference/terms/scheduler.rst index a215028b8c..3b82ca118a 100644 --- a/docs/reference/terms/scheduler.rst +++ b/docs/reference/terms/scheduler.rst @@ -15,9 +15,8 @@ The scheduler must decide which machines are eligible to run these container jobs. Scheduler backends vary in the details of their job allocation policies and whether or not they are resource-aware, among other features. -The Deis scheduler is implemented in the :ref:`controller` component. The -scheduler implementation (or "backend") can be changed dynamically to support -different strategies or cluster types. +The Deis scheduler client is implemented in the :ref:`controller` component. +Deis uses `Fleet`_ to schedule the containers across the cluster. -Follow the :ref:`choosing_a_scheduler` guide to learn about available -options for the Deis scheduler. + +.. 
_`Fleet`: https://github.com/coreos/fleet diff --git a/docs/roadmap/planning.rst b/docs/roadmap/planning.rst index d74ddeae11..7967143d6d 100644 --- a/docs/roadmap/planning.rst +++ b/docs/roadmap/planning.rst @@ -30,13 +30,118 @@ Major decisions affecting the Roadmap are discussed during Release Planning Meet Release Planning Meetings are open to the public with access coordinated via the #deis IRC channel on Freenode. Notes from past meetings are below, along with links to a recording of the entire meeting on YouTube. -January 2016 -~~~~~~~~~~~~ +November 2016 +~~~~~~~~~~~~~ The next public release planning meeting for Deis will take place on -**Thursday, January 7th** at **12pm MDT/1900 GMT**. The `event`_ can be added +**Thursday, November 3rd** at **12pm MDT/1900 UTC**. The `event`_ can be added directly to your calendar. +October 2016 +~~~~~~~~~~~~ + +- Introduction - Seth Goings +- Release Review - Jason Hansen +- Deis CI Overview - Jonathan Chauncey + +Archive: https://www.youtube.com/watch?v=LhglIIh9izw + +September 2016 +~~~~~~~~~~~~~~ + +- Introduction - Gabe Monroy +- Meet Matt Tucker - Seth Goings & Matt Tucker +- Workflow 2.4 Release Review - Jason Hansen +- Autoscaling Preview - Helgi Þorbjörnsson +- Roadmap Process - Jason Hansen + +Archive: https://www.youtube.com/watch?v=sDaAZGDcRgU + +August 2016 +~~~~~~~~~~~ + +- Introduction - Jason Hansen +- Megaboom Demo - Aaron Schlesinger +- Deployments Demo - Helgi Þorbjörnsson +- Workflow 2.3 Release Review - Jason Hansen +- Workflow 2.4 Up Next - Jason Hansen + +Archive: https://www.youtube.com/watch?v=ZWkDpi76H-E + +July 2016 +~~~~~~~~~ + +- Workflow 2.1 and 2.2 Release Review +- Helm Alpha.2 and Alpha.3 Review + +Archive: https://www.youtube.com/watch?v=BVHT03uQ1WU + +June 2016 +~~~~~~~~~ + +- Introduction & Community Updates - Gabe Monroy +- Helm Alpha 1 Demo - Michelle Noorali +- How Mozilla uses Deis v1 - Josh Mize +- Deis V1 Maintenance Update - Matt Boersma +- Deis Workflow Release Update - Jason 
Hansen + +Archive: https://youtu.be/MtGSwsRnpcM + +May 2016 +~~~~~~~~ + +- Introduction & Community Updates +- LTS Update +- Beta 3 Release and Beta 4 Status +- Helm Update, Helm Classic and Kubernetes Helm +- Wrap-up! + +Archive: https://www.youtube.com/watch?v=Jb-X_yVE2-w + +April 2016 (Deis LTS Release) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- Intro +- Move to Slack https://slack.deis.io +- Notes on Deis Workflow Beta +- Road to Workflow Stable +- Deis Workflow e2e Testing and Improvements +- LTS Release (v1.13.0) for Deis v1 +- Call for Community Demos! + +Archive: https://www.youtube.com/watch?v=72g9PxiR0iU + +March 2016 (Deis v2 Beta) +~~~~~~~~~~~~~~~~~~~~~~~~~ + +We demoed Deis v2 beta and shared the architecture of the Deis v2 CI/CD pipeline. + +Archive: https://www.youtube.com/watch?v=rIF3v1MZkJg + +February 2016 +~~~~~~~~~~~~~ + +There was no public release planning meeting for February. + +January 2016 (Deis 1.x LTS release and Deis v2 Alpha) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +We're still taking feedback and suggestions on what our users would like to +see in the 1.x LTS release. Feel free to provide your input on the +`LTS release issue`_ on GitHub. + +Over the last month we've been hard at work on Deis v2 which puts the Deis +workflow people have come to know and love on top of the `Kubernetes`_ platform. +Aaron demoed what we've done so far with the release of Deis v2 Alpha. +(`v2 alpha walkthrough cheatsheet`_) + +If you've tried out v2 alpha, let us know what you think via the +`v2 alpha feedback`_ GitHub issue! + +You can also take a look at our `v2 beta milestone`_ goals and chime in there. + +Archive: https://www.youtube.com/watch?v=8LNVluUFh1M + December 2015 (Deis 1.x LTS release) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -104,6 +209,10 @@ Thanks to `Amy Lindburg`_ and our friends at `Docker`_ for their inspiration. .. _`Amy Lindburg`: https://twitter.com/amylindburg .. _`Docker`: https://www.docker.com/ -.. 
_`event`: https://goo.gl/iFvIv6 +.. _`event`: https://goo.gl/FGNnOC .. _`LTS release issue`: https://github.com/deis/deis/issues/4776 .. _`Maintainers`: https://github.com/deis/deis/blob/master/MAINTAINERS.md +.. _`Kubernetes`: http://kubernetes.io/ +.. _`v2 alpha feedback`: https://github.com/deis/deis/issues/4827 +.. _`v2 alpha walkthrough cheatsheet`: https://gist.github.com/arschles/5b7a75a50938913d3eb1 +.. _`v2 beta milestone`: https://github.com/deis/deis/issues/4809 diff --git a/docs/roadmap/releases.rst b/docs/roadmap/releases.rst index 8f738d96b3..7a9094b2dd 100644 --- a/docs/roadmap/releases.rst +++ b/docs/roadmap/releases.rst @@ -49,13 +49,10 @@ Patch Release docs/troubleshooting_deis/index.rst \ logger/image/Dockerfile \ logspout/image/Dockerfile \ - mesos/template \ - mesos/zookeeper/Dockerfile \ publisher/image/Dockerfile \ registry/Dockerfile \ router/Dockerfile \ store/base/Dockerfile \ - swarm/image/Dockerfile \ version/version.go - Update the CHANGELOG to include all commits since the last release. Since diff --git a/docs/roadmap/roadmap.rst b/docs/roadmap/roadmap.rst index 1273bbf6d3..fdc97265bb 100644 --- a/docs/roadmap/roadmap.rst +++ b/docs/roadmap/roadmap.rst @@ -12,135 +12,19 @@ important to the future of Deis. Given the project's rapid :ref:`Release Schedule, ` roadmap items are designed to provide a sense of direction over many releases. -Deis v2 -------- +Deis Workflow +------------- -Much of the Deis team's efforts are focused on Deis v2, which will see the Deis -platform running on Kubernetes. +Much of the Deis team's efforts are focused on Deis Workflow which represents +the next major stable release. For more information about Deis Workflow visit +`https://deis.com, `_ or `https://github.com/deis/workflow, `_. -For details on Deis v2, see the `Deis v2 Design Document`_ -and issues with the `v2`_ tag. +Deis Workflow will follow the same open planning and roadmap process as Deis v1 +published `here, `_. 
-Etcd Hardening --------------- +Deis v1 Maintenance +------------------- -We have recently seen various issues with etcd performance, which can cause -issues with Deis. We will focus on re-evaluating our implementation and use -of etcd in Deis, with a focus on performance and reliability. - -This feature is tracked as GitHub issue `#4404`_. - -Rigger ------- - -We want the provisioning process of CoreOS hosts to be as smooth as the -``deisctl`` experience when users install and start Deis. We've started working -on a tool called `Rigger`_ which provisions hosts and deploys Deis on -any supported provider. - -We're still defining the interface between Rigger and the provider-specific -provision logic. We'd love input from the community on this discussion in -`#4345`_. - -New Default Scheduler ---------------------- -Deis now has support for Docker Swarm, Apache Mesos, and Google Kubernetes as -application schedulers. With the known limitations of fleet (primarily, not being -a resource-aware scheduler), we should investigate using a different scheduler -as our default. - -This feature is tracked as GitHub issue `#4222`_. - -Permissions and Teams ---------------------- -Deis deployments in larger organizations require more fine-grained control -over users and permissions. Implementation of teams and improved user permissions -are tracked in separate issues: - - - [ ] `Permissions`_ - - [ ] `Teams`_ - -Monitoring & Telemetry ----------------------- -Deis installations today use custom solutions for monitoring, alerting and operational visibility. -Deis will standardize the monitoring interfaces and provide open source agent(s) that can be used to ship telemetry to arbitrary endpoints. 
- - - [ ] Host Telemetry (cpu, memory, network, disk) - - [ ] Container Telemetry (cpu, memory, network, disk) - - [ ] Platform Telemetry (control plane, data plane) - - [ ] Controller Telemetry (app created, build created, containers scaled) - -This feature is tracked as GitHub issue `#3699`_. - -Internal Service Discovery --------------------------- -To provide a better container networking experience, Deis must provide -internal service discovery for components to coordinate. - -This feature is tracked as GitHub issue `#3072`_. - -Update Service --------------- -Deis must support 100% automated, zero-downtime updates of the control plane. -Like CoreOS, Deis clusters should be attached to an alpha, beta or stable channel and rely on an automatic update mechanism. -To accomplish this, Deis plans to use the `Google Omaha Protocol`_ as implemented by `CoreUpdate`_. - - - [X] `Update client/agent`_ - - [ ] Update server - - [ ] `Automatic CoreOS upgrades`_ - - [ ] CI Integration - -This feature is tracked as GitHub issue `#2106`_. - -Deis Push ---------- -End-users should be able to push Docker-based applications into Deis from their local machine using ``deis push user/app``. -This works around a number of authentication issues with private registries and ``deis pull``. - - - [ ] `Docker Registry v2`_ - - [ ] `Deis Push`_ - -TTY Broker ----------- -Today Deis cannot provide bi-directional streams needed for log tailing and interactive batch processes. -By having the :ref:`Controller` drive a TTY Broker component, Deis can securely open WebSockets -through the routing mesh. - - - [ ] `TTY Broker component`_ - - [ ] `Interactive Deis Run`_ (``deis run bash``) - - [ ] `Log Tailing`_ (``deis logs -f``) - -Service Broker --------------- -In Deis, connections to :ref:`concepts_backing_services` are meant to be explicit and modeled as a series of environment variables. -Deis believes the Cloud Foundry `Service Broker API`_ is the best embodiment of this today. 
- - - [ ] Deis Addons CLI (deis addons) - - [ ] PostgreSQL Service Broker - - [ ] Redis Service Broker - -This feature is tracked as GitHub issue `#231`_. - -.. _`#231`: https://github.com/deis/deis/issues/231 -.. _`#2106`: https://github.com/deis/deis/issues/2106 -.. _`#3072`: https://github.com/deis/deis/issues/3072 -.. _`#3699`: https://github.com/deis/deis/issues/3699 -.. _`#4222`: https://github.com/deis/deis/issues/4222 -.. _`#4345`: https://github.com/deis/deis/issues/4345 -.. _`#4404`: https://github.com/deis/deis/issues/4404 -.. _`Automatic CoreOS upgrades`: https://github.com/deis/deis/issues/1043 -.. _`CoreUpdate`: https://coreos.com/docs/coreupdate/custom-apps/coreupdate-protocol/ -.. _`Deis Push`: https://github.com/deis/deis/issues/2680 -.. _`Deis v2 Design Document`: https://github.com/deis/deis/issues/4582 -.. _`Docker Registry v2`: https://github.com/deis/deis/issues/3814 -.. _`Google Omaha Protocol`: https://code.google.com/p/omaha/wiki/ServerProtocol -.. _`Interactive Deis Run`: https://github.com/deis/deis/issues/117 -.. _`like CoreOS`: https://coreos.com/releases/ -.. _`Log Tailing`: https://github.com/deis/deis/issues/465 -.. _`Permissions`: https://github.com/deis/deis/issues/4150 -.. _`Rigger`: https://github.com/deis/rigger -.. _`Service Broker API`: http://docs.cloudfoundry.org/services/api.html -.. _`Teams`: https://github.com/deis/deis/issues/4173 -.. _`TTY Broker component`: https://github.com/deis/deis/issues/3808 -.. _`Update client/agent`: https://github.com/deis/deis/issues/3811 -.. _`v2`: https://github.com/deis/deis/labels/v2 +Now that Deis Workflow has seen its first stable release, Deis v1 is officially +under maintenance. We will continue to track upstream CoreOS version bumps, +security patches and patch submissions made by the community. 
diff --git a/docs/theme/deis/layout.html b/docs/theme/deis/layout.html index 4b845f7672..a5eccfa914 100644 --- a/docs/theme/deis/layout.html +++ b/docs/theme/deis/layout.html @@ -85,6 +85,11 @@
+ +
+

Version 1

+

This documentation is for Deis v1 PaaS. For Workflow (v2) documentation visit https://deis.com/docs/workflow/.

+
{% block body %} {% endblock %} {%- if pagename != 'index' %} diff --git a/docs/troubleshooting_deis/index.rst b/docs/troubleshooting_deis/index.rst index 22de7a69f4..6f5bb682da 100644 --- a/docs/troubleshooting_deis/index.rst +++ b/docs/troubleshooting_deis/index.rst @@ -107,7 +107,7 @@ If you built ``deisctl`` locally or didn't use its installer, you may see an err This is because ``deisctl`` could not find unit files for Deis locally. Run ``deisctl help refresh-units`` to see where ``deisctl`` searches, and then run a command such as -``deisctl refresh-units --tag=v1.12.2``, or set the ``$DEISCTL_UNITS`` environment variable to a directory +``deisctl refresh-units --tag=v1.13.4``, or set the ``$DEISCTL_UNITS`` environment variable to a directory containing the unit files. Other issues diff --git a/docs/understanding_deis/DeisControlPlane.png b/docs/understanding_deis/DeisControlPlane.png index 5d8e7f182f..25835502f5 100644 Binary files a/docs/understanding_deis/DeisControlPlane.png and b/docs/understanding_deis/DeisControlPlane.png differ diff --git a/docs/understanding_deis/DeisDataPlane.png b/docs/understanding_deis/DeisDataPlane.png index 4c28464351..eb56681f9d 100644 Binary files a/docs/understanding_deis/DeisDataPlane.png and b/docs/understanding_deis/DeisDataPlane.png differ diff --git a/docs/understanding_deis/DeisGitPushWorkflow.png b/docs/understanding_deis/DeisGitPushWorkflow.png index 4f65c81e8d..238e840360 100644 Binary files a/docs/understanding_deis/DeisGitPushWorkflow.png and b/docs/understanding_deis/DeisGitPushWorkflow.png differ diff --git a/docs/understanding_deis/DeisRouterMesh.png b/docs/understanding_deis/DeisRouterMesh.png index 5375f003f2..79cccbf3db 100644 Binary files a/docs/understanding_deis/DeisRouterMesh.png and b/docs/understanding_deis/DeisRouterMesh.png differ diff --git a/docs/understanding_deis/DeisSystemDiagram.png b/docs/understanding_deis/DeisSystemDiagram.png index a286b251bc..d737630c45 100644 Binary files 
a/docs/understanding_deis/DeisSystemDiagram.png and b/docs/understanding_deis/DeisSystemDiagram.png differ diff --git a/docs/understanding_deis/components.rst b/docs/understanding_deis/components.rst index 782366b8ff..904ecaafa0 100644 --- a/docs/understanding_deis/components.rst +++ b/docs/understanding_deis/components.rst @@ -14,7 +14,7 @@ Each Deis component is deployed as a container or set of containers. Controller ---------- The controller component is an HTTP API server. Among other functions, the -controller contains :ref:`the scheduler `, which decides +controller contains :ref:`the scheduler `, which decides where to run app containers. The ``deis`` command-line client interacts with this component. diff --git a/docs/using_deis/using-docker-images.rst b/docs/using_deis/using-docker-images.rst index f4cbd85241..90d55098a7 100644 --- a/docs/using_deis/using-docker-images.rst +++ b/docs/using_deis/using-docker-images.rst @@ -30,9 +30,10 @@ Docker Image Requirements ^^^^^^^^^^^^^^^^^^^^^^^^^ In order to deploy Docker images, they must conform to the following requirements: - * The Docker image must EXPOSE only one port - * The port must be listening for a HTTP connection - * A default CMD must be specified for running the container +* The Docker image must EXPOSE only one port +* The port must be listening for a HTTP connection +* A default CMD must be specified for running the container +* Bash should be available in the Docker image .. 
note:: diff --git a/docs/using_deis/using-dockerfiles.rst b/docs/using_deis/using-dockerfiles.rst index 5a4370a529..ac0813360e 100644 --- a/docs/using_deis/using-dockerfiles.rst +++ b/docs/using_deis/using-dockerfiles.rst @@ -21,9 +21,10 @@ Dockerfile Requirements ^^^^^^^^^^^^^^^^^^^^^^^ In order to deploy Dockerfile applications, they must conform to the following requirements: - * The Dockerfile must EXPOSE only one port - * The port must be listening for a HTTP connection - * A default CMD must be specified for running the container +* The Dockerfile must EXPOSE only one port +* The port must be listening for a HTTP connection +* A default CMD must be specified for running the container +* Bash should be available in the Docker image .. note:: diff --git a/logger/Makefile b/logger/Makefile index 329bde4b34..33c25011ff 100644 --- a/logger/Makefile +++ b/logger/Makefile @@ -44,7 +44,7 @@ run: install start dev-release: push set-image push: check-registry - docker tag -f $(IMAGE) $(DEV_IMAGE) + docker tag $(IMAGE) $(DEV_IMAGE) docker push $(DEV_IMAGE) set-image: check-deisctl diff --git a/logger/image/Dockerfile b/logger/image/Dockerfile index 95191afa5c..4b4a10896b 100644 --- a/logger/image/Dockerfile +++ b/logger/image/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.2 +FROM alpine:3.4 ENTRYPOINT ["/bin/logger"] CMD ["--enable-publish"] @@ -7,4 +7,4 @@ EXPOSE 8088 ADD . 
/ -ENV DEIS_RELEASE 1.13.0-dev +ENV DEIS_RELEASE 1.13.4 diff --git a/logspout/Makefile b/logspout/Makefile index 1a3b6341f6..927b7c7a4a 100644 --- a/logspout/Makefile +++ b/logspout/Makefile @@ -44,7 +44,7 @@ run: install start dev-release: push set-image push: check-registry - docker tag -f $(RELEASE_IMAGE) $(DEV_DOCKER_IMAGE) + docker tag $(RELEASE_IMAGE) $(DEV_DOCKER_IMAGE) docker push $(DEV_DOCKER_IMAGE) set-image: check-deisctl diff --git a/logspout/attacher.go b/logspout/attacher.go index 7af181783f..be3fee92e4 100644 --- a/logspout/attacher.go +++ b/logspout/attacher.go @@ -26,15 +26,19 @@ func NewAttachManager(client *docker.Client) *AttachManager { containers, err := client.ListContainers(docker.ListContainersOptions{}) assert(err, "attacher") for _, listing := range containers { - m.attach(listing.ID[:12]) + m.attach(listing.ID) } go func() { events := make(chan *docker.APIEvents) assert(client.AddEventListener(events), "attacher") for msg := range events { - debug("event:", msg.ID[:12], msg.Status) - if msg.Status == "start" { - go m.attach(msg.ID[:12]) + if msg.Status != "" { + debug("event:", msg.Status, msg.From, msg.ID, "(Status/From/ID)") + if msg.Status == "start" { + go m.attach(msg.ID) + } + } else { + debug("event: unknown (probably post API v1.22)") } } log.Fatal("ruh roh") // todo: loop? 
@@ -44,6 +48,10 @@ func NewAttachManager(client *docker.Client) *AttachManager { func (m *AttachManager) attach(id string) { container, err := m.client.InspectContainer(id) + shortId := id + if len(shortId) > 12 { + shortId = id[:12] + } assert(err, "attacher") name := container.Name[1:] success := make(chan struct{}) @@ -63,7 +71,7 @@ func (m *AttachManager) attach(id string) { }) outwr.Close() errwr.Close() - debug("attach:", id, "finished") + debug("attach:", shortId, "finished") if err != nil { close(success) failure <- err @@ -80,10 +88,10 @@ func (m *AttachManager) attach(id string) { m.Unlock() success <- struct{}{} m.send(&AttachEvent{ID: id, Name: name, Type: "attach"}) - debug("attach:", id, name, "success") + debug("attach:", shortId, name, "success") return } - debug("attach:", id, "failure:", <-failure) + debug("attach:", shortId, "failure:", <-failure) } func (m *AttachManager) send(event *AttachEvent) { diff --git a/logspout/image/Dockerfile b/logspout/image/Dockerfile index 6e52dabd9d..5adfcb5eb7 100644 --- a/logspout/image/Dockerfile +++ b/logspout/image/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.2 +FROM alpine:3.4 ENV DOCKER_HOST unix:///tmp/docker.sock ENV ROUTESPATH /tmp @@ -6,4 +6,4 @@ CMD ["/bin/logspout"] ADD logspout /bin/logspout -ENV DEIS_RELEASE 1.13.0-dev +ENV DEIS_RELEASE 1.13.4 diff --git a/logspout/logspout.go b/logspout/logspout.go index a44da119d0..e76c15ed87 100644 --- a/logspout/logspout.go +++ b/logspout/logspout.go @@ -20,6 +20,11 @@ import ( "golang.org/x/net/websocket" ) +const ( + MAX_UDP_MSG_BYTES = 65507 + MAX_TCP_MSG_BYTES = 1048576 +) + var debugMode bool func debug(v ...interface{}) { @@ -65,32 +70,42 @@ func syslogStreamer(target Target, types []string, logstream chan *Log) { continue } tag, pid, data := getLogParts(logline) - var conn net.Conn + + // HACK: Go's syslog package hardcodes the log format, so let's send our own message + data = fmt.Sprintf("%s %s[%s]: %s", + time.Now().Format(getopt("DATETIME_FORMAT", 
dtime.DeisDatetimeFormat)), + tag, + pid, + data) + if strings.EqualFold(target.Protocol, "tcp") { addr, err := net.ResolveTCPAddr("tcp", target.Addr) assert(err, "syslog") - tcpconn, err := net.DialTCP("tcp", nil, addr) + conn, err := net.DialTCP("tcp", nil, addr) + assert(err, "syslog") + assert(conn.SetWriteBuffer(MAX_TCP_MSG_BYTES), "syslog") + _, err = fmt.Fprintln(conn, data) assert(err, "syslog") - assert(tcpconn.SetWriteBuffer(1048576), "syslog") - conn = tcpconn } else if strings.EqualFold(target.Protocol, "udp") { + // Truncate the message if it's too long to fit in a single UDP packet. + // Get the bytes first. If the string has non-UTF8 chars, the number of + // bytes might exceed the number of characters and it would be good to + // know that up front. + dataBytes := []byte(data) + if len(dataBytes) > MAX_UDP_MSG_BYTES { + // Truncate the bytes and add ellipses. + dataBytes = append(dataBytes[:MAX_UDP_MSG_BYTES-3], "..."...) + } addr, err := net.ResolveUDPAddr("udp", target.Addr) assert(err, "syslog") - udpconn, err := net.DialUDP("udp", nil, addr) + conn, err := net.DialUDP("udp", nil, addr) + assert(err, "syslog") + assert(conn.SetWriteBuffer(MAX_UDP_MSG_BYTES), "syslog") + _, err = conn.Write(dataBytes) assert(err, "syslog") - assert(udpconn.SetWriteBuffer(1048576), "syslog") - conn = udpconn } else { assert(fmt.Errorf("%s is not a supported protocol, use either udp or tcp", target.Protocol), "syslog") } - // HACK: Go's syslog package hardcodes the log format, so let's send our own message - _, err := fmt.Fprintf(conn, - "%s %s[%s]: %s", - time.Now().Format(getopt("DATETIME_FORMAT", dtime.DeisDatetimeFormat)), - tag, - pid, - data) - assert(err, "syslog") } } @@ -106,10 +121,6 @@ func getLogParts(logline *Log) (string, string, string) { if match != nil { return match[1], match[3], logline.Data } - match = getMatch(`^k8s_([a-z0-9-]+)-[a-z]+\.[\da-f]+_[a-z0-9-]+-([a-z]+-[\da-z]*)_`, logline.Name) - if match != nil { - return match[1], match[2], 
logline.Data - } if logline.Name == "deis-controller" { data_match := getMatch(`^[A-Z]+ \[([a-z0-9-]+)\]: (.*)`, logline.Data) if data_match != nil { @@ -194,6 +205,17 @@ func getEtcdValueOrDefault(c *etcd.Client, key string, defaultValue string) stri return resp.Node.Value } +func getEtcdRoute(client *etcd.Client) *Route { + hostResp, err := client.Get("/deis/logs/host", false, false) + assert(err, "url") + portResp, err := client.Get("/deis/logs/port", false, false) + assert(err, "url") + protocol := getEtcdValueOrDefault(client, "/deis/logs/protocol", "udp") + host := fmt.Sprintf("%s:%s", hostResp.Node.Value, portResp.Node.Value) + log.Printf("routing all to %s://%s", protocol, host) + return &Route{ID: "etcd", Target: Target{Type: "syslog", Addr: host, Protocol: protocol}} +} + func main() { runtime.GOMAXPROCS(1) debugMode = getopt("DEBUG", "") != "" @@ -213,14 +235,26 @@ func main() { debug("etcd:", connectionString[0]) etcd := etcd.NewClient(connectionString) etcd.SetDialTimeout(3 * time.Second) - hostResp, err := etcd.Get("/deis/logs/host", false, false) - assert(err, "url") - portResp, err := etcd.Get("/deis/logs/port", false, false) - assert(err, "url") - protocol := getEtcdValueOrDefault(etcd, "/deis/logs/protocol", "udp") - host := fmt.Sprintf("%s:%s", hostResp.Node.Value, portResp.Node.Value) - log.Printf("routing all to %s://%s", protocol, host) - router.Add(&Route{Target: Target{Type: "syslog", Addr: host, Protocol: protocol}}) + router.Add(getEtcdRoute(etcd)) + go func() { + for { + // NOTE(bacongobbler): sleep for a bit before doing the discovery loop again + time.Sleep(10 * time.Second) + newRoute := getEtcdRoute(etcd) + oldRoute, err := router.Get(newRoute.ID) + // router.Get only returns an error if the route doesn't exist. 
If it does, + // then we can skip this check and just add the new route to the routing table + if err == nil && + newRoute.Target.Protocol == oldRoute.Target.Protocol && + newRoute.Target.Addr == oldRoute.Target.Addr { + // NOTE(bacongobbler): the two targets are the same; perform a no-op + continue + } + // NOTE(bacongobbler): this operation is a no-op if the route doesn't exist + router.Remove(oldRoute.ID) + router.Add(newRoute) + } + }() } if len(os.Args) > 1 { diff --git a/mesos/Makefile b/mesos/Makefile deleted file mode 100644 index f084bcd79f..0000000000 --- a/mesos/Makefile +++ /dev/null @@ -1,134 +0,0 @@ -include ../includes.mk - -REPO = deis -MESOS = 0.22.1-1.0.ubuntu1404 -ZOOKEEPER_VERSION = 3.5.0 -MARATHON_VERSION = 0.8.2-RC3 - -repo_path = github.com/deis/deis/mesos - -GO = godep go -GOFMT = gofmt -l -GOLINT = golint -GOTEST = $(GO) test -cover -race -v -GOVET = $(GO) vet - -COMPONENT = $(notdir $(repo_path)) -GO_PACKAGES = pkg/boot pkg/confd pkg/etcd pkg/fleet pkg/log pkg/net pkg/os pkg/types -GO_PACKAGES_REPO_PATH = $(addprefix $(repo_path)/,$(GO_PACKAGES)) - -SHELL_SCRIPTS = $(shell find "." 
-name '*.sh') - -MASTER_IMAGE = $(IMAGE_PREFIX)mesos-master:$(BUILD_TAG) -MASTER_DEV_IMAGE = $(REGISTRY)$(MASTER_IMAGE) -MARATHON_IMAGE = $(IMAGE_PREFIX)mesos-marathon:$(BUILD_TAG) -MARATHON_DEV_IMAGE = $(REGISTRY)$(MARATHON_IMAGE) -SLAVE_IMAGE = $(IMAGE_PREFIX)mesos-slave:$(BUILD_TAG) -SLAVE_DEV_IMAGE = $(REGISTRY)$(SLAVE_IMAGE) -ZOOKEEPER_IMAGE = $(IMAGE_PREFIX)zookeeper:$(BUILD_TAG) -ZOOKEEPER_DEV_IMAGE = $(REGISTRY)$(ZOOKEEPER_IMAGE) - -build: test-style zookeeper-go mesos-template mesos-master mesos-slave mesos-marathon zookeeper - -install: check-deisctl - deisctl install $(COMPONENT) - -uninstall: check-deisctl - deisctl uninstall $(COMPONENT) - -start: check-deisctl - deisctl start $(COMPONENT) - -stop: check-deisctl - deisctl stop $(COMPONENT) - -restart: stop start - -run: install start - -dev-release: push set-image - -push: check-registry - docker tag -f $(MASTER_IMAGE) $(MASTER_DEV_IMAGE) - docker push $(MASTER_DEV_IMAGE) - docker tag -f $(SLAVE_IMAGE) $(SLAVE_DEV_IMAGE) - docker push $(SLAVE_DEV_IMAGE) - docker tag -f $(MARATHON_IMAGE) $(MARATHON_DEV_IMAGE) - docker push $(MARATHON_DEV_IMAGE) - docker tag -f $(ZOOKEEPER_IMAGE) $(ZOOKEEPER_DEV_IMAGE) - docker push $(ZOOKEEPER_DEV_IMAGE) - -set-image: check-deisctl - deisctl config mesos-master set image=$(MASTER_DEV_IMAGE) - deisctl config mesos-slave set image=$(SLAVE_DEV_IMAGE) - deisctl config mesos-marathon set image=$(MARATHON_DEV_IMAGE) - deisctl config zookeeper set image=$(ZOOKEEPER_DEV_IMAGE) - -release: - docker push $(MASTER_IMAGE) - docker push $(SLAVE_IMAGE) - docker push $(MARATHON_IMAGE) - docker push $(ZOOKEEPER_IMAGE) - -deploy: build dev-release restart - -setup-gotools: - go get -u github.com/jteeuwen/go-bindata/... 
- -mesos-go: setup-gotools - GOOS=linux GOARCH=amd64 CGO_ENABLED=0 godep go build -a -installsuffix cgo -ldflags '-s' -o bin/master-boot pkg/boot/mesos/master/main.go - @$(call check-static-binary,bin/master-boot) - GOOS=linux GOARCH=amd64 CGO_ENABLED=0 godep go build -a -installsuffix cgo -ldflags '-s' -o bin/slave-boot pkg/boot/mesos/slave/main.go - @$(call check-static-binary,bin/slave-boot) - go-bindata -pkg bindata -o bindata/marathon/bindata.go pkg/boot/mesos/marathon/bash/; \ - GOOS=linux GOARCH=amd64 CGO_ENABLED=0 godep go build -a -installsuffix cgo -ldflags '-s' -o bin/marathon-boot pkg/boot/mesos/marathon/main.go - @$(call check-static-binary,bin/marathon-boot) - -mesos-template: - sed "s/#VERSION#/$(MESOS)/g" template > Dockerfile - docker build -t $(IMAGE_PREFIX)$@:$(BUILD_TAG) . - rm -f Dockerfile - -mesos-master: mesos-go mesos-template - sed "s@#PREFIX#@$(IMAGE_PREFIX)@;s/#VERSION#/$(BUILD_TAG)/g" master > Dockerfile - docker build -t $(IMAGE_PREFIX)$@:$(BUILD_TAG) . - rm -f Dockerfile - -mesos-slave: mesos-go mesos-template - sed "s@#PREFIX#@$(IMAGE_PREFIX)@;s/#VERSION#/$(BUILD_TAG)/g" slave > Dockerfile - docker build -t $(IMAGE_PREFIX)$@:$(BUILD_TAG) . - rm -f Dockerfile - -build-mesos-marathon: mesos-template - sed "s/#MARATHON_VERSION#/$(MARATHON_VERSION)/;s@#PREFIX#@$(IMAGE_PREFIX)@;s/#VERSION#/$(BUILD_TAG)/g" build-marathon > Dockerfile - docker build -t $(IMAGE_PREFIX)$@:$(BUILD_TAG) . - rm -f Dockerfile - -mesos-marathon: mesos-go build-mesos-marathon - cp marathon Dockerfile - docker cp `docker create $(IMAGE_PREFIX)build-mesos-marathon:$(BUILD_TAG) /bin/bash`:/marathon/target/marathon-assembly-$(MARATHON_VERSION).jar . - mv marathon-assembly-$(MARATHON_VERSION).jar marathon-assembly.jar - sed "s/#MARATHON_VERSION#/$(MARATHON_VERSION)/;s@#PREFIX#@$(IMAGE_PREFIX)@;s/#VERSION#/$(BUILD_TAG)/" marathon > Dockerfile - docker build -t $(IMAGE_PREFIX)$@:$(BUILD_TAG) . 
- rm -f Dockerfile - -zookeeper: zookeeper-go - docker build -t $(IMAGE_PREFIX)$@:$(BUILD_TAG) zookeeper/. - -zookeeper-go: - echo "Building..." - go-bindata -pkg bindata -o bindata/zookeeper/bindata.go pkg/boot/zookeeper/bash/; \ - GOOS=linux GOARCH=amd64 CGO_ENABLED=0 godep go build -a -installsuffix cgo -ldflags '-s' -o zookeeper/bin/boot pkg/boot/zookeeper/main/boot.go - @$(call check-static-binary,zookeeper/bin/boot) - -test: mesos-go zookeeper-go - @$(GOFMT) -timeout 10s $(GO_PACKAGES) - -test-style: - @$(GOFMT) $(GO_PACKAGES) - @$(GOFMT) $(GO_PACKAGES) | read; if [ $$? == 0 ]; then echo "gofmt check failed."; exit 1; fi - $(GOVET) $(GO_PACKAGES_REPO_PATH) - @for i in $(addsuffix /...,$(GO_PACKAGES)); do \ - $(GOLINT) $$i; \ - done - shellcheck $(SHELL_SCRIPTS) diff --git a/mesos/README.md b/mesos/README.md deleted file mode 100644 index 6645b1e670..0000000000 --- a/mesos/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# Mesos with Marathon Framework - -[Marathon Framework](https://github.com/mesosphere/marathon) components for use as -an optional scheduler in Deis, the open source PaaS. - -## Usage - -Please consult the [Mesos with Marathon framework](http://docs.deis.io/en/latest/customizing_deis/choosing-a-scheduler/#mesos-with-marathon-framework) -for instructions on installing and activating Mesos with Marathon Framework in Deis. - -## License - -© 2015 Engine Yard, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); you may -not use this file except in compliance with the License. You may obtain -a copy of the License at - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff --git a/mesos/build-marathon b/mesos/build-marathon deleted file mode 100644 index 7de576751d..0000000000 --- a/mesos/build-marathon +++ /dev/null @@ -1,16 +0,0 @@ -FROM #PREFIX#mesos-template:#VERSION# - -ENV DEBIAN_FRONTEND noninteractive -ENV MARATHON_VERSION=#MARATHON_VERSION# - -COPY build-marathon-jar.sh /tmp/build.sh - -RUN DOCKER_BUILD=true /tmp/build.sh - -COPY bin/marathon-boot /app/bin/boot - -WORKDIR /app - -EXPOSE 8080 - -ENTRYPOINT ["/app/bin/boot"] diff --git a/mesos/build-marathon-jar.sh b/mesos/build-marathon-jar.sh deleted file mode 100755 index 88a140a90e..0000000000 --- a/mesos/build-marathon-jar.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env bash - -# fail on any command exiting non-zero -set -eo pipefail - -if [[ -z $DOCKER_BUILD ]]; then - echo - echo "Note: this script is intended for use by the Dockerfile and not as a way to build marathon locally" - echo - exit 1 -fi - -# shellcheck disable=SC2034 -DEBIAN_FRONTEND=noninteractive - -apt-get update && apt-get install --no-install-recommends -y \ - openjdk-7-jdk \ - scala \ - curl - -curl -SsL -O http://dl.bintray.com/sbt/debian/sbt-0.13.5.deb && \ - dpkg -i sbt-0.13.5.deb - -curl -sSL "https://github.com/mesosphere/marathon/archive/v$MARATHON_VERSION.tar.gz" | tar -xzf - -C /opt -ln -s "/opt/marathon-$MARATHON_VERSION" /app -ln -s "/opt/marathon-$MARATHON_VERSION" /marathon - -cd /app - -# Word splitting wanted in this situation. -# shellcheck disable=SC2046 -sbt assembly && \ - mv $(find target -name 'marathon-assembly-*.jar' | sort | tail -1) ./ && \ - rm -rf target/* ~/.sbt ~/.ivy2 && \ - mv marathon-assembly-*.jar target - -# cleanup. indicate that python, libpq and libyanl are required packages. 
-apt-get clean -y && \ - rm -rf /tmp/* /var/tmp/* && \ - rm -rf /var/lib/apt/lists/* diff --git a/mesos/build-marathon.sh b/mesos/build-marathon.sh deleted file mode 100755 index 125012e9a1..0000000000 --- a/mesos/build-marathon.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env bash - -# fail on any command exiting non-zero -set -eo pipefail - -if [[ -z $DOCKER_BUILD ]]; then - echo - echo "Note: this script is intended for use by the Dockerfile and not as a way to build zoopeeper locally" - echo - exit 1 -fi - -apt-get update && apt-get install -y curl - -cd /tmp - -mkdir -p /opt - -curl -sSL "https://github.com/mesosphere/marathon/archive/v$MARATHON_VERSION.tar.gz" | tar -xzf - -C /opt -ln -s "/opt/marathon-$MARATHON_VERSION" /app -ln -s "/opt/marathon-$MARATHON_VERSION" /marathon - -mkdir -p "/opt/marathon-$MARATHON_VERSION/target" -ln -s "/marathon-assembly.jar" "/opt/marathon-$MARATHON_VERSION/target/marathon-assembly-$MARATHON_VERSION.jar" - -apt-get autoremove -y --purge && \ - apt-get clean -y && \ - rm -Rf /usr/share/man /usr/share/doc && \ - rm -rf /tmp/* /var/tmp/* && \ - rm -rf /var/lib/apt/lists/* diff --git a/mesos/build-mesos.sh b/mesos/build-mesos.sh deleted file mode 100755 index f5ac783760..0000000000 --- a/mesos/build-mesos.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash - -# fail on any command exiting non-zero -set -eo pipefail - -if [[ -z $DOCKER_BUILD ]]; then - echo - echo "Note: this script is intended for use by the Dockerfile and not as a way to build mesos locally" - echo - exit 1 -fi - -# shellcheck disable=SC2034 -DEBIAN_FRONTEND=noninteractive - -echo "deb http://repos.mesosphere.io/ubuntu/ trusty main" > /etc/apt/sources.list.d/mesosphere.list - -apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF - -apt-get update && \ - apt-get -y install mesos="$MESOS" - -apt-get autoremove -y --purge && \ - apt-get clean -y && \ - rm -Rf /usr/share/man /usr/share/doc && \ - rm -rf /tmp/* /var/tmp/* && \ - rm -rf 
/var/lib/apt/lists/* diff --git a/mesos/marathon b/mesos/marathon deleted file mode 100644 index 7dffd87e6e..0000000000 --- a/mesos/marathon +++ /dev/null @@ -1,15 +0,0 @@ -FROM #PREFIX#mesos-template:#VERSION# - -EXPOSE 8080 - -COPY marathon-assembly.jar /marathon-assembly.jar - -COPY build-marathon.sh /tmp/build.sh - -COPY bin/marathon-boot /app/bin/boot - -RUN DOCKER_BUILD=true MARATHON_VERSION=#MARATHON_VERSION# /tmp/build.sh - -WORKDIR /app - -ENTRYPOINT ["/app/bin/boot"] diff --git a/mesos/master b/mesos/master deleted file mode 100644 index 119e9bf520..0000000000 --- a/mesos/master +++ /dev/null @@ -1,5 +0,0 @@ -FROM #PREFIX#mesos-template:#VERSION# - -COPY bin/master-boot /app/bin/boot - -ENTRYPOINT ["/app/bin/boot"] diff --git a/mesos/pkg/boot/extpoints/extpoints.go b/mesos/pkg/boot/extpoints/extpoints.go deleted file mode 100644 index 5667530c6d..0000000000 --- a/mesos/pkg/boot/extpoints/extpoints.go +++ /dev/null @@ -1,177 +0,0 @@ -// generated by go-extpoints -- DO NOT EDIT -package extpoints - -import ( - "reflect" - "runtime" - "strings" - "sync" -) - -var extRegistry = ®istryType{m: make(map[string]*extensionPoint)} - -type registryType struct { - sync.Mutex - m map[string]*extensionPoint -} - -// Top level registration - -func extensionTypes(extension interface{}) []string { - var ifaces []string - typ := reflect.TypeOf(extension) - for name, ep := range extRegistry.m { - if ep.iface.Kind() == reflect.Func && typ.AssignableTo(ep.iface) { - ifaces = append(ifaces, name) - } - if ep.iface.Kind() != reflect.Func && typ.Implements(ep.iface) { - ifaces = append(ifaces, name) - } - } - return ifaces -} - -func RegisterExtension(extension interface{}, name string) []string { - extRegistry.Lock() - defer extRegistry.Unlock() - var ifaces []string - for _, iface := range extensionTypes(extension) { - if extRegistry.m[iface].register(extension, name) { - ifaces = append(ifaces, iface) - } - } - return ifaces -} - -func UnregisterExtension(name string) 
[]string { - extRegistry.Lock() - defer extRegistry.Unlock() - var ifaces []string - for iface, extpoint := range extRegistry.m { - if extpoint.unregister(name) { - ifaces = append(ifaces, iface) - } - } - return ifaces -} - -// Base extension point - -type extensionPoint struct { - sync.Mutex - iface reflect.Type - extensions map[string]interface{} -} - -func newExtensionPoint(iface interface{}) *extensionPoint { - ep := &extensionPoint{ - iface: reflect.TypeOf(iface).Elem(), - extensions: make(map[string]interface{}), - } - extRegistry.Lock() - extRegistry.m[ep.iface.Name()] = ep - extRegistry.Unlock() - return ep -} - -func (ep *extensionPoint) lookup(name string) interface{} { - ep.Lock() - defer ep.Unlock() - ext, ok := ep.extensions[name] - if !ok { - return nil - } - return ext -} - -func (ep *extensionPoint) all() map[string]interface{} { - ep.Lock() - defer ep.Unlock() - all := make(map[string]interface{}) - for k, v := range ep.extensions { - all[k] = v - } - return all -} - -func (ep *extensionPoint) register(extension interface{}, name string) bool { - ep.Lock() - defer ep.Unlock() - if name == "" { - typ := reflect.TypeOf(extension) - if typ.Kind() == reflect.Func { - nameParts := strings.Split(runtime.FuncForPC( - reflect.ValueOf(extension).Pointer()).Name(), ".") - name = nameParts[len(nameParts)-1] - } else { - name = typ.Elem().Name() - } - } - _, exists := ep.extensions[name] - if exists { - return false - } - ep.extensions[name] = extension - return true -} - -func (ep *extensionPoint) unregister(name string) bool { - ep.Lock() - defer ep.Unlock() - _, exists := ep.extensions[name] - if !exists { - return false - } - delete(ep.extensions, name) - return true -} - -// BootComponent - -var BootComponents = &bootComponentExt{ - newExtensionPoint(new(BootComponent)), -} - -type bootComponentExt struct { - *extensionPoint -} - -func (ep *bootComponentExt) Unregister(name string) bool { - return ep.unregister(name) -} - -func (ep *bootComponentExt) 
Register(extension BootComponent, name string) bool { - return ep.register(extension, name) -} - -func (ep *bootComponentExt) Lookup(name string) BootComponent { - ext := ep.lookup(name) - if ext == nil { - return nil - } - return ext.(BootComponent) -} - -func (ep *bootComponentExt) Select(names []string) []BootComponent { - var selected []BootComponent - for _, name := range names { - selected = append(selected, ep.Lookup(name)) - } - return selected -} - -func (ep *bootComponentExt) All() map[string]BootComponent { - all := make(map[string]BootComponent) - for k, v := range ep.all() { - all[k] = v.(BootComponent) - } - return all -} - -func (ep *bootComponentExt) Names() []string { - var names []string - for k := range ep.all() { - names = append(names, k) - } - return names -} diff --git a/mesos/pkg/boot/extpoints/interfaces.go b/mesos/pkg/boot/extpoints/interfaces.go deleted file mode 100644 index c9851ee3ec..0000000000 --- a/mesos/pkg/boot/extpoints/interfaces.go +++ /dev/null @@ -1,42 +0,0 @@ -package extpoints - -import ( - "github.com/deis/deis/mesos/pkg/types" -) - -// BootComponent interface that defines the steps -// required to initialize a component -type BootComponent interface { - // EtcdDefaults required initial values in etcd - EtcdDefaults() map[string]string - - // MkdirsEtcd required directories in etcd - MkdirsEtcd() []string - - // PreBoot custom pre-boot task (custom go code) - PreBoot(currentBoot *types.CurrentBoot) - - // PreBootScripts scripts to execute before the component starts - PreBootScripts(currentBoot *types.CurrentBoot) []*types.Script - - // UseConfd is required the use of confd? 
- UseConfd() (bool, bool) - - // BootDaemons required commands to start the component - BootDaemons(currentBoot *types.CurrentBoot) []*types.ServiceDaemon - - // WaitForPorts ports that must be open to indicate that the component is running - WaitForPorts() []int - - // PostBootScripts scripts to execute after the component starts - PostBootScripts(currentBoot *types.CurrentBoot) []*types.Script - - // PostBoot custom post-boot task (custom go code) - PostBoot(currentBoot *types.CurrentBoot) - - // ScheduleTasks tasks that must run during the lifecycle of the component - ScheduleTasks(currentBoot *types.CurrentBoot) []*types.Cron - - // PreShutdownScripts scripts to execute before the component execution ends - PreShutdownScripts(currentBoot *types.CurrentBoot) []*types.Script -} diff --git a/mesos/pkg/boot/main.go b/mesos/pkg/boot/main.go deleted file mode 100644 index d7ac9f3ed3..0000000000 --- a/mesos/pkg/boot/main.go +++ /dev/null @@ -1,248 +0,0 @@ -//go:generate go-extpoints - -package boot - -import ( - "net/http" - _ "net/http/pprof" //pprof is used for profiling servers - "os" - "os/signal" - "runtime" - "strconv" - "strings" - "syscall" - "time" - - "github.com/deis/deis/mesos/pkg/boot/extpoints" - "github.com/deis/deis/mesos/pkg/confd" - "github.com/deis/deis/mesos/pkg/etcd" - logger "github.com/deis/deis/mesos/pkg/log" - "github.com/deis/deis/mesos/pkg/net" - oswrapper "github.com/deis/deis/mesos/pkg/os" - "github.com/deis/deis/mesos/pkg/types" - "github.com/deis/deis/version" - "github.com/robfig/cron" -) - -const ( - timeout time.Duration = 10 * time.Second - ttl time.Duration = timeout * 2 -) - -var ( - signalChan = make(chan os.Signal, 1) - log = logger.New() - bootProcess = extpoints.BootComponents - component extpoints.BootComponent -) - -func init() { - runtime.GOMAXPROCS(runtime.NumCPU()) -} - -// RegisterComponent register an externsion to be used with this application -func RegisterComponent(component extpoints.BootComponent, name string) bool 
{ - return bootProcess.Register(component, name) -} - -// Start initiate the boot process of the current component -// etcdPath is the base path used to publish the component in etcd -// externalPort is the base path used to publish the component in etcd -func Start(etcdPath string, externalPort int) { - log.Infof("boot version [%v]", version.Version) - - go func() { - log.Debugf("starting pprof http server in port 6060") - http.ListenAndServe("localhost:6060", nil) - }() - - signal.Notify(signalChan, - syscall.SIGHUP, - syscall.SIGINT, - syscall.SIGKILL, - syscall.SIGTERM, - syscall.SIGQUIT, - os.Interrupt, - ) - - // Wait for a signal and exit - exitChan := make(chan int) - go func() { - for { - s := <-signalChan - log.Debugf("Signal received: %v", s) - switch s { - case syscall.SIGTERM: - exitChan <- 0 - case syscall.SIGQUIT: - exitChan <- 0 - case syscall.SIGKILL: - exitChan <- 1 - default: - exitChan <- 1 - } - } - }() - - component = bootProcess.Lookup("boot") - if component == nil { - log.Error("error loading boot extension...") - signalChan <- syscall.SIGINT - } - - host := oswrapper.Getopt("HOST", "127.0.0.1") - etcdPort, _ := strconv.Atoi(oswrapper.Getopt("ETCD_PORT", "4001")) - etcdPeers := oswrapper.Getopt("ETCD_PEERS", "127.0.0.1:"+strconv.Itoa(etcdPort)) - etcdClient := etcd.NewClient(etcd.GetHTTPEtcdUrls(host+":"+strconv.Itoa(etcdPort), etcdPeers)) - - etcdURL := etcd.GetHTTPEtcdUrls(host+":"+strconv.Itoa(etcdPort), etcdPeers) - - currentBoot := &types.CurrentBoot{ - ConfdNodes: getConfdNodes(host+":"+strconv.Itoa(etcdPort), etcdPeers), - EtcdClient: etcdClient, - EtcdPath: etcdPath, - EtcdPort: etcdPort, - EtcdPeers: etcdPeers, - EtcdURL: etcdURL, - Host: net.ParseIP(host), - Timeout: timeout, - TTL: timeout * 2, - Port: externalPort, - } - - // do the real work in a goroutine to be able to exit if - // a signal is received during the boot process - go start(currentBoot) - - code := <-exitChan - - // pre shutdown tasks - log.Debugf("executing pre 
shutdown scripts") - preShutdownScripts := component.PreShutdownScripts(currentBoot) - runAllScripts(signalChan, preShutdownScripts) - - log.Debugf("execution terminated with exit code %v", code) - os.Exit(code) -} - -func start(currentBoot *types.CurrentBoot) { - log.Info("starting component...") - - log.Debug("creating required etcd directories") - for _, key := range component.MkdirsEtcd() { - etcd.Mkdir(currentBoot.EtcdClient, key) - } - - log.Debug("setting default etcd values") - for key, value := range component.EtcdDefaults() { - etcd.SetDefault(currentBoot.EtcdClient, key, value) - } - - // component.PreBoot(currentBoot) - - initial, daemon := component.UseConfd() - if initial { - // wait for confd to run once and install initial templates - log.Debug("waiting for initial confd configuration") - confd.WaitForInitialConf(currentBoot.ConfdNodes, currentBoot.Timeout) - } - - log.Debug("running preboot code") - component.PreBoot(currentBoot) - - log.Debug("running pre boot scripts") - preBootScripts := component.PreBootScripts(currentBoot) - runAllScripts(signalChan, preBootScripts) - - if daemon { - // spawn confd in the background to update services based on etcd changes - log.Debug("launching confd") - go confd.Launch(signalChan, currentBoot.ConfdNodes) - } - - log.Debug("running boot daemons") - servicesToStart := component.BootDaemons(currentBoot) - for _, daemon := range servicesToStart { - go oswrapper.RunProcessAsDaemon(signalChan, daemon.Command, daemon.Args) - } - - // if the returned ips contains the value contained in $HOST it means - // that we are running docker with --net=host - ipToListen := "0.0.0.0" - netIfaces := net.GetNetworkInterfaces() - for _, iface := range netIfaces { - if strings.Index(iface.IP, currentBoot.Host.String()) > -1 { - ipToListen = currentBoot.Host.String() - break - } - } - - portsToWaitFor := component.WaitForPorts() - log.Debugf("waiting for a service in the port %v in ip %v", portsToWaitFor, ipToListen) - for _, 
portToWait := range portsToWaitFor { - if portToWait > 0 { - err := net.WaitForPort("tcp", ipToListen, portToWait, timeout) - if err != nil { - log.Errorf("error waiting for port %v using ip %v: %v", portToWait, ipToListen, err) - signalChan <- syscall.SIGINT - } - } - } - - time.Sleep(60 * time.Second) - - // we only publish the service in etcd if the port if > 0 - if currentBoot.Port > 0 { - log.Debug("starting periodic publication in etcd...") - log.Debugf("etcd publication path %s, host %s and port %v", currentBoot.EtcdPath, currentBoot.Host, currentBoot.Port) - go etcd.PublishService(currentBoot.EtcdClient, currentBoot.EtcdPath+"/"+currentBoot.Host.String(), currentBoot.Host.String(), currentBoot.Port, uint64(ttl.Seconds()), timeout) - - // Wait for the first publication - time.Sleep(timeout / 2) - } - - log.Debug("running post boot scripts") - postBootScripts := component.PostBootScripts(currentBoot) - runAllScripts(signalChan, postBootScripts) - - log.Debug("checking for cron tasks...") - crons := component.ScheduleTasks(currentBoot) - _cron := cron.New() - for _, cronTask := range crons { - _cron.AddFunc(cronTask.Frequency, cronTask.Code) - } - _cron.Start() - - component.PostBoot(currentBoot) -} - -func getConfdNodes(host, etcdCtlPeers string) []string { - if etcdCtlPeers != "127.0.0.1:4001" { - hosts := strings.Split(etcdCtlPeers, ",") - result := []string{} - for _, _host := range hosts { - result = append(result, _host) - } - return result - } - - return []string{host} -} - -func runAllScripts(signalChan chan os.Signal, scripts []*types.Script) { - for _, script := range scripts { - if script.Params == nil { - script.Params = map[string]string{} - } - // add HOME variable to avoid warning from ceph commands - script.Params["HOME"] = "/tmp" - if log.Level.String() == "debug" { - script.Params["DEBUG"] = "true" - } - err := oswrapper.RunScript(script.Name, script.Params, script.Content) - if err != nil { - log.Errorf("script %v execution finished with 
error: %v", script.Name, err) - signalChan <- syscall.SIGTERM - } - } -} diff --git a/mesos/pkg/boot/mesos/marathon/bash/update-hosts-file.bash b/mesos/pkg/boot/mesos/marathon/bash/update-hosts-file.bash deleted file mode 100755 index 9d72fb76e9..0000000000 --- a/mesos/pkg/boot/mesos/marathon/bash/update-hosts-file.bash +++ /dev/null @@ -1,8 +0,0 @@ -set -eo pipefail - -# set debug based on envvar -[[ $DEBUG ]] && set -x - -main() { - echo "$HOST $(hostname)" >> /etc/hosts -} diff --git a/mesos/pkg/boot/mesos/marathon/main.go b/mesos/pkg/boot/mesos/marathon/main.go deleted file mode 100644 index 423d94cbf4..0000000000 --- a/mesos/pkg/boot/mesos/marathon/main.go +++ /dev/null @@ -1,116 +0,0 @@ -package main - -import ( - "strings" - - "github.com/deis/deis/mesos/bindata/marathon" - - "github.com/deis/deis/mesos/pkg/boot" - "github.com/deis/deis/mesos/pkg/etcd" - logger "github.com/deis/deis/mesos/pkg/log" - "github.com/deis/deis/mesos/pkg/os" - "github.com/deis/deis/mesos/pkg/types" -) - -const ( - mesosPort = 8180 -) - -var ( - etcdPath = os.Getopt("ETCD_PATH", "/mesos/marathon") - log = logger.New() -) - -func init() { - boot.RegisterComponent(new(MesosBoot), "boot") -} - -func main() { - boot.Start(etcdPath, mesosPort) -} - -// MesosBoot struct for mesos boot. -type MesosBoot struct{} - -// MkdirsEtcd creates a directory in etcd. -func (mb *MesosBoot) MkdirsEtcd() []string { - return []string{etcdPath} -} - -// EtcdDefaults returns default values for etcd. -func (mb *MesosBoot) EtcdDefaults() map[string]string { - return map[string]string{} -} - -// PreBootScripts runs preboot scripts. 
-func (mb *MesosBoot) PreBootScripts(currentBoot *types.CurrentBoot) []*types.Script { - params := make(map[string]string) - params["HOST"] = currentBoot.Host.String() - err := os.RunScript("pkg/boot/mesos/marathon/bash/update-hosts-file.bash", params, bindata.Asset) - if err != nil { - log.Printf("command finished with error: %v", err) - } - - return []*types.Script{} -} - -// PreBoot to log starting of marathon. -func (mb *MesosBoot) PreBoot(currentBoot *types.CurrentBoot) { - log.Info("mesos-marathon: starting...") -} - -// BootDaemons starts marathon. -func (mb *MesosBoot) BootDaemons(currentBoot *types.CurrentBoot) []*types.ServiceDaemon { - args := gatherArgs(currentBoot.EtcdClient) - log.Infof("mesos marathon args: %v", args) - return []*types.ServiceDaemon{&types.ServiceDaemon{Command: "/marathon/bin/start", Args: args}} -} - -// WaitForPorts returns an array of ports. -func (mb *MesosBoot) WaitForPorts() []int { - return []int{} -} - -// PostBootScripts returns type script. -func (mb *MesosBoot) PostBootScripts(currentBoot *types.CurrentBoot) []*types.Script { - return []*types.Script{} -} - -// PostBoot returns type script. -func (mb *MesosBoot) PostBoot(currentBoot *types.CurrentBoot) { - log.Info("mesos-marathon: running...") -} - -// ScheduleTasks returns a cron job. -func (mb *MesosBoot) ScheduleTasks(currentBoot *types.CurrentBoot) []*types.Cron { - return []*types.Cron{} -} - -// UseConfd uses confd. -func (mb *MesosBoot) UseConfd() (bool, bool) { - return false, false -} - -// PreShutdownScripts returns type script. 
-func (mb *MesosBoot) PreShutdownScripts(currentBoot *types.CurrentBoot) []*types.Script { - return []*types.Script{} -} - -func gatherArgs(c *etcd.Client) []string { - var args []string - - nodes := etcd.GetList(c, "/zookeeper/nodes") - var hosts []string - for _, node := range nodes { - hosts = append(hosts, node+":3888") - } - zkHosts := strings.Join(hosts, ",") - args = append(args, "--master", "zk://"+zkHosts+"/mesos") - args = append(args, "--zk", "zk://"+zkHosts+"/marathon") - // 20min task launch timeout for large docker image pulls - args = append(args, "--task_launch_timeout", "1200000") - args = append(args, "--ha") - args = append(args, "--http_port", "8180") - - return args -} diff --git a/mesos/pkg/boot/mesos/master/main.go b/mesos/pkg/boot/mesos/master/main.go deleted file mode 100644 index 87f2f00c62..0000000000 --- a/mesos/pkg/boot/mesos/master/main.go +++ /dev/null @@ -1,106 +0,0 @@ -package main - -import ( - "strings" - - "github.com/deis/deis/mesos/pkg/boot" - "github.com/deis/deis/mesos/pkg/etcd" - logger "github.com/deis/deis/mesos/pkg/log" - "github.com/deis/deis/mesos/pkg/os" - "github.com/deis/deis/mesos/pkg/types" -) - -const ( - mesosPort = 5050 -) - -var ( - etcdPath = os.Getopt("ETCD_PATH", "/mesos/master") - log = logger.New() -) - -func init() { - boot.RegisterComponent(new(MesosBoot), "boot") -} - -func main() { - boot.Start(etcdPath, mesosPort) -} - -// MesosBoot struct to boot mesos. -type MesosBoot struct{} - -// MkdirsEtcd creates a directory in etcd. -func (mb *MesosBoot) MkdirsEtcd() []string { - return []string{etcdPath} -} - -// EtcdDefaults returns default values for etcd. -func (mb *MesosBoot) EtcdDefaults() map[string]string { - return map[string]string{} -} - -// PreBootScripts runs preboot scripts. -func (mb *MesosBoot) PreBootScripts(currentBoot *types.CurrentBoot) []*types.Script { - return []*types.Script{} -} - -// PreBoot to log starting of marathon. 
-func (mb *MesosBoot) PreBoot(currentBoot *types.CurrentBoot) { - log.Info("mesos-master: starting...") -} - -// BootDaemons starts mesos-master. -func (mb *MesosBoot) BootDaemons(currentBoot *types.CurrentBoot) []*types.ServiceDaemon { - args := gatherArgs(currentBoot.EtcdClient) - args = append(args, "--ip="+currentBoot.Host.String()) - args = append(args, "--hostname="+currentBoot.Host.String()) - log.Infof("mesos master args: %v", args) - return []*types.ServiceDaemon{&types.ServiceDaemon{Command: "mesos-master", Args: args}} -} - -// WaitForPorts returns an array of ports. -func (mb *MesosBoot) WaitForPorts() []int { - return []int{} -} - -// PostBootScripts returns type script. -func (mb *MesosBoot) PostBootScripts(currentBoot *types.CurrentBoot) []*types.Script { - return []*types.Script{} -} - -// PostBoot returns type script. -func (mb *MesosBoot) PostBoot(currentBoot *types.CurrentBoot) { - log.Info("mesos-master: running...") -} - -// ScheduleTasks returns a cron job. -func (mb *MesosBoot) ScheduleTasks(currentBoot *types.CurrentBoot) []*types.Cron { - return []*types.Cron{} -} - -// UseConfd uses confd. -func (mb *MesosBoot) UseConfd() (bool, bool) { - return false, false -} - -// PreShutdownScripts returns type script. 
-func (mb *MesosBoot) PreShutdownScripts(currentBoot *types.CurrentBoot) []*types.Script { - return []*types.Script{} -} - -func gatherArgs(c *etcd.Client) []string { - var args []string - - nodes := etcd.GetList(c, "/zookeeper/nodes") - var hosts []string - for _, node := range nodes { - hosts = append(hosts, node+":3888") - } - zkHosts := strings.Join(hosts, ",") - args = append(args, "--zk=zk://"+zkHosts+"/mesos") - args = append(args, "--quorum=1") - args = append(args, "--work_dir=/tmp/mesos-master") - - return args -} diff --git a/mesos/pkg/boot/mesos/slave/main.go b/mesos/pkg/boot/mesos/slave/main.go deleted file mode 100644 index 55d8454d5a..0000000000 --- a/mesos/pkg/boot/mesos/slave/main.go +++ /dev/null @@ -1,104 +0,0 @@ -package main - -import ( - "strings" - - "github.com/deis/deis/mesos/pkg/boot" - "github.com/deis/deis/mesos/pkg/etcd" - logger "github.com/deis/deis/mesos/pkg/log" - "github.com/deis/deis/mesos/pkg/os" - "github.com/deis/deis/mesos/pkg/types" -) - -const ( - mesosPort = 5051 -) - -var ( - etcdPath = os.Getopt("ETCD_PATH", "/mesos/slave") - log = logger.New() -) - -func init() { - boot.RegisterComponent(new(MesosBoot), "boot") -} - -func main() { - boot.Start(etcdPath, mesosPort) -} - -// MesosBoot struct for mesos boot. -type MesosBoot struct{} - -// MkdirsEtcd creates a directory in etcd. -func (mb *MesosBoot) MkdirsEtcd() []string { - return []string{etcdPath} -} - -// EtcdDefaults returns default values for etcd. -func (mb *MesosBoot) EtcdDefaults() map[string]string { - return map[string]string{} -} - -// PreBootScripts runs preboot scripts. -func (mb *MesosBoot) PreBootScripts(currentBoot *types.CurrentBoot) []*types.Script { - return []*types.Script{} -} - -// PreBoot to log starting of marathon. -func (mb *MesosBoot) PreBoot(currentBoot *types.CurrentBoot) { - log.Info("mesos-slave: starting...") -} - -// BootDaemons starts mesos-salve. 
-func (mb *MesosBoot) BootDaemons(currentBoot *types.CurrentBoot) []*types.ServiceDaemon { - args := gatherArgs(currentBoot.EtcdClient) - args = append(args, "--ip="+currentBoot.Host.String()) - log.Infof("mesos slave args: %v", args) - return []*types.ServiceDaemon{&types.ServiceDaemon{Command: "mesos-slave", Args: args}} -} - -// WaitForPorts returns an array of ports. -func (mb *MesosBoot) WaitForPorts() []int { - return []int{} -} - -// PostBootScripts returns type script. -func (mb *MesosBoot) PostBootScripts(currentBoot *types.CurrentBoot) []*types.Script { - return []*types.Script{} -} - -// PostBoot returns type script. -func (mb *MesosBoot) PostBoot(currentBoot *types.CurrentBoot) { - log.Info("mesos-slave: running...") -} - -// ScheduleTasks returns a cron job. -func (mb *MesosBoot) ScheduleTasks(currentBoot *types.CurrentBoot) []*types.Cron { - return []*types.Cron{} -} - -// UseConfd uses confd. -func (mb *MesosBoot) UseConfd() (bool, bool) { - return false, false -} - -// PreShutdownScripts returns type script. 
-func (mb *MesosBoot) PreShutdownScripts(currentBoot *types.CurrentBoot) []*types.Script { - return []*types.Script{} -} - -func gatherArgs(c *etcd.Client) []string { - var args []string - - nodes := etcd.GetList(c, "/zookeeper/nodes") - var hosts []string - for _, node := range nodes { - hosts = append(hosts, node+":3888") - } - zkHosts := strings.Join(hosts, ",") - args = append(args, "--master=zk://"+zkHosts+"/mesos") - args = append(args, "--containerizers=docker,mesos") - - return args -} diff --git a/mesos/pkg/boot/zookeeper/bash/add-node.bash b/mesos/pkg/boot/zookeeper/bash/add-node.bash deleted file mode 100755 index ab10d2cc6a..0000000000 --- a/mesos/pkg/boot/zookeeper/bash/add-node.bash +++ /dev/null @@ -1,20 +0,0 @@ -set -eo pipefail - -# set debug based on envvar -[[ $DEBUG ]] && set -x - -main() { - export PATH=$PATH:/jre/bin - - cp /app/zoo.cfg /opt/zookeeper-data/zoo.cfg - ln -s /opt/zookeeper-data/zoo.cfg /opt/zookeeper/conf/zoo.cfg - - cp /opt/zookeeper/conf/fleet-zoo_cfg.dynamic /opt/zookeeper-data/zoo_cfg.dynamic - - # # We need to add this node to the cluster if is not configured in the cluster - # ZKHOST=$(sed -e "s/$HOST:3888//;s/^,//;s/,$//" < /opt/zookeeper/conf/server.list | cut -d ',' -f 1) - - # echo "adding $HOST as server to the zookeeper cluster" - # echo "" - # /opt/zookeeper/bin/zkCli.sh -server "$ZKHOST" reconfig -add "server.$(cat /opt/zookeeper-data/data/myid)=$HOST:2181:2888:participant;$HOST:3888" -} diff --git a/mesos/pkg/boot/zookeeper/bash/remove-node.bash b/mesos/pkg/boot/zookeeper/bash/remove-node.bash deleted file mode 100755 index bccc82b51e..0000000000 --- a/mesos/pkg/boot/zookeeper/bash/remove-node.bash +++ /dev/null @@ -1,18 +0,0 @@ -set -eo pipefail - -# set debug based on envvar -[[ $DEBUG ]] && set -x - -main() { - export PATH=$PATH:/jre/bin - - # We cannot use the IP of this node to performe the removal of this node of the cluster - ZKHOST=$(sed -e "s/$HOST:3888//;s/^,//;s/,$//" < /opt/zookeeper/conf/server.list | 
cut -d ',' -f 1) - ACTUAL_SERVERS=$(/opt/zookeeper/bin/zkCli.sh -server "$ZKHOST" config | grep "^server.") - - if echo "$ACTUAL_SERVERS" | grep -q "$HOST"; then - echo "Removing $HOST server from zookeeper cluster" - echo "" - /opt/zookeeper/bin/zkCli.sh -server "$ZKHOST" reconfig -remove "$(cat /opt/zookeeper-data/data/myid)" - fi -} diff --git a/mesos/pkg/boot/zookeeper/main/boot.go b/mesos/pkg/boot/zookeeper/main/boot.go deleted file mode 100644 index 88a80f62ea..0000000000 --- a/mesos/pkg/boot/zookeeper/main/boot.go +++ /dev/null @@ -1,130 +0,0 @@ -package main - -import ( - "io/ioutil" - "net/http" - "os" - "os/signal" - "strconv" - "strings" - "syscall" - "time" - - "github.com/deis/deis/mesos/bindata/zookeeper" - "github.com/deis/deis/mesos/pkg/boot/zookeeper" - "github.com/deis/deis/mesos/pkg/confd" - "github.com/deis/deis/mesos/pkg/etcd" - logger "github.com/deis/deis/mesos/pkg/log" - oswrapper "github.com/deis/deis/mesos/pkg/os" - "github.com/deis/deis/version" -) - -var ( - etcdPath = oswrapper.Getopt("ETCD_PATH", "/zookeeper/nodes") - log = logger.New() - signalChan = make(chan os.Signal, 1) -) - -func main() { - host := oswrapper.Getopt("HOST", "127.0.0.1") - etcdPort := oswrapper.Getopt("ETCD_PORT", "4001") - etcdCtlPeers := oswrapper.Getopt("ETCD_PEERS", "127.0.0.1:"+etcdPort) - etcdURL := etcd.GetHTTPEtcdUrls(host+":"+etcdPort, etcdCtlPeers) - etcdClient := etcd.NewClient(etcdURL) - - etcd.Mkdir(etcdClient, etcdPath) - - log.Infof("boot version [%v]", version.Version) - log.Info("zookeeper: starting...") - - zookeeper.CheckZkMappingInFleet(etcdPath, etcdClient, etcdURL) - - // we need to write the file /opt/zookeeper-data/data/myid with the id of this node - os.MkdirAll("/opt/zookeeper-data/data", 0640) - zkID := etcd.Get(etcdClient, etcdPath+"/"+host+"/id") - ioutil.WriteFile("/opt/zookeeper-data/data/myid", []byte(zkID), 0640) - - zkServer := &zookeeper.ZkServer{ - Stdout: os.Stdout, - Stderr: os.Stderr, - } - - signal.Notify(signalChan, - 
syscall.SIGHUP, - syscall.SIGINT, - syscall.SIGKILL, - syscall.SIGTERM, - syscall.SIGQUIT, - os.Interrupt, - ) - - // Wait for a signal and exit - exitChan := make(chan int) - go func() { - for { - s := <-signalChan - log.Debugf("Signal received: %v", s) - switch s { - case syscall.SIGTERM: - exitChan <- 0 - case syscall.SIGQUIT: - exitChan <- 0 - case syscall.SIGKILL: - exitChan <- 1 - default: - exitChan <- 1 - } - } - }() - - // wait for confd to run once and install initial templates - confd.WaitForInitialConf(getConfdNodes(host, etcdCtlPeers, 4001), 10*time.Second) - - params := make(map[string]string) - params["HOST"] = host - if log.Level.String() == "debug" { - params["DEBUG"] = "true" - } - - err := oswrapper.RunScript("pkg/boot/zookeeper/bash/add-node.bash", params, bindata.Asset) - if err != nil { - log.Printf("command finished with error: %v", err) - } - - if err := zkServer.Start(); err != nil { - panic(err) - } - - log.Info("zookeeper: running...") - - go func() { - log.Debugf("starting pprof http server in port 6060") - http.ListenAndServe("localhost:6060", nil) - }() - - code := <-exitChan - log.Debugf("execution terminated with exit code %v", code) - - log.Debugf("executing pre shutdown script") - err = oswrapper.RunScript("pkg/boot/zookeeper/bash/remove-node.bash", params, bindata.Asset) - if err != nil { - log.Printf("command finished with error: %v", err) - } - - log.Info("stopping zookeeper node") - zkServer.Stop() -} - -func getConfdNodes(host, etcdCtlPeers string, port int) []string { - result := []string{host + ":" + strconv.Itoa(port)} - - if etcdCtlPeers != "127.0.0.1" { - hosts := strings.Split(etcdCtlPeers, ",") - result = []string{} - for _, _host := range hosts { - result = append(result, _host) - } - } - - return result -} diff --git a/mesos/pkg/boot/zookeeper/zookeeper_map.go b/mesos/pkg/boot/zookeeper/zookeeper_map.go deleted file mode 100644 index ef29afeea8..0000000000 --- a/mesos/pkg/boot/zookeeper/zookeeper_map.go +++ /dev/null 
@@ -1,115 +0,0 @@ -package zookeeper - -import ( - "strconv" - - "github.com/deis/deis/mesos/pkg/etcd" - "github.com/deis/deis/mesos/pkg/fleet" - logger "github.com/deis/deis/mesos/pkg/log" -) - -const ( - etcdLock = "/zookeeper/setupLock" -) - -var ( - log = logger.New() -) - -// CheckZkMappingInFleet verifies if there is a mapping for each node in -// the CoreOS cluster using the metadata zookeeper=true to filter wich -// nodes zookeeper should run -func CheckZkMappingInFleet(etcdPath string, etcdClient *etcd.Client, etcdURL []string) { - // check if the nodes with the required role already have the an id. - // If not get fleet nodes with the required role and preassing the - // ids for every node in the cluster - err := etcd.AcquireLock(etcdClient, etcdLock, 10) - if err != nil { - panic(err) - } - - zkNodes := etcd.GetList(etcdClient, etcdPath) - log.Debugf("zookeeper nodes %v", zkNodes) - - machines, err := getMachines(etcdURL) - if err != nil { - panic(err) - } - log.Debugf("machines %v", machines) - - if len(machines) == 0 { - log.Warning("") - log.Warning("there is no machine using metadata zookeeper=true in the cluster to run zookeeper") - log.Warning("we will create the mapping with for all the nodes") - log.Warning("") - machines = fleet.GetNodesInCluster(etcdURL) - } - - if len(zkNodes) == 0 { - log.Debug("initializing zookeeper cluster") - for index, newZkNode := range machines { - log.Debug("adding node %v to zookeeper cluster", newZkNode) - etcd.Set(etcdClient, etcdPath+"/"+newZkNode+"/id", strconv.Itoa(index+1), 0) - } - } else { - // we check if some machine in the fleet cluster with the - // required role is not initialized (no zookeeper node id). 
- machinesNotInitialized := difference(machines, zkNodes) - if len(machinesNotInitialized) > 0 { - nextNodeID := getNextNodeID(etcdPath, etcdClient, zkNodes) - for _, zkNode := range machinesNotInitialized { - etcd.Set(etcdClient, etcdPath+"/"+zkNode+"/id", strconv.Itoa(nextNodeID), 0) - nextNodeID++ - } - } - } - - // release the etcd lock - etcd.ReleaseLock(etcdClient) -} - -// getMachines return the list of machines that can run zookeeper or an empty list -func getMachines(etcdURL []string) ([]string, error) { - metadata, err := fleet.ParseMetadata("zookeeper=true") - if err != nil { - panic(err) - } - - return fleet.GetNodesWithMetadata(etcdURL, metadata) -} - -// getNextNodeID returns the next id to use as zookeeper node index -func getNextNodeID(etcdPath string, etcdClient *etcd.Client, nodes []string) int { - result := 0 - for _, node := range nodes { - id := etcd.Get(etcdClient, etcdPath+"/"+node+"/id") - numericID, err := strconv.Atoi(id) - if id != "" && err == nil && numericID > result { - result = numericID - } - } - - return result + 1 -} - -// difference get the elements present in the first slice and not in -// the second one returning those elemenets in a new string slice. -func difference(slice1 []string, slice2 []string) []string { - diffStr := []string{} - m := map[string]int{} - - for _, s1Val := range slice1 { - m[s1Val] = 1 - } - for _, s2Val := range slice2 { - m[s2Val] = m[s2Val] + 1 - } - - for mKey, mVal := range m { - if mVal == 1 { - diffStr = append(diffStr, mKey) - } - } - - return diffStr -} diff --git a/mesos/pkg/boot/zookeeper/zookeeper_server.go b/mesos/pkg/boot/zookeeper/zookeeper_server.go deleted file mode 100644 index 943004a422..0000000000 --- a/mesos/pkg/boot/zookeeper/zookeeper_server.go +++ /dev/null @@ -1,36 +0,0 @@ -package zookeeper - -import ( - "io" - "os/exec" - "time" -) - -// ZkServer struct to execute zookeeper commands. 
-type ZkServer struct { - Stdout, Stderr io.Writer - - cmd *exec.Cmd -} - -// Start starts a zookeeper server -func (srv *ZkServer) Start() error { - srv.cmd = exec.Command("/opt/zookeeper/bin/zkServer.sh", "start-foreground") - srv.cmd.Stdout = srv.Stdout - srv.cmd.Stderr = srv.Stderr - return srv.cmd.Start() -} - -// Pid returns the process id of the running zookeeper server -func (srv *ZkServer) Pid() int { - return srv.cmd.Process.Pid -} - -// Stop stops a running zookeeper server -func (srv *ZkServer) Stop() { - go func() { - time.Sleep(1 * time.Second) - srv.cmd.Process.Kill() - }() - srv.cmd.Process.Wait() -} diff --git a/mesos/pkg/confd/confd.go b/mesos/pkg/confd/confd.go deleted file mode 100644 index 5c4c01fdc7..0000000000 --- a/mesos/pkg/confd/confd.go +++ /dev/null @@ -1,119 +0,0 @@ -package confd - -import ( - "bufio" - "fmt" - "io" - "os" - "os/exec" - "regexp" - "strings" - "syscall" - "time" - - logger "github.com/deis/deis/mesos/pkg/log" - oswrapper "github.com/deis/deis/mesos/pkg/os" -) - -const ( - confdInterval = 5 // seconds - errorTickInterval = 60 * time.Second // 1 minute - maxErrorsInInterval = 5 // up to 5 errors per time interval -) - -var ( - log = logger.New() - templateErrorRegex = "(\\d{4})-(\\d{2})-(\\d{2})T(\\d{2}):(\\d{2}):\\d{2}Z.*ERROR template:" -) - -// WaitForInitialConf wait until the compilation of the templates is correct -func WaitForInitialConf(etcd []string, timeout time.Duration) { - log.Info("waiting for confd to write initial templates...") - for { - cmdAsString := fmt.Sprintf("confd -onetime -node %v -confdir /app", strings.Join(etcd, ",")) - log.Debugf("running %s", cmdAsString) - cmd, args := oswrapper.BuildCommandFromString(cmdAsString) - err := oswrapper.RunCommand(cmd, args) - if err == nil { - break - } - - time.Sleep(timeout) - } -} - -// Launch launch confd as a daemon process. 
-func Launch(signalChan chan os.Signal, etcd []string) { - confdLogLevel := "error" - if log.Level.String() == "debug" { - confdLogLevel = "debug" - } - cmdAsString := fmt.Sprintf("confd -node %v -confdir /app --interval %v --log-level %v", confdInterval, strings.Join(etcd, ","), confdLogLevel) - cmd, args := oswrapper.BuildCommandFromString(cmdAsString) - go runConfdDaemon(signalChan, cmd, args) -} - -func runConfdDaemon(signalChan chan os.Signal, command string, args []string) { - cmd := exec.Command(command, args...) - - stdout, err := cmd.StdoutPipe() - checkError(signalChan, err) - // stderr, err := cmd.StderrPipe() - // checkError(signalChan, err) - - go io.Copy(os.Stdout, stdout) - // go io.Copy(os.Stderr, stderr) - - go checkNumberOfErrors(stdout, maxErrorsInInterval, errorTickInterval, signalChan) - - err = cmd.Start() - if err != nil { - log.Errorf("an error ocurred executing confd: [%s params %v], %v", command, args, err) - signalChan <- syscall.SIGKILL - } - - err = cmd.Wait() - log.Errorf("confd command finished with error: %v", err) - signalChan <- syscall.SIGKILL -} - -func checkError(signalChan chan os.Signal, err error) { - if err != nil { - log.Errorf("%v", err) - signalChan <- syscall.SIGKILL - } -} - -func checkNumberOfErrors(std io.ReadCloser, count uint64, tick time.Duration, signalChan chan os.Signal) { - testRegex := regexp.MustCompile(templateErrorRegex) - var tickErrors uint64 - lines := make(chan string) - go func() { - scanner := bufio.NewScanner(std) - scanner.Split(bufio.ScanLines) - for scanner.Scan() { - lines <- scanner.Text() - } - }() - - timer := time.NewTicker(tick) - - for { - select { - case <-timer.C: - if tickErrors > count { - log.Debugf("number of errors %v", tickErrors) - log.Error("too many confd errors in the last minute. 
restarting component") - signalChan <- syscall.SIGKILL - return - } - - tickErrors = 0 - case line := <-lines: - match := testRegex.FindStringSubmatch(line) - if match != nil { - tickErrors++ - } - } - } -} diff --git a/mesos/pkg/confd/confd_test.go b/mesos/pkg/confd/confd_test.go deleted file mode 100644 index 6abf493675..0000000000 --- a/mesos/pkg/confd/confd_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package confd - -import ( - "os" - "os/exec" - "syscall" - "testing" - "time" -) - -const ( - confdError = "Apr 29 17:00:54 deis-1 sh: 2015-04-29T15:00:54Z fceb2accfbf5 confd[1484]: ERROR template: builder:59:47: executing \"builder\" at : error calling getv: key does not exist" -) - -func TestReturnError(t *testing.T) { - signalChan := make(chan os.Signal, 1) - args := "while(true);do echo '" + confdError + "'; sleep 1;done" - cmd := exec.Command("/bin/bash", "-c", args) - - stdout, err := cmd.StdoutPipe() - checkError(signalChan, err) - - cmd.Start() - - go checkNumberOfErrors(stdout, 1, 2*time.Second, signalChan) - - for { - select { - case <-time.Tick(5 * time.Second): - return - case s := <-signalChan: - log.Debugf("Signal received: %v", s) - switch s { - case syscall.SIGKILL: - // we expect this - return - } - } - } -} - -func TestReturnWithoutError(t *testing.T) { - signalChan := make(chan os.Signal, 1) - args := "while(true);do echo '" + confdError + "'; sleep 1;done" - cmd := exec.Command("/bin/bash", "-c", args) - - stdout, err := cmd.StdoutPipe() - checkError(signalChan, err) - - cmd.Start() - - go checkNumberOfErrors(stdout, 2, 2*time.Second, signalChan) - - for { - select { - case <-time.Tick(5 * time.Second): - return - case s := <-signalChan: - log.Debugf("Signal received: %v", s) - switch s { - case syscall.SIGKILL: - t.Fatal("Unexpected error received") - } - } - } -} diff --git a/mesos/pkg/etcd/etcd.go b/mesos/pkg/etcd/etcd.go deleted file mode 100644 index 868d8ab8dd..0000000000 --- a/mesos/pkg/etcd/etcd.go +++ /dev/null @@ -1,163 +0,0 @@ -package 
etcd - -import ( - "errors" - "path" - "strconv" - "strings" - "time" - - "github.com/coreos/go-etcd/etcd" - logger "github.com/deis/deis/mesos/pkg/log" - etcdlock "github.com/leeor/etcd-sync" -) - -// Client etcd client -type Client struct { - client *etcd.Client - lock *etcdlock.EtcdMutex -} - -// Error etcd error -type Error struct { - ErrorCode int `json:"errorCode"` - Message string `json:"message"` - Cause string `json:"cause,omitempty"` - Index uint64 `json:"index"` -} - -var log = logger.New() - -// NewClient create a etcd client using the given machine list -func NewClient(machines []string) *Client { - log.Debugf("connecting to %v etcd server/s", machines) - return &Client{etcd.NewClient(machines), nil} -} - -// SetDefault sets the value of a key without expiration -func SetDefault(client *Client, key, value string) { - Create(client, key, value, 0) -} - -// Mkdir creates a directory only if does not exists -func Mkdir(c *Client, path string) { - _, err := c.client.CreateDir(path, 0) - if err != nil { - log.Debug(err) - } -} - -// WaitForKeys wait for the required keys up to the timeout or forever if is nil -func WaitForKeys(c *Client, keys []string, ttl time.Duration) error { - start := time.Now() - wait := true - - for { - for _, key := range keys { - _, err := c.client.Get(key, false, false) - if err != nil { - log.Debugf("key \"%s\" error %v", key, err) - wait = true - } - } - - if !wait { - return nil - } - - log.Debug("waiting for missing etcd keys...") - time.Sleep(1 * time.Second) - wait = false - - if time.Since(start) > ttl { - return errors.New("maximum ttl reached. 
aborting") - } - } -} - -// Get returns the value inside a key or an empty string -func Get(c *Client, key string) string { - result, err := c.client.Get(key, false, false) - if err != nil { - log.Debugf("%v", err) - return "" - } - - return result.Node.Value -} - -// GetList returns the list of elements inside a key or an empty list -func GetList(c *Client, key string) []string { - values, err := c.client.Get(key, true, false) - if err != nil { - log.Debugf("getlist %v", err) - return []string{} - } - - result := []string{} - for _, node := range values.Node.Nodes { - result = append(result, path.Base(node.Key)) - } - - log.Debugf("getlist %s -> %v", key, result) - return result -} - -// Set sets the value of a key. -// If the ttl is bigger than 0 it will expire after the specified time -func Set(c *Client, key, value string, ttl uint64) { - log.Debugf("set %s -> %s", key, value) - _, err := c.client.Set(key, value, ttl) - if err != nil { - log.Debugf("%v", err) - } -} - -// Create set the value of a key only if it does not exits -func Create(c *Client, key, value string, ttl uint64) { - log.Debugf("create %s -> %s", key, value) - _, err := c.client.Create(key, value, ttl) - if err != nil { - log.Debugf("%v", err) - } -} - -// PublishService publish a service to etcd periodically -func PublishService( - client *Client, - etcdPath string, - host string, - externalPort int, - ttl uint64, - timeout time.Duration) { - - for { - Set(client, etcdPath+"/host", host, ttl) - Set(client, etcdPath+"/port", strconv.Itoa(externalPort), ttl) - time.Sleep(timeout) - } -} - -func convertEtcdError(err error) *Error { - etcdError := err.(*etcd.EtcdError) - return &Error{ - ErrorCode: etcdError.ErrorCode, - Message: etcdError.Message, - Cause: etcdError.Cause, - Index: etcdError.Index, - } -} - -// GetHTTPEtcdUrls returns an array of urls that contains at least one host -func GetHTTPEtcdUrls(host, etcdPeers string) []string { - if etcdPeers != "127.0.0.1:4001" { - hosts := 
strings.Split(etcdPeers, ",") - result := []string{} - for _, _host := range hosts { - result = append(result, "http://"+_host+":4001") - } - return result - } - - return []string{"http://" + host} -} diff --git a/mesos/pkg/etcd/etcd_test.go b/mesos/pkg/etcd/etcd_test.go deleted file mode 100644 index 8ddafe277f..0000000000 --- a/mesos/pkg/etcd/etcd_test.go +++ /dev/null @@ -1,110 +0,0 @@ -package etcd - -import ( - "io/ioutil" - "os" - "os/exec" - "reflect" - "testing" - "time" -) - -func init() { - _, err := exec.Command("etcd", "--version").Output() - if err != nil { - log.Fatal(err) - } -} - -var etcdServer *exec.Cmd - -func startEtcd() { - tmpDir, err := ioutil.TempDir(os.TempDir(), "etcd-test") - if err != nil { - log.Fatal("creating temp dir:", err) - } - log.Debugf("temp dir: %v", tmpDir) - - etcdServer = exec.Command("etcd", "-data-dir="+tmpDir, "-name=default") - etcdServer.Start() - time.Sleep(1 * time.Second) -} - -func stopEtcd() { - etcdServer.Process.Kill() -} - -func TestGetSetEtcd(t *testing.T) { - startEtcd() - defer stopEtcd() - - etcdClient := NewClient([]string{"http://localhost:4001"}) - SetDefault(etcdClient, "/path", "value") - value := Get(etcdClient, "/path") - - if value != "value" { - t.Fatalf("Expected '%v' but returned '%v'", "value", value) - } - - Set(etcdClient, "/path", "", 0) - value = Get(etcdClient, "/path") - - if value != "" { - t.Fatalf("Expected '%v' but returned '%v'", "", value) - } - - Set(etcdClient, "/path", "value", uint64((1 * time.Second).Seconds())) - time.Sleep(2 * time.Second) - value = Get(etcdClient, "/path") - - if value != "" { - t.Fatalf("Expected '%v' but returned '%v'", "", value) - } -} - -func TestMkdirEtcd(t *testing.T) { - startEtcd() - defer stopEtcd() - - etcdClient := NewClient([]string{"http://localhost:4001"}) - - Mkdir(etcdClient, "/directory") - values := GetList(etcdClient, "/directory") - if len(values) != 2 { - t.Fatalf("Expected '%v' but returned '%v'", 0, len(values)) - } - - Set(etcdClient, 
"/directory/item_1", "value", 0) - Set(etcdClient, "/directory/item_2", "value", 0) - values = GetList(etcdClient, "/directory") - if len(values) != 2 { - t.Fatalf("Expected '%v' but returned '%v'", 2, len(values)) - } - - lsResult := []string{"item_1", "item_2"} - if !reflect.DeepEqual(values, lsResult) { - t.Fatalf("Expected '%v' but returned '%v'", lsResult, values) - } -} - -func TestWaitForKeysEtcd(t *testing.T) { - startEtcd() - defer stopEtcd() - - etcdClient := NewClient([]string{"http://localhost:4001"}) - Set(etcdClient, "/key", "value", 0) - start := time.Now() - err := WaitForKeys(etcdClient, []string{"/key"}, (10 * time.Second)) - if err != nil { - t.Fatalf("%v", err) - } - end := time.Since(start) - if end.Seconds() > (2 * time.Second).Seconds() { - t.Fatalf("Expected '%vs' but returned '%vs'", 2, end.Seconds()) - } - - err = WaitForKeys(etcdClient, []string{"/key2"}, (2 * time.Second)) - if err == nil { - t.Fatalf("Expected an error") - } -} diff --git a/mesos/pkg/etcd/lock.go b/mesos/pkg/etcd/lock.go deleted file mode 100644 index c4e231cdc4..0000000000 --- a/mesos/pkg/etcd/lock.go +++ /dev/null @@ -1,16 +0,0 @@ -package etcd - -import ( - etcdlock "github.com/leeor/etcd-sync" -) - -// AcquireLock creates a pseudo lock in etcd with a specific ttl -func AcquireLock(c *Client, key string, ttl uint64) error { - c.lock = etcdlock.NewMutexFromClient(c.client, key, ttl) - return c.lock.Lock() -} - -// ReleaseLock releases the existing lock -func ReleaseLock(c *Client) { - c.lock.Unlock() -} diff --git a/mesos/pkg/etcd/lock_test.go b/mesos/pkg/etcd/lock_test.go deleted file mode 100644 index ebc7431203..0000000000 --- a/mesos/pkg/etcd/lock_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package etcd - -import ( - "os/exec" - "testing" -) - -func init() { - _, err := exec.Command("etcd", "--version").Output() - if err != nil { - log.Fatal(err) - } -} - -func TestAcquireReleaseLock(t *testing.T) { - startEtcd() - defer stopEtcd() - - etcdClient := 
NewClient([]string{"http://localhost:4001"}) - - err := AcquireLock(etcdClient, "/lock", 10) - if err != nil { - t.Fatalf("Unexpected error '%v'", err) - } - - value := Get(etcdClient, "/lock") - if value == "" { - t.Fatalf("Expected '%v' arguments but returned '%v'", "locked", value) - } - - if value != "locked" { - t.Fatalf("Expected '%v' arguments but returned '%v'", "locked", value) - } - - ReleaseLock(etcdClient) - - value = Get(etcdClient, "/lock") - if value != "released" { - t.Fatalf("Expected '%v' arguments but returned '%v'", "released", value) - } - -} diff --git a/mesos/pkg/fleet/machine.go b/mesos/pkg/fleet/machine.go deleted file mode 100644 index 191cfb0235..0000000000 --- a/mesos/pkg/fleet/machine.go +++ /dev/null @@ -1,39 +0,0 @@ -package fleet - -import ( - "errors" - "strings" - - "github.com/coreos/fleet/machine" -) - -func hasMetadata(ms machine.MachineState, metadata map[string][]string) bool { - for k, v := range metadata { - for _, s := range v { - if ms.Metadata[k] == s { - return true - } - } - } - return false -} - -// ParseMetadata parses a string that could contain a comma-delimited key/value -// pairs published in the fleet registry returning an equivalen map to represent -// the same structure -func ParseMetadata(rawMetadata string) (map[string][]string, error) { - metadataList := strings.Split(rawMetadata, ",") - metadata := make(map[string][]string) - for _, kv := range metadataList { - i := strings.Index(kv, "=") - if i > 0 { - if _, ok := metadata[kv[:i]]; !ok { - metadata[kv[:i]] = []string{} - } - metadata[kv[:i]] = append(metadata[kv[:i]], kv[i+1:]) - } else { - return nil, errors.New("invalid key/value pair " + kv) - } - } - return metadata, nil -} diff --git a/mesos/pkg/fleet/machine_test.go b/mesos/pkg/fleet/machine_test.go deleted file mode 100644 index 11dd2b4906..0000000000 --- a/mesos/pkg/fleet/machine_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package fleet - -import ( - "reflect" - "testing" -) - -func TestParseMetadata(t 
*testing.T) { - data, err := ParseMetadata("zookeeper=true") - if err != nil { - t.Fatalf("Unexpected error '%v'", err) - } - - expected := make(map[string][]string) - expected["zookeeper"] = append(expected["zookeeper"], "true") - - if !reflect.DeepEqual(data, expected) { - t.Fatalf("Expected map with zookeeper=true but it returned %v", expected) - } -} diff --git a/mesos/pkg/fleet/machines.go b/mesos/pkg/fleet/machines.go deleted file mode 100644 index ce83786400..0000000000 --- a/mesos/pkg/fleet/machines.go +++ /dev/null @@ -1,61 +0,0 @@ -package fleet - -import ( - "net/http" - "time" - - "github.com/coreos/fleet/etcd" - "github.com/coreos/fleet/registry" - logger "github.com/deis/deis/mesos/pkg/log" -) - -var log = logger.New() - -// GetNodesWithMetadata returns the ip address of the nodes with all the specified roles -func GetNodesWithMetadata(url []string, metadata map[string][]string) ([]string, error) { - etcdClient, err := etcd.NewClient(url, &http.Transport{}, time.Second) - if err != nil { - log.Debugf("error creating new fleet etcd client: %v", err) - return nil, err - } - - fleetClient := registry.NewEtcdRegistry(etcdClient, "/_coreos.com/fleet/") - machines, err := fleetClient.Machines() - if err != nil { - log.Debugf("error creating new fleet etcd client: %v", err) - return nil, err - } - - var machineList []string - for _, m := range machines { - if hasMetadata(m, metadata) { - machineList = append(machineList, m.PublicIP) - } - } - - return machineList, nil -} - -// GetNodesInCluster return the list of ip address of all the nodes -// running in the cluster currently active (fleetctl list-machines) -func GetNodesInCluster(url []string) []string { - etcdClient, err := etcd.NewClient(url, &http.Transport{}, time.Second) - if err != nil { - log.Debugf("error creating new fleet etcd client: %v", err) - return []string{} - } - - fleetClient := registry.NewEtcdRegistry(etcdClient, "/_coreos.com/fleet/") - machines, err := fleetClient.Machines() - if err 
!= nil { - log.Debugf("error creating new fleet etcd client: %v", err) - return []string{} - } - - var machineList []string - for _, m := range machines { - machineList = append(machineList, m.PublicIP) - } - - return machineList -} diff --git a/mesos/pkg/fleet/machines_test.go b/mesos/pkg/fleet/machines_test.go deleted file mode 100644 index af916b8ee3..0000000000 --- a/mesos/pkg/fleet/machines_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package fleet - -import ( - "io/ioutil" - "os" - "os/exec" - "testing" - "time" -) - -func init() { - _, err := exec.Command("etcd", "--version").Output() - if err != nil { - log.Fatal(err) - } -} - -var etcdServer *exec.Cmd - -func startEtcd() { - tmpDir, err := ioutil.TempDir(os.TempDir(), "etcd-test") - if err != nil { - log.Fatal("creating temp dir:", err) - } - log.Debugf("temp dir: %v", tmpDir) - - etcdServer = exec.Command("etcd", "-data-dir="+tmpDir, "-name=default") - etcdServer.Start() - time.Sleep(1 * time.Second) -} - -func stopEtcd() { - etcdServer.Process.Kill() -} - -func TestGetNodesWithMetadata(t *testing.T) { - startEtcd() - defer stopEtcd() - - data, err := ParseMetadata("zookeeper=true") - if err != nil { - t.Fatalf("Unexpected error '%v'", err) - } - - machines, err := GetNodesWithMetadata([]string{"http://172.17.8.100:4001"}, data) - if err != nil { - t.Fatalf("Expected '%v' arguments but returned '%v'", "", err) - } - - if len(machines) <= 0 { - t.Fatalf("Expected at least one machines but %v were returned ", len(machines)) - } -} diff --git a/mesos/pkg/log/log.go b/mesos/pkg/log/log.go deleted file mode 100644 index b4b9d8d156..0000000000 --- a/mesos/pkg/log/log.go +++ /dev/null @@ -1,35 +0,0 @@ -package log - -import ( - "os" - - "github.com/Sirupsen/logrus" -) - -// Logger embed logrus Logger struct -type Logger struct { - logrus.Logger -} - -// New create a new logger using the StdOutFormatter and the level -// specified in the env variable LOG_LEVEL -func New() *Logger { - log := &Logger{} - - log.Out = 
os.Stdout - log.Formatter = new(StdOutFormatter) - - logLevel := os.Getenv("LOG_LEVEL") - log.SetLevel(logLevel) - - return log -} - -// SetLevel change the level of the logger -func (log *Logger) SetLevel(logLevel string) { - if logLevel != "" { - if level, err := logrus.ParseLevel(logLevel); err == nil { - log.Level = level - } - } -} diff --git a/mesos/pkg/log/stdout_formatter.go b/mesos/pkg/log/stdout_formatter.go deleted file mode 100644 index 71490bc0b2..0000000000 --- a/mesos/pkg/log/stdout_formatter.go +++ /dev/null @@ -1,20 +0,0 @@ -package log - -import ( - "bytes" - "fmt" - "strings" - - "github.com/Sirupsen/logrus" -) - -// StdOutFormatter struct -type StdOutFormatter struct { -} - -// Format change the default output format to incluse the log level -func (f *StdOutFormatter) Format(entry *logrus.Entry) ([]byte, error) { - b := &bytes.Buffer{} - fmt.Fprintf(b, "[%s] - %s\n", strings.ToUpper(entry.Level.String()), entry.Message) - return b.Bytes(), nil -} diff --git a/mesos/pkg/net/net.go b/mesos/pkg/net/net.go deleted file mode 100644 index c5a6ea6196..0000000000 --- a/mesos/pkg/net/net.go +++ /dev/null @@ -1,69 +0,0 @@ -package net - -import ( - "errors" - "net" - "strconv" - "strings" - "time" -) - -// InterfaceIPAddress is used to know the interface and ip address in the sytem -type InterfaceIPAddress struct { - Iface string - IP string -} - -// WaitForPort wait for successful network connection -func WaitForPort(proto string, ip string, port int, timeout time.Duration) error { - for { - con, err := net.DialTimeout(proto, ip+":"+strconv.Itoa(port), timeout) - if err == nil { - con.Close() - break - } - } - - return nil -} - -// RandomPort return a random not used TCP port -func RandomPort(proto string) (int, error) { - switch proto { - case "tcp": - l, _ := net.Listen(proto, "127.0.0.1:0") - defer l.Close() - port := l.Addr() - lPort, _ := strconv.Atoi(strings.Split(port.String(), ":")[1]) - return lPort, nil - case "udp": - addr, _ := 
net.ResolveUDPAddr(proto, "127.0.0.1:0") - l, _ := net.ListenUDP(proto, addr) - defer l.Close() - return addr.Port, nil - default: - return -1, errors.New("invalid protocol") - } -} - -// GetNetworkInterfaces return the list of -// network interfaces and IP address -func GetNetworkInterfaces() []InterfaceIPAddress { - result := []InterfaceIPAddress{} - - interfaces, _ := net.Interfaces() - for _, inter := range interfaces { - if addrs, err := inter.Addrs(); err == nil { - for _, addr := range addrs { - result = append(result, InterfaceIPAddress{inter.Name, addr.String()}) - } - } - } - - return result -} - -// ParseIP parses s as an IP address -func ParseIP(s string) net.IP { - return net.ParseIP(s) -} diff --git a/mesos/pkg/net/net_test.go b/mesos/pkg/net/net_test.go deleted file mode 100644 index ee795f302b..0000000000 --- a/mesos/pkg/net/net_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package net - -import ( - "net" - "strconv" - "testing" - "time" -) - -func TestListenTCP(t *testing.T) { - port, err := RandomPort("tcp") - if err != nil { - t.Fatal(err) - } - - listeningPort, err := net.Listen("tcp", "127.0.0.1:"+strconv.Itoa(port)) - defer listeningPort.Close() - if err != nil { - t.Fatal(err) - } - - err = WaitForPort("tcp", "127.0.0.1", port, time.Second) - if err != nil { - t.Fatal(err) - } -} - -// TODO: fix -// func TestListenUDP(t *testing.T) { -// port, err := RandomPort("udp") -// if err != nil { -// t.Fatal(err) -// } - -// addr, _ := net.ResolveUDPAddr("udp", "127.0.0.1"+strconv.Itoa(port)) -// listeningPort, err := net.ListenUDP("udp", addr) -// defer listeningPort.Close() -// if err != nil { -// t.Fatal(err) -// } - -// err = WaitForPort("udp", "127.0.0.1", port, time.Second) -// if err != nil { -// t.Fatal(err) -// } -// } diff --git a/mesos/pkg/os/os.go b/mesos/pkg/os/os.go deleted file mode 100644 index 787aecfaf1..0000000000 --- a/mesos/pkg/os/os.go +++ /dev/null @@ -1,131 +0,0 @@ -package os - -import ( - "crypto/rand" - "errors" - "fmt" - 
"io/ioutil" - "os" - "os/exec" - "strings" - "syscall" - - logger "github.com/deis/deis/mesos/pkg/log" - basher "github.com/progrium/go-basher" -) - -const ( - alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" -) - -var log = logger.New() - -// Getopt return the value of and environment variable or a default -func Getopt(name, dfault string) string { - value := os.Getenv(name) - if value == "" { - log.Debugf("returning default value \"%s\" for key \"%s\"", dfault, name) - value = dfault - } - return value -} - -// RunProcessAsDaemon start a child process that will run indefinitely -func RunProcessAsDaemon(signalChan chan os.Signal, command string, args []string) { - cmd := exec.Command(command, args...) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - - err := cmd.Start() - if err != nil { - log.Errorf("an error ocurred executing command: [%s params %v], %v", command, args, err) - signalChan <- syscall.SIGKILL - } - - err = cmd.Wait() - log.Errorf("command finished with error: %v", err) - signalChan <- syscall.SIGKILL -} - -// RunScript run a shell script using go-basher and if it returns an error -// send a signal to terminate the execution -func RunScript(script string, params map[string]string, loader func(string) ([]byte, error)) error { - log.Debugf("running script %v", script) - bash, _ := basher.NewContext("/bin/bash", log.Level.String() == "debug") - bash.Source(script, loader) - if params != nil { - for key, value := range params { - bash.Export(key, value) - } - } - - status, err := bash.Run("main", []string{}) - if err != nil { - return err - } - if status != 0 { - return fmt.Errorf("invalid exit code running script [%v]", status) - } - - return nil -} - -// RunCommand run a command and return. -func RunCommand(command string, args []string) error { - cmd := exec.Command(command, args...) 
- cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - - err := cmd.Run() - if err != nil { - return err - } - - return nil -} - -// BuildCommandFromString parses a string containing a command and multiple -// arguments and returns a valid tuple to pass to exec.Command -func BuildCommandFromString(input string) (string, []string) { - command := strings.Split(input, " ") - - if len(command) > 1 { - return command[0], command[1:] - } - - return command[0], []string{} -} - -// Random return a random string -func Random(size int) (string, error) { - if size <= 0 { - return "", errors.New("invalid size. It must be bigger or equal to 1") - } - - var bytes = make([]byte, size) - rand.Read(bytes) - for i, b := range bytes { - bytes[i] = alphanum[b%byte(len(alphanum))] - } - return string(bytes), nil -} - -// Hostname returns the host name reported by the kernel. -func Hostname() (name string, err error) { - return os.Hostname() -} - -// CopyFile copies a source file to a destination. -func CopyFile(src string, dst string) error { - data, err := ioutil.ReadFile(src) - if err != nil { - return err - } - - err = ioutil.WriteFile(dst, data, 0644) - if err != nil { - return err - } - - return nil -} diff --git a/mesos/pkg/os/os_test.go b/mesos/pkg/os/os_test.go deleted file mode 100644 index 7237fd0ba8..0000000000 --- a/mesos/pkg/os/os_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package os - -import ( - "testing" -) - -func TestGetoptEmpty(t *testing.T) { - value := Getopt("", "") - if value != "" { - t.Fatalf("Expected '' as value of empty env name but %s returned", value) - } -} - -func TestGetoptValid(t *testing.T) { - value := Getopt("valid", "value") - if value != "value" { - t.Fatalf("Expected 'value' as value of 'valid' but %s returned", value) - } -} - -func TestGetoptDefault(t *testing.T) { - value := Getopt("", "default") - if value != "default" { - t.Fatalf("Expected 'default' as value of empty env name but %s returned", value) - } -} - -func 
TestBuildCommandFromStringSingle(t *testing.T) { - command, args := BuildCommandFromString("ls") - if command != "ls" { - t.Fatalf("Expected 'ls' as value of empty env name but %s returned", command) - } - - if len(args) != 0 { - t.Fatalf("Expected '%v' arguments but %v returned", 0, len(args)) - } - - command, args = BuildCommandFromString("docker -d -D") - if command != "docker" { - t.Fatalf("Expected 'docker' as value of empty env name but %s returned", command) - } - - if len(args) != 2 { - t.Fatalf("Expected '%v' arguments but %v returned", 0, len(args)) - } - - command, args = BuildCommandFromString("ls -lat") - if command != "ls" { - t.Fatalf("Expected 'ls' as value of empty env name but %s returned", command) - } - - if len(args) != 1 { - t.Fatalf("Expected '%v' arguments but %v returned", 1, len(args)) - } -} - -func TestRandom(t *testing.T) { - rnd, err := Random(1) - if err != nil { - t.Fatal(err) - } - - if len(rnd) != 1 { - t.Fatalf("Expected a string of 1 character but %s returned", rnd) - } -} - -func TestRandomError(t *testing.T) { - rnd, err := Random(0) - if err == nil { - t.Fatalf("Expected an error requiring a random string of length 0 but %s returned", rnd) - } -} diff --git a/mesos/pkg/types/command.go b/mesos/pkg/types/command.go deleted file mode 100644 index 54b7de8749..0000000000 --- a/mesos/pkg/types/command.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -import ( - "io" - "os/exec" -) - -// Command struct to execute commands. 
-type Command struct { - Stdout, Stderr io.Writer - - cmd *exec.Cmd -} diff --git a/mesos/pkg/types/cron.go b/mesos/pkg/types/cron.go deleted file mode 100644 index d9ed8a7d7f..0000000000 --- a/mesos/pkg/types/cron.go +++ /dev/null @@ -1,7 +0,0 @@ -package types - -// Cron struct executes code with a time interval -type Cron struct { - Frequency string - Code func() -} diff --git a/mesos/pkg/types/current-boot.go b/mesos/pkg/types/current-boot.go deleted file mode 100644 index b4cdba129f..0000000000 --- a/mesos/pkg/types/current-boot.go +++ /dev/null @@ -1,23 +0,0 @@ -package types - -import ( - "net" - "time" - - "github.com/deis/deis/mesos/pkg/etcd" -) - -// CurrentBoot information about the boot -// process related to the component -type CurrentBoot struct { - ConfdNodes []string - EtcdClient *etcd.Client - EtcdPath string - EtcdPort int - EtcdPeers string - EtcdURL []string - Host net.IP - Port int - Timeout time.Duration - TTL time.Duration -} diff --git a/mesos/pkg/types/script.go b/mesos/pkg/types/script.go deleted file mode 100644 index 9d457457f7..0000000000 --- a/mesos/pkg/types/script.go +++ /dev/null @@ -1,8 +0,0 @@ -package types - -// Script struct to specify a script. -type Script struct { - Name string - Params map[string]string - Content func(string) ([]byte, error) -} diff --git a/mesos/pkg/types/service-daemon.go b/mesos/pkg/types/service-daemon.go deleted file mode 100644 index b1a3af9a99..0000000000 --- a/mesos/pkg/types/service-daemon.go +++ /dev/null @@ -1,7 +0,0 @@ -package types - -// ServiceDaemon struct to a service daemon. 
-type ServiceDaemon struct { - Command string - Args []string -} diff --git a/mesos/slave b/mesos/slave deleted file mode 100644 index 4b5437f83d..0000000000 --- a/mesos/slave +++ /dev/null @@ -1,5 +0,0 @@ -FROM #PREFIX#mesos-template:#VERSION# - -COPY bin/slave-boot /app/bin/boot - -ENTRYPOINT ["/app/bin/boot"] diff --git a/mesos/template b/mesos/template deleted file mode 100644 index 49fed455ac..0000000000 --- a/mesos/template +++ /dev/null @@ -1,9 +0,0 @@ -FROM ubuntu-debootstrap:14.04 - -ENV DEBIAN_FRONTEND noninteractive - -COPY build-mesos.sh /tmp/build.sh - -RUN DOCKER_BUILD=true MESOS="#VERSION#" /tmp/build.sh - -ENV DEIS_RELEASE 1.13.0-dev diff --git a/mesos/zookeeper/Dockerfile b/mesos/zookeeper/Dockerfile deleted file mode 100644 index f4aaf9eac9..0000000000 --- a/mesos/zookeeper/Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -FROM alpine:3.2 - -ENV JAVA_HOME /jre -ENV PATH ${PATH}:${JAVA_HOME}/bin - -EXPOSE 2181 2888 3888 - -ADD . /app - -RUN DOCKER_BUILD=true /app/build.sh - -WORKDIR /app - -VOLUME ["/opt/zookeeper-data"] - -ENTRYPOINT ["/app/bin/boot"] - -ENV DEIS_RELEASE 1.13.0-dev diff --git a/mesos/zookeeper/build.sh b/mesos/zookeeper/build.sh deleted file mode 100755 index 77f3af55d4..0000000000 --- a/mesos/zookeeper/build.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env sh - -# fail on any command exiting non-zero -set -eo pipefail - -if [ -z "$DOCKER_BUILD" ]; then - echo - echo "Note: this script is intended for use by the Dockerfile and not as a way to build zoopeeper locally" - echo - exit 1 -fi - -apk add --update \ - curl \ - ca-certificates \ - bash \ - readline \ - ncurses-libs - -cd /tmp - -curl -sSL -o glibc-2.21-r2.apk "https://circle-artifacts.com/gh/andyshinn/alpine-pkg-glibc/6/artifacts/0/home/ubuntu/alpine-pkg-glibc/packages/x86_64/glibc-2.21-r2.apk" - -apk add --allow-untrusted glibc-2.21-r2.apk - -curl -sSL -o glibc-bin-2.21-r2.apk 
"https://circle-artifacts.com/gh/andyshinn/alpine-pkg-glibc/6/artifacts/0/home/ubuntu/alpine-pkg-glibc/packages/x86_64/glibc-bin-2.21-r2.apk" - -apk add --allow-untrusted glibc-bin-2.21-r2.apk - -/usr/glibc/usr/bin/ldconfig /lib /usr/glibc/usr/lib - -# install confd -echo "Downloading confd..." -curl -sSL -o /sbin/confd https://github.com/kelseyhightower/confd/releases/download/v0.9.0/confd-0.9.0-linux-amd64 \ - && chmod +x /sbin/confd - -echo "Downloading Oracle JDK..." -JAVA_VERSION_MAJOR=8 -JAVA_VERSION_MINOR=45 -JAVA_VERSION_BUILD=14 -JAVA_PACKAGE=server-jre - -curl -jksSLH "Cookie: oraclelicense=accept-securebackup-cookie"\ - http://download.oracle.com/otn-pub/java/jdk/${JAVA_VERSION_MAJOR}u${JAVA_VERSION_MINOR}-b${JAVA_VERSION_BUILD}/${JAVA_PACKAGE}-${JAVA_VERSION_MAJOR}u${JAVA_VERSION_MINOR}-linux-x64.tar.gz | gunzip -c - | tar -xf - - -mkdir -p /tmp/zookeeper /opt - -echo "Downloading zookeeper..." -curl -sSL http://apache.mirrors.pair.com/zookeeper/zookeeper-3.5.0-alpha/zookeeper-3.5.0-alpha.tar.gz | tar -xzf - -C /opt - -ln -s /opt/zookeeper-3.5.0-alpha /opt/zookeeper - -mv jdk1.${JAVA_VERSION_MAJOR}.0_${JAVA_VERSION_MINOR}/jre /jre - -# cleanup -apk del curl ca-certificates - -rm /jre/bin/jjs -rm /jre/bin/keytool -rm /jre/bin/orbd -rm /jre/bin/pack200 -rm /jre/bin/policytool -rm /jre/bin/rmid -rm /jre/bin/rmiregistry -rm /jre/bin/servertool -rm /jre/bin/tnameserv -rm /jre/bin/unpack200 -rm /jre/lib/ext/nashorn.jar -rm /jre/lib/jfr.jar - -rm -rf /jre/lib/jfr -rm -rf /jre/lib/oblique-fonts - -rm -rf /tmp/* /var/cache/apk/* diff --git a/mesos/zookeeper/conf.d/dynamic-cfg.toml b/mesos/zookeeper/conf.d/dynamic-cfg.toml deleted file mode 100644 index db12a0e185..0000000000 --- a/mesos/zookeeper/conf.d/dynamic-cfg.toml +++ /dev/null @@ -1,6 +0,0 @@ -[template] -src = "zoo_cfg.dynamic" -dest = "/opt/zookeeper/conf/fleet-zoo_cfg.dynamic" -keys = [ - "/zookeeper/nodes", -] diff --git a/mesos/zookeeper/conf.d/server-list.toml 
b/mesos/zookeeper/conf.d/server-list.toml deleted file mode 100644 index d14e2e5348..0000000000 --- a/mesos/zookeeper/conf.d/server-list.toml +++ /dev/null @@ -1,6 +0,0 @@ -[template] -src = "server.list" -dest = "/opt/zookeeper/conf/server.list" -keys = [ - "/zookeeper/nodes", -] diff --git a/mesos/zookeeper/templates/server.list b/mesos/zookeeper/templates/server.list deleted file mode 100644 index 2e1142ae34..0000000000 --- a/mesos/zookeeper/templates/server.list +++ /dev/null @@ -1 +0,0 @@ -{{ range $index, $node := lsdir "/zookeeper/nodes" }}{{ if $index }},{{ end }}{{ $node }}:3888{{ end }} \ No newline at end of file diff --git a/mesos/zookeeper/templates/zoo_cfg.dynamic b/mesos/zookeeper/templates/zoo_cfg.dynamic deleted file mode 100644 index 9a3c39adb4..0000000000 --- a/mesos/zookeeper/templates/zoo_cfg.dynamic +++ /dev/null @@ -1,2 +0,0 @@ -{{ range $node := lsdir "/zookeeper/nodes" }}server.{{ getv (printf "/zookeeper/nodes/%s/id" $node) }}={{ $node }}:2181:2888:participant;{{ $node }}:3888 -{{ end }} \ No newline at end of file diff --git a/mesos/zookeeper/zoo.cfg b/mesos/zookeeper/zoo.cfg deleted file mode 100644 index 0cd761572a..0000000000 --- a/mesos/zookeeper/zoo.cfg +++ /dev/null @@ -1,7 +0,0 @@ -tickTime=2000 -initLimit=100 -syncLimit=5 -dataDir=/opt/zookeeper-data/data -standaloneEnabled=false -dynamicConfigFile=/opt/zookeeper-data/zoo_cfg.dynamic - diff --git a/publisher/Makefile b/publisher/Makefile index cb2edae7b3..e85ef8fd4f 100644 --- a/publisher/Makefile +++ b/publisher/Makefile @@ -32,7 +32,7 @@ install: check-deisctl dev-release: push set-image push: check-registry - docker tag -f $(RELEASE_IMAGE) $(REMOTE_IMAGE) + docker tag $(RELEASE_IMAGE) $(REMOTE_IMAGE) docker push $(REMOTE_IMAGE) set-image: check-deisctl diff --git a/publisher/image/Dockerfile b/publisher/image/Dockerfile index 7ff07539e8..a318a6507c 100644 --- a/publisher/image/Dockerfile +++ b/publisher/image/Dockerfile @@ -1,10 +1,10 @@ -FROM alpine:3.2 +FROM alpine:3.4 # 
install curl in the image so it is possible to get the runtime # profiling information without any additional package installation. -RUN apk add --update-cache curl && rm -rf /var/cache/apk/* +RUN apk add --no-cache curl ADD bin/publisher /usr/local/bin/publisher ENTRYPOINT ["/usr/local/bin/publisher"] -ENV DEIS_RELEASE 1.13.0-dev +ENV DEIS_RELEASE 1.13.4 diff --git a/publisher/server/publisher.go b/publisher/server/publisher.go index fa1673a804..0bad65735d 100644 --- a/publisher/server/publisher.go +++ b/publisher/server/publisher.go @@ -75,10 +75,17 @@ func (s *Server) Poll(ttl time.Duration) { if err != nil { log.Fatal(err) } + var wg sync.WaitGroup for _, container := range containers { - // send container to channel for processing - s.publishContainer(&container, ttl) + wg.Add(1) + go func(container docker.APIContainers, ttl time.Duration) { + defer wg.Done() + // send container to channel for processing + s.publishContainer(&container, ttl) + }(container, ttl) } + // Wait for all publish operations to complete. + wg.Wait() } // getContainer retrieves a container from the docker client based on id diff --git a/registry/Dockerfile b/registry/Dockerfile index 7277ab282a..ff405f1b76 100644 --- a/registry/Dockerfile +++ b/registry/Dockerfile @@ -1,7 +1,7 @@ -FROM alpine:3.1 +FROM alpine:3.4 # install common packages -RUN apk add --update-cache curl bash sudo && rm -rf /var/cache/apk/* +RUN apk add --no-cache curl bash sudo # install etcdctl RUN curl -sSL -o /usr/local/bin/etcdctl https://s3-us-west-2.amazonaws.com/get-deis/etcdctl-v0.4.9 \ @@ -25,4 +25,4 @@ RUN DOCKER_BUILD=true /app/build.sh ADD . 
/app -ENV DEIS_RELEASE 1.13.0-dev +ENV DEIS_RELEASE 1.13.4 diff --git a/registry/Makefile b/registry/Makefile index 3edc6aa137..9f69f2cad3 100644 --- a/registry/Makefile +++ b/registry/Makefile @@ -34,7 +34,7 @@ run: install start dev-release: push set-image push: check-registry - docker tag -f $(IMAGE) $(DEV_IMAGE) + docker tag $(IMAGE) $(DEV_IMAGE) docker push $(DEV_IMAGE) set-image: check-deisctl diff --git a/registry/build.sh b/registry/build.sh index 0eb5e788b0..e377d6177b 100755 --- a/registry/build.sh +++ b/registry/build.sh @@ -11,7 +11,7 @@ if [[ -z $DOCKER_BUILD ]]; then fi # install required packages (copied from dotcloud/docker-registry Dockerfile) -apk add --update-cache \ +apk add --no-cache \ build-base \ git \ openssl-dev \ @@ -22,16 +22,13 @@ apk add --update-cache \ xz-dev # install pip -curl -sSL https://raw.githubusercontent.com/pypa/pip/7.0.3/contrib/get-pip.py | python - - -# workaround to python > 2.7.8 SSL issues -pip install --disable-pip-version-check --no-cache-dir pyopenssl ndg-httpsclient pyasn1 +curl -sSL https://bootstrap.pypa.io/get-pip.py | python - pip==8.1.1 # create a registry user adduser -D -s /bin/bash registry # add the docker registry source from github -git clone -b new-repository-import-master --single-branch https://github.com/deis/docker-registry /docker-registry && \ +git clone -b deis-v1-lts --single-branch https://github.com/deis/docker-registry /docker-registry && \ chown -R registry:registry /docker-registry # install boto configuration @@ -44,14 +41,12 @@ pip install --disable-pip-version-check --no-cache-dir /docker-registry/depends/ # Install registry pip install --disable-pip-version-check --no-cache-dir "file:///docker-registry#egg=docker-registry[bugsnag,newrelic,cors]" -patch \ - "$(python -c 'import boto; import os; print os.path.dirname(boto.__file__)')/connection.py" \ - < /docker-registry/contrib/boto_header_patch.diff +# patch boto +cd "$(python -c 'import boto; import os; print 
os.path.dirname(boto.__file__)')" \ + && patch -i /docker-registry/contrib/boto_header_patch.diff connection.py # cleanup. indicate that python is a required package. -apk del --purge \ +apk del --no-cache \ build-base \ linux-headers \ python-dev - -rm -rf /var/cache/apk/* diff --git a/router/Dockerfile b/router/Dockerfile index 33dbe111cc..85477405fb 100644 --- a/router/Dockerfile +++ b/router/Dockerfile @@ -1,15 +1,14 @@ -FROM alpine:3.2 +FROM alpine:3.4 # install common packages -RUN apk add --update-cache \ +RUN apk add --no-cache \ bash \ curl \ geoip \ libssl1.0 \ openssl \ pcre \ - sudo \ - && rm -rf /var/cache/apk/* + sudo # install confd RUN curl -sSL -o /usr/local/bin/confd https://s3-us-west-2.amazonaws.com/opdemand/confd-git-73f7489 \ @@ -27,4 +26,4 @@ RUN build CMD ["boot"] EXPOSE 80 2222 9090 -ENV DEIS_RELEASE 1.13.0-dev +ENV DEIS_RELEASE 1.13.4 diff --git a/router/Makefile b/router/Makefile index 29d1fe17ac..07fc78a0ab 100644 --- a/router/Makefile +++ b/router/Makefile @@ -46,7 +46,7 @@ run: install start dev-release: push set-image push: check-registry - docker tag -f $(IMAGE) $(DEV_IMAGE) + docker tag $(IMAGE) $(DEV_IMAGE) docker push $(DEV_IMAGE) set-image: check-deisctl diff --git a/router/cmd/boot/boot.go b/router/cmd/boot/boot.go index a6ae42c96f..982bdfc001 100644 --- a/router/cmd/boot/boot.go +++ b/router/cmd/boot/boot.go @@ -60,7 +60,6 @@ func main() { mkdirEtcd(client, "/deis/certs") mkdirEtcd(client, "/deis/router/hosts") mkdirEtcd(client, "/deis/router/hsts") - mkdirEtcd(client, "/registry/services/specs/default") setDefaultEtcd(client, etcdPath+"/gzip", "on") diff --git a/router/rootfs/bin/build b/router/rootfs/bin/build index e16ce96a03..bd329f2e13 100755 --- a/router/rootfs/bin/build +++ b/router/rootfs/bin/build @@ -2,7 +2,7 @@ set -eof pipefail -export NGINX_VERSION=1.9.6 +export NGINX_VERSION=1.10.1 export NAXSI_VERSION=0d53a64ed856e694fcb4038748c8cf6d5551a603 export NDK_VERSION=0.2.19 export 
VTS_VERSION=22c51e201a550bb94e96239fef541347beb4eeca @@ -20,7 +20,7 @@ mkdir "$BUILD_PATH" cd "$BUILD_PATH" # install required packages to build -apk add --update-cache \ +apk add --no-cache \ build-base \ curl \ geoip-dev \ @@ -33,7 +33,7 @@ apk add --update-cache \ zlib-dev # download, verify and extract the source files -get_src ed501fc6d0eff9d3bc1049cc1ba3a3ac8c602de046acb2a4c108392bbfa865ea \ +get_src 1fd35846566485e03c0e318989561c135c598323ff349c503a6c14826487a801 \ "http://nginx.org/download/nginx-$NGINX_VERSION.tar.gz" get_src 128b56873eedbd3f240dc0f88a8b260d791321db92f14ba2fc5c49fc5307e04d \ @@ -78,7 +78,7 @@ cd "$BUILD_PATH/nginx-$NGINX_VERSION" && make && make install rm -rf "$BUILD_PATH" -apk del --purge \ +apk del --no-cache \ build-base \ curl \ geoip-dev \ @@ -89,4 +89,3 @@ apk del --purge \ openssl-dev \ zlib \ zlib-dev -rm -rf /var/cache/apk/* diff --git a/router/rootfs/etc/confd/conf.d/nginx.conf.toml b/router/rootfs/etc/confd/conf.d/nginx.conf.toml index 5f54379ca1..8bfb5d4ffc 100644 --- a/router/rootfs/etc/confd/conf.d/nginx.conf.toml +++ b/router/rootfs/etc/confd/conf.d/nginx.conf.toml @@ -13,7 +13,6 @@ keys = [ "/deis/builder", "/deis/store/gateway", "/deis/certs", - "/registry/services/", ] check_cmd = "check {{ .src }}" reload_cmd = "/opt/nginx/sbin/nginx -s reload" diff --git a/router/rootfs/etc/confd/templates/deis.conf b/router/rootfs/etc/confd/templates/deis.conf index 895d5cd3c2..67dfbd0e7d 100644 --- a/router/rootfs/etc/confd/templates/deis.conf +++ b/router/rootfs/etc/confd/templates/deis.conf @@ -1,9 +1,10 @@ +{{ $useProxyProtocol := or (getv "/deis/router/proxyProtocol") "false" }} server_name_in_redirect off; port_in_redirect off; -listen 80{{ if exists "/deis/router/proxyProtocol" }} proxy_protocol{{ end }}; +listen 80{{ if ne $useProxyProtocol "false" }} proxy_protocol{{ end }}; {{ if exists "/deis/router/sslCert" }} -listen 443 ssl http2{{ if exists "/deis/router/proxyProtocol" }} proxy_protocol{{ end }}; +listen 443 ssl http2{{ 
if ne $useProxyProtocol "false" }} proxy_protocol{{ end }}; ssl_certificate /etc/ssl/deis.cert; ssl_certificate_key /etc/ssl/deis.key; include ssl.conf; diff --git a/router/rootfs/etc/confd/templates/nginx.conf b/router/rootfs/etc/confd/templates/nginx.conf index 7a8420b19a..638d23b2f5 100644 --- a/router/rootfs/etc/confd/templates/nginx.conf +++ b/router/rootfs/etc/confd/templates/nginx.conf @@ -12,8 +12,11 @@ events { http { + # Server signature toggle (default is on) + server_tokens {{ or (getv "/deis/router/serverTokens") "on" }}; + # basic settings - vhost_traffic_status_zone; + vhost_traffic_status_zone shared:vhost_traffic_status:{{ or (getv "/deis/router/trafficStatusZoneSize") "1m" }}; sendfile on; tcp_nopush on; @@ -49,12 +52,12 @@ http { {{ $firewallErrorCode := or (getv "/deis/router/firewall/errorCode") "400" }} client_max_body_size "{{ or (getv "/deis/router/bodySize") "1m" }}"; - {{ $useProxyProtocol := or (getv "/deis/router/proxyProtocol") "false" }}{{ if ne $useProxyProtocol "false" }} set_real_ip_from {{ or (getv "/deis/router/proxyRealIpCidr") "10.0.0.0/8" }}; - real_ip_header proxy_protocol; + {{ $useProxyProtocol := or (getv "/deis/router/proxyProtocol") "false" }}{{ if ne $useProxyProtocol "false" }} + real_ip_header proxy_protocol;{{ else }}real_ip_header X-Forwarded-For; {{ end }} - log_format upstreaminfo '[$time_local] - {{ if ne $useProxyProtocol "false" }}$proxy_protocol_addr{{ else }}$remote_addr{{ end }} - $remote_user - $status - "$request" - $bytes_sent - "$http_referer" - "$http_user_agent" - "$server_name" - $upstream_addr - $http_host - $upstream_response_time - $request_time'; + log_format upstreaminfo '[$time_local] - $remote_addr - $remote_user - $status - "$request" - $bytes_sent - "$http_referer" - "$http_user_agent" - "$server_name" - $upstream_addr - $http_host - $upstream_response_time - $request_time'; # send logs to STDOUT so they can be seen using 'docker logs' access_log /opt/nginx/logs/access.log upstreaminfo; @@ 
-65,10 +68,19 @@ http { '' close; } - # trust http_x_forwarded_proto headers correctly indicate ssl offloading - map $http_x_forwarded_proto $access_scheme { - default $http_x_forwarded_proto; - '' $scheme; + # The next two maps work together to determine the $access_scheme: + # 1. Determine if SSL may have been offloaded by the load balancer, in such cases, an HTTP request should be + # treated as if it were HTTPs. + map $http_x_forwarded_proto $tmp_access_scheme { + default $scheme; # if X-Forwarded-Proto header is empty, $tmp_access_scheme will be the actual protocol used + "~^(.*, ?)?http$" "http"; # account for the possibility of a comma-delimited X-Forwarded-Proto header value + "~^(.*, ?)?https$" "https"; # account for the possibility of a comma-delimited X-Forwarded-Proto header value + } + # 2. If the request is an HTTPS request, upgrade $access_scheme to https, regardless of what the X-Forwarded-Proto + # header might say. + map $scheme $access_scheme { + default $tmp_access_scheme; + "https" "https"; } ## HSTS instructs the browser to replace all HTTP links with HTTPS links for this domain until maxAge seconds from now @@ -116,11 +128,7 @@ http { {{ if eq $useFirewall "true" }}include /opt/nginx/firewall/active-mode.rules;{{ end }} proxy_buffering off; proxy_set_header Host $host; - {{ if ne $useProxyProtocol "false" }} - proxy_set_header X-Forwarded-For $proxy_protocol_addr; - {{ else }} - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - {{ end }} + proxy_set_header X-Forwarded-For $remote_addr; proxy_redirect off; proxy_connect_timeout {{ or (getv "/deis/router/controller/timeout/connect") "10s" }}; proxy_send_timeout {{ or (getv "/deis/router/controller/timeout/send") "20m" }}; @@ -169,11 +177,7 @@ http { {{ if eq $useFirewall "true" }}include /opt/nginx/firewall/active-mode.rules;{{ end }} proxy_buffering off; proxy_set_header Host $host; - {{ if ne $useProxyProtocol "false" }} - proxy_set_header X-Forwarded-For $proxy_protocol_addr; - 
{{ else }} - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - {{ end }} + proxy_set_header X-Forwarded-For $remote_addr; proxy_redirect off; proxy_connect_timeout 10s; proxy_send_timeout {{ $defaultTimeout }}s; @@ -250,11 +254,7 @@ http { } proxy_set_header X-Forwarded-Port $access_port; proxy_set_header X-Forwarded-Proto $access_scheme; - {{ if ne $useProxyProtocol "false" }} - proxy_set_header X-Forwarded-For $proxy_protocol_addr; - {{ else }} - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - {{ end }} + proxy_set_header X-Forwarded-For $remote_addr; proxy_set_header X-Forwarded-Ssl $access_ssl; proxy_redirect off; proxy_connect_timeout 30s; @@ -327,11 +327,7 @@ http { } proxy_set_header X-Forwarded-Port $access_port; proxy_set_header X-Forwarded-Proto $access_scheme; - {{ if ne $useProxyProtocol "false" }} - proxy_set_header X-Forwarded-For $proxy_protocol_addr; - {{ else }} - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - {{ end }} + proxy_set_header X-Forwarded-For $remote_addr; proxy_set_header X-Forwarded-Ssl $access_ssl; proxy_redirect off; proxy_connect_timeout 30s; @@ -367,24 +363,18 @@ http { }{{ end }} ## end service definitions for each application - {{ $scheduler := or (getv "/deis/controller/schedulerModule") "fleet" }} - # default server, including "classic" healthcheck server { listen 80 default_server reuseport{{ if ne $useProxyProtocol "false" }} proxy_protocol{{ end }}; location /health-check { access_log off; - {{ if eq $scheduler "k8s" }} - proxy_pass http://{{ getenv "HOST" }}:10249/healthz; - {{ else }} default_type 'text/plain'; return 200; - {{ end }} } - location /router-nginx-status { + {{ if eq (getv "/deis/router/enableNginxStatus") "true" }}location /router-nginx-status { vhost_traffic_status_display; vhost_traffic_status_display_format html; - } + }{{ end }} location / { return 404; } @@ -395,166 +385,13 @@ http { listen 9090 default_server; location /health-check { access_log off; - {{ if 
eq $scheduler "k8s" }} - proxy_pass http://{{ getenv "HOST" }}:10249/healthz; - {{ else }} default_type 'text/plain'; return 200; - {{ end }} } location / { return 404; } } - - #start k8s apps - {{ range $k8namespace := lsdir "/registry/services/specs/" }} - {{ $k8appdir := printf "/registry/services/specs/%s/" $k8namespace}}{{ range $kapp := ls $k8appdir }} - {{ $k8appPath := printf "/registry/services/specs/%s/%s" $k8namespace $kapp}}{{ $k8Svc := json (getv $k8appPath) }} - {{ $upstreams := printf "/registry/services/specs/%s/%s" $k8namespace $kapp}} - upstream {{ if $k8Svc.metadata.labels.name }}{{ $k8Svc.metadata.labels.name }}{{ else }}{{ $k8Svc.metadata.name }}{{ end }} { - {{ if exists "/deis/router/affinityArg" }} - hash $arg_{{ getv "/deis/router/affinityArg" }} consistent; - {{ end }} - server {{ $k8Svc.spec.clusterIP }}:80; - } - {{ $appContainers := gets $upstreams }}{{ $appContainerLen := len $appContainers }} - {{ $k8sappname := or $k8Svc.metadata.labels.name $k8Svc.metadata.name }} - ## server entries for custom domains - {{ range $app_domain := $domains }}{{ if eq $k8sappname (getv (printf "/deis/domains/%s" $app_domain)) }} - server { - server_name {{ $app_domain }}; - {{/* if a SSL certificate is installed for this domain, use SSL */}} - {{/* NOTE (bacongobbler): domains are separate from the default platform domain, */}} - {{/* so we can't rely on deis.conf as each domain is an island */}} - {{ if exists (printf "/deis/certs/%s/cert" $app_domain) }} - server_name_in_redirect off; - port_in_redirect off; - listen 80{{ if ne $useProxyProtocol "false" }} proxy_protocol{{ end }}; - listen 443 ssl http2{{ if ne $useProxyProtocol "false" }} proxy_protocol{{ end }}; - ssl_certificate /etc/ssl/deis/certs/{{ $app_domain }}.cert; - ssl_certificate_key /etc/ssl/deis/keys/{{ $app_domain }}.key; - include ssl.conf; - {{/* if there's no app SSL cert but we have a router SSL cert, enable that instead */}} - {{/* TODO (bacongobbler): wait for 
https://github.com/kelseyhightower/confd/issues/270 */}} - {{/* so we can apply this config to just subdomains of the platform domain. */}} - {{/* ref: https://github.com/deis/deis/pull/3519 */}} - {{ else }} - include deis.conf; - {{ end }} - {{ if ne $appContainerLen 0 }} - location / { - {{ if eq $useFirewall "true" }}include /opt/nginx/firewall/active-mode.rules;{{ end }} - proxy_buffering off; - proxy_set_header Host $host; - set $access_ssl 'off'; - set $access_port '80'; - if ($access_scheme ~ https) { - set $access_ssl 'on'; - set $access_port '443'; - } - proxy_set_header X-Forwarded-Port $access_port; - proxy_set_header X-Forwarded-Proto $access_scheme; - {{ if ne $useProxyProtocol "false" }} - proxy_set_header X-Forwarded-For $proxy_protocol_addr; - {{ else }} - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - {{ end }} - proxy_set_header X-Forwarded-Ssl $access_ssl; - proxy_redirect off; - proxy_connect_timeout 30s; - proxy_send_timeout {{ $defaultTimeout }}s; - proxy_read_timeout {{ $defaultTimeout }}s; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - - proxy_next_upstream error timeout http_502 http_503 http_504; - - {{ if eq $enforceHTTPS "true" }} - if ($access_scheme != "https") { - return 301 https://$host$request_uri; - } - {{ end }} - - {{ if eq $enableHSTS "true" }} - add_header Strict-Transport-Security $sts always; - {{ end }} - - ## workaround for nginx hashing empty string bug http://trac.nginx.org/nginx/ticket/765 - {{ if exists "/deis/router/affinityArg" }} - set_random $prng 0 99; - set_if_empty $arg_{{ getv "/deis/router/affinityArg" }} $prng; - {{ end }} - - proxy_pass http://{{ if $k8Svc.metadata.labels.name }}{{ $k8Svc.metadata.labels.name }}{{ else }}{{ $k8Svc.metadata.name }}{{ end }}; - } - {{ else }} - location / { - return 503; - } - {{ end }} - {{ if eq $useFirewall "true" }}location /RequestDenied { - return {{ $firewallErrorCode }}; - } - {{ 
end }} - }{{ end }}{{ end }} - ## end entries for custom domains - - server { - server_name ~^{{ if $k8Svc.metadata.labels.name }}{{ $k8Svc.metadata.labels.name }}{{ else }}{{ $k8Svc.metadata.name }}{{ end }}\.(?.+)$; - include deis.conf; - {{ if ne $appContainerLen 0 }} - location / { - {{ if eq $useFirewall "true" }}include /opt/nginx/firewall/active-mode.rules;{{ end }} - proxy_buffering off; - proxy_set_header Host $host; - set $access_ssl 'off'; - set $access_port '80'; - if ($access_scheme ~ https) { - set $access_ssl 'on'; - set $access_port '443'; - } - proxy_set_header X-Forwarded-Port $access_port; - proxy_set_header X-Forwarded-Proto $access_scheme; - {{ if ne $useProxyProtocol "false" }} - proxy_set_header X-Forwarded-For $proxy_protocol_addr; - {{ else }} - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - {{ end }} - proxy_set_header X-Forwarded-Ssl $access_ssl; - proxy_redirect off; - proxy_connect_timeout 30s; - proxy_send_timeout {{ $defaultTimeout }}s; - proxy_read_timeout {{ $defaultTimeout }}s; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - - proxy_next_upstream error timeout http_502 http_503 http_504; - - {{ if eq $enforceHTTPS "true" }} - if ($access_scheme != "https") { - return 301 https://$host$request_uri; - } - {{ end }} - - {{ if eq $enableHSTS "true" }} - add_header Strict-Transport-Security $sts always; - {{ end }} - - proxy_pass http://{{ if $k8Svc.metadata.labels.name }}{{ $k8Svc.metadata.labels.name }}{{ else }}{{ $k8Svc.metadata.name }}{{ end }}; - } - {{ else }} - location / { - return 503; - } - {{ end }} - {{ if eq $useFirewall "true" }}location /RequestDenied { - return {{ $firewallErrorCode }}; - } - {{ end }} - }{{end}}{{end}} } ## start builder diff --git a/store/Makefile b/store/Makefile index 8f1880fcda..2aa0f6d6d4 100644 --- a/store/Makefile +++ b/store/Makefile @@ -70,15 +70,15 @@ run: install start dev-release: push set-image push: 
check-registry - docker tag -f $(ADMIN_IMAGE) $(ADMIN_DEV_IMAGE) + docker tag $(ADMIN_IMAGE) $(ADMIN_DEV_IMAGE) docker push $(ADMIN_DEV_IMAGE) - docker tag -f $(DAEMON_IMAGE) $(DAEMON_DEV_IMAGE) + docker tag $(DAEMON_IMAGE) $(DAEMON_DEV_IMAGE) docker push $(DAEMON_DEV_IMAGE) - docker tag -f $(GATEWAY_IMAGE) $(GATEWAY_DEV_IMAGE) + docker tag $(GATEWAY_IMAGE) $(GATEWAY_DEV_IMAGE) docker push $(GATEWAY_DEV_IMAGE) - docker tag -f $(METADATA_IMAGE) $(METADATA_DEV_IMAGE) + docker tag $(METADATA_IMAGE) $(METADATA_DEV_IMAGE) docker push $(METADATA_DEV_IMAGE) - docker tag -f $(MONITOR_IMAGE) $(MONITOR_DEV_IMAGE) + docker tag $(MONITOR_IMAGE) $(MONITOR_DEV_IMAGE) docker push $(MONITOR_DEV_IMAGE) set-image: check-deisctl diff --git a/store/base/Dockerfile b/store/base/Dockerfile index 297d926396..df66c9aa33 100644 --- a/store/base/Dockerfile +++ b/store/base/Dockerfile @@ -9,5 +9,5 @@ RUN DOCKER_BUILD=true /tmp/build.sh # Add shared confd configuration ADD . /app -ENV DEIS_RELEASE 1.13.0-dev +ENV DEIS_RELEASE 1.13.4 diff --git a/swarm/Dockerfile b/swarm/Dockerfile deleted file mode 100644 index 78f053b3e4..0000000000 --- a/swarm/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -FROM golang:1.5 - -WORKDIR /go/src/github.com/docker -RUN git clone https://github.com/deis/swarm -WORKDIR /go/src/github.com/docker/swarm -RUN git fetch origin nodefailover -RUN git checkout nodefailover - -ENV GOPATH /go/src/github.com/docker/swarm/Godeps/_workspace:$GOPATH -RUN CGO_ENABLED=0 go build -a -tags netgo -installsuffix cgo \ - -ldflags "-s -w -X github.com/docker/swarm/version.GITCOMMIT `git rev-parse --short HEAD`" \ - -o deis-swarm diff --git a/swarm/Makefile b/swarm/Makefile deleted file mode 100644 index 6d26a3365e..0000000000 --- a/swarm/Makefile +++ /dev/null @@ -1,76 +0,0 @@ -include ../includes.mk - -# the filepath to this repository, relative to $GOPATH/src -repo_path = github.com/deis/deis/swarm - -GO_FILES = $(wildcard *.go) -GO_PACKAGES = -GO_PACKAGES_REPO_PATH = $(addprefix 
$(repo_path)/,$(GO_PACKAGES)) - -COMPONENT = $(notdir $(repo_path)) -IMAGE = $(IMAGE_PREFIX)$(COMPONENT):$(BUILD_TAG) -DEV_IMAGE = $(REGISTRY)$(IMAGE) -BUILD_IMAGE = $(COMPONENT)-build -BINARY_DEST_DIR = image/bin - -build: check-docker - GOOS=linux GOARCH=amd64 CGO_ENABLED=0 godep go build -a -installsuffix cgo -ldflags '-s' -o $(BINARY_DEST_DIR)/swarm || exit 1 - @$(call check-static-binary,$(BINARY_DEST_DIR)/swarm) - docker build -t $(BUILD_IMAGE) . - docker cp `docker run -d $(BUILD_IMAGE)`:/go/src/github.com/docker/swarm/deis-swarm $(BINARY_DEST_DIR)/ - @$(call check-static-binary,$(BINARY_DEST_DIR)/deis-swarm) - docker build -t $(IMAGE) image - -clean: check-docker check-registry - rm -f $(BINARY_DEST_DIR)/deis-swarm - rm -f $(BINARY_DEST_DIR)/swarm - -docker rmi -f $(BUILD_IMAGE) - -docker rmi -f $(IMAGE) - -full-clean: check-docker check-registry - docker images -q $(IMAGE_PREFIX)$(COMPONENT) | xargs docker rmi -f - -install: check-deisctl - deisctl install $(COMPONENT) - -uninstall: check-deisctl - deisctl uninstall $(COMPONENT) - -start: check-deisctl - deisctl start $(COMPONENT) - -stop: check-deisctl - deisctl stop $(COMPONENT) - -restart: stop start - -run: install start - -dev-release: push set-image - -push: check-registry - docker tag -f $(IMAGE) $(DEV_IMAGE) - docker push $(DEV_IMAGE) - -set-image: check-deisctl - deisctl config $(COMPONENT) set image=$(DEV_IMAGE) - -release: - docker push $(IMAGE) - -deploy: build dev-release restart - -test: test-style test-unit test-functional - -test-unit: - @echo no unit tests - -test-style: -# display output, then check - $(GOFMT) $(GO_PACKAGES) $(GO_FILES) - @$(GOFMT) $(GO_PACKAGES) $(GO_FILES) | read; if [ $$? == 0 ]; then echo "gofmt check failed."; exit 1; fi - $(GOVET) $(repo_path) $(GO_PACKAGES_REPO_PATH) - $(GOLINT) ./... 
- -test-functional: - @echo no functional tests diff --git a/swarm/README.md b/swarm/README.md deleted file mode 100644 index d43d6942be..0000000000 --- a/swarm/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# Deis Swarm Scheduler - -[Docker swarm](https://github.com/docker/swarm/) components for use as -an optional scheduler in Deis, the open source PaaS. - -## Usage - -Please consult the [swarm scheduler documentation](http://docs.deis.io/en/latest/customizing_deis/choosing-a-scheduler/#swarm-scheduler) -for instructions on installing and activating swarm in Deis. - -## License - -© 2015 Engine Yard, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); you may -not use this file except in compliance with the License. You may obtain -a copy of the License at - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff --git a/swarm/image/Dockerfile b/swarm/image/Dockerfile deleted file mode 100644 index e32bee6851..0000000000 --- a/swarm/image/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -FROM alpine:3.2 - -COPY bin /app/bin/ -WORKDIR /app/bin - -ENTRYPOINT ["/app/bin/swarm"] -CMD ["--help"] -ENV SWARM_HOST :2375 -EXPOSE 2375 -VOLUME $HOME/.swarm - -ENV DEIS_RELEASE 1.13.0-dev diff --git a/swarm/swarm.go b/swarm/swarm.go deleted file mode 100644 index 79cd9b2e5c..0000000000 --- a/swarm/swarm.go +++ /dev/null @@ -1,115 +0,0 @@ -package main - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "log" - "net/http" - "net/url" - "os" - "os/exec" - "strings" - "text/template" - "time" - - "github.com/coreos/go-etcd/etcd" - "github.com/deis/deis/tests/utils" -) - -// EtcdCluster information about the nodes in the etcd cluster -type EtcdCluster struct { - Members []etcd.Member `json:"members"` -} - -// NodeStat information about the local node in etcd -type NodeStats struct { - LeaderInfo struct { - Name string `json:"leader"` - Uptime string `json:"uptime"` - StartTime time.Time `json:"startTime"` - } `json:"leaderInfo"` -} - -const ( - swarmpath = "/deis/scheduler/swarm/node" - swarmetcd = "/deis/scheduler/swarm/host" - etcdport = "4001" - timeout time.Duration = 3 * time.Second - ttl time.Duration = timeout * 2 -) - -func run(cmd string) { - var cmdBuf bytes.Buffer - tmpl := template.Must(template.New("cmd").Parse(cmd)) - if err := tmpl.Execute(&cmdBuf, nil); err != nil { - log.Fatal(err) - } - cmdString := cmdBuf.String() - fmt.Println(cmdString) - var cmdl *exec.Cmd - cmdl = exec.Command("sh", "-c", cmdString) - if _, _, err := utils.RunCommandWithStdoutStderr(cmdl); err != nil { - log.Fatal(err) - } else { - fmt.Println("ok") - } -} - -func getleaderHost() string { - var nodeStats NodeStats - client := &http.Client{} - resp, _ := client.Get("http://" + os.Getenv("HOST") + ":2379/v2/stats/self") - - body, _ := ioutil.ReadAll(resp.Body) - json.Unmarshal(body, 
&nodeStats) - - etcdLeaderID := nodeStats.LeaderInfo.Name - - var etcdCluster EtcdCluster - resp, _ = client.Get("http://" + os.Getenv("HOST") + ":2379/v2/members") - defer resp.Body.Close() - - body, _ = ioutil.ReadAll(resp.Body) - json.Unmarshal(body, &etcdCluster) - - for _, node := range etcdCluster.Members { - if node.ID == etcdLeaderID { - u, err := url.Parse(node.ClientURLs[0]) - if err == nil { - return u.Host - } - } - } - - return "" -} - -func publishService(client *etcd.Client, host string, ttl uint64) { - for { - setEtcd(client, swarmetcd, host, ttl) - time.Sleep(timeout) - } -} - -func setEtcd(client *etcd.Client, key, value string, ttl uint64) { - _, err := client.Set(key, value, ttl) - if err != nil && !strings.Contains(err.Error(), "Key already exists") { - log.Println(err) - } -} - -func main() { - etcdproto := "etcd://" + getleaderHost() + swarmpath - etcdhost := os.Getenv("HOST") - addr := "--addr=" + etcdhost + ":2375" - client := etcd.NewClient([]string{"http://" + etcdhost + ":" + etcdport}) - switch os.Args[1] { - case "join": - run("./deis-swarm join " + addr + " " + etcdproto) - case "manage": - go publishService(client, etcdhost, uint64(ttl.Seconds())) - run("./deis-swarm manage " + etcdproto) - } -} diff --git a/tests/apps_test.go b/tests/apps_test.go index a8318b5b3e..5686f1bc70 100644 --- a/tests/apps_test.go +++ b/tests/apps_test.go @@ -6,7 +6,9 @@ import ( "fmt" "math/rand" "os" + "strings" "testing" + "time" "github.com/deis/deis/tests/utils" ) @@ -27,6 +29,9 @@ var ( ) func randomString(n int) string { + // Be sure we've seeded the random number generator, otherwise we could get the same string + // every time. 
+ rand.Seed(time.Now().UnixNano()) var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") b := make([]rune, n) for i := range b { @@ -121,9 +126,27 @@ func appsRunTest(t *testing.T, params *utils.DeisTestConfig) { } utils.CheckList(t, cmd, params, "Hello, 世界", false) utils.Execute(t, "apps:run env", params, true, "GIT_SHA") - // run a REALLY large command to test https://github.com/deis/deis/issues/2046 + // Fleet/systemd unit files have a limit of 2048 characters per line or else one encounters + // problems parsing the unit. To verify long log messages are truncated and do not crash + // logspout (see https://github.com/deis/deis/issues/2046) we must issue a (relatively) short + // command via `deis apps:run` that produces a LONG, but testable (predictable) log message we + // can search for in the output of `deis logs`. + // + // The strategy for achieving this is to generate 1k random characters, then use that with a + // command submitted via `deis apps:run` that will echo those 1k bytes 64x (on a single line). + // Such a message is long enough to crash logspout if handled improperly and ALSO gives us a + // large, distinct, and predictable string we can search for in the logs to assert success (and + // assert that the message didn't crash logspout) WITHOUT ever needing to transmit such an + // egregiously long command via `deis apps:run`. largeString := randomString(1024) - utils.Execute(t, "apps:run echo "+largeString, params, false, largeString) + utils.Execute(t, fmt.Sprintf("apps:run \"printf '%s%%.0s' {1..64}\"", largeString), params, false, largeString) + // To assert the long message didn't crash logspout AND made it to the logger, we will search + // the logs for a fragment of the long message-- specifically 2x the random string we generated. + // This will help us ensure the actual log message made it through and not JUST the log message + // that states the command being executed via `deis apps:run`. 
We want to find the former, not + // the latter because the latter is too short a message to have possibly crashed logspout if + // mishandled. + utils.Execute(t, "logs", params, false, strings.Repeat(largeString, 2)) if err := utils.Chdir(".."); err != nil { t.Fatal(err) } diff --git a/tests/auth_test.go b/tests/auth_test.go index 80f825b613..5da6294bfe 100644 --- a/tests/auth_test.go +++ b/tests/auth_test.go @@ -49,8 +49,8 @@ func authCancel(t *testing.T, params *utils.DeisTestConfig) { admin := utils.GetGlobalConfig() utils.Execute(t, authLoginCmd, admin, false, "") utils.Execute(t, authCancelAdminCmd, user, false, "Account cancelled") - // Make sure the user's config was purged after auth:cancel - utils.Execute(t, authWhoamiCmd, admin, true, "Error: Not logged in") + // Make sure the user's config was not purged after auth:cancel --username + utils.Execute(t, authWhoamiCmd, admin, false, "You are "+admin.UserName) } func authLoginTest(t *testing.T, params *utils.DeisTestConfig) { diff --git a/tests/bin/setup-node.sh b/tests/bin/setup-node.sh index c020479b8e..f10275a58b 100755 --- a/tests/bin/setup-node.sh +++ b/tests/bin/setup-node.sh @@ -14,7 +14,7 @@ apt-key adv --keyserver hkp://pgp.mit.edu:80 --recv-keys 58118E89F3A912897C070AD echo deb https://apt.dockerproject.org/repo ubuntu-trusty main > /etc/apt/sources.list.d/docker.list apt-get update apt-get purge lxc-docker* -apt-get install -yq docker-engine=1.8.3-0~trusty +apt-get install -yq --force-yes docker-engine=1.10.3-0~trusty # install extra extensions (AUFS, requires reboot) apt-get -y install "linux-image-extra-$(uname -r)" @@ -40,20 +40,20 @@ apt-get install -yq build-essential \ # install virtualbox if ! 
virtualbox --help &> /dev/null; then - wget -nv http://download.virtualbox.org/virtualbox/5.0.2/virtualbox-5.0_5.0.2-102096~Ubuntu~trusty_amd64.deb - dpkg -i virtualbox-5.0_5.0.2-102096~Ubuntu~trusty_amd64.deb - rm virtualbox-5.0_5.0.2-102096~Ubuntu~trusty_amd64.deb + wget -nv http://download.virtualbox.org/virtualbox/5.0.16/virtualbox-5.0_5.0.16-105871~Ubuntu~trusty_amd64.deb + dpkg -i virtualbox-5.0_5.0.16-105871~Ubuntu~trusty_amd64.deb + rm virtualbox-5.0_5.0.16-105871~Ubuntu~trusty_amd64.deb fi # install vagrant if ! vagrant -v &> /dev/null; then - wget -nv https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.4_x86_64.deb - dpkg -i vagrant_1.7.4_x86_64.deb - rm vagrant_1.7.4_x86_64.deb + wget -nv https://releases.hashicorp.com/vagrant/1.8.1/vagrant_1.8.1_x86_64.deb + dpkg -i vagrant_1.8.1_x86_64.deb + rm vagrant_1.8.1_x86_64.deb fi # install go -wget -nv -O- https://storage.googleapis.com/golang/go1.5.linux-amd64.tar.gz | tar -C /usr/local -xz +wget -nv -O- https://storage.googleapis.com/golang/go1.5.3.linux-amd64.tar.gz | tar -C /usr/local -xz echo 'export PATH=$PATH:/usr/local/go/bin' >> /etc/profile echo "You must reboot for the global $PATH changes to take effect." 
@@ -70,7 +70,7 @@ apt-get install -yq curl \ libldap2-dev \ libsasl2-dev -curl -sSL https://raw.githubusercontent.com/pypa/pip/7.0.3/contrib/get-pip.py | python - +curl -sSL https://bootstrap.pypa.io/get-pip.py | python - pip==8.1.1 pip install virtualenv # TODO: rely on virtualenvs' pip instead of system pip on slaves diff --git a/tests/fixtures/mock-store/Dockerfile b/tests/fixtures/mock-store/Dockerfile index 704ccb8e02..700a53fc39 100644 --- a/tests/fixtures/mock-store/Dockerfile +++ b/tests/fixtures/mock-store/Dockerfile @@ -1,7 +1,7 @@ -FROM alpine:3.2 +FROM alpine:3.4 # install common packages -RUN apk add --update-cache curl bash sudo && rm -rf /var/cache/apk/* +RUN apk add --no-cache curl bash sudo WORKDIR /app EXPOSE 8888 diff --git a/tests/fixtures/mock-store/build.sh b/tests/fixtures/mock-store/build.sh index e7eae50b99..8f606d6e4d 100755 --- a/tests/fixtures/mock-store/build.sh +++ b/tests/fixtures/mock-store/build.sh @@ -11,12 +11,13 @@ if [[ -z $DOCKER_BUILD ]]; then fi # install required packages to build -apk add --update-cache \ +apk add --no-cache \ build-base \ curl \ file \ gcc \ git \ + openssl \ python-dev # install etcdctl @@ -30,13 +31,13 @@ git checkout 4c3c3752f990db97e8969c00666251a3b427ef4c git apply /tmp/mock-s3-patch.diff # install pip -curl -sSL https://raw.githubusercontent.com/pypa/pip/7.0.3/contrib/get-pip.py | python - +curl -sSL https://bootstrap.pypa.io/get-pip.py | python - pip==8.1.1 python setup.py install # cleanup. 
-apk del --purge \ +apk del --no-cache \ build-base \ gcc \ git -rm -rf /var/cache/apk/* /tmp/* +rm -rf /tmp/* diff --git a/tests/fixtures/test-etcd/Dockerfile b/tests/fixtures/test-etcd/Dockerfile index 594d2c032a..d24d6a298f 100644 --- a/tests/fixtures/test-etcd/Dockerfile +++ b/tests/fixtures/test-etcd/Dockerfile @@ -1,11 +1,11 @@ -FROM alpine:3.2 +FROM alpine:3.4 # install common packages -RUN apk add --update-cache curl tar && rm -rf /var/cache/apk/* +RUN apk add --no-cache curl tar # ETCD_VERSION is actually used by the etcd daemon, and causes an issue if we # format it for our use here. So, we call this something else. -ENV INSTALL_ETCD_VERSION v2.1.2 +ENV INSTALL_ETCD_VERSION v2.2.3 # install etcd and etcdctl RUN curl -sSL https://github.com/coreos/etcd/releases/download/$INSTALL_ETCD_VERSION/etcd-$INSTALL_ETCD_VERSION-linux-amd64.tar.gz \ diff --git a/version/version.go b/version/version.go index f2b6778704..386f8cd7f1 100644 --- a/version/version.go +++ b/version/version.go @@ -1,7 +1,7 @@ package version // Version identifies this Deis product revision. -const Version = "1.13.0-dev" +const Version = "1.13.4" -// API identifies the latest Deis api verison +// APIVersion identifies the latest Deis api version const APIVersion = "1.7"